# DPR

## Overview

Dense Passage Retrieval (DPR) is a set of tools and models for state-of-the-art open-domain Q&A research. It was introduced in [Dense Passage Retrieval for Open-Domain Question Answering](https://arxiv.org/abs/2004.04906) by Vladimir Karpukhin, Barlas Oğuz, Sewon Min, Patrick Lewis, Ledell Wu, Sergey Edunov, Danqi Chen, Wen-tau Yih.

The abstract from the paper is the following:

*Open-domain question answering relies on efficient passage retrieval to select candidate contexts, where traditional sparse vector space models, such as TF-IDF or BM25, are the de facto method. In this work, we show that retrieval can be practically implemented using dense representations alone, where embeddings are learned from a small number of questions and passages by a simple dual-encoder framework. When evaluated on a wide range of open-domain QA datasets, our dense retriever outperforms a strong Lucene-BM25 system largely by 9%-19% absolute in terms of top-20 passage retrieval accuracy, and helps our end-to-end QA system establish new state-of-the-art on multiple open-domain QA benchmarks.*

This model was contributed by [lhoestq](https://huggingface.co/lhoestq). The original code can be found [here](https://github.com/facebookresearch/DPR).
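The dual-encoder framework described in the abstract maps questions and passages into the same vector space and ranks passages by inner product. The snippet below is a minimal, illustrative sketch of that idea using the DPR encoders and tokenizers from this library; it assumes the publicly released `facebook/dpr-question_encoder-single-nq-base` and `facebook/dpr-ctx_encoder-single-nq-base` checkpoints from the Hub, and the brute-force scoring loop stands in for the FAISS-based retrieval used in the original codebase.

```python
# Sketch of DPR-style dense retrieval: encode a question and a few passages,
# then rank the passages by dot-product similarity.
import torch
from transformers import (
    DPRContextEncoder,
    DPRContextEncoderTokenizerFast,
    DPRQuestionEncoder,
    DPRQuestionEncoderTokenizerFast,
)

q_tokenizer = DPRQuestionEncoderTokenizerFast.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
q_encoder = DPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained("facebook/dpr-ctx_encoder-single-nq-base")
ctx_encoder = DPRContextEncoder.from_pretrained("facebook/dpr-ctx_encoder-single-nq-base")

question = "Who wrote the opera Tosca?"  # example inputs, made up for illustration
passages = [
    "Tosca is an opera by Giacomo Puccini, first performed in Rome in 1900.",
    "The Eiffel Tower is a wrought-iron lattice tower in Paris.",
]

with torch.no_grad():
    # pooler_output holds the fixed-size embedding produced by each encoder
    q_emb = q_encoder(**q_tokenizer(question, return_tensors="pt")).pooler_output
    ctx_emb = ctx_encoder(
        **ctx_tokenizer(passages, padding=True, truncation=True, return_tensors="pt")
    ).pooler_output

scores = torch.matmul(q_emb, ctx_emb.T).squeeze(0)  # inner-product relevance scores
best = int(scores.argmax())
print(f"Best passage (score {scores[best]:.2f}): {passages[best]}")
```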
## DPRConfig

### class transformers.DPRConfig

[\<source\>](https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/dpr/configuration_dpr.py#L33)

`( vocab_size = 30522, hidden_size = 768, num_hidden_layers = 12, num_attention_heads = 12, intermediate_size = 3072, hidden_act = 'gelu', hidden_dropout_prob = 0.1, attention_probs_dropout_prob = 0.1, max_position_embeddings = 512, type_vocab_size = 2, initializer_range = 0.02, layer_norm_eps = 1e-12, pad_token_id = 0, position_embedding_type = 'absolute', projection_dim: int = 0, **kwargs )`
**Parameters**

- **vocab_size** (`int`, *optional*, defaults to 30522) -- Vocabulary size of the DPR model. Defines the number of different tokens that can be represented by the `input_ids` passed to the forward method of [BertModel](/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertModel).
- **hidden_size** (`int`, *optional*, defaults to 768) -- Dimensionality of the encoder layers and the pooler layer.
- **num_hidden_layers** (`int`, *optional*, defaults to 12) -- Number of hidden layers in the Transformer encoder.
- **num_attention_heads** (`int`, *optional*, defaults to 12) -- Number of attention heads for each attention layer in the Transformer encoder.
- **intermediate_size** (`int`, *optional*, defaults to 3072) -- Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
- **hidden_act** (`str` or `function`, *optional*, defaults to `"gelu"`) -- The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported.
- **hidden_dropout_prob** (`float`, *optional*, defaults to 0.1) -- The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
- **attention_probs_dropout_prob** (`float`, *optional*, defaults to 0.1) -- The dropout ratio for the attention probabilities.
- **max_position_embeddings** (`int`, *optional*, defaults to 512) -- The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048).
- **type_vocab_size** (`int`, *optional*, defaults to 2) -- The vocabulary size of the `token_type_ids` passed into [BertModel](/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertModel).
- **initializer_range** (`float`, *optional*, defaults to 0.02) -- The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
- **layer_norm_eps** (`float`, *optional*, defaults to 1e-12) -- The epsilon used by the layer normalization layers.
- **position_embedding_type** (`str`, *optional*, defaults to `"absolute"`) -- Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155). For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658).
- **projection_dim** (`int`, *optional*, defaults to 0) -- Dimension of the projection for the context and question encoders. If it is set to zero (default), no projection is done.

[DPRConfig](/docs/transformers/pr_16143/en/model_doc/dpr#transformers.DPRConfig) is the configuration class to store the configuration of a [DPRContextEncoder](/docs/transformers/pr_16143/en/model_doc/dpr#transformers.DPRContextEncoder), [DPRQuestionEncoder](/docs/transformers/pr_16143/en/model_doc/dpr#transformers.DPRQuestionEncoder), or [DPRReader](/docs/transformers/pr_16143/en/model_doc/dpr#transformers.DPRReader). It is used to instantiate the components of the DPR model.

This class is a subclass of [BertConfig](/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertConfig). Please check the superclass for the documentation of all kwargs.
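Since the configuration carries the same fields as `BertConfig` plus `projection_dim`, it can be built from scratch and used to initialize any of the three DPR components with random weights. A small sketch of the usual transformers pattern (the output directory name is arbitrary):

```python
from transformers import DPRConfig, DPRQuestionEncoder

# Default (BERT-base style) values, with a 128-dimensional projection head
# added on top of the pooled output.
config = DPRConfig(projection_dim=128)

# Initialize a question encoder with random weights from that configuration.
model = DPRQuestionEncoder(config)

# The configuration can be inspected or serialized like any other config.
print(config.hidden_size, config.projection_dim)
config.save_pretrained("./dpr-question-encoder-config")  # writes config.json
```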
## DPRContextEncoderTokenizer

### class transformers.DPRContextEncoderTokenizer

[\<source\>](https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/dpr/tokenization_dpr.py#L90)

`( vocab_file, do_lower_case = True, do_basic_tokenize = True, never_split = None, unk_token = '[UNK]', sep_token = '[SEP]', pad_token = '[PAD]', cls_token = '[CLS]', mask_token = '[MASK]', tokenize_chinese_chars = True, strip_accents = None, **kwargs )`

Construct a DPRContextEncoder tokenizer.

[DPRContextEncoderTokenizer](/docs/transformers/pr_16143/en/model_doc/dpr#transformers.DPRContextEncoderTokenizer) is identical to [BertTokenizer](/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertTokenizer) and runs end-to-end tokenization: punctuation splitting and wordpiece.

Refer to superclass [BertTokenizer](/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertTokenizer) for usage examples and documentation concerning parameters.
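Because the tokenizer is identical to `BertTokenizer`, it is loaded and called the same way. A brief sketch, assuming the public `facebook/dpr-ctx_encoder-single-nq-base` checkpoint from the Hub (any DPR context-encoder checkpoint would do):

```python
from transformers import DPRContextEncoderTokenizer

tokenizer = DPRContextEncoderTokenizer.from_pretrained("facebook/dpr-ctx_encoder-single-nq-base")

# Standard BERT-style tokenization: [CLS] ... [SEP], WordPiece sub-tokens.
encoding = tokenizer("The quick brown fox jumps over the lazy dog.", return_tensors="pt")
print(encoding["input_ids"].shape)            # (1, sequence_length)
print(tokenizer.decode(encoding["input_ids"][0]))
```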
## DPRContextEncoderTokenizerFast

### class transformers.DPRContextEncoderTokenizerFast

[\<source\>](https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/dpr/tokenization_dpr_fast.py#L91)

`( vocab_file = None, tokenizer_file = None, do_lower_case = True, unk_token = '[UNK]', sep_token = '[SEP]', pad_token = '[PAD]', cls_token = '[CLS]', mask_token = '[MASK]', tokenize_chinese_chars = True, strip_accents = None, **kwargs )`

Construct a "fast" DPRContextEncoder tokenizer (backed by HuggingFace's *tokenizers* library).

[DPRContextEncoderTokenizerFast](/docs/transformers/pr_16143/en/model_doc/dpr#transformers.DPRContextEncoderTokenizerFast) is identical to [BertTokenizerFast](/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertTokenizerFast) and runs end-to-end tokenization: punctuation splitting and wordpiece.

Refer to superclass [BertTokenizerFast](/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertTokenizerFast) for usage examples and documentation concerning parameters.
height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>DPRQuestionEncoderTokenizer </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.DPRQuestionEncoderTokenizer"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">DPRQuestionEncoderTokenizer</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.DPRQuestionEncoderTokenizer" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.DPRQuestionEncoderTokenizer"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/dpr/tokenization_dpr.py#L106" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">vocab_file<span 
class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">do_lower_case<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">do_basic_tokenize<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">never_split<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">unk_token<span class="opacity-60"> = &#39;[UNK]&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">sep_token<span class="opacity-60"> = &#39;[SEP]&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_token<span class="opacity-60"> = &#39;[PAD]&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cls_token<span class="opacity-60"> = &#39;[CLS]&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">mask_token<span class="opacity-60"> = &#39;[MASK]&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tokenize_chinese_chars<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">strip_accents<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Constructs a DPRQuestionEncoder tokenizer.</p> <p><a href="/docs/transformers/pr_16143/en/model_doc/dpr#transformers.DPRQuestionEncoderTokenizer">DPRQuestionEncoderTokenizer</a> is identical to <a href="/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a> and runs end-to-end tokenization: punctuation splitting and wordpiece.</p> <p>Refer to superclass <a href="/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a> for usage examples and documentation concerning parameters.</p></div> <h2 class="relative group"><a id="transformers.DPRQuestionEncoderTokenizerFast" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DPRQuestionEncoderTokenizerFast"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 
## DPRQuestionEncoderTokenizerFast

### class transformers.DPRQuestionEncoderTokenizerFast

[\<source\>](https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/dpr/tokenization_dpr_fast.py#L108)

`( vocab_file = None, tokenizer_file = None, do_lower_case = True, unk_token = '[UNK]', sep_token = '[SEP]', pad_token = '[PAD]', cls_token = '[CLS]', mask_token = '[MASK]', tokenize_chinese_chars = True, strip_accents = None, **kwargs )`

Construct a "fast" DPRQuestionEncoder tokenizer (backed by HuggingFace's *tokenizers* library).

[DPRQuestionEncoderTokenizerFast](/docs/transformers/pr_16143/en/model_doc/dpr#transformers.DPRQuestionEncoderTokenizerFast) is identical to [BertTokenizerFast](/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertTokenizerFast) and runs end-to-end tokenization: punctuation splitting and wordpiece.

Refer to superclass [BertTokenizerFast](/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertTokenizerFast) for usage examples and documentation concerning parameters.
56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>DPRReaderTokenizer </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.DPRReaderTokenizer"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">DPRReaderTokenizer</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.DPRReaderTokenizer" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.DPRReaderTokenizer"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/dpr/tokenization_dpr.py#L370" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">vocab_file<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">do_lower_case<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">do_basic_tokenize<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white 
dark:hover:bg-white dark:hover:text-black">never_split<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">unk_token<span class="opacity-60"> = &#39;[UNK]&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">sep_token<span class="opacity-60"> = &#39;[SEP]&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_token<span class="opacity-60"> = &#39;[PAD]&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cls_token<span class="opacity-60"> = &#39;[CLS]&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">mask_token<span class="opacity-60"> = &#39;[MASK]&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tokenize_chinese_chars<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">strip_accents<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>Dict[str, List[List[int]]]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DPRReaderTokenizer.questions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DPRReaderTokenizer.questions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>questions</strong> (<code>str</code> or <code>List[str]</code>) &#x2014; The questions to be encoded. You can specify one question for many passages. 
In this case, the question will be duplicated like <code>[questions] * n_passages</code>. Otherwise you have to specify as many questions as in <code>titles</code> or <code>texts</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DPRReaderTokenizer.titles" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DPRReaderTokenizer.titles"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>titles</strong> (<code>str</code> or <code>List[str]</code>) &#x2014; The passages titles to be encoded. This can be a string or a list of strings if there are several passages.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DPRReaderTokenizer.texts" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DPRReaderTokenizer.texts"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>texts</strong> (<code>str</code> or <code>List[str]</code>) &#x2014; The passages texts to be encoded. 
This can be a string or a list of strings if there are several passages.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DPRReaderTokenizer.padding" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DPRReaderTokenizer.padding"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>padding</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/file_utils#transformers.file_utils.PaddingStrategy">PaddingStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls padding. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest&apos;</code>: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided).</li> <li><code>&apos;max_length&apos;</code>: Pad to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided.</li> <li><code>False</code> or <code>&apos;do_not_pad&apos;</code> (default): No padding (i.e., can output a batch with sequences of different lengths).</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DPRReaderTokenizer.truncation" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DPRReaderTokenizer.truncation"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>truncation</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.tokenization_utils_base.TruncationStrategy">TruncationStrategy</a>, 
<em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls truncation. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_second&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>False</code> or <code>&apos;do_not_truncate&apos;</code> (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size).</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DPRReaderTokenizer.max_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DPRReaderTokenizer.max_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; Controls the maximum length to use by one of the truncation/padding parameters.</p> <p>If left unset or set to <code>None</code>, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. 
If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DPRReaderTokenizer.return_tensors" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DPRReaderTokenizer.return_tensors"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_tensors</strong> (<code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>) &#x2014; If set, will return tensors instead of list of python integers. Acceptable values are:</p> <ul> <li><code>&apos;tf&apos;</code>: Return TensorFlow <code>tf.constant</code> objects.</li> <li><code>&apos;pt&apos;</code>: Return PyTorch <code>torch.Tensor</code> objects.</li> <li><code>&apos;np&apos;</code>: Return Numpy <code>np.ndarray</code> objects.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DPRReaderTokenizer.return_attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DPRReaderTokenizer.return_attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_attention_mask</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attention mask. 
If not set, will return the attention mask according to the specific tokenizer&#x2019;s default, defined by the <code>return_outputs</code> attribute.</p> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.DPRReaderTokenizer.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>Dict[str, List[List[int]]]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A dictionary with the following keys:</p> <ul> <li><code>input_ids</code>: List of token ids to be fed to a model.</li> <li><code>attention_mask</code>: List of indices specifying which tokens should be attended to by the model.</li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>Constructs a DPRReader tokenizer.</p> <p><a href="/docs/transformers/pr_16143/en/model_doc/dpr#transformers.DPRReaderTokenizer">DPRReaderTokenizer</a> is almost identical to <a href="/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a> and runs end-to-end tokenization: punctuation splitting and wordpiece. The difference is that it has three input strings: question, titles and texts, which are combined and fed to the <a href="/docs/transformers/pr_16143/en/model_doc/dpr#transformers.DPRReader">DPRReader</a> model.</p> <p>Refer to superclass <a href="/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a> for usage examples and documentation concerning parameters.</p> <p>Return a dictionary with the token ids of the input strings and other information to give to <code>.decode_best_spans</code>. It converts the strings of a question and different passages (title and text) into a sequence of IDs (integers), using the tokenizer and vocabulary.
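</p> <p>For example, here is a minimal sketch of encoding a single question against two passages; the checkpoint name <code>facebook/dpr-reader-single-nq-base</code> and the example passages are used purely for illustration:</p> <div class="code-block relative"><pre>from transformers import DPRReaderTokenizer

tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")

# A single question string is duplicated across the passages;
# titles and texts must have the same number of entries (one per passage).
encoded = tokenizer(
    questions="What is love?",
    titles=["Haddaway", "What Is Love (disambiguation)"],
    texts=[
        "What Is Love is a song recorded by the artist Haddaway.",
        "What Is Love may refer to several songs and films.",
    ],
    padding=True,
    return_tensors="pt",
)
print(encoded["input_ids"].shape)  # (n_passages, sequence_length)
</pre></div> <p>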
The resulting <code>input_ids</code> is a matrix of size <code>(n_passages, sequence_length)</code> with the format:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->[CLS] <span class="hljs-tag">&lt;<span class="hljs-name">question</span> <span class="hljs-attr">token</span> <span class="hljs-attr">ids</span>&gt;</span> [SEP] <span class="hljs-tag">&lt;<span class="hljs-name">titles</span> <span class="hljs-attr">ids</span>&gt;</span> [SEP] <span class="hljs-tag">&lt;<span class="hljs-name">texts</span> <span class="hljs-attr">ids</span>&gt;</span><!-- HTML_TAG_END --></pre></div></div> <h2 class="relative group"><a id="transformers.DPRReaderTokenizerFast" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DPRReaderTokenizerFast"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>DPRReaderTokenizerFast </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.DPRReaderTokenizerFast"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" 
d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">DPRReaderTokenizerFast</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.DPRReaderTokenizerFast" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.DPRReaderTokenizerFast"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/dpr/tokenization_dpr_fast.py#L371" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">vocab_file<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tokenizer_file<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">do_lower_case<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">unk_token<span class="opacity-60"> = &#39;[UNK]&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">sep_token<span class="opacity-60"> = &#39;[SEP]&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_token<span class="opacity-60"> = &#39;[PAD]&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cls_token<span class="opacity-60"> = &#39;[CLS]&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">mask_token<span class="opacity-60"> = &#39;[MASK]&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tokenize_chinese_chars<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">strip_accents<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>Dict[str, List[List[int]]]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DPRReaderTokenizerFast.questions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DPRReaderTokenizerFast.questions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>questions</strong> (<code>str</code> or <code>List[str]</code>) &#x2014; The questions to be encoded. You can specify one question for many passages. In this case, the question will be duplicated like <code>[questions] * n_passages</code>. 
Otherwise you have to specify as many questions as in <code>titles</code> or <code>texts</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DPRReaderTokenizerFast.titles" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DPRReaderTokenizerFast.titles"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>titles</strong> (<code>str</code> or <code>List[str]</code>) &#x2014; The passages titles to be encoded. This can be a string or a list of strings if there are several passages.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DPRReaderTokenizerFast.texts" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DPRReaderTokenizerFast.texts"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>texts</strong> (<code>str</code> or <code>List[str]</code>) &#x2014; The passages texts to be encoded. 
This can be a string or a list of strings if there are several passages.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DPRReaderTokenizerFast.padding" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DPRReaderTokenizerFast.padding"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>padding</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/file_utils#transformers.file_utils.PaddingStrategy">PaddingStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls padding. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest&apos;</code>: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided).</li> <li><code>&apos;max_length&apos;</code>: Pad to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided.</li> <li><code>False</code> or <code>&apos;do_not_pad&apos;</code> (default): No padding (i.e., can output a batch with sequences of different lengths).</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DPRReaderTokenizerFast.truncation" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DPRReaderTokenizerFast.truncation"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>truncation</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.tokenization_utils_base.TruncationStrategy">TruncationStrategy</a>, 
<em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls truncation. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_second&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>False</code> or <code>&apos;do_not_truncate&apos;</code> (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size).</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DPRReaderTokenizerFast.max_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DPRReaderTokenizerFast.max_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; Controls the maximum length to use by one of the truncation/padding parameters.</p> <p>If left unset or set to <code>None</code>, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. 
If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DPRReaderTokenizerFast.return_tensors" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DPRReaderTokenizerFast.return_tensors"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_tensors</strong> (<code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>) &#x2014; If set, will return tensors instead of list of python integers. Acceptable values are:</p> <ul> <li><code>&apos;tf&apos;</code>: Return TensorFlow <code>tf.constant</code> objects.</li> <li><code>&apos;pt&apos;</code>: Return PyTorch <code>torch.Tensor</code> objects.</li> <li><code>&apos;np&apos;</code>: Return Numpy <code>np.ndarray</code> objects.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DPRReaderTokenizerFast.return_attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DPRReaderTokenizerFast.return_attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_attention_mask</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attention mask. 
If not set, will return the attention mask according to the specific tokenizer&#x2019;s default, defined by the <code>return_outputs</code> attribute.</p> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.DPRReaderTokenizerFast.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>Dict[str, List[List[int]]]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A dictionary with the following keys:</p> <ul> <li><code>input_ids</code>: List of token ids to be fed to a model.</li> <li><code>attention_mask</code>: List of indices specifying which tokens should be attended to by the model.</li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>Constructs a “fast” DPRReader tokenizer (backed by HuggingFace’s <em>tokenizers</em> library).</p> <p><a href="/docs/transformers/pr_16143/en/model_doc/dpr#transformers.DPRReaderTokenizerFast">DPRReaderTokenizerFast</a> is almost identical to <a href="/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertTokenizerFast">BertTokenizerFast</a> and runs end-to-end tokenization: punctuation splitting and wordpiece. The difference is that it has three input strings: question, titles and texts, which are combined and fed to the <a href="/docs/transformers/pr_16143/en/model_doc/dpr#transformers.DPRReader">DPRReader</a> model.</p> <p>Refer to superclass <a href="/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertTokenizerFast">BertTokenizerFast</a> for usage examples and documentation concerning parameters.</p> <p>Return a dictionary with the token ids of the input strings and other information to give to <code>.decode_best_spans</code>. It converts the strings of a question and different passages (title and text) into a sequence of IDs (integers), using the tokenizer and vocabulary.
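</p> <p>For example, a minimal end-to-end sketch that pairs this tokenizer with <a href="/docs/transformers/pr_16143/en/model_doc/dpr#transformers.DPRReader">DPRReader</a> and <code>decode_best_spans</code>; the checkpoint name <code>facebook/dpr-reader-single-nq-base</code> is used purely for illustration:</p> <div class="code-block relative"><pre>import torch
from transformers import DPRReader, DPRReaderTokenizerFast

tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")

# Encode a question together with one passage (title + text)
encoded_inputs = tokenizer(
    questions=["What is love?"],
    titles=["Haddaway"],
    texts=["'What Is Love' is a song recorded by the artist Haddaway."],
    return_tensors="pt",
)

# The reader scores answer spans inside each passage
with torch.no_grad():
    outputs = model(**encoded_inputs)

# Turn the reader logits back into text spans
predicted_spans = tokenizer.decode_best_spans(encoded_inputs, outputs)
print(predicted_spans[0].text)
</pre></div> <p>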
The resulting <code>input_ids</code> is a matrix of size <code>(n_passages, sequence_length)</code> with the format:</p> <p>[CLS] &lt;question token ids&gt; [SEP] &lt;titles ids&gt; [SEP] &lt;texts ids&gt;</p></div> <h2 class="relative group"><a id="transformers.models.dpr.modeling_dpr.DPRContextEncoderOutput" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.dpr.modeling_dpr.DPRContextEncoderOutput"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>DPR specific outputs </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.models.dpr.modeling_dpr.DPRContextEncoderOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.models.dpr.modeling_dpr.</span><span class="font-semibold">DPRContextEncoderOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.models.dpr.modeling_dpr.DPRContextEncoderOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.models.dpr.modeling_dpr.DPRContextEncoderOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 
0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/dpr/modeling_dpr.py#L62" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pooler_output<span class="opacity-60">: FloatTensor</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.dpr.modeling_dpr.DPRContextEncoderOutput.pooler_output" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.dpr.modeling_dpr.DPRContextEncoderOutput.pooler_output"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pooler_output</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, embeddings_size)</code>) &#x2014; The DPR encoder outputs the <em>pooler_output</em> that corresponds to the context representation. Last layer hidden-state of the first token of the sequence (classification token) further processed by a Linear layer. 
This output is to be used to embed contexts for nearest neighbors queries with questions embeddings.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.dpr.modeling_dpr.DPRContextEncoderOutput.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.dpr.modeling_dpr.DPRContextEncoderOutput.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.dpr.modeling_dpr.DPRContextEncoderOutput.attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.dpr.modeling_dpr.DPRContextEncoderOutput.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the 
weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Class for outputs of <a href="/docs/transformers/pr_16143/en/model_doc/dpr#transformers.DPRContextEncoder">DPRContextEncoder</a>.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.models.dpr.modeling_dpr.DPRQuestionEncoderOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.models.dpr.modeling_dpr.</span><span class="font-semibold">DPRQuestionEncoderOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.models.dpr.modeling_dpr.DPRQuestionEncoderOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.models.dpr.modeling_dpr.DPRQuestionEncoderOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/dpr/modeling_dpr.py#L90" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pooler_output<span class="opacity-60">: FloatTensor</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span 
class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.dpr.modeling_dpr.DPRQuestionEncoderOutput.pooler_output" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.dpr.modeling_dpr.DPRQuestionEncoderOutput.pooler_output"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pooler_output</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, embeddings_size)</code>) &#x2014; The DPR encoder outputs the <em>pooler_output</em> that corresponds to the question representation. Last layer hidden-state of the first token of the sequence (classification token) further processed by a Linear layer. 
This output is to be used to embed questions for nearest neighbors queries with context embeddings.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.dpr.modeling_dpr.DPRQuestionEncoderOutput.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.dpr.modeling_dpr.DPRQuestionEncoderOutput.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.dpr.modeling_dpr.DPRQuestionEncoderOutput.attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.dpr.modeling_dpr.DPRQuestionEncoderOutput.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the 
weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Class for outputs of <a href="/docs/transformers/pr_16143/en/model_doc/dpr#transformers.DPRQuestionEncoder">DPRQuestionEncoder</a>.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.DPRReaderOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">DPRReaderOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.DPRReaderOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.DPRReaderOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/dpr/modeling_dpr.py#L118" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">start_logits<span class="opacity-60">: FloatTensor</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">end_logits<span class="opacity-60">: FloatTensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">relevance_logits<span class="opacity-60">: FloatTensor = None</span></span> </span><span 
class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DPRReaderOutput.start_logits" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DPRReaderOutput.start_logits"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>start_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(n_passages, sequence_length)</code>) &#x2014; Logits of the start index of the span for each passage.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DPRReaderOutput.end_logits" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DPRReaderOutput.end_logits"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>end_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(n_passages, sequence_length)</code>) &#x2014; Logits of the end index of the span for each passage.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex 
space-x-1.5 items-start"><a id="transformers.DPRReaderOutput.relevance_logits" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DPRReaderOutput.relevance_logits"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>relevance_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(n_passages, )</code>) &#x2014; Outputs of the QA classifier of the DPRReader that corresponds to the scores of each passage to answer the question, compared to all the other passages.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DPRReaderOutput.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DPRReaderOutput.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DPRReaderOutput.attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DPRReaderOutput.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Class for outputs of <a href="/docs/transformers/pr_16143/en/model_doc/dpr#transformers.DPRReader">DPRReader</a>.</p></div>
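<p>In a typical retrieval setup, the <em>pooler_output</em> embeddings produced by the context encoder and the question encoder are compared with a dot product, and the best-scoring passages are then handed to the reader, which ranks them with its <em>relevance_logits</em> and extracts answer spans. The snippet below is a minimal sketch of that scoring step, reusing the checkpoints shown in the examples further down; the passage texts are made up purely for illustration.</p> <div class="code-block relative"><pre>import torch
from transformers import (
    DPRContextEncoder,
    DPRContextEncoderTokenizer,
    DPRQuestionEncoder,
    DPRQuestionEncoderTokenizer,
)

ctx_tokenizer = DPRContextEncoderTokenizer.from_pretrained("facebook/dpr-ctx_encoder-single-nq-base")
ctx_encoder = DPRContextEncoder.from_pretrained("facebook/dpr-ctx_encoder-single-nq-base")
q_tokenizer = DPRQuestionEncoderTokenizer.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
q_encoder = DPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")

# Tiny made-up corpus; in practice the context embeddings are precomputed and indexed.
passages = ["Dogs are domesticated mammals.", "The Eiffel Tower is located in Paris."]

ctx_inputs = ctx_tokenizer(passages, padding=True, return_tensors="pt")
q_inputs = q_tokenizer("Where is the Eiffel Tower ?", return_tensors="pt")

with torch.no_grad():
    ctx_embeddings = ctx_encoder(**ctx_inputs).pooler_output  # (n_passages, embeddings_size)
    q_embedding = q_encoder(**q_inputs).pooler_output  # (1, embeddings_size)

scores = q_embedding @ ctx_embeddings.T  # dot-product similarity, shape (1, n_passages)
best_passage = passages[scores.argmax().item()]</pre></div> <h2 class="relative group"><a id="transformers.DPRContextEncoder" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DPRContextEncoder"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>DPRContextEncoder </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.DPRContextEncoder"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path 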
class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">DPRContextEncoder</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.DPRContextEncoder" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.DPRContextEncoder"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/dpr/modeling_dpr.py#L445" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60">: DPRConfig</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DPRContextEncoder.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DPRContextEncoder.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/dpr#transformers.DPRConfig">DPRConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>The bare DPRContextEncoder transformer outputting pooler outputs as context representations.</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)</p> <p>This model is also a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.DPRContextEncoder.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.DPRContextEncoder.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.DPRContextEncoder.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" 
href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/dpr/modeling_dpr.py#L453" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/model_doc/dpr#transformers.models.dpr.modeling_dpr.DPRContextEncoderOutput" >transformers.models.dpr.modeling_dpr.DPRContextEncoderOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DPRContextEncoder.forward.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DPRContextEncoder.forward.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 
0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. To match pretraining, DPR input sequence should be formatted with [CLS] and [SEP] tokens as follows:</p> <p>(a) For sequence pairs (for a pair title+text for example):<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.DPRContextEncoder.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/model_doc/dpr#transformers.models.dpr.modeling_dpr.DPRContextEncoderOutput" >transformers.models.dpr.modeling_dpr.DPRContextEncoderOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/model_doc/dpr#transformers.models.dpr.modeling_dpr.DPRContextEncoderOutput" >transformers.models.dpr.modeling_dpr.DPRContextEncoderOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/dpr#transformers.DPRConfig" >DPRConfig</a>) and inputs.</p> <ul> <li> <p><strong>pooler_output</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, embeddings_size)</code>) — The DPR encoder outputs the <em>pooler_output</em> that corresponds to the context representation. Last layer hidden-state of the first token of the sequence (classification token) further processed by a Linear layer. 
This output is to be used to embed contexts for nearest neighbors queries with questions embeddings.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/dpr#transformers.DPRContextEncoder">DPRContextEncoder</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DPRContextEncoder, DPRContextEncoderTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = DPRContextEncoderTokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/dpr-ctx_encoder-single-nq-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = DPRContextEncoder.from_pretrained(<span 
class="hljs-string">&quot;facebook/dpr-ctx_encoder-single-nq-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = tokenizer(<span class="hljs-string">&quot;Hello, is my dog cute ?&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>)[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>embeddings = model(input_ids).pooler_output<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.DPRQuestionEncoder" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DPRQuestionEncoder"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>DPRQuestionEncoder </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.DPRQuestionEncoder"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">DPRQuestionEncoder</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.DPRQuestionEncoder" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.DPRQuestionEncoder"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 
79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/dpr/modeling_dpr.py#L526" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60">: DPRConfig</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DPRQuestionEncoder.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DPRQuestionEncoder.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/dpr#transformers.DPRConfig">DPRConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>The bare DPRQuestionEncoder transformer outputting pooler outputs as question representations.</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)</p> <p>This model is also a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.DPRQuestionEncoder.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.DPRQuestionEncoder.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.DPRQuestionEncoder.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/dpr/modeling_dpr.py#L534" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black 
hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/model_doc/dpr#transformers.models.dpr.modeling_dpr.DPRQuestionEncoderOutput" >transformers.models.dpr.modeling_dpr.DPRQuestionEncoderOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DPRQuestionEncoder.forward.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DPRQuestionEncoder.forward.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. 
To match pretraining, DPR input sequence should be formatted with [CLS] and [SEP] tokens as follows:</p> <p>(a) For sequence pairs (for a pair title+text for example):<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.DPRQuestionEncoder.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/model_doc/dpr#transformers.models.dpr.modeling_dpr.DPRQuestionEncoderOutput" >transformers.models.dpr.modeling_dpr.DPRQuestionEncoderOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/model_doc/dpr#transformers.models.dpr.modeling_dpr.DPRQuestionEncoderOutput" >transformers.models.dpr.modeling_dpr.DPRQuestionEncoderOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/dpr#transformers.DPRConfig" >DPRConfig</a>) and inputs.</p> <ul> <li> <p><strong>pooler_output</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, embeddings_size)</code>) — The DPR encoder outputs the <em>pooler_output</em> that corresponds to the question representation. Last layer hidden-state of the first token of the sequence (classification token) further processed by a Linear layer. This output is to be used to embed questions for nearest neighbors queries with context embeddings.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/dpr#transformers.DPRQuestionEncoder">DPRQuestionEncoder</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Examples:</p> <div class="code-block relative"><div 
class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DPRQuestionEncoder, DPRQuestionEncoderTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = DPRQuestionEncoderTokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/dpr-question_encoder-single-nq-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = DPRQuestionEncoder.from_pretrained(<span class="hljs-string">&quot;facebook/dpr-question_encoder-single-nq-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = tokenizer(<span class="hljs-string">&quot;Hello, is my dog cute ?&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>)[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>embeddings = model(input_ids).pooler_output<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.DPRReader" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DPRReader"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>DPRReader </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.DPRReader"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 
text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">DPRReader</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.DPRReader" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.DPRReader"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/dpr/modeling_dpr.py#L607" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60">: DPRConfig</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DPRReader.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DPRReader.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 
1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/dpr#transformers.DPRConfig">DPRConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>The bare DPRReader transformer outputting span predictions.</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads etc.)</p> <p>This model is also a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and behavior.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.DPRReader.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.DPRReader.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.DPRReader.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 
0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/dpr/modeling_dpr.py#L615" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: bool = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: bool = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/model_doc/dpr#transformers.DPRReaderOutput" >transformers.models.dpr.modeling_dpr.DPRReaderOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DPRReader.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DPRReader.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 
79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(n_passages, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DPRReader.forward.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DPRReader.forward.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(n_passages, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DPRReader.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DPRReader.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DPRReader.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DPRReader.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DPRReader.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DPRReader.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.DPRReader.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/model_doc/dpr#transformers.DPRReaderOutput" >transformers.models.dpr.modeling_dpr.DPRReaderOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/model_doc/dpr#transformers.DPRReaderOutput" >transformers.models.dpr.modeling_dpr.DPRReaderOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/dpr#transformers.DPRConfig" >DPRConfig</a>) and inputs.</p> <ul> <li> <p><strong>start_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(n_passages, sequence_length)</code>) — Logits of the start index of the span for each passage.</p> </li> <li> <p><strong>end_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(n_passages, sequence_length)</code>) — Logits of the end index of the span for each passage.</p> </li> <li> <p><strong>relevance_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(n_passages, )</code>) — Outputs of the QA classifier of the DPRReader that corresponds to the scores of each passage to answer the question, compared to all the other passages.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape 
<code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attention weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/dpr#transformers.DPRReader">DPRReader</a> forward method overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for the forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre- and post-processing steps while the latter silently ignores them.</p></div> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DPRReader, DPRReaderTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = DPRReaderTokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/dpr-reader-single-nq-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = DPRReader.from_pretrained(<span class="hljs-string">&quot;facebook/dpr-reader-single-nq-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>encoded_inputs = tokenizer( <span class="hljs-meta">... </span> questions=[<span class="hljs-string">&quot;What is love ?&quot;</span>], <span class="hljs-meta">... </span> titles=[<span class="hljs-string">&quot;Haddaway&quot;</span>], <span class="hljs-meta">... </span> texts=[<span class="hljs-string">&quot;&#x27;What Is Love&#x27; is a song recorded by the artist Haddaway&quot;</span>], <span class="hljs-meta">... 
</span> return_tensors=<span class="hljs-string">&quot;pt&quot;</span>, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**encoded_inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>start_logits = outputs.start_logits <span class="hljs-meta">&gt;&gt;&gt; </span>end_logits = outputs.end_logits <span class="hljs-meta">&gt;&gt;&gt; </span>relevance_logits = outputs.relevance_logits<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.TFDPRContextEncoder" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFDPRContextEncoder"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TFDPRContextEncoder </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFDPRContextEncoder"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TFDPRContextEncoder</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TFDPRContextEncoder" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFDPRContextEncoder"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 
0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/dpr/modeling_tf_dpr.py#L585" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFDPRContextEncoder.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFDPRContextEncoder.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/dpr#transformers.DPRConfig">DPRConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>The bare DPRContextEncoder transformer outputting pooler outputs as context representations.</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>. 
Check the superclass documentation for the generic methods the library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads etc.)</p> <p>This model is also a TensorFlow <a href="https://www.tensorflow.org/api_docs/python/tf/keras/Model" rel="nofollow">tf.keras.Model</a> subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and behavior.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>TF 2.0 models accept two formats as inputs:</p> <ul><li>having all inputs as keyword arguments (like PyTorch models), or</li> <li>having all inputs as a list, tuple or dict in the first positional argument.</li></ul> <p>This second option is useful when using the <code>tf.keras.Model.fit</code> method, which currently requires having all the tensors in the first argument of the model call function: <code>model(inputs)</code>.</p> <p>If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument:</p> <ul><li>a single Tensor with <code>input_ids</code> only and nothing else: <code>model(input_ids)</code></li> <li>a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: <code>model([input_ids, attention_mask])</code> or <code>model([input_ids, attention_mask, token_type_ids])</code></li> <li>a dictionary with one or several input Tensors associated with the input names given in the docstring: <code>model({&quot;input_ids&quot;: input_ids, &quot;token_type_ids&quot;: token_type_ids})</code></li></ul></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFDPRContextEncoder.call"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>call</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFDPRContextEncoder.call" class="header-link invisible with-hover:group-hover:visible pr-2" 
href="#transformers.TFDPRContextEncoder.call"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/dpr/modeling_tf_dpr.py#L597" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60">: typing.Optional[tensorflow.python.framework.ops.Tensor] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60">: typing.Optional[tensorflow.python.framework.ops.Tensor] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60">: typing.Optional[tensorflow.python.framework.ops.Tensor] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">training<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>transformers.models.dpr.modeling_tf_dpr.TFDPRContextEncoderOutput</code>or <code>tuple(tf.Tensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 
text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFDPRContextEncoder.call.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFDPRContextEncoder.call.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. To match pretraining, DPR input sequence should be formatted with [CLS] and [SEP] tokens as follows:</p> <p>(a) For sequence pairs (for a pair title+text for example):<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.TFDPRContextEncoder.call.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>transformers.models.dpr.modeling_tf_dpr.TFDPRContextEncoderOutput</code>or <code>tuple(tf.Tensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <code>transformers.models.dpr.modeling_tf_dpr.TFDPRContextEncoderOutput</code>or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/dpr#transformers.DPRConfig" >DPRConfig</a>) and inputs.</p> <ul> <li> <p><strong>pooler_output</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, embeddings_size)</code>) — The DPR encoder outputs the <em>pooler_output</em> that corresponds to the context representation. Last layer hidden-state of the first token of the sequence (classification token) further processed by a Linear layer. 
This output is to be used to embed contexts for nearest neighbor queries with question embeddings.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attention weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/dpr#transformers.TFDPRContextEncoder">TFDPRContextEncoder</a> forward method overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for the forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre- and post-processing steps while the latter silently ignores them.</p></div> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TFDPRContextEncoder, DPRContextEncoderTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = DPRContextEncoderTokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/dpr-ctx_encoder-single-nq-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFDPRContextEncoder.from_pretrained(<span 
class="hljs-string">&quot;facebook/dpr-ctx_encoder-single-nq-base&quot;</span>, from_pt=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = tokenizer(<span class="hljs-string">&quot;Hello, is my dog cute ?&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>)[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>embeddings = model(input_ids).pooler_output<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.TFDPRQuestionEncoder" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFDPRQuestionEncoder"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TFDPRQuestionEncoder </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFDPRQuestionEncoder"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TFDPRQuestionEncoder</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TFDPRQuestionEncoder" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFDPRQuestionEncoder"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 
1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/dpr/modeling_tf_dpr.py#L686" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFDPRQuestionEncoder.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFDPRQuestionEncoder.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/dpr#transformers.DPRConfig">DPRConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>The bare DPRQuestionEncoder transformer outputting pooler outputs as question representations.</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>. 
Check the superclass documentation for the generic methods the library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads etc.)</p> <p>This model is also a TensorFlow <a href="https://www.tensorflow.org/api_docs/python/tf/keras/Model" rel="nofollow">tf.keras.Model</a> subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and behavior.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>TF 2.0 models accept two formats as inputs:</p> <ul><li>having all inputs as keyword arguments (like PyTorch models), or</li> <li>having all inputs as a list, tuple or dict in the first positional argument.</li></ul> <p>This second option is useful when using the <code>tf.keras.Model.fit</code> method, which currently requires having all the tensors in the first argument of the model call function: <code>model(inputs)</code>.</p> <p>If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument:</p> <ul><li>a single Tensor with <code>input_ids</code> only and nothing else: <code>model(input_ids)</code></li> <li>a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: <code>model([input_ids, attention_mask])</code> or <code>model([input_ids, attention_mask, token_type_ids])</code></li> <li>a dictionary with one or several input Tensors associated with the input names given in the docstring: <code>model({&quot;input_ids&quot;: input_ids, &quot;token_type_ids&quot;: token_type_ids})</code></li></ul></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFDPRQuestionEncoder.call"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>call</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFDPRQuestionEncoder.call" class="header-link invisible with-hover:group-hover:visible pr-2" 
href="#transformers.TFDPRQuestionEncoder.call"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/dpr/modeling_tf_dpr.py#L698" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60">: typing.Optional[tensorflow.python.framework.ops.Tensor] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60">: typing.Optional[tensorflow.python.framework.ops.Tensor] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60">: typing.Optional[tensorflow.python.framework.ops.Tensor] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">training<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>transformers.models.dpr.modeling_tf_dpr.TFDPRQuestionEncoderOutput</code>or <code>tuple(tf.Tensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 
text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFDPRQuestionEncoder.call.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFDPRQuestionEncoder.call.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. To match pretraining, DPR input sequence should be formatted with [CLS] and [SEP] tokens as follows:</p> <p>(a) For sequence pairs (for a pair title+text for example):<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.TFDPRQuestionEncoder.call.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>transformers.models.dpr.modeling_tf_dpr.TFDPRQuestionEncoderOutput</code>or <code>tuple(tf.Tensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <code>transformers.models.dpr.modeling_tf_dpr.TFDPRQuestionEncoderOutput</code>or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/dpr#transformers.DPRConfig" >DPRConfig</a>) and inputs.</p> <ul> <li> <p><strong>pooler_output</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, embeddings_size)</code>) — The DPR encoder outputs the <em>pooler_output</em> that corresponds to the question representation. Last layer hidden-state of the first token of the sequence (classification token) further processed by a Linear layer. 
This output is to be used to embed questions for nearest neighbors queries with context embeddings.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/dpr#transformers.TFDPRQuestionEncoder">TFDPRQuestionEncoder</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TFDPRQuestionEncoder, DPRQuestionEncoderTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = DPRQuestionEncoderTokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/dpr-question_encoder-single-nq-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFDPRQuestionEncoder.from_pretrained(<span 
class="hljs-string">&quot;facebook/dpr-question_encoder-single-nq-base&quot;</span>, from_pt=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = tokenizer(<span class="hljs-string">&quot;Hello, is my dog cute ?&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>)[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>embeddings = model(input_ids).pooler_output<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.TFDPRReader" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFDPRReader"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TFDPRReader </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFDPRReader"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TFDPRReader</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TFDPRReader" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFDPRReader"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 
79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/dpr/modeling_tf_dpr.py#L786" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFDPRReader.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFDPRReader.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/dpr#transformers.DPRConfig">DPRConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>The bare DPRReader transformer outputting span predictions.</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)</p> <p>This model is also a Tensorflow <a href="https://www.tensorflow.org/api_docs/python/tf/keras/Model" rel="nofollow">tf.keras.Model</a> subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>TF 2.0 models accepts two formats as inputs:</p> <ul><li>having all inputs as keyword arguments (like PyTorch models), or</li> <li>having all inputs as a list, tuple or dict in the first positional arguments.</li></ul> <p>This second option is useful when using <code>tf.keras.Model.fit</code> method which currently requires having all the tensors in the first argument of the model call function: <code>model(inputs)</code>.</p> <p>If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :</p> <ul><li>a single Tensor with <code>input_ids</code> only and nothing else: <code>model(inputs_ids)</code></li> <li>a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: <code>model([input_ids, attention_mask])</code> or <code>model([input_ids, attention_mask, token_type_ids])</code></li> <li>a dictionary with one or several input Tensors associated to the input names given in the docstring: <code>model({&quot;input_ids&quot;: input_ids, &quot;token_type_ids&quot;: token_type_ids})</code></li></ul></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFDPRReader.call"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>call</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFDPRReader.call" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFDPRReader.call"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 
84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/dpr/modeling_tf_dpr.py#L798" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60">: typing.Optional[tensorflow.python.framework.ops.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60">: typing.Optional[tensorflow.python.framework.ops.Tensor] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: bool = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: bool = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">training<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>transformers.models.dpr.modeling_tf_dpr.TFDPRReaderOutput</code>or <code>tuple(tf.Tensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFDPRReader.call.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFDPRReader.call.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(n_passages, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFDPRReader.call.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFDPRReader.call.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(n_passages, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFDPRReader.call.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFDPRReader.call.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFDPRReader.call.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFDPRReader.call.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. 
This argument can be used in eager mode, in graph mode the value will always be set to True.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFDPRReader.call.training" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFDPRReader.call.training"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.TFDPRReader.call.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>transformers.models.dpr.modeling_tf_dpr.TFDPRReaderOutput</code>or <code>tuple(tf.Tensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <code>transformers.models.dpr.modeling_tf_dpr.TFDPRReaderOutput</code>or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/dpr#transformers.DPRConfig" >DPRConfig</a>) and inputs.</p> <ul> <li> <p><strong>start_logits</strong> (<code>tf.Tensor</code> of shape <code>(n_passages, sequence_length)</code>) — Logits of the start index of the span for each passage.</p> </li> <li> <p><strong>end_logits</strong> (<code>tf.Tensor</code> of shape <code>(n_passages, sequence_length)</code>) — Logits of the end index of the span for each passage.</p> </li> <li> <p><strong>relevance_logits</strong> (<code>tf.Tensor</code> of shape <code>(n_passages, )</code>) — Outputs of the QA classifier of the DPRReader that corresponds to the scores of each passage to answer the question, compared to all the other passages.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> 
<p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attention weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/dpr#transformers.TFDPRReader">TFDPRReader</a> forward method overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for the forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TFDPRReader, DPRReaderTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = DPRReaderTokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/dpr-reader-single-nq-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFDPRReader.from_pretrained(<span class="hljs-string">&quot;facebook/dpr-reader-single-nq-base&quot;</span>, from_pt=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>encoded_inputs = tokenizer( <span class="hljs-meta">... </span> questions=[<span class="hljs-string">&quot;What is love ?&quot;</span>], <span class="hljs-meta">... </span> titles=[<span class="hljs-string">&quot;Haddaway&quot;</span>], <span class="hljs-meta">... </span> texts=[<span class="hljs-string">&quot;&#x27;What Is Love&#x27; is a song recorded by the artist Haddaway&quot;</span>], <span class="hljs-meta">... </span> return_tensors=<span class="hljs-string">&quot;tf&quot;</span>, <span class="hljs-meta">... 
</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(encoded_inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>start_logits = outputs.start_logits <span class="hljs-meta">&gt;&gt;&gt; </span>end_logits = outputs.end_logits <span class="hljs-meta">&gt;&gt;&gt; </span>relevance_logits = outputs.relevance_logits<!-- HTML_TAG_END --></pre></div></div></div> <script type="module" data-hydrate="1irgync"> import { start } from "/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"; start({ target: document.querySelector('[data-hydrate="1irgync"]').parentNode, paths: {"base":"/docs/transformers/pr_16143/en","assets":"/docs/transformers/pr_16143/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"), import("/docs/transformers/pr_16143/en/_app/pages/model_doc/dpr.mdx-c851abb6.js") ], params: {} } }); </script>
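<p>As a small follow-up to the TFDPRReader example above, the sketch below shows one way to turn the reader outputs into an answer string. It is only a minimal illustration: it takes a plain <code>tf.argmax</code> over the logits of the highest-scoring passage and does not guard against invalid spans (an end index before the start index) or rank multiple candidates. For proper span decoding, the <code>decode_best_spans()</code> method of <a href="/docs/transformers/pr_16143/en/model_doc/dpr#transformers.DPRReaderTokenizer">DPRReaderTokenizer</a> can be used instead. The variables <code>encoded_inputs</code>, <code>start_logits</code>, <code>end_logits</code> and <code>relevance_logits</code> are the ones defined in the example above.</p> <div class="code-block relative"><pre>&gt;&gt;&gt; import tensorflow as tf

&gt;&gt;&gt; # index of the highest-scoring passage (here there is only one passage)
&gt;&gt;&gt; best_passage = int(tf.argmax(relevance_logits))
&gt;&gt;&gt; # naive span: independent argmax over the start and end logits of that passage
&gt;&gt;&gt; start_index = int(tf.argmax(start_logits[best_passage]))
&gt;&gt;&gt; end_index = int(tf.argmax(end_logits[best_passage]))
&gt;&gt;&gt; answer_ids = encoded_inputs[&quot;input_ids&quot;][best_passage][start_index : end_index + 1]
&gt;&gt;&gt; answer = tokenizer.decode(answer_ids, skip_special_tokens=True)</pre></div>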
hf_public_repos/doc-build-dev/transformers/pr_16143/en
hf_public_repos/doc-build-dev/transformers/pr_16143/en/model_doc/layoutlmv2.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;layoutlmv2&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;overview&quot;,&quot;title&quot;:&quot;Overview&quot;},{&quot;local&quot;:&quot;usage-layoutlmv2processor&quot;,&quot;title&quot;:&quot;Usage: LayoutLMv2Processor&quot;},{&quot;local&quot;:&quot;transformers.LayoutLMv2Config&quot;,&quot;title&quot;:&quot;LayoutLMv2Config&quot;},{&quot;local&quot;:&quot;transformers.LayoutLMv2FeatureExtractor&quot;,&quot;title&quot;:&quot;LayoutLMv2FeatureExtractor&quot;},{&quot;local&quot;:&quot;transformers.LayoutLMv2Tokenizer&quot;,&quot;title&quot;:&quot;LayoutLMv2Tokenizer&quot;},{&quot;local&quot;:&quot;transformers.LayoutLMv2TokenizerFast&quot;,&quot;title&quot;:&quot;LayoutLMv2TokenizerFast&quot;},{&quot;local&quot;:&quot;transformers.LayoutLMv2Processor&quot;,&quot;title&quot;:&quot;LayoutLMv2Processor&quot;},{&quot;local&quot;:&quot;transformers.LayoutLMv2Model&quot;,&quot;title&quot;:&quot;LayoutLMv2Model&quot;},{&quot;local&quot;:&quot;transformers.LayoutLMv2ForSequenceClassification&quot;,&quot;title&quot;:&quot;LayoutLMv2ForSequenceClassification&quot;},{&quot;local&quot;:&quot;transformers.LayoutLMv2ForTokenClassification&quot;,&quot;title&quot;:&quot;LayoutLMv2ForTokenClassification&quot;},{&quot;local&quot;:&quot;transformers.LayoutLMv2ForQuestionAnswering&quot;,&quot;title&quot;:&quot;LayoutLMv2ForQuestionAnswering&quot;}],&quot;title&quot;:&quot;LayoutLMV2&quot;}" data-svelte="svelte-1phssyn"> <link rel="stylesheet" href="/docs/transformers/pr_16143/en/_app/assets/pages/__layout.svelte-a5c8879b.css"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/vendor-4833417e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/paths-4b3c6e7e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/model_doc/layoutlmv2.mdx-fb521e03.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Tip-fffd6df1.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Docstring-4f315ed9.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/IconCopyLink-4b81c553.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CodeBlock-6a3d1b46.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CopyButton-dacfbfaf.js"> <h1 class="relative group"><a id="layoutlmv2" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#layoutlmv2"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 
28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>LayoutLMV2 </span></h1> <h2 class="relative group"><a id="overview" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#overview"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Overview </span></h2> <p>The LayoutLMV2 model was proposed in <a href="https://arxiv.org/abs/2012.14740" rel="nofollow">LayoutLMv2: Multi-modal Pre-training for Visually-Rich Document Understanding</a> by Yang Xu, Yiheng Xu, Tengchao Lv, Lei Cui, Furu Wei, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Wanxiang Che, Min Zhang, Lidong Zhou. LayoutLMV2 improves <a href="layoutlm">LayoutLM</a> to obtain state-of-the-art results across several document image understanding benchmarks:</p> <ul><li>information extraction from scanned documents: the <a href="https://guillaumejaume.github.io/FUNSD/" rel="nofollow">FUNSD</a> dataset (a collection of 199 annotated forms comprising more than 30,000 words), the <a href="https://github.com/clovaai/cord" rel="nofollow">CORD</a> dataset (a collection of 800 receipts for training, 100 for validation and 100 for testing), the <a href="https://rrc.cvc.uab.es/?ch=13" rel="nofollow">SROIE</a> dataset (a collection of 626 receipts for training and 347 receipts for testing) and the <a href="https://github.com/applicaai/kleister-nda" rel="nofollow">Kleister-NDA</a> dataset (a collection of non-disclosure agreements from the EDGAR database, including 254 documents for training, 83 documents for validation, and 203 documents for testing).</li> <li>document image classification: the <a href="https://www.cs.cmu.edu/~aharley/rvl-cdip/" rel="nofollow">RVL-CDIP</a> dataset (a collection of 400,000 images belonging to one of 16 classes).</li> <li>document visual question answering: the <a href="https://arxiv.org/abs/2007.00398" rel="nofollow">DocVQA</a> dataset (a collection of 50,000 questions defined on 12,000+ document images).</li></ul> <p>The abstract from the paper is the following:</p> <p><em>Pre-training of text and layout has proved effective in a variety of visually-rich document understanding tasks due to its effective model architecture and the advantage of large-scale unlabeled scanned/digital-born documents. In this paper, we present LayoutLMv2 by pre-training text, layout and image in a multi-modal framework, where new model architectures and pre-training tasks are leveraged. Specifically, LayoutLMv2 not only uses the existing masked visual-language modeling task but also the new text-image alignment and text-image matching tasks in the pre-training stage, where cross-modality interaction is better learned. 
Meanwhile, it also integrates a spatial-aware self-attention mechanism into the Transformer architecture, so that the model can fully understand the relative positional relationship among different text blocks. Experiment results show that LayoutLMv2 outperforms strong baselines and achieves new state-of-the-art results on a wide variety of downstream visually-rich document understanding tasks, including FUNSD (0.7895 -&gt; 0.8420), CORD (0.9493 -&gt; 0.9601), SROIE (0.9524 -&gt; 0.9781), Kleister-NDA (0.834 -&gt; 0.852), RVL-CDIP (0.9443 -&gt; 0.9564), and DocVQA (0.7295 -&gt; 0.8672). The pre-trained LayoutLMv2 model is publicly available at this https URL.</em></p> <p>Tips:</p> <ul><li>The main difference between LayoutLMv1 and LayoutLMv2 is that the latter incorporates visual embeddings during pre-training (while LayoutLMv1 only adds visual embeddings during fine-tuning).</li> <li>LayoutLMv2 adds both a relative 1D attention bias as well as a spatial 2D attention bias to the attention scores in the self-attention layers. Details can be found on page 5 of the <a href="https://arxiv.org/abs/2012.14740" rel="nofollow">paper</a>.</li> <li>Demo notebooks on how to use the LayoutLMv2 model on RVL-CDIP, FUNSD, DocVQA, CORD can be found <a href="https://github.com/NielsRogge/Transformers-Tutorials" rel="nofollow">here</a>.</li> <li>LayoutLMv2 uses Facebook AI’s <a href="https://github.com/facebookresearch/detectron2/" rel="nofollow">Detectron2</a> package for its visual backbone. See <a href="https://detectron2.readthedocs.io/en/latest/tutorials/install.html" rel="nofollow">this link</a> for installation instructions.</li> <li>In addition to <code>input_ids</code>, <a href="/docs/transformers/pr_16143/en/model_doc/layoutlmv2#transformers.LayoutLMv2Model.forward">forward()</a> expects 2 additional inputs, namely <code>image</code> and <code>bbox</code>. The <code>image</code> input corresponds to the original document image in which the text tokens occur. The model expects each document image to be of size 224x224. This means that if you have a batch of document images, <code>image</code> should be a tensor of shape (batch_size, 3, 224, 224). This can be either a <code>torch.Tensor</code> or a <code>Detectron2.structures.ImageList</code>. You don’t need to normalize the channels, as this is done by the model. Important to note is that the visual backbone expects BGR channels instead of RGB, as all models in Detectron2 are pre-trained using the BGR format. The <code>bbox</code> input are the bounding boxes (i.e. 2D-positions) of the input text tokens. This is identical to <a href="/docs/transformers/pr_16143/en/model_doc/layoutlm#transformers.LayoutLMModel">LayoutLMModel</a>. These can be obtained using an external OCR engine such as Google’s <a href="https://github.com/tesseract-ocr/tesseract" rel="nofollow">Tesseract</a> (there’s a <a href="https://pypi.org/project/pytesseract/" rel="nofollow">Python wrapper</a> available). Each bounding box should be in (x0, y0, x1, y1) format, where (x0, y0) corresponds to the position of the upper left corner in the bounding box, and (x1, y1) represents the position of the lower right corner. Note that one first needs to normalize the bounding boxes to be on a 0-1000 scale. 
To normalize, you can use the following function:</li></ul> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-keyword">def</span> <span class="hljs-title function_">normalize_bbox</span>(<span class="hljs-params">bbox, width, height</span>): <span class="hljs-keyword">return</span> [ <span class="hljs-built_in">int</span>(<span class="hljs-number">1000</span> * (bbox[<span class="hljs-number">0</span>] / width)), <span class="hljs-built_in">int</span>(<span class="hljs-number">1000</span> * (bbox[<span class="hljs-number">1</span>] / height)), <span class="hljs-built_in">int</span>(<span class="hljs-number">1000</span> * (bbox[<span class="hljs-number">2</span>] / width)), <span class="hljs-built_in">int</span>(<span class="hljs-number">1000</span> * (bbox[<span class="hljs-number">3</span>] / height)), ]<!-- HTML_TAG_END --></pre></div> <p>Here, <code>width</code> and <code>height</code> correspond to the width and height of the original document in which the token occurs (before resizing the image). 
Those can be obtained using the Python Imaging Library (PIL), for example, as follows:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image image = Image.<span class="hljs-built_in">open</span>(<span class="hljs-string">&quot;name_of_your_document - can be a png file, pdf, etc.&quot;</span>) width, height = image.size<!-- HTML_TAG_END --></pre></div> <p>However, this model includes a brand new <a href="/docs/transformers/pr_16143/en/model_doc/layoutlmv2#transformers.LayoutLMv2Processor">LayoutLMv2Processor</a> which can be used to directly prepare data for the model (including applying OCR under the hood). More information can be found in the “Usage” section below.</p> <ul><li>Internally, <a href="/docs/transformers/pr_16143/en/model_doc/layoutlmv2#transformers.LayoutLMv2Model">LayoutLMv2Model</a> will send the <code>image</code> input through its visual backbone to obtain a lower-resolution feature map, whose shape is equal to the <code>image_feature_pool_shape</code> attribute of <a href="/docs/transformers/pr_16143/en/model_doc/layoutlmv2#transformers.LayoutLMv2Config">LayoutLMv2Config</a>. This feature map is then flattened to obtain a sequence of image tokens. As the size of the feature map is 7x7 by default, one obtains 49 image tokens. These are then concatenated with the text tokens, and sent through the Transformer encoder. This means that the last hidden states of the model will have a length of 512 + 49 = 561, if you pad the text tokens up to the max length. More generally, the last hidden states will have a shape of <code>seq_length</code> + <code>config.image_feature_pool_shape[0]</code> * <code>config.image_feature_pool_shape[1]</code>.</li> <li>When calling <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a>, a warning will be printed with a long list of parameter names that are not initialized. 
This is not a problem, as these parameters are batch normalization statistics, which are going to have values when fine-tuning on a custom dataset.</li> <li>If you want to train the model in a distributed environment, make sure to call <code>synchronize_batch_norm</code> on the model in order to properly synchronize the batch normalization layers of the visual backbone.</li></ul> <p>In addition, there’s LayoutXLM, which is a multilingual version of LayoutLMv2. More information can be found on <a href="layoutxlm">LayoutXLM’s documentation page</a>.</p> <h2 class="relative group"><a id="usage-layoutlmv2processor" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#usage-layoutlmv2processor"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Usage: LayoutLMv2Processor </span></h2> <p>The easiest way to prepare data for the model is to use <a href="/docs/transformers/pr_16143/en/model_doc/layoutlmv2#transformers.LayoutLMv2Processor">LayoutLMv2Processor</a>, which internally combines a feature extractor (<a href="/docs/transformers/pr_16143/en/model_doc/layoutlmv2#transformers.LayoutLMv2FeatureExtractor">LayoutLMv2FeatureExtractor</a>) and a tokenizer (<a href="/docs/transformers/pr_16143/en/model_doc/layoutlmv2#transformers.LayoutLMv2Tokenizer">LayoutLMv2Tokenizer</a> or <a href="/docs/transformers/pr_16143/en/model_doc/layoutlmv2#transformers.LayoutLMv2TokenizerFast">LayoutLMv2TokenizerFast</a>). The feature extractor handles the image modality, while the tokenizer handles the text modality. A processor combines both, which is ideal for a multi-modal model like LayoutLMv2. 
Note that you can still use both separately, if you only want to handle one modality.</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> LayoutLMv2FeatureExtractor, LayoutLMv2TokenizerFast, LayoutLMv2Processor feature_extractor = LayoutLMv2FeatureExtractor() <span class="hljs-comment"># apply_ocr is set to True by default</span> tokenizer = LayoutLMv2TokenizerFast.from_pretrained(<span class="hljs-string">&quot;microsoft/layoutlmv2-base-uncased&quot;</span>) processor = LayoutLMv2Processor(feature_extractor, tokenizer)<!-- HTML_TAG_END --></pre></div> <p>In short, one can provide a document image (and possibly additional data) to <a href="/docs/transformers/pr_16143/en/model_doc/layoutlmv2#transformers.LayoutLMv2Processor">LayoutLMv2Processor</a>, and it will create the inputs expected by the model. Internally, the processor first uses <a href="/docs/transformers/pr_16143/en/model_doc/layoutlmv2#transformers.LayoutLMv2FeatureExtractor">LayoutLMv2FeatureExtractor</a> to apply OCR on the image to get a list of words and normalized bounding boxes, as well to resize the image to a given size in order to get the <code>image</code> input. The words and normalized bounding boxes are then provided to <a href="/docs/transformers/pr_16143/en/model_doc/layoutlmv2#transformers.LayoutLMv2Tokenizer">LayoutLMv2Tokenizer</a> or <a href="/docs/transformers/pr_16143/en/model_doc/layoutlmv2#transformers.LayoutLMv2TokenizerFast">LayoutLMv2TokenizerFast</a>, which converts them to token-level <code>input_ids</code>, <code>attention_mask</code>, <code>token_type_ids</code>, <code>bbox</code>. Optionally, one can provide word labels to the processor, which are turned into token-level <code>labels</code>.</p> <p><a href="/docs/transformers/pr_16143/en/model_doc/layoutlmv2#transformers.LayoutLMv2Processor">LayoutLMv2Processor</a> uses <a href="https://pypi.org/project/pytesseract/" rel="nofollow">PyTesseract</a>, a Python wrapper around Google’s Tesseract OCR engine, under the hood. Note that you can still use your own OCR engine of choice, and provide the words and normalized boxes yourself. 
This requires initializing <a href="/docs/transformers/pr_16143/en/model_doc/layoutlmv2#transformers.LayoutLMv2FeatureExtractor">LayoutLMv2FeatureExtractor</a> with <code>apply_ocr</code> set to <code>False</code>.</p> <p>In total, there are 5 use cases that are supported by the processor. Below, we list them all. Note that each of these use cases work for both batched and non-batched inputs (we illustrate them for non-batched inputs).</p> <p><strong>Use case 1: document image classification (training, inference) + token classification (inference), apply_ocr = True</strong></p> <p>This is the simplest case, in which the processor (actually the feature extractor) will perform OCR on the image to get the words and normalized bounding boxes.</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> LayoutLMv2Processor <span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image processor = LayoutLMv2Processor.from_pretrained(<span class="hljs-string">&quot;microsoft/layoutlmv2-base-uncased&quot;</span>) image = Image.<span class="hljs-built_in">open</span>(<span class="hljs-string">&quot;name_of_your_document - can be a png file, pdf, etc.&quot;</span>).convert(<span class="hljs-string">&quot;RGB&quot;</span>) encoding = processor( image, return_tensors=<span class="hljs-string">&quot;pt&quot;</span> ) <span class="hljs-comment"># you can also add all tokenizer parameters here such as padding, truncation</span> <span class="hljs-built_in">print</span>(encoding.keys()) <span class="hljs-comment"># dict_keys([&#x27;input_ids&#x27;, &#x27;token_type_ids&#x27;, &#x27;attention_mask&#x27;, &#x27;bbox&#x27;, &#x27;image&#x27;])</span><!-- HTML_TAG_END --></pre></div> <p><strong>Use case 2: document image classification (training, inference) + token classification (inference), apply_ocr=False</strong></p> <p>In case one wants to do OCR themselves, one can initialize the feature extractor with <code>apply_ocr</code> set to <code>False</code>. 
In that case, one should provide the words and corresponding (normalized) bounding boxes themselves to the processor.</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> LayoutLMv2Processor <span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image processor = LayoutLMv2Processor.from_pretrained(<span class="hljs-string">&quot;microsoft/layoutlmv2-base-uncased&quot;</span>, revision=<span class="hljs-string">&quot;no_ocr&quot;</span>) image = Image.<span class="hljs-built_in">open</span>(<span class="hljs-string">&quot;name_of_your_document - can be a png file, pdf, etc.&quot;</span>).convert(<span class="hljs-string">&quot;RGB&quot;</span>) words = [<span class="hljs-string">&quot;hello&quot;</span>, <span class="hljs-string">&quot;world&quot;</span>] boxes = [[<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>, <span class="hljs-number">4</span>], [<span class="hljs-number">5</span>, <span class="hljs-number">6</span>, <span class="hljs-number">7</span>, <span class="hljs-number">8</span>]] <span class="hljs-comment"># make sure to normalize your bounding boxes</span> encoding = processor(image, words, boxes=boxes, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-built_in">print</span>(encoding.keys()) <span class="hljs-comment"># dict_keys([&#x27;input_ids&#x27;, &#x27;token_type_ids&#x27;, &#x27;attention_mask&#x27;, &#x27;bbox&#x27;, &#x27;image&#x27;])</span><!-- HTML_TAG_END --></pre></div> <p><strong>Use case 3: token classification (training), apply_ocr=False</strong></p> <p>For token classification tasks (such as FUNSD, CORD, SROIE, Kleister-NDA), one can also provide the corresponding word labels in order to train a model. The processor will then convert these into token-level <code>labels</code>. By default, it will only label the first wordpiece of a word, and label the remaining wordpieces with -100, which is the <code>ignore_index</code> of PyTorch’s CrossEntropyLoss. 
In case you want all wordpieces of a word to be labeled, you can initialize the tokenizer with <code>only_label_first_subword</code> set to <code>False</code>.</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> LayoutLMv2Processor <span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image processor = LayoutLMv2Processor.from_pretrained(<span class="hljs-string">&quot;microsoft/layoutlmv2-base-uncased&quot;</span>, revision=<span class="hljs-string">&quot;no_ocr&quot;</span>) image = Image.<span class="hljs-built_in">open</span>(<span class="hljs-string">&quot;name_of_your_document - can be a png file, pdf, etc.&quot;</span>).convert(<span class="hljs-string">&quot;RGB&quot;</span>) words = [<span class="hljs-string">&quot;hello&quot;</span>, <span class="hljs-string">&quot;world&quot;</span>] boxes = [[<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>, <span class="hljs-number">4</span>], [<span class="hljs-number">5</span>, <span class="hljs-number">6</span>, <span class="hljs-number">7</span>, <span class="hljs-number">8</span>]] <span class="hljs-comment"># make sure to normalize your bounding boxes</span> word_labels = [<span class="hljs-number">1</span>, <span class="hljs-number">2</span>] encoding = processor(image, words, boxes=boxes, word_labels=word_labels, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-built_in">print</span>(encoding.keys()) <span class="hljs-comment"># dict_keys([&#x27;input_ids&#x27;, &#x27;token_type_ids&#x27;, &#x27;attention_mask&#x27;, &#x27;bbox&#x27;, &#x27;labels&#x27;, &#x27;image&#x27;])</span><!-- HTML_TAG_END --></pre></div> <p><strong>Use case 4: visual question answering (inference), apply_ocr=True</strong></p> <p>For visual question answering tasks (such as DocVQA), you can provide a question to the processor. 
By default, the processor will apply OCR on the image, and create [CLS] question tokens [SEP] word tokens [SEP].</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> LayoutLMv2Processor <span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image processor = LayoutLMv2Processor.from_pretrained(<span class="hljs-string">&quot;microsoft/layoutlmv2-base-uncased&quot;</span>) image = Image.<span class="hljs-built_in">open</span>(<span class="hljs-string">&quot;name_of_your_document - can be a png file, pdf, etc.&quot;</span>).convert(<span class="hljs-string">&quot;RGB&quot;</span>) question = <span class="hljs-string">&quot;What&#x27;s his name?&quot;</span> encoding = processor(image, question, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-built_in">print</span>(encoding.keys()) <span class="hljs-comment"># dict_keys([&#x27;input_ids&#x27;, &#x27;token_type_ids&#x27;, &#x27;attention_mask&#x27;, &#x27;bbox&#x27;, &#x27;image&#x27;])</span><!-- HTML_TAG_END --></pre></div> <p><strong>Use case 5: visual question answering (inference), apply_ocr=False</strong></p> <p>For visual question answering tasks (such as DocVQA), you can provide a question to the processor. 
If you want to perform OCR yourself, you can provide your own words and (normalized) bounding boxes to the processor.</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> LayoutLMv2Processor <span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image processor = LayoutLMv2Processor.from_pretrained(<span class="hljs-string">&quot;microsoft/layoutlmv2-base-uncased&quot;</span>, revision=<span class="hljs-string">&quot;no_ocr&quot;</span>) image = Image.<span class="hljs-built_in">open</span>(<span class="hljs-string">&quot;name_of_your_document - can be a png file, pdf, etc.&quot;</span>).convert(<span class="hljs-string">&quot;RGB&quot;</span>) question = <span class="hljs-string">&quot;What&#x27;s his name?&quot;</span> words = [<span class="hljs-string">&quot;hello&quot;</span>, <span class="hljs-string">&quot;world&quot;</span>] boxes = [[<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>, <span class="hljs-number">4</span>], [<span class="hljs-number">5</span>, <span class="hljs-number">6</span>, <span class="hljs-number">7</span>, <span class="hljs-number">8</span>]] <span class="hljs-comment"># make sure to normalize your bounding boxes</span> encoding = processor(image, question, words, boxes=boxes, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-built_in">print</span>(encoding.keys()) <span class="hljs-comment"># dict_keys([&#x27;input_ids&#x27;, &#x27;token_type_ids&#x27;, &#x27;attention_mask&#x27;, &#x27;bbox&#x27;, &#x27;image&#x27;])</span><!-- HTML_TAG_END --></pre></div> <h2 class="relative group"><a id="transformers.LayoutLMv2Config" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2Config"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 
1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>LayoutLMv2Config </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.LayoutLMv2Config"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">LayoutLMv2Config</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.LayoutLMv2Config" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.LayoutLMv2Config"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/layoutlmv2/configuration_layoutlmv2.py#L35" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">vocab_size<span class="opacity-60"> = 30522</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_size<span class="opacity-60"> = 768</span></span> </span><span class="comma cursor-pointer"><span 
class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_hidden_layers<span class="opacity-60"> = 12</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_attention_heads<span class="opacity-60"> = 12</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">intermediate_size<span class="opacity-60"> = 3072</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_act<span class="opacity-60"> = &#39;gelu&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_dropout_prob<span class="opacity-60"> = 0.1</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_probs_dropout_prob<span class="opacity-60"> = 0.1</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_position_embeddings<span class="opacity-60"> = 512</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">type_vocab_size<span class="opacity-60"> = 2</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">initializer_range<span class="opacity-60"> = 0.02</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">layer_norm_eps<span class="opacity-60"> = 1e-12</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_token_id<span class="opacity-60"> = 0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_2d_position_embeddings<span class="opacity-60"> = 1024</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_rel_pos<span class="opacity-60"> = 128</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">rel_pos_bins<span class="opacity-60"> = 32</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">fast_qkv<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_rel_2d_pos<span class="opacity-60"> = 256</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">rel_2d_pos_bins<span class="opacity-60"> = 64</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">convert_sync_batchnorm<span class="opacity-60"> = 
True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">image_feature_pool_shape<span class="opacity-60"> = [7, 7, 256]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">coordinate_size<span class="opacity-60"> = 128</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">shape_size<span class="opacity-60"> = 128</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">has_relative_attention_bias<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">has_spatial_attention_bias<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">has_visual_segment_embedding<span class="opacity-60"> = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">detectron2_config_args<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2Config.vocab_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2Config.vocab_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 30522) &#x2014; Vocabulary size of the LayoutLMv2 model. 
Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed when calling <a href="/docs/transformers/pr_16143/en/model_doc/layoutlmv2#transformers.LayoutLMv2Model">LayoutLMv2Model</a> or <code>TFLayoutLMv2Model</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2Config.hidden_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2Config.hidden_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_size</strong> (<code>int</code>, <em>optional</em>, defaults to 768) &#x2014; Dimension of the encoder layers and the pooler layer.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2Config.num_hidden_layers" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2Config.num_hidden_layers"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_hidden_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of hidden layers in the Transformer encoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2Config.num_attention_heads" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2Config.num_attention_heads"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2Config.intermediate_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2Config.intermediate_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>intermediate_size</strong> (<code>int</code>, <em>optional</em>, defaults to 3072) &#x2014; Dimension of the &#x201C;intermediate&#x201D; (i.e., feed-forward) layer in the Transformer encoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2Config.hidden_act" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2Config.hidden_act"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_act</strong> (<code>str</code> or <code>function</code>, <em>optional</em>, defaults to 
<code>&quot;gelu&quot;</code>) &#x2014; The non-linear activation function (function or string) in the encoder and pooler. If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;selu&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2Config.hidden_dropout_prob" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2Config.hidden_dropout_prob"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2Config.attention_probs_dropout_prob" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2Config.attention_probs_dropout_prob"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_probs_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout ratio for the attention probabilities.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2Config.max_position_embeddings" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2Config.max_position_embeddings"><span><svg class="text-smd" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_position_embeddings</strong> (<code>int</code>, <em>optional</em>, defaults to 512) &#x2014; The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2Config.type_vocab_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2Config.type_vocab_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>type_vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 2) &#x2014; The vocabulary size of the <code>token_type_ids</code> passed when calling <a href="/docs/transformers/pr_16143/en/model_doc/layoutlmv2#transformers.LayoutLMv2Model">LayoutLMv2Model</a> or <code>TFLayoutLMv2Model</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2Config.initializer_range" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2Config.initializer_range"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 
11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>initializer_range</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2Config.layer_norm_eps" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2Config.layer_norm_eps"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>layer_norm_eps</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-12) &#x2014; The epsilon used by the layer normalization layers.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2Config.max_2d_position_embeddings" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2Config.max_2d_position_embeddings"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_2d_position_embeddings</strong> (<code>int</code>, <em>optional</em>, defaults to 1024) &#x2014; The maximum value that the 2D position embedding might ever be used with. 
Typically set this to something large just in case (e.g., 1024).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2Config.max_rel_pos" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2Config.max_rel_pos"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_rel_pos</strong> (<code>int</code>, <em>optional</em>, defaults to 128) &#x2014; The maximum number of relative positions to be used in the self-attention mechanism.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2Config.rel_pos_bins" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2Config.rel_pos_bins"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>rel_pos_bins</strong> (<code>int</code>, <em>optional</em>, defaults to 32) &#x2014; The number of relative position bins to be used in the self-attention mechanism.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2Config.fast_qkv" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2Config.fast_qkv"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 
11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>fast_qkv</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to use a single matrix for the queries, keys, values in the self-attention layers.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2Config.max_rel_2d_pos" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2Config.max_rel_2d_pos"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_rel_2d_pos</strong> (<code>int</code>, <em>optional</em>, defaults to 256) &#x2014; The maximum number of relative 2D positions in the self-attention mechanism.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2Config.rel_2d_pos_bins" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2Config.rel_2d_pos_bins"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>rel_2d_pos_bins</strong> (<code>int</code>, <em>optional</em>, defaults to 64) &#x2014; The number of 2D relative position bins in the self-attention mechanism.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a 
id="transformers.LayoutLMv2Config.image_feature_pool_shape" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2Config.image_feature_pool_shape"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>image_feature_pool_shape</strong> (<code>List[int]</code>, <em>optional</em>, defaults to [7, 7, 256]) &#x2014; The shape of the average-pooled feature map.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2Config.coordinate_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2Config.coordinate_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>coordinate_size</strong> (<code>int</code>, <em>optional</em>, defaults to 128) &#x2014; Dimension of the coordinate embeddings.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2Config.shape_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2Config.shape_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 
56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>shape_size</strong> (<code>int</code>, <em>optional</em>, defaults to 128) &#x2014; Dimension of the width and height embeddings.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2Config.has_relative_attention_bias" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2Config.has_relative_attention_bias"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>has_relative_attention_bias</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to use a relative attention bias in the self-attention mechanism.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2Config.has_spatial_attention_bias" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2Config.has_spatial_attention_bias"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>has_spatial_attention_bias</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to use a spatial attention bias in the self-attention mechanism.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2Config.has_visual_segment_embedding" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 
with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2Config.has_visual_segment_embedding"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>has_visual_segment_embedding</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to add visual segment embeddings.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2Config.detectron2_config_args" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2Config.detectron2_config_args"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>detectron2_config_args</strong> (<code>dict</code>, <em>optional</em>) &#x2014; Dictionary containing the configuration arguments of the Detectron2 visual backbone. Refer to <a href="https://github.com/microsoft/unilm/blob/master/layoutlmft/layoutlmft/models/layoutlmv2/detectron2_config.py" rel="nofollow">this file</a> for details regarding default values.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>This is the configuration class to store the configuration of a <a href="/docs/transformers/pr_16143/en/model_doc/layoutlmv2#transformers.LayoutLMv2Model">LayoutLMv2Model</a>. It is used to instantiate a LayoutLMv2 model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the LayoutLMv2 <a href="https://huggingface.co/microsoft/layoutlmv2-base-uncased" rel="nofollow">microsoft/layoutlmv2-base-uncased</a> architecture.</p> <p>Configuration objects inherit from <a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a> and can be used to control the model outputs. 
Read the documentation from <a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a> for more information.</p> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> LayoutLMv2Model, LayoutLMv2Config <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a LayoutLMv2 microsoft/layoutlmv2-base-uncased style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = LayoutLMv2Config() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a model from the microsoft/layoutlmv2-base-uncased style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = LayoutLMv2Model(configuration) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = model.config<!-- HTML_TAG_END --></pre></div></div> <h2 class="relative group"><a id="transformers.LayoutLMv2FeatureExtractor" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2FeatureExtractor"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>LayoutLMv2FeatureExtractor </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" 
id="transformers.LayoutLMv2FeatureExtractor"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">LayoutLMv2FeatureExtractor</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.LayoutLMv2FeatureExtractor" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.LayoutLMv2FeatureExtractor"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/layoutlmv2/feature_extraction_layoutlmv2.py#L83" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">do_resize<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">size<span class="opacity-60"> = 224</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">resample<span class="opacity-60"> = 2</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">apply_ocr<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">ocr_lang<span class="opacity-60"> = None</span></span> 
</span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2FeatureExtractor.do_resize" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2FeatureExtractor.do_resize"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>do_resize</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether to resize the input to a certain <code>size</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2FeatureExtractor.size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2FeatureExtractor.size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>size</strong> (<code>int</code> or <code>Tuple(int)</code>, <em>optional</em>, defaults to 224) &#x2014; Resize the input to the given size. If a tuple is provided, it should be (width, height). If only an integer is provided, then the input will be resized to (size, size). 
Only has an effect if <code>do_resize</code> is set to <code>True</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2FeatureExtractor.resample" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2FeatureExtractor.resample"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>resample</strong> (<code>int</code>, <em>optional</em>, defaults to <code>PIL.Image.BILINEAR</code>) &#x2014; An optional resampling filter. This can be one of <code>PIL.Image.NEAREST</code>, <code>PIL.Image.BOX</code>, <code>PIL.Image.BILINEAR</code>, <code>PIL.Image.HAMMING</code>, <code>PIL.Image.BICUBIC</code> or <code>PIL.Image.LANCZOS</code>. Only has an effect if <code>do_resize</code> is set to <code>True</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2FeatureExtractor.apply_ocr" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2FeatureExtractor.apply_ocr"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>apply_ocr</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether to apply the Tesseract OCR engine to get words + normalized bounding boxes.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2FeatureExtractor.ocr_lang" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2FeatureExtractor.ocr_lang"><span><svg 
class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>ocr_lang</strong> (<code>Optional[str]</code>, <em>optional</em>) &#x2014; The language, specified by its ISO code, to be used by the Tesseract OCR engine. By default, English is used.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>LayoutLMv2FeatureExtractor uses Google&#x2019;s Tesseract OCR engine under the hood.</p> </div><!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Constructs a LayoutLMv2 feature extractor. This can be used to resize document images to the same size, as well as to apply OCR on them in order to get a list of words and normalized bounding boxes.</p> <p>This feature extractor inherits from <code>PreTrainedFeatureExtractor()</code>which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.LayoutLMv2FeatureExtractor.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.LayoutLMv2FeatureExtractor.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" 
href="#transformers.LayoutLMv2FeatureExtractor.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/layoutlmv2/feature_extraction_layoutlmv2.py#L124" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">images<span class="opacity-60">: typing.Union[PIL.Image.Image, numpy.ndarray, ForwardRef(&#39;torch.Tensor&#39;), typing.List[PIL.Image.Image], typing.List[numpy.ndarray], typing.List[ForwardRef(&#39;torch.Tensor&#39;)]]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_tensors<span class="opacity-60">: typing.Union[str, transformers.file_utils.TensorType, NoneType] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/feature_extractor#transformers.BatchFeature" >BatchFeature</a></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2FeatureExtractor.__call__.images" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2FeatureExtractor.__call__.images"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 
11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>images</strong> (<code>PIL.Image.Image</code>, <code>np.ndarray</code>, <code>torch.Tensor</code>, <code>List[PIL.Image.Image]</code>, <code>List[np.ndarray]</code>, <code>List[torch.Tensor]</code>) &#x2014; The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch tensor. In case of a NumPy array/PyTorch tensor, each image should be of shape (C, H, W), where C is a number of channels, H and W are image height and width.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2FeatureExtractor.__call__.return_tensors" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2FeatureExtractor.__call__.return_tensors"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_tensors</strong> (<code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>, defaults to <code>&apos;np&apos;</code>) &#x2014; If set, will return tensors of a particular framework. 
Acceptable values are:</p> <ul> <li><code>&apos;tf&apos;</code>: Return TensorFlow <code>tf.constant</code> objects.</li> <li><code>&apos;pt&apos;</code>: Return PyTorch <code>torch.Tensor</code> objects.</li> <li><code>&apos;np&apos;</code>: Return NumPy <code>np.ndarray</code> objects.</li> <li><code>&apos;jax&apos;</code>: Return JAX <code>jnp.ndarray</code> objects.</li> </ul><!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.LayoutLMv2FeatureExtractor.__call__.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/feature_extractor#transformers.BatchFeature" >BatchFeature</a></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/feature_extractor#transformers.BatchFeature" >BatchFeature</a> with the following fields:</p> <ul> <li><strong>pixel_values</strong> — Pixel values to be fed to a model, of shape (batch_size, num_channels, height, width).</li> <li><strong>words</strong> — Optional words as identified by Tesseract OCR (only when <a href="/docs/transformers/pr_16143/en/model_doc/layoutlmv2#transformers.LayoutLMv2FeatureExtractor" >LayoutLMv2FeatureExtractor</a> was initialized with <code>apply_ocr</code> set to <code>True</code>).</li> <li><strong>boxes</strong> — Optional bounding boxes as identified by Tesseract OCR, normalized based on the image size (only when <a href="/docs/transformers/pr_16143/en/model_doc/layoutlmv2#transformers.LayoutLMv2FeatureExtractor" >LayoutLMv2FeatureExtractor</a> was initialized with <code>apply_ocr</code> set to <code>True</code>).</li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>Main method to prepare for the model one or several image(s).</p> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> LayoutLMv2FeatureExtractor <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image <span class="hljs-meta">&gt;&gt;&gt; </span>image = Image.<span 
class="hljs-built_in">open</span>(<span class="hljs-string">&quot;name_of_your_document - can be a png file, pdf, etc.&quot;</span>).convert(<span class="hljs-string">&quot;RGB&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># option 1: with apply_ocr=True (default)</span> <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = LayoutLMv2FeatureExtractor() <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = feature_extractor(image, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(encoding.keys()) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># dict_keys([&#x27;pixel_values&#x27;, &#x27;words&#x27;, &#x27;boxes&#x27;])</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># option 2: with apply_ocr=False</span> <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = LayoutLMv2FeatureExtractor(apply_ocr=<span class="hljs-literal">False</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = feature_extractor(image, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(encoding.keys()) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># dict_keys([&#x27;pixel_values&#x27;])</span><!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.LayoutLMv2Tokenizer" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2Tokenizer"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>LayoutLMv2Tokenizer </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.LayoutLMv2Tokenizer"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" 
opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">LayoutLMv2Tokenizer</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.LayoutLMv2Tokenizer" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.LayoutLMv2Tokenizer"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/layoutlmv2/tokenization_layoutlmv2.py#L146" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">vocab_file<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">do_lower_case<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">do_basic_tokenize<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">never_split<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">unk_token<span class="opacity-60"> = &#39;[UNK]&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">sep_token<span class="opacity-60"> = &#39;[SEP]&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_token<span class="opacity-60"> = &#39;[PAD]&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cls_token<span class="opacity-60"> = &#39;[CLS]&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">mask_token<span class="opacity-60"> = &#39;[MASK]&#39;</span></span> </span><span class="comma 
cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cls_token_box<span class="opacity-60"> = [0, 0, 0, 0]</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">sep_token_box<span class="opacity-60"> = [1000, 1000, 1000, 1000]</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_token_box<span class="opacity-60"> = [0, 0, 0, 0]</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_token_label<span class="opacity-60"> = -100</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">only_label_first_subword<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tokenize_chinese_chars<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">strip_accents<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">model_max_length<span class="opacity-60">: int = 512</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">additional_special_tokens<span class="opacity-60">: typing.Optional[typing.List[str]] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Construct a LayoutLMv2 tokenizer. Based on WordPiece. <a href="/docs/transformers/pr_16143/en/model_doc/layoutlmv2#transformers.LayoutLMv2Tokenizer">LayoutLMv2Tokenizer</a> can be used to turn words, word-level bounding boxes and optional word labels to token-level <code>input_ids</code>, <code>attention_mask</code>, <code>token_type_ids</code>, <code>bbox</code>, and optional <code>labels</code> (for token classification).</p> <p>This tokenizer inherits from <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a> which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.</p> <p><a href="/docs/transformers/pr_16143/en/model_doc/layoutlmv2#transformers.LayoutLMv2Tokenizer">LayoutLMv2Tokenizer</a> runs end-to-end tokenization: punctuation splitting and wordpiece. 
It also turns the word-level bounding boxes into token-level bounding boxes.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.LayoutLMv2Tokenizer.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.LayoutLMv2Tokenizer.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.LayoutLMv2Tokenizer.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/layoutlmv2/tokenization_layoutlmv2.py#L366" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">text<span class="opacity-60">: typing.Union[str, typing.List[str], typing.List[typing.List[str]]]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">text_pair<span class="opacity-60">: typing.Union[typing.List[str], typing.List[typing.List[str]], NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span 
class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">boxes<span class="opacity-60">: typing.Union[typing.List[typing.List[int]], typing.List[typing.List[typing.List[int]]]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">word_labels<span class="opacity-60">: typing.Union[typing.List[int], typing.List[typing.List[int]], NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">add_special_tokens<span class="opacity-60">: bool = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">padding<span class="opacity-60">: typing.Union[bool, str, transformers.file_utils.PaddingStrategy] = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">truncation<span class="opacity-60">: typing.Union[bool, str, transformers.tokenization_utils_base.TruncationStrategy] = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_length<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">stride<span class="opacity-60">: int = 0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_to_multiple_of<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_tensors<span class="opacity-60">: typing.Union[str, transformers.file_utils.TensorType, NoneType] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_token_type_ids<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_attention_mask<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_overflowing_tokens<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_special_tokens_mask<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_offsets_mapping<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_length<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white 
dark:hover:bg-white dark:hover:text-black">verbose<span class="opacity-60">: bool = True</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2Tokenizer.__call__.text" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2Tokenizer.__call__.text"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>text</strong> (<code>str</code>, <code>List[str]</code>, <code>List[List[str]]</code>) &#x2014; The sequence or batch of sequences to be encoded. Each sequence can be a string, a list of strings (words of a single example or questions of a batch of examples) or a list of list of strings (batch of words).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2Tokenizer.__call__.text_pair" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2Tokenizer.__call__.text_pair"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>text_pair</strong> (<code>List[str]</code>, <code>List[List[str]]</code>) &#x2014; The sequence or batch of sequences to be encoded. 
Each sequence should be a list of strings (pretokenized string).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2Tokenizer.__call__.boxes" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2Tokenizer.__call__.boxes"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>boxes</strong> (<code>List[List[int]]</code>, <code>List[List[List[int]]]</code>) &#x2014; Word-level bounding boxes. Each bounding box should be normalized to be on a 0-1000 scale.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2Tokenizer.__call__.word_labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2Tokenizer.__call__.word_labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>word_labels</strong> (<code>List[int]</code>, <code>List[List[int]]</code>, <em>optional</em>) &#x2014; Word-level integer labels (for token classification tasks such as FUNSD, CORD).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2Tokenizer.__call__.add_special_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2Tokenizer.__call__.add_special_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 
88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>add_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to encode the sequences with the special tokens relative to their model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2Tokenizer.__call__.padding" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2Tokenizer.__call__.padding"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>padding</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/file_utils#transformers.file_utils.PaddingStrategy">PaddingStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls padding. 
Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest&apos;</code>: Pad to the longest sequence in the batch (or no padding if only a single sequence is provided).</li> <li><code>&apos;max_length&apos;</code>: Pad to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided.</li> <li><code>False</code> or <code>&apos;do_not_pad&apos;</code> (default): No padding (i.e., can output a batch with sequences of different lengths).</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2Tokenizer.__call__.truncation" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2Tokenizer.__call__.truncation"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>truncation</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.tokenization_utils_base.TruncationStrategy">TruncationStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls truncation. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_second&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. 
This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>False</code> or <code>&apos;do_not_truncate&apos;</code> (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size).</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2Tokenizer.__call__.max_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2Tokenizer.__call__.max_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; Controls the maximum length to use by one of the truncation/padding parameters.</p> <p>If left unset or set to <code>None</code>, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. 
If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2Tokenizer.__call__.stride" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2Tokenizer.__call__.stride"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>stride</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If set to a number along with <code>max_length</code>, the overflowing tokens returned when <code>return_overflowing_tokens=True</code> will contain some tokens from the end of the truncated sequence returned to provide some overlap between truncated and overflowing sequences. The value of this argument defines the number of overlapping tokens.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2Tokenizer.__call__.is_split_into_words" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2Tokenizer.__call__.is_split_into_words"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>is_split_into_words</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the input is already pre-tokenized (e.g., split into words). If set to <code>True</code>, the tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace) which it will tokenize. 
This is useful for NER or token classification.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2Tokenizer.__call__.pad_to_multiple_of" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2Tokenizer.__call__.pad_to_multiple_of"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pad_to_multiple_of</strong> (<code>int</code>, <em>optional</em>) &#x2014; If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability &gt;= 7.5 (Volta).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2Tokenizer.__call__.return_tensors" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2Tokenizer.__call__.return_tensors"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_tensors</strong> (<code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>) &#x2014; If set, will return tensors instead of list of python integers. 
Acceptable values are:</p> <ul> <li><code>&apos;tf&apos;</code>: Return TensorFlow <code>tf.constant</code> objects.</li> <li><code>&apos;pt&apos;</code>: Return PyTorch <code>torch.Tensor</code> objects.</li> <li><code>&apos;np&apos;</code>: Return Numpy <code>np.ndarray</code> objects.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2Tokenizer.__call__.add_special_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2Tokenizer.__call__.add_special_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>add_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to encode the sequences with the special tokens relative to their model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2Tokenizer.__call__.padding" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2Tokenizer.__call__.padding"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>padding</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/file_utils#transformers.file_utils.PaddingStrategy">PaddingStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls padding. 
Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest&apos;</code>: Pad to the longest sequence in the batch (or no padding if only a single sequence is provided).</li> <li><code>&apos;max_length&apos;</code>: Pad to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided.</li> <li><code>False</code> or <code>&apos;do_not_pad&apos;</code> (default): No padding (i.e., can output a batch with sequences of different lengths).</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2Tokenizer.__call__.truncation" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2Tokenizer.__call__.truncation"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>truncation</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.tokenization_utils_base.TruncationStrategy">TruncationStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls truncation. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_second&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. 
This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>False</code> or <code>&apos;do_not_truncate&apos;</code> (default): No truncation (i.e., can output a batch with sequence lengths greater than the model maximum admissible input size).</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2Tokenizer.__call__.max_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2Tokenizer.__call__.max_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; Controls the maximum length to use by one of the truncation/padding parameters. If left unset or set to <code>None</code>, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. 
If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2Tokenizer.__call__.stride" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2Tokenizer.__call__.stride"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>stride</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If set to a number along with <code>max_length</code>, the overflowing tokens returned when <code>return_overflowing_tokens=True</code> will contain some tokens from the end of the truncated sequence returned to provide some overlap between truncated and overflowing sequences. The value of this argument defines the number of overlapping tokens.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2Tokenizer.__call__.pad_to_multiple_of" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2Tokenizer.__call__.pad_to_multiple_of"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pad_to_multiple_of</strong> (<code>int</code>, <em>optional</em>) &#x2014; If set will pad the sequence to a multiple of the provided value. 
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability &gt;= 7.5 (Volta).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2Tokenizer.__call__.return_tensors" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2Tokenizer.__call__.return_tensors"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_tensors</strong> (<code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>) &#x2014; If set, will return tensors instead of list of python integers. Acceptable values are:</p> <ul> <li><code>&apos;tf&apos;</code>: Return TensorFlow <code>tf.constant</code> objects.</li> <li><code>&apos;pt&apos;</code>: Return PyTorch <code>torch.Tensor</code> objects.</li> <li><code>&apos;np&apos;</code>: Return Numpy <code>np.ndarray</code> objects.</li> </ul><!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of sequences with word-level normalized bounding boxes and optional labels.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.LayoutLMv2Tokenizer.save_vocabulary"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 
12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>save_vocabulary</span></h4><!-- HTML_TAG_END --> <a id="transformers.LayoutLMv2Tokenizer.save_vocabulary" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.LayoutLMv2Tokenizer.save_vocabulary"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/layoutlmv2/tokenization_layoutlmv2.py#L346" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">save_directory<span class="opacity-60">: str</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">filename_prefix<span class="opacity-60">: typing.Optional[str] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div></div></div> <h2 class="relative group"><a id="transformers.LayoutLMv2TokenizerFast" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2TokenizerFast"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>LayoutLMv2TokenizerFast </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.LayoutLMv2TokenizerFast"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 
text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">LayoutLMv2TokenizerFast</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.LayoutLMv2TokenizerFast" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.LayoutLMv2TokenizerFast"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/layoutlmv2/tokenization_layoutlmv2_fast.py#L62" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">vocab_file<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tokenizer_file<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">do_lower_case<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">unk_token<span class="opacity-60"> = &#39;[UNK]&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">sep_token<span class="opacity-60"> = &#39;[SEP]&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_token<span class="opacity-60"> = &#39;[PAD]&#39;</span></span> </span><span 
class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cls_token<span class="opacity-60"> = &#39;[CLS]&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">mask_token<span class="opacity-60"> = &#39;[MASK]&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cls_token_box<span class="opacity-60"> = [0, 0, 0, 0]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">sep_token_box<span class="opacity-60"> = [1000, 1000, 1000, 1000]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_token_box<span class="opacity-60"> = [0, 0, 0, 0]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_token_label<span class="opacity-60"> = -100</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">only_label_first_subword<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tokenize_chinese_chars<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">strip_accents<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2TokenizerFast.vocab_file" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2TokenizerFast.vocab_file"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>vocab_file</strong> (<code>str</code>) &#x2014; File containing the vocabulary.<!-- 
HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2TokenizerFast.do_lower_case" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2TokenizerFast.do_lower_case"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>do_lower_case</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to lowercase the input when tokenizing.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2TokenizerFast.unk_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2TokenizerFast.unk_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[UNK]&quot;</code>) &#x2014; The unknown token. 
A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2TokenizerFast.sep_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2TokenizerFast.sep_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>sep_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[SEP]&quot;</code>) &#x2014; The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2TokenizerFast.pad_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2TokenizerFast.pad_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pad_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[PAD]&quot;</code>) &#x2014; The token used for padding, for example when batching sequences of different lengths.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2TokenizerFast.cls_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2TokenizerFast.cls_token"><span><svg class="text-smd" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cls_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[CLS]&quot;</code>) &#x2014; The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2TokenizerFast.mask_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2TokenizerFast.mask_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>mask_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[MASK]&quot;</code>) &#x2014; The token used for masking values. This is the token used when training this model with masked language modeling. 
This is the token which the model will try to predict.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2TokenizerFast.cls_token_box" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2TokenizerFast.cls_token_box"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cls_token_box</strong> (<code>List[int]</code>, <em>optional</em>, defaults to <code>[0, 0, 0, 0]</code>) &#x2014; The bounding box to use for the special [CLS] token.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2TokenizerFast.sep_token_box" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2TokenizerFast.sep_token_box"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>sep_token_box</strong> (<code>List[int]</code>, <em>optional</em>, defaults to <code>[1000, 1000, 1000, 1000]</code>) &#x2014; The bounding box to use for the special [SEP] token.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2TokenizerFast.pad_token_box" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2TokenizerFast.pad_token_box"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 
67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pad_token_box</strong> (<code>List[int]</code>, <em>optional</em>, defaults to <code>[0, 0, 0, 0]</code>) &#x2014; The bounding box to use for the special [PAD] token.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2TokenizerFast.pad_token_label" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2TokenizerFast.pad_token_label"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pad_token_label</strong> (<code>int</code>, <em>optional</em>, defaults to -100) &#x2014; The label to use for padding tokens. 
Defaults to -100, which is the <code>ignore_index</code> of PyTorch&#x2019;s CrossEntropyLoss.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2TokenizerFast.only_label_first_subword" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2TokenizerFast.only_label_first_subword"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>only_label_first_subword</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to only label the first subword, in case word labels are provided.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2TokenizerFast.tokenize_chinese_chars" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2TokenizerFast.tokenize_chinese_chars"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tokenize_chinese_chars</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see <a href="https://github.com/huggingface/transformers/issues/328" rel="nofollow">this issue</a>). strip_accents &#x2014; (<code>bool</code>, <em>optional</em>): Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for <code>lowercase</code> (as in the original LayoutLMv2).<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Construct a “fast” LayoutLMv2 tokenizer (backed by HuggingFace’s <em>tokenizers</em> library). 
Based on WordPiece.</p> <p>This tokenizer inherits from <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast">PreTrainedTokenizerFast</a> which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.LayoutLMv2TokenizerFast.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.LayoutLMv2TokenizerFast.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.LayoutLMv2TokenizerFast.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/layoutlmv2/tokenization_layoutlmv2_fast.py#L171" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">text<span class="opacity-60">: typing.Union[str, typing.List[str], typing.List[typing.List[str]]]</span></span> </span><span class="comma cursor-pointer"><span class="rounded 
hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">text_pair<span class="opacity-60">: typing.Union[typing.List[str], typing.List[typing.List[str]], NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">boxes<span class="opacity-60">: typing.Union[typing.List[typing.List[int]], typing.List[typing.List[typing.List[int]]]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">word_labels<span class="opacity-60">: typing.Union[typing.List[int], typing.List[typing.List[int]], NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">add_special_tokens<span class="opacity-60">: bool = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">padding<span class="opacity-60">: typing.Union[bool, str, transformers.file_utils.PaddingStrategy] = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">truncation<span class="opacity-60">: typing.Union[bool, str, transformers.tokenization_utils_base.TruncationStrategy] = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_length<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">stride<span class="opacity-60">: int = 0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_to_multiple_of<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_tensors<span class="opacity-60">: typing.Union[str, transformers.file_utils.TensorType, NoneType] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_token_type_ids<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_attention_mask<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_overflowing_tokens<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_special_tokens_mask<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_offsets_mapping<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-default"><span 
class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_length<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">verbose<span class="opacity-60">: bool = True</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2TokenizerFast.__call__.text" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2TokenizerFast.__call__.text"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>text</strong> (<code>str</code>, <code>List[str]</code>, <code>List[List[str]]</code>) &#x2014; The sequence or batch of sequences to be encoded. 
Each sequence can be a string, a list of strings (words of a single example or questions of a batch of examples) or a list of list of strings (batch of words).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2TokenizerFast.__call__.text_pair" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2TokenizerFast.__call__.text_pair"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>text_pair</strong> (<code>List[str]</code>, <code>List[List[str]]</code>) &#x2014; The sequence or batch of sequences to be encoded. Each sequence should be a list of strings (pretokenized string).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2TokenizerFast.__call__.boxes" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2TokenizerFast.__call__.boxes"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>boxes</strong> (<code>List[List[int]]</code>, <code>List[List[List[int]]]</code>) &#x2014; Word-level bounding boxes. 
Each bounding box should be normalized to be on a 0-1000 scale.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2TokenizerFast.__call__.word_labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2TokenizerFast.__call__.word_labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>word_labels</strong> (<code>List[int]</code>, <code>List[List[int]]</code>, <em>optional</em>) &#x2014; Word-level integer labels (for token classification tasks such as FUNSD, CORD).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2TokenizerFast.__call__.add_special_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2TokenizerFast.__call__.add_special_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>add_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to encode the sequences with the special tokens relative to their model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2TokenizerFast.__call__.padding" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2TokenizerFast.__call__.padding"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" 
viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>padding</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/file_utils#transformers.file_utils.PaddingStrategy">PaddingStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls padding. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest&apos;</code>: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided).</li> <li><code>&apos;max_length&apos;</code>: Pad to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided.</li> <li><code>False</code> or <code>&apos;do_not_pad&apos;</code> (default): No padding (i.e., can output a batch with sequences of different lengths).</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2TokenizerFast.__call__.truncation" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2TokenizerFast.__call__.truncation"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>truncation</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.tokenization_utils_base.TruncationStrategy">TruncationStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls truncation. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. 
This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_second&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>False</code> or <code>&apos;do_not_truncate&apos;</code> (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size).</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2TokenizerFast.__call__.max_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2TokenizerFast.__call__.max_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; Controls the maximum length to use by one of the truncation/padding parameters.</p> <p>If left unset or set to <code>None</code>, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. 
If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2TokenizerFast.__call__.stride" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2TokenizerFast.__call__.stride"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>stride</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If set to a number along with <code>max_length</code>, the overflowing tokens returned when <code>return_overflowing_tokens=True</code> will contain some tokens from the end of the truncated sequence returned to provide some overlap between truncated and overflowing sequences. The value of this argument defines the number of overlapping tokens.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2TokenizerFast.__call__.is_split_into_words" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2TokenizerFast.__call__.is_split_into_words"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>is_split_into_words</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the input is already pre-tokenized (e.g., split into words). If set to <code>True</code>, the tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace) which it will tokenize. 
This is useful for NER or token classification.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2TokenizerFast.__call__.pad_to_multiple_of" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2TokenizerFast.__call__.pad_to_multiple_of"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pad_to_multiple_of</strong> (<code>int</code>, <em>optional</em>) &#x2014; If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability &gt;= 7.5 (Volta).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2TokenizerFast.__call__.return_tensors" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2TokenizerFast.__call__.return_tensors"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_tensors</strong> (<code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>) &#x2014; If set, will return tensors instead of list of python integers. 
Acceptable values are:</p> <ul> <li><code>&apos;tf&apos;</code>: Return TensorFlow <code>tf.constant</code> objects.</li> <li><code>&apos;pt&apos;</code>: Return PyTorch <code>torch.Tensor</code> objects.</li> <li><code>&apos;np&apos;</code>: Return Numpy <code>np.ndarray</code> objects.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2TokenizerFast.__call__.add_special_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2TokenizerFast.__call__.add_special_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>add_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to encode the sequences with the special tokens relative to their model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2TokenizerFast.__call__.padding" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2TokenizerFast.__call__.padding"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>padding</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/file_utils#transformers.file_utils.PaddingStrategy">PaddingStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls padding. 
Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest&apos;</code>: Pad to the longest sequence in the batch (or no padding if only a single sequence is provided).</li> <li><code>&apos;max_length&apos;</code>: Pad to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided.</li> <li><code>False</code> or <code>&apos;do_not_pad&apos;</code> (default): No padding (i.e., can output a batch with sequences of different lengths).</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2TokenizerFast.__call__.truncation" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2TokenizerFast.__call__.truncation"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>truncation</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.tokenization_utils_base.TruncationStrategy">TruncationStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls truncation. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_second&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. 
This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>False</code> or <code>&apos;do_not_truncate&apos;</code> (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size).</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2TokenizerFast.__call__.max_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2TokenizerFast.__call__.max_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; Controls the maximum length to use by one of the truncation/padding parameters. If left unset or set to <code>None</code>, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. 
If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2TokenizerFast.__call__.stride" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2TokenizerFast.__call__.stride"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>stride</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If set to a number along with <code>max_length</code>, the overflowing tokens returned when <code>return_overflowing_tokens=True</code> will contain some tokens from the end of the truncated sequence returned to provide some overlap between truncated and overflowing sequences. The value of this argument defines the number of overlapping tokens.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2TokenizerFast.__call__.pad_to_multiple_of" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2TokenizerFast.__call__.pad_to_multiple_of"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pad_to_multiple_of</strong> (<code>int</code>, <em>optional</em>) &#x2014; If set will pad the sequence to a multiple of the provided value. 
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability &gt;= 7.5 (Volta).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2TokenizerFast.__call__.return_tensors" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2TokenizerFast.__call__.return_tensors"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_tensors</strong> (<code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>) &#x2014; If set, will return tensors instead of list of python integers. Acceptable values are:</p> <ul> <li><code>&apos;tf&apos;</code>: Return TensorFlow <code>tf.constant</code> objects.</li> <li><code>&apos;pt&apos;</code>: Return PyTorch <code>torch.Tensor</code> objects.</li> <li><code>&apos;np&apos;</code>: Return Numpy <code>np.ndarray</code> objects.</li> </ul><!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of sequences with word-level normalized bounding boxes and optional labels.</p></div></div> <h2 class="relative group"><a id="transformers.LayoutLMv2Processor" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2Processor"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>LayoutLMv2Processor </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.LayoutLMv2Processor"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 
rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">LayoutLMv2Processor</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.LayoutLMv2Processor" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.LayoutLMv2Processor"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/layoutlmv2/processing_layoutlmv2.py#L25" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2Processor.feature_extractor" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2Processor.feature_extractor"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" 
height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>feature_extractor</strong> (<code>LayoutLMv2FeatureExtractor</code>) &#x2014; An instance of <a href="/docs/transformers/pr_16143/en/model_doc/layoutlmv2#transformers.LayoutLMv2FeatureExtractor">LayoutLMv2FeatureExtractor</a>. The feature extractor is a required input.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2Processor.tokenizer" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2Processor.tokenizer"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tokenizer</strong> (<code>LayoutLMv2Tokenizer</code> or <code>LayoutLMv2TokenizerFast</code>) &#x2014; An instance of <a href="/docs/transformers/pr_16143/en/model_doc/layoutlmv2#transformers.LayoutLMv2Tokenizer">LayoutLMv2Tokenizer</a> or <a href="/docs/transformers/pr_16143/en/model_doc/layoutlmv2#transformers.LayoutLMv2TokenizerFast">LayoutLMv2TokenizerFast</a>. The tokenizer is a required input.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Constructs a LayoutLMv2 processor which combines a LayoutLMv2 feature extractor and a LayoutLMv2 tokenizer into a single processor.</p> <p><a href="/docs/transformers/pr_16143/en/model_doc/layoutlmv2#transformers.LayoutLMv2Processor">LayoutLMv2Processor</a> offers all the functionalities you need to prepare data for the model.</p> <p>It first uses <a href="/docs/transformers/pr_16143/en/model_doc/layoutlmv2#transformers.LayoutLMv2FeatureExtractor">LayoutLMv2FeatureExtractor</a> to resize document images to a fixed size, and optionally applies OCR to get words and normalized bounding boxes. 
These are then provided to <a href="/docs/transformers/pr_16143/en/model_doc/layoutlmv2#transformers.LayoutLMv2Tokenizer">LayoutLMv2Tokenizer</a> or <a href="/docs/transformers/pr_16143/en/model_doc/layoutlmv2#transformers.LayoutLMv2TokenizerFast">LayoutLMv2TokenizerFast</a>, which turns the words and bounding boxes into token-level <code>input_ids</code>, <code>attention_mask</code>, <code>token_type_ids</code>, <code>bbox</code>. Optionally, one can provide integer <code>word_labels</code>, which are turned into token-level <code>labels</code> for token classification tasks (such as FUNSD, CORD).</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.LayoutLMv2Processor.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.LayoutLMv2Processor.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.LayoutLMv2Processor.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/layoutlmv2/processing_layoutlmv2.py#L47" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma 
cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">images<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">text<span class="opacity-60">: typing.Union[str, typing.List[str], typing.List[typing.List[str]]] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">text_pair<span class="opacity-60">: typing.Union[typing.List[str], typing.List[typing.List[str]], NoneType] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">boxes<span class="opacity-60">: typing.Union[typing.List[typing.List[int]], typing.List[typing.List[typing.List[int]]]] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">word_labels<span class="opacity-60">: typing.Union[typing.List[int], typing.List[typing.List[int]], NoneType] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">add_special_tokens<span class="opacity-60">: bool = True</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">padding<span class="opacity-60">: typing.Union[bool, str, transformers.file_utils.PaddingStrategy] = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">truncation<span class="opacity-60">: typing.Union[bool, str, transformers.tokenization_utils_base.TruncationStrategy] = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_length<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">stride<span class="opacity-60">: int = 0</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_to_multiple_of<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_token_type_ids<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_attention_mask<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_overflowing_tokens<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_special_tokens_mask<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-default"><span 
class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_offsets_mapping<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_length<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">verbose<span class="opacity-60">: bool = True</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_tensors<span class="opacity-60">: typing.Union[str, transformers.file_utils.TensorType, NoneType] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>This method first forwards the <code>images</code> argument to <a href="/docs/transformers/pr_16143/en/model_doc/layoutlmv2#transformers.LayoutLMv2FeatureExtractor.__call__"><strong>call</strong>()</a>. In case <a href="/docs/transformers/pr_16143/en/model_doc/layoutlmv2#transformers.LayoutLMv2FeatureExtractor">LayoutLMv2FeatureExtractor</a> was initialized with <code>apply_ocr</code> set to <code>True</code>, it passes the obtained words and bounding boxes along with the additional arguments to <a href="/docs/transformers/pr_16143/en/model_doc/layoutlmv2#transformers.LayoutLMv2Tokenizer.__call__"><strong>call</strong>()</a> and returns the output, together with resized <code>images</code>. 
<p>In case <a href="/docs/transformers/pr_16143/en/model_doc/layoutlmv2#transformers.LayoutLMv2FeatureExtractor">LayoutLMv2FeatureExtractor</a> was initialized with <code>apply_ocr</code> set to <code>False</code>, it passes the words (<code>text</code>/<code>text_pair</code>) and <code>boxes</code> specified by the user along with the additional arguments to <a href="/docs/transformers/pr_16143/en/model_doc/layoutlmv2#transformers.LayoutLMv2Tokenizer.__call__"><strong>call</strong>()</a> and returns the output, together with resized <code>images</code>.</p> <p>Please refer to the docstring of the above two methods for more information.</p></div></div> <h2 class="relative group"><a id="transformers.LayoutLMv2Model" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2Model"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>LayoutLMv2Model </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.LayoutLMv2Model"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">LayoutLMv2Model</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.LayoutLMv2Model" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.LayoutLMv2Model"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 
84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/layoutlmv2/modeling_layoutlmv2.py#L708" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2Model.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2Model.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/layoutlmv2#transformers.LayoutLMv2Config">LayoutLMv2Config</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>The bare LayoutLMv2 Model transformer outputting raw hidden-states without any specific head on top. This model is a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> sub-class. 
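</p> <p>A minimal sketch of a forward pass with inputs prepared by the processor (the checkpoint name and image path are illustrative assumptions):</p> <pre><code class="language-python">import torch
from PIL import Image

from transformers import LayoutLMv2Model, LayoutLMv2Processor

processor = LayoutLMv2Processor.from_pretrained("microsoft/layoutlmv2-base-uncased")
model = LayoutLMv2Model.from_pretrained("microsoft/layoutlmv2-base-uncased")  # the visual backbone requires detectron2

image = Image.open("document.png").convert("RGB")
encoding = processor(image, return_tensors="pt")

with torch.no_grad():
    outputs = model(**encoding)

# hidden states cover the text tokens followed by the 7x7 grid of image patch tokens
last_hidden_state = outputs.last_hidden_state
</code></pre> <p>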
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.LayoutLMv2Model.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.LayoutLMv2Model.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.LayoutLMv2Model.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/layoutlmv2/modeling_layoutlmv2.py#L806" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: typing.Optional[torch.LongTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">bbox<span class="opacity-60">: typing.Optional[torch.LongTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black 
hover:text-white dark:hover:bg-white dark:hover:text-black">image<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60">: typing.Optional[torch.LongTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60">: typing.Optional[torch.LongTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.BaseModelOutput" >transformers.modeling_outputs.BaseModelOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2Model.forward.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2Model.forward.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 
11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/layoutlmv2#transformers.LayoutLMv2Tokenizer">LayoutLMv2Tokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.__call__()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2Model.forward.bbox" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2Model.forward.bbox"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>bbox</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length, 4)</code>, <em>optional</em>) &#x2014; Bounding boxes of each input sequence tokens. Selected in the range <code>[0, config.max_2d_position_embeddings-1]</code>. 
Each bounding box should be a normalized version in (x0, y0, x1, y1) format, where (x0, y0) corresponds to the position of the upper left corner in the bounding box, and (x1, y1) represents the position of the lower right corner.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2Model.forward.image" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2Model.forward.image"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>image</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_channels, height, width)</code> or <code>detectron.structures.ImageList</code> whose <code>tensors</code> is of shape <code>(batch_size, num_channels, height, width)</code>) &#x2014; Batch of document images.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2Model.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2Model.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2Model.forward.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2Model.forward.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2Model.forward.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2Model.forward.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2Model.forward.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2Model.forward.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2Model.forward.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2Model.forward.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2Model.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2Model.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2Model.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2Model.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2Model.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2Model.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.LayoutLMv2Model.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.BaseModelOutput" >transformers.modeling_outputs.BaseModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.BaseModelOutput" >transformers.modeling_outputs.BaseModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/layoutlmv2#transformers.LayoutLMv2Config" >LayoutLMv2Config</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) — Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when 
<code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/layoutlmv2#transformers.LayoutLMv2Model">LayoutLMv2Model</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> LayoutLMv2Processor, LayoutLMv2Model <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image <span class="hljs-meta">&gt;&gt;&gt; </span>processor = LayoutLMv2Processor.from_pretrained(<span class="hljs-string">&quot;microsoft/layoutlmv2-base-uncased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = LayoutLMv2Model.from_pretrained(<span class="hljs-string">&quot;microsoft/layoutlmv2-base-uncased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>image = Image.<span class="hljs-built_in">open</span>(<span class="hljs-string">&quot;name_of_your_document - can be a png file, pdf, etc.&quot;</span>).convert(<span class="hljs-string">&quot;RGB&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = processor(image, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**encoding) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a 
id="transformers.LayoutLMv2ForSequenceClassification" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2ForSequenceClassification"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>LayoutLMv2ForSequenceClassification </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.LayoutLMv2ForSequenceClassification"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">LayoutLMv2ForSequenceClassification</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.LayoutLMv2ForSequenceClassification" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.LayoutLMv2ForSequenceClassification"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" 
href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/layoutlmv2/modeling_layoutlmv2.py#L953" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2ForSequenceClassification.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2ForSequenceClassification.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/layoutlmv2#transformers.LayoutLMv2Config">LayoutLMv2Config</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>LayoutLMv2 Model with a sequence classification head on top (a linear layer on top of the concatenation of the final hidden state of the [CLS] token, average-pooled initial visual embeddings and average-pooled final visual embeddings, e.g. for document image classification tasks such as the <a href="https://www.cs.cmu.edu/~aharley/rvl-cdip/" rel="nofollow">RVL-CDIP</a> dataset.</p> <p>This model is a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.LayoutLMv2ForSequenceClassification.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.LayoutLMv2ForSequenceClassification.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.LayoutLMv2ForSequenceClassification.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/layoutlmv2/modeling_layoutlmv2.py#L967" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: typing.Optional[torch.LongTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">bbox<span class="opacity-60">: typing.Optional[torch.LongTensor] = None</span></span> </span><span 
class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">image<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60">: typing.Optional[torch.LongTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60">: typing.Optional[torch.LongTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60">: typing.Optional[torch.LongTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2ForSequenceClassification.forward.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2ForSequenceClassification.forward.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 
88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>batch_size, sequence_length</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/layoutlmv2#transformers.LayoutLMv2Tokenizer">LayoutLMv2Tokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.__call__()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2ForSequenceClassification.forward.bbox" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2ForSequenceClassification.forward.bbox"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>bbox</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length, 4)</code>, <em>optional</em>) &#x2014; Bounding boxes of each input sequence tokens. Selected in the range <code>[0, config.max_2d_position_embeddings-1]</code>. 
Each bounding box should be a normalized version in (x0, y0, x1, y1) format, where (x0, y0) corresponds to the position of the upper left corner in the bounding box, and (x1, y1) represents the position of the lower right corner.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2ForSequenceClassification.forward.image" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2ForSequenceClassification.forward.image"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>image</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_channels, height, width)</code> or <code>detectron.structures.ImageList</code> whose <code>tensors</code> is of shape <code>(batch_size, num_channels, height, width)</code>) &#x2014; Batch of document images.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2ForSequenceClassification.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2ForSequenceClassification.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>batch_size, sequence_length</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2ForSequenceClassification.forward.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2ForSequenceClassification.forward.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>batch_size, sequence_length</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2ForSequenceClassification.forward.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2ForSequenceClassification.forward.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>batch_size, sequence_length</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2ForSequenceClassification.forward.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2ForSequenceClassification.forward.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2ForSequenceClassification.forward.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2ForSequenceClassification.forward.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2ForSequenceClassification.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2ForSequenceClassification.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2ForSequenceClassification.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2ForSequenceClassification.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2ForSequenceClassification.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2ForSequenceClassification.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2ForSequenceClassification.forward.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2ForSequenceClassification.forward.labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. 
If <code>config.num_labels == 1</code>, a regression loss is computed (Mean-Square loss). If <code>config.num_labels &gt; 1</code>, a classification loss is computed (Cross-Entropy).<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.LayoutLMv2ForSequenceClassification.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/layoutlmv2#transformers.LayoutLMv2Config" >LayoutLMv2Config</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) — Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) — Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/layoutlmv2#transformers.LayoutLMv2ForSequenceClassification">LayoutLMv2ForSequenceClassification</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div>
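<p>At inference time, the predicted document class can be read off the returned <code>logits</code>. The snippet below is a minimal sketch of that step; it reuses the base checkpoint from the example that follows, whose sequence classification head is randomly initialized, so in practice you would load a checkpoint fine-tuned on your own document-classification labels.</p> <div class="code-block relative"> <pre>&gt;&gt;&gt; from transformers import LayoutLMv2Processor, LayoutLMv2ForSequenceClassification
&gt;&gt;&gt; from PIL import Image
&gt;&gt;&gt; import torch

&gt;&gt;&gt; # placeholder checkpoint: swap in a model fine-tuned for document classification
&gt;&gt;&gt; processor = LayoutLMv2Processor.from_pretrained("microsoft/layoutlmv2-base-uncased")
&gt;&gt;&gt; model = LayoutLMv2ForSequenceClassification.from_pretrained("microsoft/layoutlmv2-base-uncased")

&gt;&gt;&gt; image = Image.open("name_of_your_document - can be a png file, pdf, etc.").convert("RGB")
&gt;&gt;&gt; encoding = processor(image, return_tensors="pt")

&gt;&gt;&gt; with torch.no_grad():
...     outputs = model(**encoding)
&gt;&gt;&gt; predicted_class_idx = outputs.logits.argmax(-1).item()
&gt;&gt;&gt; predicted_label = model.config.id2label[predicted_class_idx]</pre></div> <p><code>model.config.id2label</code> maps the predicted index back to a human-readable label once the model has been fine-tuned with a label mapping in its configuration.</p>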
<p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> LayoutLMv2Processor, LayoutLMv2ForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>processor = LayoutLMv2Processor.from_pretrained(<span class="hljs-string">&quot;microsoft/layoutlmv2-base-uncased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = LayoutLMv2ForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;microsoft/layoutlmv2-base-uncased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>image = Image.<span class="hljs-built_in">open</span>(<span class="hljs-string">&quot;name_of_your_document - can be a png file, pdf, etc.&quot;</span>).convert(<span class="hljs-string">&quot;RGB&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = processor(image, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>sequence_label = torch.tensor([<span class="hljs-number">1</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**encoding, labels=sequence_label) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.LayoutLMv2ForTokenClassification" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2ForTokenClassification"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 
79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>LayoutLMv2ForTokenClassification </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.LayoutLMv2ForTokenClassification"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">LayoutLMv2ForTokenClassification</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.LayoutLMv2ForTokenClassification" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.LayoutLMv2ForTokenClassification"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/layoutlmv2/modeling_layoutlmv2.py#L1122" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li 
class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2ForTokenClassification.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2ForTokenClassification.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/layoutlmv2#transformers.LayoutLMv2Config">LayoutLMv2Config</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>LayoutLMv2 Model with a token classification head on top (a linear layer on top of the text part of the hidden states) e.g. for sequence labeling (information extraction) tasks such as <a href="https://guillaumejaume.github.io/FUNSD/" rel="nofollow">FUNSD</a>, <a href="https://rrc.cvc.uab.es/?ch=13" rel="nofollow">SROIE</a>, <a href="https://github.com/clovaai/cord" rel="nofollow">CORD</a> and <a href="https://github.com/applicaai/kleister-nda" rel="nofollow">Kleister-NDA</a>.</p> <p>This model is a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.LayoutLMv2ForTokenClassification.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.LayoutLMv2ForTokenClassification.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.LayoutLMv2ForTokenClassification.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/layoutlmv2/modeling_layoutlmv2.py#L1136" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: typing.Optional[torch.LongTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">bbox<span class="opacity-60">: typing.Optional[torch.LongTensor] = None</span></span> </span><span class="comma 
cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">image<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60">: typing.Optional[torch.LongTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60">: typing.Optional[torch.LongTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60">: typing.Optional[torch.LongTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2ForTokenClassification.forward.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2ForTokenClassification.forward.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 
11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>batch_size, sequence_length</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/layoutlmv2#transformers.LayoutLMv2Tokenizer">LayoutLMv2Tokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2ForTokenClassification.forward.bbox" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2ForTokenClassification.forward.bbox"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>bbox</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length, 4)</code>, <em>optional</em>) &#x2014; Bounding boxes of each input sequence tokens. Selected in the range <code>[0, config.max_2d_position_embeddings-1]</code>. 
Each bounding box should be a normalized version in (x0, y0, x1, y1) format, where (x0, y0) corresponds to the position of the upper left corner in the bounding box, and (x1, y1) represents the position of the lower right corner.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2ForTokenClassification.forward.image" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2ForTokenClassification.forward.image"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>image</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_channels, height, width)</code> or <code>detectron.structures.ImageList</code> whose <code>tensors</code> is of shape <code>(batch_size, num_channels, height, width)</code>) &#x2014; Batch of document images.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2ForTokenClassification.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2ForTokenClassification.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>batch_size, sequence_length</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2ForTokenClassification.forward.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2ForTokenClassification.forward.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>batch_size, sequence_length</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2ForTokenClassification.forward.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2ForTokenClassification.forward.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>batch_size, sequence_length</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2ForTokenClassification.forward.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2ForTokenClassification.forward.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2ForTokenClassification.forward.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2ForTokenClassification.forward.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2ForTokenClassification.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2ForTokenClassification.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2ForTokenClassification.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2ForTokenClassification.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2ForTokenClassification.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2ForTokenClassification.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2ForTokenClassification.forward.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2ForTokenClassification.forward.labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the token classification loss. 
Indices should be in <code>[0, ..., config.num_labels - 1]</code>.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.LayoutLMv2ForTokenClassification.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/layoutlmv2#transformers.LayoutLMv2Config" >LayoutLMv2Config</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) — Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.num_labels)</code>) — Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/layoutlmv2#transformers.LayoutLMv2ForTokenClassification">LayoutLMv2ForTokenClassification</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none 
transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> LayoutLMv2Processor, LayoutLMv2ForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image <span class="hljs-meta">&gt;&gt;&gt; </span>processor = LayoutLMv2Processor.from_pretrained(<span class="hljs-string">&quot;microsoft/layoutlmv2-base-uncased&quot;</span>, revision=<span class="hljs-string">&quot;no_ocr&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = LayoutLMv2ForTokenClassification.from_pretrained(<span class="hljs-string">&quot;microsoft/layoutlmv2-base-uncased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>image = Image.<span class="hljs-built_in">open</span>(<span class="hljs-string">&quot;name_of_your_document - can be a png file, pdf, etc.&quot;</span>).convert(<span class="hljs-string">&quot;RGB&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>words = [<span class="hljs-string">&quot;hello&quot;</span>, <span class="hljs-string">&quot;world&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>boxes = [[<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>, <span class="hljs-number">4</span>], [<span class="hljs-number">5</span>, <span class="hljs-number">6</span>, <span class="hljs-number">7</span>, <span class="hljs-number">8</span>]] <span class="hljs-comment"># make sure to normalize your bounding boxes</span> <span class="hljs-meta">&gt;&gt;&gt; </span>word_labels = [<span class="hljs-number">0</span>, <span class="hljs-number">1</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = processor(image, words, boxes=boxes, word_labels=word_labels, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**encoding) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.LayoutLMv2ForQuestionAnswering" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2ForQuestionAnswering"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" 
aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>LayoutLMv2ForQuestionAnswering </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.LayoutLMv2ForQuestionAnswering"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">LayoutLMv2ForQuestionAnswering</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.LayoutLMv2ForQuestionAnswering" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.LayoutLMv2ForQuestionAnswering"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/layoutlmv2/modeling_layoutlmv2.py#L1231" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black 
hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">has_visual_segment_embedding<span class="opacity-60"> = True</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2ForQuestionAnswering.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2ForQuestionAnswering.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/layoutlmv2#transformers.LayoutLMv2Config">LayoutLMv2Config</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>LayoutLMv2 Model with a span classification head on top for extractive question-answering tasks such as <a href="https://rrc.cvc.uab.es/?ch=17" rel="nofollow">DocVQA</a> (a linear layer on top of the text part of the hidden-states output to compute <code>span start logits</code> and <code>span end logits</code>).</p> <p>This model is a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.LayoutLMv2ForQuestionAnswering.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.LayoutLMv2ForQuestionAnswering.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.LayoutLMv2ForQuestionAnswering.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/layoutlmv2/modeling_layoutlmv2.py#L1245" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: typing.Optional[torch.LongTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">bbox<span class="opacity-60">: typing.Optional[torch.LongTensor] = None</span></span> </span><span class="comma 
cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">image<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60">: typing.Optional[torch.LongTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60">: typing.Optional[torch.LongTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">start_positions<span class="opacity-60">: typing.Optional[torch.LongTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">end_positions<span class="opacity-60">: typing.Optional[torch.LongTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.QuestionAnsweringModelOutput" >transformers.modeling_outputs.QuestionAnsweringModelOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2ForQuestionAnswering.forward.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2ForQuestionAnswering.forward.input_ids"><span><svg 
class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>batch_size, sequence_length</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/layoutlmv2#transformers.LayoutLMv2Tokenizer">LayoutLMv2Tokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2ForQuestionAnswering.forward.bbox" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2ForQuestionAnswering.forward.bbox"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>bbox</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length, 4)</code>, <em>optional</em>) &#x2014; Bounding boxes of each input sequence tokens. Selected in the range <code>[0, config.max_2d_position_embeddings-1]</code>. 
Each bounding box should be a normalized version in (x0, y0, x1, y1) format, where (x0, y0) corresponds to the position of the upper left corner in the bounding box, and (x1, y1) represents the position of the lower right corner.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2ForQuestionAnswering.forward.image" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2ForQuestionAnswering.forward.image"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>image</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_channels, height, width)</code> or <code>detectron.structures.ImageList</code> whose <code>tensors</code> is of shape <code>(batch_size, num_channels, height, width)</code>) &#x2014; Batch of document images.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2ForQuestionAnswering.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2ForQuestionAnswering.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>batch_size, sequence_length</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2ForQuestionAnswering.forward.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2ForQuestionAnswering.forward.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>batch_size, sequence_length</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2ForQuestionAnswering.forward.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2ForQuestionAnswering.forward.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>batch_size, sequence_length</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2ForQuestionAnswering.forward.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2ForQuestionAnswering.forward.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2ForQuestionAnswering.forward.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2ForQuestionAnswering.forward.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2ForQuestionAnswering.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2ForQuestionAnswering.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2ForQuestionAnswering.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2ForQuestionAnswering.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2ForQuestionAnswering.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2ForQuestionAnswering.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2ForQuestionAnswering.forward.start_positions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2ForQuestionAnswering.forward.start_positions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>start_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). 
Position outside of the sequence are not taken into account for computing the loss.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LayoutLMv2ForQuestionAnswering.forward.end_positions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LayoutLMv2ForQuestionAnswering.forward.end_positions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>end_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). Position outside of the sequence are not taken into account for computing the loss.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.LayoutLMv2ForQuestionAnswering.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.QuestionAnsweringModelOutput" >transformers.modeling_outputs.QuestionAnsweringModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.QuestionAnsweringModelOutput" >transformers.modeling_outputs.QuestionAnsweringModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/layoutlmv2#transformers.LayoutLMv2Config" >LayoutLMv2Config</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) — Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.</p> </li> <li> <p><strong>start_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) — Span-start scores (before SoftMax).</p> </li> <li> <p><strong>end_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) — Span-end scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> 
(<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/layoutlmv2#transformers.LayoutLMv2ForQuestionAnswering">LayoutLMv2ForQuestionAnswering</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> LayoutLMv2Processor, LayoutLMv2ForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>processor = LayoutLMv2Processor.from_pretrained(<span class="hljs-string">&quot;microsoft/layoutlmv2-base-uncased&quot;</span>) <span 
class="hljs-meta">&gt;&gt;&gt; </span>model = LayoutLMv2ForQuestionAnswering.from_pretrained(<span class="hljs-string">&quot;microsoft/layoutlmv2-base-uncased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>image = Image.<span class="hljs-built_in">open</span>(<span class="hljs-string">&quot;name_of_your_document - can be a png file, pdf, etc.&quot;</span>).convert(<span class="hljs-string">&quot;RGB&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>question = <span class="hljs-string">&quot;what&#x27;s his name?&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = processor(image, question, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>start_positions = torch.tensor([<span class="hljs-number">1</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>end_positions = torch.tensor([<span class="hljs-number">3</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**encoding, start_positions=start_positions, end_positions=end_positions) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>start_scores = outputs.start_logits <span class="hljs-meta">&gt;&gt;&gt; </span>end_scores = outputs.end_logits<!-- HTML_TAG_END --></pre></div></div></div> <script type="module" data-hydrate="1tnmaa8"> import { start } from "/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"; start({ target: document.querySelector('[data-hydrate="1tnmaa8"]').parentNode, paths: {"base":"/docs/transformers/pr_16143/en","assets":"/docs/transformers/pr_16143/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"), import("/docs/transformers/pr_16143/en/_app/pages/model_doc/layoutlmv2.mdx-fb521e03.js") ], params: {} } }); </script>
201
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en
hf_public_repos/doc-build-dev/transformers/pr_16143/en/model_doc/speech-encoder-decoder.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;speech-encoder-decoder-models&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;transformers.SpeechEncoderDecoderConfig&quot;,&quot;title&quot;:&quot;SpeechEncoderDecoderConfig&quot;},{&quot;local&quot;:&quot;transformers.SpeechEncoderDecoderModel&quot;,&quot;title&quot;:&quot;SpeechEncoderDecoderModel&quot;},{&quot;local&quot;:&quot;transformers.FlaxSpeechEncoderDecoderModel&quot;,&quot;title&quot;:&quot;FlaxSpeechEncoderDecoderModel&quot;}],&quot;title&quot;:&quot;Speech Encoder Decoder Models&quot;}" data-svelte="svelte-1phssyn"> <link rel="stylesheet" href="/docs/transformers/pr_16143/en/_app/assets/pages/__layout.svelte-a5c8879b.css"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/vendor-4833417e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/paths-4b3c6e7e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/model_doc/speech-encoder-decoder.mdx-a4b010d9.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Tip-fffd6df1.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Docstring-4f315ed9.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/IconCopyLink-4b81c553.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CodeBlock-6a3d1b46.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CopyButton-dacfbfaf.js"> <h1 class="relative group"><a id="speech-encoder-decoder-models" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#speech-encoder-decoder-models"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Speech Encoder Decoder Models </span></h1> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/speech-encoder-decoder#transformers.SpeechEncoderDecoderModel">SpeechEncoderDecoderModel</a> can be used to initialize a speech-sequence-to-text-sequence model with any pretrained speech autoencoding model as the encoder (<em>e.g.</em> <a href="wav2vec2">Wav2Vec2</a>, <a href="hubert">Hubert</a>) and any pretrained autoregressive model as the decoder.</p> <p>The effectiveness of initializing speech-sequence-to-text-sequence models with pretrained checkpoints for speech recognition and speech translation has <em>e.g.</em> been shown in <a href="https://arxiv.org/abs/2104.06678" rel="nofollow">Large-Scale 
Self- and Semi-Supervised Learning for Speech Translation</a> by Changhan Wang, Anne Wu, Juan Pino, Alexei Baevski, Michael Auli, Alexis Conneau.</p> <p>An example of how to use a <a href="/docs/transformers/pr_16143/en/model_doc/speech-encoder-decoder#transformers.SpeechEncoderDecoderModel">SpeechEncoderDecoderModel</a> for inference can be seen in <a href="speech_to_text_2">Speech2Text2</a>.</p> <h2 class="relative group"><a id="transformers.SpeechEncoderDecoderConfig" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SpeechEncoderDecoderConfig"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>SpeechEncoderDecoderConfig </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.SpeechEncoderDecoderConfig"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">SpeechEncoderDecoderConfig</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.SpeechEncoderDecoderConfig" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.SpeechEncoderDecoderConfig"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 
79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/speech_encoder_decoder/configuration_speech_encoder_decoder.py#L27" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SpeechEncoderDecoderConfig.kwargs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SpeechEncoderDecoderConfig.kwargs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>kwargs</strong> (<em>optional</em>) &#x2014; Dictionary of keyword arguments. Notably:</p> <ul> <li><strong>encoder</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a>, <em>optional</em>) &#x2014; An instance of a configuration object that defines the encoder config.</li> <li><strong>decoder</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a>, <em>optional</em>) &#x2014; An instance of a configuration object that defines the decoder config.</li> </ul><!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p><a href="/docs/transformers/pr_16143/en/model_doc/speech-encoder-decoder#transformers.SpeechEncoderDecoderConfig">SpeechEncoderDecoderConfig</a> is the configuration class to store the configuration of a <a href="/docs/transformers/pr_16143/en/model_doc/speech-encoder-decoder#transformers.SpeechEncoderDecoderModel">SpeechEncoderDecoderModel</a>. 
It is used to instantiate an Encoder Decoder model according to the specified arguments, defining the encoder and decoder configs.</p> <p>Configuration objects inherit from <a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a> and can be used to control the model outputs. Read the documentation from <a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a> for more information.</p> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BertConfig, Wav2Vec2Config, SpeechEncoderDecoderConfig, SpeechEncoderDecoderModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a Wav2Vec2 &amp; BERT style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config_encoder = Wav2Vec2Config() <span class="hljs-meta">&gt;&gt;&gt; </span>config_decoder = BertConfig() <span class="hljs-meta">&gt;&gt;&gt; </span>config = SpeechEncoderDecoderConfig.from_encoder_decoder_configs(config_encoder, config_decoder) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a Wav2Vec2Bert model from a Wav2Vec2 &amp; bert-base-uncased style configurations</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = SpeechEncoderDecoderModel(config=config) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config_encoder = model.config.encoder <span class="hljs-meta">&gt;&gt;&gt; </span>config_decoder = model.config.decoder <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># set decoder config to causal lm</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config_decoder.is_decoder = <span class="hljs-literal">True</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config_decoder.add_cross_attention = <span class="hljs-literal">True</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Saving the model, including its configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model.save_pretrained(<span class="hljs-string">&quot;my-model&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; 
</span><span class="hljs-comment"># loading model and config from pretrained folder</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoder_decoder_config = SpeechEncoderDecoderConfig.from_pretrained(<span class="hljs-string">&quot;my-model&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = SpeechEncoderDecoderModel.from_pretrained(<span class="hljs-string">&quot;my-model&quot;</span>, config=encoder_decoder_config)<!-- HTML_TAG_END --></pre></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.SpeechEncoderDecoderConfig.from_encoder_decoder_configs"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>from_encoder_decoder_configs</span></h4><!-- HTML_TAG_END --> <a id="transformers.SpeechEncoderDecoderConfig.from_encoder_decoder_configs" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.SpeechEncoderDecoderConfig.from_encoder_decoder_configs"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/speech_encoder_decoder/configuration_speech_encoder_decoder.py#L92" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span 
class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_config<span class="opacity-60">: PretrainedConfig</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_config<span class="opacity-60">: PretrainedConfig</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/model_doc/speech-encoder-decoder#transformers.SpeechEncoderDecoderConfig" >SpeechEncoderDecoderConfig</a></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.SpeechEncoderDecoderConfig.from_encoder_decoder_configs.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/model_doc/speech-encoder-decoder#transformers.SpeechEncoderDecoderConfig" >SpeechEncoderDecoderConfig</a></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>An instance of a configuration object</p> <!-- HTML_TAG_END --></p></div></div> <p>Instantiate a <a href="/docs/transformers/pr_16143/en/model_doc/speech-encoder-decoder#transformers.SpeechEncoderDecoderConfig">SpeechEncoderDecoderConfig</a> (or a derived class) from a pre-trained encoder model configuration and decoder model configuration.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.SpeechEncoderDecoderConfig.to_dict"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>to_dict</span></h4><!-- HTML_TAG_END --> <a id="transformers.SpeechEncoderDecoderConfig.to_dict" class="header-link invisible with-hover:group-hover:visible pr-2" 
href="#transformers.SpeechEncoderDecoderConfig.to_dict"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/speech_encoder_decoder/configuration_speech_encoder_decoder.py#L109" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>Dict[str, any]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.SpeechEncoderDecoderConfig.to_dict.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>Dict[str, any]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>Dictionary of all the attributes that make up this configuration instance,</p> <!-- HTML_TAG_END --></p></div></div> <p>Serializes this instance to a Python dictionary. 
Override the default <em>to_dict()</em> from <em>PretrainedConfig</em>.</p></div></div> <h2 class="relative group"><a id="transformers.SpeechEncoderDecoderModel" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SpeechEncoderDecoderModel"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>SpeechEncoderDecoderModel </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.SpeechEncoderDecoderModel"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">SpeechEncoderDecoderModel</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.SpeechEncoderDecoderModel" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.SpeechEncoderDecoderModel"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 
!no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/speech_encoder_decoder/modeling_speech_encoder_decoder.py#L174" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60">: typing.Optional[transformers.configuration_utils.PretrainedConfig] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder<span class="opacity-60">: typing.Optional[transformers.modeling_utils.PreTrainedModel] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder<span class="opacity-60">: typing.Optional[transformers.modeling_utils.PreTrainedModel] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SpeechEncoderDecoderModel.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SpeechEncoderDecoderModel.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/speech-encoder-decoder#transformers.SpeechEncoderDecoderConfig">SpeechEncoderDecoderConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>This class can be used to initialize a speech-sequence-to-text-sequence model with any pretrained speech autoencoding model as the encoder and any pretrained text autoregressive model as the decoder. The encoder is loaded via the <code>from_pretrained()</code> function and the decoder is loaded via the <code>from_pretrained()</code> function. Cross-attention layers are automatically added to the decoder and should be fine-tuned on a downstream generative task, like summarization.</p>
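<p>For illustration, the following is a minimal sketch of combining a pretrained speech encoder with a pretrained text decoder and then saving and reloading the combined model; the checkpoint names and the local output directory are only examples, not recommendations:</p> <div class="code-block relative"><pre><!-- HTML_TAG_START -->from transformers import SpeechEncoderDecoderModel

# combine any pretrained speech encoder with any pretrained autoregressive text decoder;
# "facebook/wav2vec2-base-960h" and "bert-base-uncased" are used here purely as examples
model = SpeechEncoderDecoderModel.from_encoder_decoder_pretrained(
    "facebook/wav2vec2-base-960h", "bert-base-uncased"
)

# after training/fine-tuning, the model is saved and reloaded like any other model
model.save_pretrained("./my-speech-to-text-model")  # hypothetical output directory
model = SpeechEncoderDecoderModel.from_pretrained("./my-speech-to-text-model")<!-- HTML_TAG_END --></pre></div>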
<p>The effectiveness of initializing sequence-to-sequence models with pretrained checkpoints for sequence generation tasks was shown in <a href="https://arxiv.org/abs/1907.12461" rel="nofollow">Leveraging Pre-trained Checkpoints for Sequence Generation Tasks</a> by Sascha Rothe, Shashi Narayan, Aliaksei Severyn.</p> <p>Additionally, in <a href="https://arxiv.org/abs/2104.06678" rel="nofollow">Large-Scale Self- and Semi-Supervised Learning for Speech Translation</a> it is shown how leveraging large pretrained speech models for speech translation yields a significant performance improvement.</p> <p>After such a Speech-Encoder Decoder model has been trained/fine-tuned, it can be saved/loaded just like any other model (see the examples for more information).</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads etc.)</p> <p>This model is also a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and behavior.</p> <p><a href="/docs/transformers/pr_16143/en/model_doc/speech-encoder-decoder#transformers.SpeechEncoderDecoderModel">SpeechEncoderDecoderModel</a> is a generic model class that will be instantiated as a transformer architecture with one of the base model classes of the library as encoder and another one as decoder when created with the <code>AutoModel.from_pretrained()</code> class method for the encoder and the <code>AutoModelForCausalLM.from_pretrained()</code> class method for the decoder.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.SpeechEncoderDecoderModel.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 
13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.SpeechEncoderDecoderModel.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.SpeechEncoderDecoderModel.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/speech_encoder_decoder/modeling_speech_encoder_decoder.py#L442" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_outputs<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_cache<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma 
cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_values<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_features<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.Seq2SeqLMOutput" >transformers.modeling_outputs.Seq2SeqLMOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SpeechEncoderDecoderModel.forward.inputs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SpeechEncoderDecoderModel.forward.inputs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code> or <code>(batch_size, sequence_length, feature_dim)</code>, <em>optional</em>) &#x2014; Float values of input raw speech waveform or speech features. Values can be obtained by loading a <em>.flac</em> or <em>.wav</em> audio file into an array of type <em>List[float]</em> or a <em>numpy.ndarray</em>, <em>e.g.</em> via the soundfile library (<em>pip install soundfile</em>). 
To prepare the array into <em>inputs</em>, either the <a href="/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor">Wav2Vec2Processor</a> or <a href="/docs/transformers/pr_16143/en/model_doc/speech_to_text#transformers.Speech2TextProcessor">Speech2TextProcessor</a> should be used for padding and conversion into a tensor of type <em>torch.FloatTensor</em>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SpeechEncoderDecoderModel.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SpeechEncoderDecoderModel.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SpeechEncoderDecoderModel.forward.decoder_input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SpeechEncoderDecoderModel.forward.decoder_input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>. 
See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a></p> <p>If <code>past_key_values</code> is used, optionally only the last <code>decoder_input_ids</code> have to be input (see <code>past_key_values</code>).</p> <p>For training, <code>decoder_input_ids</code> are automatically created by the model by shifting the <code>labels</code> to the right, replacing -100 by the <code>pad_token_id</code> and prepending them with the <code>decoder_start_token_id</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SpeechEncoderDecoderModel.forward.decoder_attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SpeechEncoderDecoderModel.forward.decoder_attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_attention_mask</strong> (<code>torch.BoolTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. 
Causal mask will also be used by default.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SpeechEncoderDecoderModel.forward.encoder_outputs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SpeechEncoderDecoderModel.forward.encoder_outputs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_outputs</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>) &#x2014; This tuple must consist of (<code>last_hidden_state</code>, <em>optional</em>: <code>hidden_states</code>, <em>optional</em>: <code>attentions</code>) <code>last_hidden_state</code> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) is a tensor of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SpeechEncoderDecoderModel.forward.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SpeechEncoderDecoderModel.forward.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code> of length <code>config.n_layers</code> with each tuple having 4 tensors of shape <code>(batch_size, num_heads, sequence_length - 1, embed_size_per_head)</code>) &#x2014; Contains precomputed key and value hidden states of the attention blocks. 
Can be used to speed up decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SpeechEncoderDecoderModel.forward.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SpeechEncoderDecoderModel.forward.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SpeechEncoderDecoderModel.forward.decoder_inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SpeechEncoderDecoderModel.forward.decoder_inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, target_sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>decoder_input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>decoder_input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SpeechEncoderDecoderModel.forward.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SpeechEncoderDecoderModel.forward.labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss for the decoder. 
Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SpeechEncoderDecoderModel.forward.use_cache" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SpeechEncoderDecoderModel.forward.use_cache"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SpeechEncoderDecoderModel.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SpeechEncoderDecoderModel.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SpeechEncoderDecoderModel.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SpeechEncoderDecoderModel.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SpeechEncoderDecoderModel.forward.input_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SpeechEncoderDecoderModel.forward.input_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Float values of input raw speech waveform. Values can be obtained by loading a <em>.flac</em> or <em>.wav</em> audio file into an array of type <em>List[float]</em> or a <em>numpy.ndarray</em>, <em>e.g.</em> via the soundfile library (<em>pip install soundfile</em>). To prepare the array into <em>input_values</em>, the <a href="/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor">Wav2Vec2Processor</a> should be used for padding and conversion into a tensor of type <em>torch.FloatTensor</em>. 
See <a href="/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor.__call__">Wav2Vec2Processor.<strong>call</strong>()</a> for details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SpeechEncoderDecoderModel.forward.input_features" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SpeechEncoderDecoderModel.forward.input_features"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_features</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length, feature_size)</code>, <em>optional</em>) &#x2014; Float values of fbank features extracted from the raw speech waveform. Raw speech waveform can be obtained by loading a <code>.flac</code> or <code>.wav</code> audio file into an array of type <code>List[float]</code> or a <code>numpy.ndarray</code>, <em>e.g.</em> via the soundfile library (<code>pip install soundfile</code>). To prepare the array into <code>input_features</code>, the <a href="/docs/transformers/pr_16143/en/model_doc/speech_to_text#transformers.Speech2TextTokenizer">Speech2TextTokenizer</a> should be used for extracting the fbank features, padding and conversion into a tensor of type <code>torch.FloatTensor</code>. 
See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__"><strong>call</strong>()</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SpeechEncoderDecoderModel.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SpeechEncoderDecoderModel.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, the model will return a <code>Seq2SeqLMOutput</code> instead of a plain tuple. kwargs &#x2014; (<em>optional</em>) Remaining dictionary of keyword arguments. Keyword arguments come in two flavors:</p> <ul> <li>Without a prefix which will be input as <code>**encoder_kwargs</code> for the encoder forward function.</li> <li>With a <em>decoder_</em> prefix which will be input as <code>**decoder_kwargs</code> for the decoder forward function.</li> </ul><!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.SpeechEncoderDecoderModel.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.Seq2SeqLMOutput" >transformers.modeling_outputs.Seq2SeqLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.Seq2SeqLMOutput" >transformers.modeling_outputs.Seq2SeqLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/speech-encoder-decoder#transformers.SpeechEncoderDecoderConfig" >SpeechEncoderDecoderConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) — Language modeling loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) — Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> 
<p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) — Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder’s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) — Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the 
self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/speech-encoder-decoder#transformers.SpeechEncoderDecoderModel">SpeechEncoderDecoderModel</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> SpeechEncoderDecoderModel, Wav2Vec2Processor <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>processor = Wav2Vec2Processor.from_pretrained(<span class="hljs-string">&quot;facebook/wav2vec2-xls-r-300m-en-to-15&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = SpeechEncoderDecoderModel.from_pretrained(<span class="hljs-string">&quot;facebook/wav2vec2-xls-r-300m-en-to-15&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>ds = load_dataset(<span class="hljs-string">&quot;hf-internal-testing/librispeech_asr_dummy&quot;</span>, <span class="hljs-string">&quot;clean&quot;</span>, split=<span class="hljs-string">&quot;validation&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_values = processor(ds[<span class="hljs-number">0</span>][<span class="hljs-string">&quot;audio&quot;</span>][<span class="hljs-string">&quot;array&quot;</span>], return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_values <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Inference: Translate English speech to German</span> <span class="hljs-meta">&gt;&gt;&gt; </span>generated = 
model.generate(input_values) <span class="hljs-meta">&gt;&gt;&gt; </span>decoded = processor.batch_decode(generated, skip_special_tokens=<span class="hljs-literal">True</span>)[<span class="hljs-number">0</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>decoded <span class="hljs-string">&#x27;Mr. Quilter ist der Apostel der Mittelschicht und wir freuen uns, sein Evangelium willkommen heißen zu können.&#x27;</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Training: Train model on English transcription</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">with</span> processor.as_target_processor(): <span class="hljs-meta">... </span> labels = processor(ds[<span class="hljs-number">0</span>][<span class="hljs-string">&quot;text&quot;</span>], return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span>loss = model(input_values, labels=labels).loss <span class="hljs-meta">&gt;&gt;&gt; </span>loss.backward()<!-- HTML_TAG_END --></pre></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.SpeechEncoderDecoderModel.from_encoder_decoder_pretrained"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>from_encoder_decoder_pretrained</span></h4><!-- HTML_TAG_END --> <a id="transformers.SpeechEncoderDecoderModel.from_encoder_decoder_pretrained" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.SpeechEncoderDecoderModel.from_encoder_decoder_pretrained"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 
56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/speech_encoder_decoder/modeling_speech_encoder_decoder.py#L286" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_pretrained_model_name_or_path<span class="opacity-60">: str = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_pretrained_model_name_or_path<span class="opacity-60">: str = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*model_args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SpeechEncoderDecoderModel.from_encoder_decoder_pretrained.encoder_pretrained_model_name_or_path" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SpeechEncoderDecoderModel.from_encoder_decoder_pretrained.encoder_pretrained_model_name_or_path"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_pretrained_model_name_or_path</strong> (<code>str</code>, <em>optional</em>) &#x2014; Information necessary to initiate the encoder. Can be either:</p> <ul> <li>A string, the <em>model id</em> of a pretrained model hosted inside a model repo on huggingface.co. 
Valid model ids can be located at the root-level, like <code>bert-base-uncased</code>, or namespaced under a user or organization name, like <code>dbmdz/bert-base-german-cased</code>.</li> <li>A path to a <em>directory</em> containing model weights saved using <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.save_pretrained">save_pretrained()</a>, e.g., <code>./my_model_directory/</code>.</li> <li>A path or url to a <em>tensorflow index checkpoint file</em> (e.g, <code>./tf_model/model.ckpt.index</code>). In this case, <code>from_tf</code> should be set to <code>True</code> and a configuration object should be provided as <code>config</code> argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SpeechEncoderDecoderModel.from_encoder_decoder_pretrained.decoder_pretrained_model_name_or_path" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SpeechEncoderDecoderModel.from_encoder_decoder_pretrained.decoder_pretrained_model_name_or_path"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_pretrained_model_name_or_path</strong> (<code>str</code>, <em>optional</em>, defaults to <code>None</code>) &#x2014; Information necessary to initiate the decoder. Can be either:</p> <ul> <li>A string, the <em>model id</em> of a pretrained model hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like <code>bert-base-uncased</code>, or namespaced under a user or organization name, like <code>dbmdz/bert-base-german-cased</code>.</li> <li>A path to a <em>directory</em> containing model weights saved using <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.save_pretrained">save_pretrained()</a>, e.g., <code>./my_model_directory/</code>.</li> <li>A path or url to a <em>tensorflow index checkpoint file</em> (e.g, <code>./tf_model/model.ckpt.index</code>). In this case, <code>from_tf</code> should be set to <code>True</code> and a configuration object should be provided as <code>config</code> argument. 
This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SpeechEncoderDecoderModel.from_encoder_decoder_pretrained.model_args" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SpeechEncoderDecoderModel.from_encoder_decoder_pretrained.model_args"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>model_args</strong> (remaining positional arguments, <em>optional</em>) &#x2014; All remaining positional arguments will be passed to the underlying model&#x2019;s <code>__init__</code> method.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SpeechEncoderDecoderModel.from_encoder_decoder_pretrained.kwargs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SpeechEncoderDecoderModel.from_encoder_decoder_pretrained.kwargs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>kwargs</strong> (remaining dictionary of keyword arguments, <em>optional</em>) &#x2014; Can be used to update the configuration object (after it has been loaded) and initiate the model (e.g., <code>output_attentions=True</code>).</p> <ul> <li>To update the encoder configuration, use the prefix <em>encoder_</em> for each configuration parameter.</li> <li>To update the decoder configuration, use the prefix <em>decoder_</em> for each configuration parameter.</li> <li>To update the parent model configuration, do not use a prefix for each configuration parameter.</li> </ul> <p>Behaves differently depending on
whether a <code>config</code> is provided or automatically loaded.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Instantiate an encoder and a decoder from one or two base classes of the library from pretrained model checkpoints.</p> <p>The model is set in evaluation mode by default using <code>model.eval()</code> (Dropout modules are deactivated). To train the model, you need to first set it back in training mode with <code>model.train()</code>.</p> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> SpeechEncoderDecoderModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># initialize a wav2vec2bert from a pretrained Wav2Vec2 and a pretrained BERT model. Note that the cross-attention layers will be randomly initialized</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = SpeechEncoderDecoderModel.from_encoder_decoder_pretrained( <span class="hljs-meta">... </span> <span class="hljs-string">&quot;facebook/wav2vec2-base-960h&quot;</span>, <span class="hljs-string">&quot;bert-base-uncased&quot;</span> <span class="hljs-meta">... 
</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># saving model after fine-tuning</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model.save_pretrained(<span class="hljs-string">&quot;./wav2vec2bert&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># load fine-tuned model</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = SpeechEncoderDecoderModel.from_pretrained(<span class="hljs-string">&quot;./wav2vec2bert&quot;</span>)<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.FlaxSpeechEncoderDecoderModel" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxSpeechEncoderDecoderModel"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>FlaxSpeechEncoderDecoderModel </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxSpeechEncoderDecoderModel"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">FlaxSpeechEncoderDecoderModel</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.FlaxSpeechEncoderDecoderModel" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxSpeechEncoderDecoderModel"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 
1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/speech_encoder_decoder/modeling_flax_speech_encoder_decoder.py#L321" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60">: SpeechEncoderDecoderConfig</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_shape<span class="opacity-60">: typing.Optional[typing.Tuple] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">seed<span class="opacity-60">: int = 0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dtype<span class="opacity-60">: dtype = &lt;class &#39;jax._src.numpy.lax_numpy.float32&#39;&gt;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxSpeechEncoderDecoderModel.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxSpeechEncoderDecoderModel.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a 
href="/docs/transformers/pr_16143/en/model_doc/speech-encoder-decoder#transformers.SpeechEncoderDecoderConfig">SpeechEncoderDecoderConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxSpeechEncoderDecoderModel.dtype" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxSpeechEncoderDecoderModel.dtype"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>dtype</strong> (<code>jax.numpy.dtype</code>, <em>optional</em>, defaults to <code>jax.numpy.float32</code>) &#x2014; The data type of the computation. Can be one of <code>jax.numpy.float32</code>, <code>jax.numpy.float16</code> (on GPUs) and <code>jax.numpy.bfloat16</code> (on TPUs).</p> <p>This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If specified, all the computation will be performed with the given <code>dtype</code>.</p> <p><strong>Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.</strong></p> <p>If you wish to change the dtype of the model parameters, see <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.to_fp16">to_fp16()</a> and <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.to_bf16">to_bf16()</a>.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>This class can be used to initialize a speech-sequence-to-text-sequence model with any pretrained speech autoencoding model as the encoder and any pretrained text autoregressive model as the decoder. The encoder is loaded via the <code>from_pretrained()</code> function and the decoder is loaded via the <code>from_pretrained()</code> function. Cross-attention layers are automatically added to the decoder and should be fine-tuned on a downstream generative task, like summarization.</p> <p>The effectiveness of initializing sequence-to-sequence models with pretrained checkpoints for sequence generation tasks was shown in <a href="https://arxiv.org/abs/1907.12461" rel="nofollow">Leveraging Pre-trained Checkpoints for Sequence Generation Tasks</a> by Sascha Rothe, Shashi Narayan and Aliaksei Severyn.</p> <p>Additionally, in <a href="https://arxiv.org/abs/2104.06678" rel="nofollow">Large-Scale Self- and Semi-Supervised Learning for Speech Translation</a> it is shown how leveraging large pretrained speech models for speech translation yields a significant performance improvement.</p> <p>After such a Speech-Encoder-Decoder model has been trained/fine-tuned, it can be saved/loaded just like any other model (see the examples for more information).</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel">FlaxPreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads etc.)</p> <p>This model is also a Flax Linen <a href="https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html" rel="nofollow">flax.nn.Module</a> subclass. Use it as a regular Flax Module and refer to the Flax documentation for all matters related to general usage and behavior.</p> <p><a href="/docs/transformers/pr_16143/en/model_doc/speech-encoder-decoder#transformers.FlaxSpeechEncoderDecoderModel">FlaxSpeechEncoderDecoderModel</a> is a generic model class that will be instantiated as a transformer architecture with the module (flax.nn.Module) of one of the base model classes of the library as encoder module and another one as decoder module when created with the <code>FlaxAutoModel.from_pretrained()</code> class method for the encoder and the <code>FlaxAutoModelForCausalLM.from_pretrained()</code> class method for the decoder.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxSpeechEncoderDecoderModel.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.FlaxSpeechEncoderDecoderModel.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxSpeechEncoderDecoderModel.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" 
role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/speech_encoder_decoder/modeling_flax_speech_encoder_decoder.py#L632" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs<span class="opacity-60">: ndarray</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60">: typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_input_ids<span class="opacity-60">: typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_attention_mask<span class="opacity-60">: typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_position_ids<span class="opacity-60">: typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">train<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">freeze_feature_encoder<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">params<span class="opacity-60">: dict = 
None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dropout_rng<span class="opacity-60">: PRNGKey = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput" >transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxSpeechEncoderDecoderModel.__call__.inputs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxSpeechEncoderDecoderModel.__call__.inputs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code> or <code>(batch_size, sequence_length, feature_dim)</code>, <em>optional</em>) &#x2014; Float values of input raw speech waveform or speech features. Values can be obtained by loading a <em>.flac</em> or <em>.wav</em> audio file into an array of type <em>List[float]</em> or a <em>numpy.ndarray</em>, <em>e.g.</em> via the soundfile library (<em>pip install soundfile</em>). 
To prepare the array into <em>inputs</em>, either the <a href="/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor">Wav2Vec2Processor</a> or <a href="/docs/transformers/pr_16143/en/model_doc/speech_to_text#transformers.Speech2TextProcessor">Speech2TextProcessor</a> should be used for padding and conversion into a tensor of type <em>torch.FloatTensor</em>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxSpeechEncoderDecoderModel.__call__.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxSpeechEncoderDecoderModel.__call__.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxSpeechEncoderDecoderModel.__call__.decoder_input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxSpeechEncoderDecoderModel.__call__.decoder_input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a></p> <p>If <code>past_key_values</code> is used, optionally only the last <code>decoder_input_ids</code> have to be input (see <code>past_key_values</code>).</p> <p>For sequence to sequence training, <code>decoder_input_ids</code> should be provided. 
<code>decoder_input_ids</code> should be created outside of the model by shifting the <code>labels</code> to the right, replacing -100 by the <code>pad_token_id</code> and prepending them with the <code>decoder_start_token_id</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxSpeechEncoderDecoderModel.__call__.decoder_attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxSpeechEncoderDecoderModel.__call__.decoder_attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxSpeechEncoderDecoderModel.__call__.decoder_position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxSpeechEncoderDecoderModel.__call__.decoder_position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each decoder input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.decoder.max_position_embeddings - 1]</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxSpeechEncoderDecoderModel.__call__.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxSpeechEncoderDecoderModel.__call__.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxSpeechEncoderDecoderModel.__call__.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxSpeechEncoderDecoderModel.__call__.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, the model will return a <code>FlaxSeq2SeqLMOutput</code> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.FlaxSpeechEncoderDecoderModel.__call__.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput" >transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 
dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput" >transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/speech-encoder-decoder#transformers.SpeechEncoderDecoderConfig" >SpeechEncoderDecoderConfig</a>) and inputs.</p> <ul> <li> <p><strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) — Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(jnp.ndarray))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) — Tuple of <code>tuple(jnp.ndarray)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder’s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) — Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when 
<code>config.output_hidden_states=True</code>) — Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/speech-encoder-decoder#transformers.FlaxSpeechEncoderDecoderModel">FlaxSpeechEncoderDecoderModel</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> FlaxSpeechEncoderDecoderModel, BartTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># load a fine-tuned wav2vec2-2-bart model</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxSpeechEncoderDecoderModel.from_pretrained(<span class="hljs-string">&quot;patrickvonplaten/wav2vec2-2-bart-large&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># load output tokenizer</span> <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer_output = BartTokenizer.from_pretrained(<span 
class="hljs-string">&quot;facebook/bart-large&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> jax.numpy <span class="hljs-keyword">as</span> jnp <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = jnp.ones((<span class="hljs-number">2</span>, <span class="hljs-number">5000</span>), dtype=jnp.float32) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># use bart&#x27;s special bos, pad and eos tokens</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model.config.decoder_start_token_id = model.decoder.config.bos_token_id <span class="hljs-meta">&gt;&gt;&gt; </span>model.config.pad_token_id = model.decoder.config.pad_token_id <span class="hljs-meta">&gt;&gt;&gt; </span>model.config.eos_token_id = model.decoder.config.eos_token_id <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model.generate(inputs) <span class="hljs-comment"># generate token ids from the (dummy) speech inputs</span><!-- HTML_TAG_END --></pre></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxSpeechEncoderDecoderModel.from_encoder_decoder_pretrained"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>from_encoder_decoder_pretrained</span></h4><!-- HTML_TAG_END --> <a id="transformers.FlaxSpeechEncoderDecoderModel.from_encoder_decoder_pretrained" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxSpeechEncoderDecoderModel.from_encoder_decoder_pretrained"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline 
text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/speech_encoder_decoder/modeling_flax_speech_encoder_decoder.py#L753" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_pretrained_model_name_or_path<span class="opacity-60">: typing.Union[str, os.PathLike, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_pretrained_model_name_or_path<span class="opacity-60">: typing.Union[str, os.PathLike, NoneType] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*model_args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxSpeechEncoderDecoderModel.from_encoder_decoder_pretrained.encoder_pretrained_model_name_or_path" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxSpeechEncoderDecoderModel.from_encoder_decoder_pretrained.encoder_pretrained_model_name_or_path"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_pretrained_model_name_or_path</strong> (<code>Union[str, os.PathLike]</code>, <em>optional</em>) &#x2014; Information necessary to initiate the encoder. Can be either:</p> <ul> <li>A string, the <em>model id</em> of a pretrained model hosted inside a model repo on huggingface.co. 
Valid model ids can be located at the root-level, like <code>bert-base-uncased</code>, or namespaced under a user or organization name, like <code>dbmdz/bert-base-german-cased</code>.</li> <li>A path to a <em>directory</em> containing model weights saved using <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.save_pretrained">save_pretrained()</a>, e.g., <code>./my_model_directory/</code>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxSpeechEncoderDecoderModel.from_encoder_decoder_pretrained.decoder_pretrained_model_name_or_path" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxSpeechEncoderDecoderModel.from_encoder_decoder_pretrained.decoder_pretrained_model_name_or_path"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_pretrained_model_name_or_path</strong> (<code>Union[str, os.PathLike]</code>, <em>optional</em>, defaults to <code>None</code>) &#x2014; Information necessary to initiate the decoder. Can be either:</p> <ul> <li>A string, the <em>model id</em> of a pretrained model hosted inside a model repo on huggingface.co. 
Valid model ids can be located at the root-level, like <code>bert-base-uncased</code>, or namespaced under a user or organization name, like <code>dbmdz/bert-base-german-cased</code>.</li> <li>A path to a <em>directory</em> containing model weights saved using <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.save_pretrained">save_pretrained()</a>, e.g., <code>./my_model_directory/</code>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxSpeechEncoderDecoderModel.from_encoder_decoder_pretrained.model_args" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxSpeechEncoderDecoderModel.from_encoder_decoder_pretrained.model_args"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>model_args</strong> (remaining positional arguments, <em>optional</em>) &#x2014; All remaining positional arguments will be passed to the underlying model&#x2019;s <code>__init__</code> method.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxSpeechEncoderDecoderModel.from_encoder_decoder_pretrained.kwargs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxSpeechEncoderDecoderModel.from_encoder_decoder_pretrained.kwargs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>kwargs</strong> (remaining dictionary of keyword arguments, <em>optional</em>) &#x2014; Can be used to update the configuration object (after it has been loaded) and initiate the model (e.g., <code>output_attentions=True</code>).</p> <ul> <li>To update the encoder configuration, use the prefix <em>encoder_</em> for each 
configuration parameter.</li> <li>To update the decoder configuration, use the prefix <em>decoder_</em> for each configuration parameter.</li> <li>To update the parent model configuration, do not use a prefix for each configuration parameter.</li> </ul> <p>Behaves differently depending on whether a <code>config</code> is provided or automatically loaded.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Instantiate an encoder and a decoder from one or two base classes of the library from pretrained model checkpoints.</p> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> FlaxSpeechEncoderDecoderModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># initialize a wav2vec2-2-bart from pretrained wav2vec2 and bart models. Note that the cross-attention layers will be randomly initialized</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxSpeechEncoderDecoderModel.from_encoder_decoder_pretrained( <span class="hljs-meta">... </span> <span class="hljs-string">&quot;facebook/wav2vec2-large-lv60&quot;</span>, <span class="hljs-string">&quot;facebook/bart-large&quot;</span> <span class="hljs-meta">... 
</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># saving model after fine-tuning</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model.save_pretrained(<span class="hljs-string">&quot;./wav2vec2-2-bart-large&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># load fine-tuned model</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxSpeechEncoderDecoderModel.from_pretrained(<span class="hljs-string">&quot;./wav2vec2-2-bart-large&quot;</span>)<!-- HTML_TAG_END --></pre></div></div></div> <script type="module" data-hydrate="1w5mwts"> import { start } from "/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"; start({ target: document.querySelector('[data-hydrate="1w5mwts"]').parentNode, paths: {"base":"/docs/transformers/pr_16143/en","assets":"/docs/transformers/pr_16143/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"), import("/docs/transformers/pr_16143/en/_app/pages/model_doc/speech-encoder-decoder.mdx-a4b010d9.js") ], params: {} } }); </script>
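<p>As referenced in the <code>kwargs</code> description above, configuration overrides passed to <code>from_encoder_decoder_pretrained()</code> are routed by prefix: arguments prefixed with <em>encoder_</em> update the encoder configuration, arguments prefixed with <em>decoder_</em> update the decoder configuration, and unprefixed arguments update the parent model configuration. The snippet below is only a minimal sketch of that prefix convention; the particular overrides shown (<code>encoder_add_adapter</code>, <code>decoder_use_cache</code>) are illustrative assumptions and should be checked against the chosen encoder and decoder configuration classes.</p> <pre>from transformers import FlaxSpeechEncoderDecoderModel

# Sketch of the kwargs prefix convention (illustrative parameter names;
# verify that they exist on the selected encoder/decoder configs).
model = FlaxSpeechEncoderDecoderModel.from_encoder_decoder_pretrained(
    "facebook/wav2vec2-large-lv60",
    "facebook/bart-large",
    encoder_add_adapter=True,   # routed to the wav2vec2 encoder configuration
    decoder_use_cache=False,    # routed to the BART decoder configuration
)</pre>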
202
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en
hf_public_repos/doc-build-dev/transformers/pr_16143/en/model_doc/vision-text-dual-encoder.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;visiontextdualencoder&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;overview&quot;,&quot;title&quot;:&quot;Overview&quot;},{&quot;local&quot;:&quot;transformers.VisionTextDualEncoderConfig&quot;,&quot;title&quot;:&quot;VisionTextDualEncoderConfig&quot;},{&quot;local&quot;:&quot;transformers.VisionTextDualEncoderProcessor&quot;,&quot;title&quot;:&quot;VisionTextDualEncoderProcessor&quot;},{&quot;local&quot;:&quot;transformers.VisionTextDualEncoderModel&quot;,&quot;title&quot;:&quot;VisionTextDualEncoderModel&quot;},{&quot;local&quot;:&quot;transformers.FlaxVisionTextDualEncoderModel&quot;,&quot;title&quot;:&quot;FlaxVisionTextDualEncoderModel&quot;}],&quot;title&quot;:&quot;VisionTextDualEncoder&quot;}" data-svelte="svelte-1phssyn"> <link rel="stylesheet" href="/docs/transformers/pr_16143/en/_app/assets/pages/__layout.svelte-a5c8879b.css"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/vendor-4833417e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/paths-4b3c6e7e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/model_doc/vision-text-dual-encoder.mdx-bcc9f3e9.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Tip-fffd6df1.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Docstring-4f315ed9.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/IconCopyLink-4b81c553.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CodeBlock-6a3d1b46.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CopyButton-dacfbfaf.js"> <h1 class="relative group"><a id="visiontextdualencoder" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#visiontextdualencoder"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>VisionTextDualEncoder </span></h1> <h2 class="relative group"><a id="overview" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#overview"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 
1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Overview </span></h2> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/vision-text-dual-encoder#transformers.VisionTextDualEncoderModel">VisionTextDualEncoderModel</a> can be used to initialize a vision-text dual encoder model with any pretrained vision autoencoding model as the vision encoder (<em>e.g.</em> <a href="vit">ViT</a>, <a href="beit">BEiT</a>, <a href="deit">DeiT</a>) and any pretrained text autoencoding model as the text encoder (<em>e.g.</em> <a href="roberta">RoBERTa</a>, <a href="bert">BERT</a>). Two projection layers are added on top of both the vision and text encoders to project the output embeddings to a shared latent space. The projection layers are randomly initialized, so the model should be fine-tuned on a downstream task. This model can be used to align the vision-text embeddings using CLIP-like contrastive image-text training and can then be used for zero-shot vision tasks such as image classification or retrieval.</p> <p>In <a href="https://arxiv.org/abs/2111.07991" rel="nofollow">LiT: Zero-Shot Transfer with Locked-image Text Tuning</a> it is shown how leveraging pre-trained (locked/frozen) image and text models for contrastive learning yields a significant improvement on new zero-shot vision tasks such as image classification or retrieval.</p> <h2 class="relative group"><a id="transformers.VisionTextDualEncoderConfig" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.VisionTextDualEncoderConfig"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>VisionTextDualEncoderConfig </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.VisionTextDualEncoderConfig"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em"
preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">VisionTextDualEncoderConfig</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.VisionTextDualEncoderConfig" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.VisionTextDualEncoderConfig"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/vision_text_dual_encoder/configuration_vision_text_dual_encoder.py#L28" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">projection_dim<span class="opacity-60"> = 512</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logit_scale_init_value<span class="opacity-60"> = 2.6592</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.VisionTextDualEncoderConfig.text_config_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.VisionTextDualEncoderConfig.text_config_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>text_config_dict</strong> (<code>dict</code>) &#x2014; Dictionary of configuration options that defines the text model config.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.VisionTextDualEncoderConfig.vision_config_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.VisionTextDualEncoderConfig.vision_config_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>vision_config_dict</strong> (<code>dict</code>) &#x2014; Dictionary of configuration options that defines the vision model config.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.VisionTextDualEncoderConfig.projection_dim" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.VisionTextDualEncoderConfig.projection_dim"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>projection_dim</strong> (<code>int</code>, <em>optional</em>, defaults to 512) &#x2014; Dimensionality of text and vision projection layers.<!-- HTML_TAG_END -->
</span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.VisionTextDualEncoderConfig.logit_scale_init_value" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.VisionTextDualEncoderConfig.logit_scale_init_value"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logit_scale_init_value</strong> (<code>float</code>, <em>optional</em>, defaults to 2.6592) &#x2014; The initial value of the <em>logit_scale</em> parameter. The default is used as per the original CLIP implementation.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.VisionTextDualEncoderConfig.kwargs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.VisionTextDualEncoderConfig.kwargs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>kwargs</strong> (<em>optional</em>) &#x2014; Dictionary of keyword arguments.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p><a href="/docs/transformers/pr_16143/en/model_doc/vision-text-dual-encoder#transformers.VisionTextDualEncoderConfig">VisionTextDualEncoderConfig</a> is the configuration class to store the configuration of a <a href="/docs/transformers/pr_16143/en/model_doc/vision-text-dual-encoder#transformers.VisionTextDualEncoderModel">VisionTextDualEncoderModel</a>.
It is used to instantiate <a href="/docs/transformers/pr_16143/en/model_doc/vision-text-dual-encoder#transformers.VisionTextDualEncoderModel">VisionTextDualEncoderModel</a> model according to the specified arguments, defining the text model and vision model configs.</p> <p>Configuration objects inherit from <a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a> and can be used to control the model outputs. Read the documentation from <a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a> for more information.</p> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ViTConfig, BertConfig, VisionTextDualEncoderConfig, VisionTextDualEncoderModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a BERT and ViT configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config_vision = ViTConfig() <span class="hljs-meta">&gt;&gt;&gt; </span>config_text = BertConfig() <span class="hljs-meta">&gt;&gt;&gt; </span>config = VisionTextDualEncoderConfig.from_vision_text_configs(config_vision, config_text, projection_dim=<span class="hljs-number">512</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a BERT and ViT model</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = VisionTextDualEncoderModel(config=config) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config_vision = model.config.vision_config <span class="hljs-meta">&gt;&gt;&gt; </span>config_text = model.config.text_config <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Saving the model, including its configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model.save_pretrained(<span class="hljs-string">&quot;my-model&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># loading model and config from pretrained folder</span> <span class="hljs-meta">&gt;&gt;&gt; </span>vision_text_config = VisionTextDualEncoderConfig.from_pretrained(<span 
class="hljs-string">&quot;vit-bert&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = VisionTextDualEncoderModel.from_pretrained(<span class="hljs-string">&quot;vit-bert&quot;</span>, config=vision_text_config)<!-- HTML_TAG_END --></pre></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.VisionTextDualEncoderConfig.from_vision_text_configs"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>from_vision_text_configs</span></h4><!-- HTML_TAG_END --> <a id="transformers.VisionTextDualEncoderConfig.from_vision_text_configs" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.VisionTextDualEncoderConfig.from_vision_text_configs"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/vision_text_dual_encoder/configuration_vision_text_dual_encoder.py#L105" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">vision_config<span class="opacity-60">: PretrainedConfig</span></span> </span><span class="comma cursor-default"><span class="rounded 
hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">text_config<span class="opacity-60">: PretrainedConfig</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/model_doc/vision-text-dual-encoder#transformers.VisionTextDualEncoderConfig" >VisionTextDualEncoderConfig</a></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.VisionTextDualEncoderConfig.from_vision_text_configs.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/model_doc/vision-text-dual-encoder#transformers.VisionTextDualEncoderConfig" >VisionTextDualEncoderConfig</a></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>An instance of a configuration object</p> <!-- HTML_TAG_END --></p></div></div> <p>Instantiate a <a href="/docs/transformers/pr_16143/en/model_doc/vision-text-dual-encoder#transformers.VisionTextDualEncoderConfig">VisionTextDualEncoderConfig</a> (or a derived class) from text model configuration and vision model configuration.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.VisionTextDualEncoderConfig.to_dict"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>to_dict</span></h4><!-- HTML_TAG_END --> <a id="transformers.VisionTextDualEncoderConfig.to_dict" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.VisionTextDualEncoderConfig.to_dict"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 
256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/vision_text_dual_encoder/configuration_vision_text_dual_encoder.py#L117" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>Dict[str, any]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.VisionTextDualEncoderConfig.to_dict.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>Dict[str, any]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>Dictionary of all the attributes that make up this configuration instance,</p> <!-- HTML_TAG_END --></p></div></div> <p>Serializes this instance to a Python dictionary. 
Override the default <a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig.to_dict">to_dict()</a>.</p></div></div> <h2 class="relative group"><a id="transformers.VisionTextDualEncoderProcessor" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.VisionTextDualEncoderProcessor"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>VisionTextDualEncoderProcessor </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.VisionTextDualEncoderProcessor"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">VisionTextDualEncoderProcessor</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.VisionTextDualEncoderProcessor" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.VisionTextDualEncoderProcessor"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 
56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/vision_text_dual_encoder/processing_vision_text_dual_encoder.py#L23" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">feature_extractor<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tokenizer<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.VisionTextDualEncoderProcessor.feature_extractor" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.VisionTextDualEncoderProcessor.feature_extractor"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>feature_extractor</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/auto#transformers.AutoFeatureExtractor">AutoFeatureExtractor</a>) &#x2014; The feature extractor is a required input.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.VisionTextDualEncoderProcessor.tokenizer" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.VisionTextDualEncoderProcessor.tokenizer"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 
0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tokenizer</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer is a required input.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Constructs a VisionTextDualEncoder processor which wraps a vision feature extractor and a tokenizer into a single processor.</p> <p><a href="/docs/transformers/pr_16143/en/model_doc/vision-text-dual-encoder#transformers.VisionTextDualEncoderProcessor">VisionTextDualEncoderProcessor</a> offers all the functionalities of <a href="/docs/transformers/pr_16143/en/model_doc/auto#transformers.AutoFeatureExtractor">AutoFeatureExtractor</a> and <a href="/docs/transformers/pr_16143/en/model_doc/auto#transformers.AutoTokenizer">AutoTokenizer</a>. See the <code>__call__()</code>and <a href="/docs/transformers/pr_16143/en/model_doc/vision-text-dual-encoder#transformers.VisionTextDualEncoderProcessor.decode">decode()</a> for more information.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.VisionTextDualEncoderProcessor.batch_decode"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>batch_decode</span></h4><!-- HTML_TAG_END --> <a id="transformers.VisionTextDualEncoderProcessor.batch_decode" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.VisionTextDualEncoderProcessor.batch_decode"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 
0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/vision_text_dual_encoder/processing_vision_text_dual_encoder.py#L98" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>This method forwards all its arguments to VisionTextDualEncoderTokenizer’s <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.batch_decode">batch_decode()</a>. Please refer to the docstring of this method for more information.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.VisionTextDualEncoderProcessor.decode"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>decode</span></h4><!-- HTML_TAG_END --> <a id="transformers.VisionTextDualEncoderProcessor.decode" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.VisionTextDualEncoderProcessor.decode"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 
1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/vision_text_dual_encoder/processing_vision_text_dual_encoder.py#L105" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>This method forwards all its arguments to VisionTextDualEncoderTokenizer’s <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.decode">decode()</a>. Please refer to the docstring of this method for more information.</p></div></div> <h2 class="relative group"><a id="transformers.VisionTextDualEncoderModel" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.VisionTextDualEncoderModel"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>VisionTextDualEncoderModel </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.VisionTextDualEncoderModel"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path 
class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">VisionTextDualEncoderModel</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.VisionTextDualEncoderModel" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.VisionTextDualEncoderModel"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/vision_text_dual_encoder/modeling_vision_text_dual_encoder.py#L163" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60">: typing.Optional[transformers.models.vision_text_dual_encoder.configuration_vision_text_dual_encoder.VisionTextDualEncoderConfig] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">vision_model<span class="opacity-60">: typing.Optional[transformers.modeling_utils.PreTrainedModel] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">text_model<span class="opacity-60">: typing.Optional[transformers.modeling_utils.PreTrainedModel] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.VisionTextDualEncoderModel.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.VisionTextDualEncoderModel.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 
256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/vision-text-dual-encoder#transformers.VisionTextDualEncoderConfig">VisionTextDualEncoderConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>This class can be used to initialize a vision-text dual encoder model with any pretrained vision autoencoding model as the vision encoder and any pretrained text model as the text encoder. The vision and text encoders are loaded via the <code>from_pretrained()</code> method. The projection layers are automatically added to the model and should be fine-tuned on a downstream task, like contrastive image-text modeling.</p> <p>In <a href="https://arxiv.org/abs/2111.07991" rel="nofollow">LiT: Zero-Shot Transfer with Locked-image Text Tuning</a> it is shown how leveraging pre-trained (locked/frozen) image and text models for contrastive learning yields a significant improvement on new zero-shot vision tasks such as image classification or retrieval.</p> <p>After such a Vision-Text-Dual-Encoder model has been trained/fine-tuned, it can be saved/loaded just like any other model (see the examples for more information).</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads, etc.)</p> <p>This model is also a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> subclass.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.VisionTextDualEncoderModel.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.VisionTextDualEncoderModel.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.VisionTextDualEncoderModel.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/vision_text_dual_encoder/modeling_vision_text_dual_encoder.py#L295" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pixel_values<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black 
hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_loss<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>transformers.models.clip.modeling_clip.CLIPOutput</code>or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.VisionTextDualEncoderModel.forward.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.VisionTextDualEncoderModel.forward.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/clip#transformers.CLIPTokenizer">CLIPTokenizer</a>. 
See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.VisionTextDualEncoderModel.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.VisionTextDualEncoderModel.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.VisionTextDualEncoderModel.forward.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.VisionTextDualEncoderModel.forward.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.VisionTextDualEncoderModel.forward.pixel_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.VisionTextDualEncoderModel.forward.pixel_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pixel_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_channels, height, width)</code>) &#x2014; Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using a feature extractor (e.g. if you use ViT as the encoder, you should use <a href="/docs/transformers/pr_16143/en/model_doc/vit#transformers.ViTFeatureExtractor">ViTFeatureExtractor</a>). 
See <a href="/docs/transformers/pr_16143/en/model_doc/vit#transformers.ViTFeatureExtractor.__call__">ViTFeatureExtractor.<strong>call</strong>()</a> for details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.VisionTextDualEncoderModel.forward.return_loss" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.VisionTextDualEncoderModel.forward.return_loss"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_loss</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the contrastive loss.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.VisionTextDualEncoderModel.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.VisionTextDualEncoderModel.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.VisionTextDualEncoderModel.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.VisionTextDualEncoderModel.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.VisionTextDualEncoderModel.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.VisionTextDualEncoderModel.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.VisionTextDualEncoderModel.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>transformers.models.clip.modeling_clip.CLIPOutput</code>or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A 
<code>transformers.models.clip.modeling_clip.CLIPOutput</code> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/vision-text-dual-encoder#transformers.VisionTextDualEncoderConfig" >VisionTextDualEncoderConfig</a>) and inputs.</p> <ul> <li><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>return_loss</code> is <code>True</code>) — Contrastive loss for image-text similarity.</li> <li><strong>logits_per_image</strong> (<code>torch.FloatTensor</code> of shape <code>(image_batch_size, text_batch_size)</code>) — The scaled dot product scores between <code>image_embeds</code> and <code>text_embeds</code>. This represents the image-text similarity scores.</li> <li><strong>logits_per_text</strong> (<code>torch.FloatTensor</code> of shape <code>(text_batch_size, image_batch_size)</code>) — The scaled dot product scores between <code>text_embeds</code> and <code>image_embeds</code>. This represents the text-image similarity scores.</li> <li><strong>text_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, output_dim)</code>) — The text embeddings obtained by applying the projection layer to the pooled output of <a href="/docs/transformers/pr_16143/en/model_doc/clip#transformers.CLIPTextModel" >CLIPTextModel</a>.</li> <li><strong>image_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, output_dim)</code>) — The image embeddings obtained by applying the projection layer to the pooled output of <a href="/docs/transformers/pr_16143/en/model_doc/clip#transformers.CLIPVisionModel" >CLIPVisionModel</a>.</li> <li><strong>text_model_output</strong> (<code>BaseModelOutputWithPooling</code>): The output of the <a href="/docs/transformers/pr_16143/en/model_doc/clip#transformers.CLIPTextModel" >CLIPTextModel</a>.</li> <li><strong>vision_model_output</strong> (<code>BaseModelOutputWithPooling</code>): The output of the <a href="/docs/transformers/pr_16143/en/model_doc/clip#transformers.CLIPVisionModel" >CLIPVisionModel</a>.</li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/vision-text-dual-encoder#transformers.VisionTextDualEncoderModel">VisionTextDualEncoderModel</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path 
d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> requests <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ( <span class="hljs-meta">... </span> VisionTextDualEncoderModel, <span class="hljs-meta">... </span> VisionTextDualEncoderProcessor, <span class="hljs-meta">... </span> ViTFeatureExtractor, <span class="hljs-meta">... </span> BertTokenizer, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BertTokenizer.from_pretrained(<span class="hljs-string">&quot;bert-base-uncased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = ViTFeatureExtractor.from_pretrained(<span class="hljs-string">&quot;google/vit-base-patch16-224&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>processor = VisionTextDualEncoderProcessor(feature_extractor, tokenizer) <span class="hljs-meta">&gt;&gt;&gt; </span>model = VisionTextDualEncoderModel.from_vision_text_pretrained( <span class="hljs-meta">... </span> <span class="hljs-string">&quot;google/vit-base-patch16-224&quot;</span>, <span class="hljs-string">&quot;bert-base-uncased&quot;</span> <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># contrastive training</span> <span class="hljs-meta">&gt;&gt;&gt; </span>urls = [ <span class="hljs-meta">... </span> <span class="hljs-string">&quot;http://images.cocodataset.org/val2017/000000039769.jpg&quot;</span>, <span class="hljs-meta">... </span> <span class="hljs-string">&quot;https://farm3.staticflickr.com/2674/5850229113_4fe05d5265_z.jpg&quot;</span>, <span class="hljs-meta">... </span>] <span class="hljs-meta">&gt;&gt;&gt; </span>images = [Image.<span class="hljs-built_in">open</span>(requests.get(url, stream=<span class="hljs-literal">True</span>).raw) <span class="hljs-keyword">for</span> url <span class="hljs-keyword">in</span> urls] <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = processor( <span class="hljs-meta">... </span> text=[<span class="hljs-string">&quot;a photo of a cat&quot;</span>, <span class="hljs-string">&quot;a photo of a dog&quot;</span>], images=images, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>, padding=<span class="hljs-literal">True</span> <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model( <span class="hljs-meta">... </span> input_ids=inputs.input_ids, <span class="hljs-meta">... </span> attention_mask=inputs.attention_mask, <span class="hljs-meta">... </span> pixel_values=inputs.pixel_values, <span class="hljs-meta">... 
</span> return_loss=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>loss, logits_per_image = outputs.loss, outputs.logits_per_image <span class="hljs-comment"># this is the image-text similarity score</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># save and load from pretrained</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model.save_pretrained(<span class="hljs-string">&quot;vit-bert&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = VisionTextDualEncoderModel.from_pretrained(<span class="hljs-string">&quot;vit-bert&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># inference</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits_per_image = outputs.logits_per_image <span class="hljs-comment"># this is the image-text similarity score</span> <span class="hljs-meta">&gt;&gt;&gt; </span>probs = logits_per_image.softmax(dim=<span class="hljs-number">1</span>) <span class="hljs-comment"># we can take the softmax to get the label probabilities</span><!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.FlaxVisionTextDualEncoderModel" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxVisionTextDualEncoderModel"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>FlaxVisionTextDualEncoderModel </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxVisionTextDualEncoderModel"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" 
fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">FlaxVisionTextDualEncoderModel</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.FlaxVisionTextDualEncoderModel" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxVisionTextDualEncoderModel"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/vision_text_dual_encoder/modeling_flax_vision_text_dual_encoder.py#L219" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60">: VisionTextDualEncoderConfig</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_shape<span class="opacity-60">: typing.Optional[typing.Tuple] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">seed<span class="opacity-60">: int = 0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dtype<span class="opacity-60">: dtype = &lt;class &#39;jax._src.numpy.lax_numpy.float32&#39;&gt;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxVisionTextDualEncoderModel.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxVisionTextDualEncoderModel.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 
11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/vision-text-dual-encoder#transformers.VisionTextDualEncoderConfig">VisionTextDualEncoderConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxVisionTextDualEncoderModel.dtype" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxVisionTextDualEncoderModel.dtype"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>dtype</strong> (<code>jax.numpy.dtype</code>, <em>optional</em>, defaults to <code>jax.numpy.float32</code>) &#x2014; The data type of the computation. Can be one of <code>jax.numpy.float32</code>, <code>jax.numpy.float16</code> (on GPUs) and <code>jax.numpy.bfloat16</code> (on TPUs).</p> <p>This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If specified all the computation will be performed with the given <code>dtype</code>.</p> <p><strong>Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.</strong></p> <p>If you wish to change the dtype of the model parameters, see <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.to_fp16">to_fp16()</a> and <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.to_bf16">to_bf16()</a>.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>This class can be used to initialize a vision-text dual encoder model with any pretrained vision autoencoding model as the vision encoder and any pretrained text model as the text encoder. The vision and text encoders are loaded via the <code>from_pretrained()</code>method. 
The projection layers are automatically added to the model and should be fine-tuned on a downstream task, like contrastive image-text modeling.</p> <p>In <a href="https://arxiv.org/abs/2111.07991" rel="nofollow">LiT: Zero-Shot Transfer with Locked-image Text Tuning</a> it is shown how leveraging pre-trained (locked/frozen) image and text models for contrastive learning yields a significant improvement on new zero-shot vision tasks such as image classification or retrieval.</p> <p>After such a Vision-Text-Dual-Encoder model has been trained/fine-tuned, it can be saved/loaded just like any other model (see the examples for more information).</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads, etc.).</p> <p>This model is also a Flax Linen <a href="https://flax.readthedocs.io/en/latest/flax.linen.html#module" rel="nofollow">flax.linen.Module</a> subclass. Use it as a regular Flax Linen Module and refer to the Flax documentation for all matters related to general usage and behavior.</p> <p>Finally, this model supports inherent JAX features such as:</p> <ul><li><a href="https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit" rel="nofollow">Just-In-Time (JIT) compilation</a></li> <li><a href="https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation" rel="nofollow">Automatic Differentiation</a></li> <li><a href="https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap" rel="nofollow">Vectorization</a></li> <li><a href="https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap" rel="nofollow">Parallelization</a></li></ul> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxVisionTextDualEncoderModel.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.FlaxVisionTextDualEncoderModel.__call__" class="header-link invisible 
with-hover:group-hover:visible pr-2" href="#transformers.FlaxVisionTextDualEncoderModel.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/vision_text_dual_encoder/modeling_flax_vision_text_dual_encoder.py#L251" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pixel_values<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">params<span class="opacity-60">: dict = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dropout_rng<span class="opacity-60">: PRNGKey = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">train<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span> 
<span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>transformers.models.clip.modeling_flax_clip.FlaxCLIPOutput</code>or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxVisionTextDualEncoderModel.__call__.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxVisionTextDualEncoderModel.__call__.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>. 
See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxVisionTextDualEncoderModel.__call__.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxVisionTextDualEncoderModel.__call__.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxVisionTextDualEncoderModel.__call__.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxVisionTextDualEncoderModel.__call__.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxVisionTextDualEncoderModel.__call__.pixel_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxVisionTextDualEncoderModel.__call__.pixel_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pixel_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_channels, height, width)</code>) &#x2014; Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using a feature extractor (e.g. if you use ViT as the encoder, you should use <a href="/docs/transformers/pr_16143/en/model_doc/vit#transformers.ViTFeatureExtractor">ViTFeatureExtractor</a>). 
See <a href="/docs/transformers/pr_16143/en/model_doc/vit#transformers.ViTFeatureExtractor.__call__">ViTFeatureExtractor.<strong>call</strong>()</a> for details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxVisionTextDualEncoderModel.__call__.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxVisionTextDualEncoderModel.__call__.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxVisionTextDualEncoderModel.__call__.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxVisionTextDualEncoderModel.__call__.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxVisionTextDualEncoderModel.__call__.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxVisionTextDualEncoderModel.__call__.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.FlaxVisionTextDualEncoderModel.__call__.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>transformers.models.clip.modeling_flax_clip.FlaxCLIPOutput</code>or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <code>transformers.models.clip.modeling_flax_clip.FlaxCLIPOutput</code>or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/vision-text-dual-encoder#transformers.VisionTextDualEncoderConfig" >VisionTextDualEncoderConfig</a>) and inputs.</p> <ul> <li><strong>logits_per_image:(<code>jnp.ndarray</code></strong> of shape <code>(image_batch_size, text_batch_size)</code>) — The scaled dot product scores between <code>image_embeds</code> and <code>text_embeds</code>. This represents the image-text similarity scores.</li> <li><strong>logits_per_text:(<code>jnp.ndarray</code></strong> of shape <code>(text_batch_size, image_batch_size)</code>) — The scaled dot product scores between <code>text_embeds</code> and <code>image_embeds</code>. 
This represents the text-image similarity scores.</li> <li><strong>text_embeds(<code>jnp.ndarray</code></strong> of shape <code>(batch_size, output_dim</code>) — The text embeddings obtained by applying the projection layer to the pooled output of <a href="/docs/transformers/pr_16143/en/model_doc/clip#transformers.FlaxCLIPTextModel" >FlaxCLIPTextModel</a>.</li> <li><strong>image_embeds(<code>jnp.ndarray</code></strong> of shape <code>(batch_size, output_dim</code>) — The image embeddings obtained by applying the projection layer to the pooled output of <a href="/docs/transformers/pr_16143/en/model_doc/clip#transformers.FlaxCLIPVisionModel" >FlaxCLIPVisionModel</a>.</li> <li><strong>text_model_output(<code>FlaxBaseModelOutputWithPooling</code>):</strong> The output of the <a href="/docs/transformers/pr_16143/en/model_doc/clip#transformers.FlaxCLIPTextModel" >FlaxCLIPTextModel</a>.</li> <li><strong>vision_model_output(<code>FlaxBaseModelOutputWithPooling</code>):</strong> The output of the <a href="/docs/transformers/pr_16143/en/model_doc/clip#transformers.FlaxCLIPVisionModel" >FlaxCLIPVisionModel</a>.</li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/vision-text-dual-encoder#transformers.FlaxVisionTextDualEncoderModel">FlaxVisionTextDualEncoderModel</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> requests <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> jax <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span 
class="hljs-keyword">import</span> ( <span class="hljs-meta">... </span> FlaxVisionTextDualEncoderModel, <span class="hljs-meta">... </span> VisionTextDualEncoderProcessor, <span class="hljs-meta">... </span> ViTFeatureExtractor, <span class="hljs-meta">... </span> BertTokenizer, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BertTokenizer.from_pretrained(<span class="hljs-string">&quot;bert-base-uncased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = ViTFeatureExtractor.from_pretrained(<span class="hljs-string">&quot;google/vit-base-patch16-224&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>processor = VisionTextDualEncoderProcessor(feature_extractor, tokenizer) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( <span class="hljs-meta">... </span> <span class="hljs-string">&quot;google/vit-base-patch16-224&quot;</span>, <span class="hljs-string">&quot;bert-base-uncased&quot;</span> <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># contrastive training</span> <span class="hljs-meta">&gt;&gt;&gt; </span>urls = [ <span class="hljs-meta">... </span> <span class="hljs-string">&quot;http://images.cocodataset.org/val2017/000000039769.jpg&quot;</span>, <span class="hljs-meta">... </span> <span class="hljs-string">&quot;https://farm3.staticflickr.com/2674/5850229113_4fe05d5265_z.jpg&quot;</span>, <span class="hljs-meta">... </span>] <span class="hljs-meta">&gt;&gt;&gt; </span>images = [Image.<span class="hljs-built_in">open</span>(requests.get(url, stream=<span class="hljs-literal">True</span>).raw) <span class="hljs-keyword">for</span> url <span class="hljs-keyword">in</span> urls] <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = processor( <span class="hljs-meta">... </span> text=[<span class="hljs-string">&quot;a photo of a cat&quot;</span>, <span class="hljs-string">&quot;a photo of a dog&quot;</span>], images=images, return_tensors=<span class="hljs-string">&quot;np&quot;</span>, padding=<span class="hljs-literal">True</span> <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model( <span class="hljs-meta">... </span> input_ids=inputs.input_ids, <span class="hljs-meta">... </span> attention_mask=inputs.attention_mask, <span class="hljs-meta">... </span> pixel_values=inputs.pixel_values, <span class="hljs-meta">... 
</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>logits_per_image = outputs.logits_per_image <span class="hljs-comment"># this is the image-text similarity score</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># save and load from pretrained</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model.save_pretrained(<span class="hljs-string">&quot;vit-bert&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxVisionTextDualEncoderModel.from_pretrained(<span class="hljs-string">&quot;vit-bert&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># inference</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits_per_image = outputs.logits_per_image <span class="hljs-comment"># this is the image-text similarity score</span> <span class="hljs-meta">&gt;&gt;&gt; </span>probs = jax.nn.softmax(logits_per_image, axis=<span class="hljs-number">1</span>) <span class="hljs-comment"># we can take the softmax to get the label probabilities</span><!-- HTML_TAG_END --></pre></div></div></div> <script type="module" data-hydrate="1v8jtbq"> import { start } from "/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"; start({ target: document.querySelector('[data-hydrate="1v8jtbq"]').parentNode, paths: {"base":"/docs/transformers/pr_16143/en","assets":"/docs/transformers/pr_16143/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"), import("/docs/transformers/pr_16143/en/_app/pages/model_doc/vision-text-dual-encoder.mdx-bcc9f3e9.js") ], params: {} } }); </script>
203
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en
hf_public_repos/doc-build-dev/transformers/pr_16143/en/model_doc/xglm.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;xglm&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;overview&quot;,&quot;title&quot;:&quot;Overview&quot;},{&quot;local&quot;:&quot;transformers.XGLMConfig&quot;,&quot;title&quot;:&quot;XGLMConfig&quot;},{&quot;local&quot;:&quot;transformers.XGLMTokenizer&quot;,&quot;title&quot;:&quot;XGLMTokenizer&quot;},{&quot;local&quot;:&quot;transformers.XGLMTokenizerFast&quot;,&quot;title&quot;:&quot;XGLMTokenizerFast&quot;},{&quot;local&quot;:&quot;transformers.XGLMModel&quot;,&quot;title&quot;:&quot;XGLMModel&quot;},{&quot;local&quot;:&quot;transformers.XGLMForCausalLM&quot;,&quot;title&quot;:&quot;XGLMForCausalLM&quot;},{&quot;local&quot;:&quot;transformers.FlaxXGLMModel&quot;,&quot;title&quot;:&quot;FlaxXGLMModel&quot;},{&quot;local&quot;:&quot;transformers.FlaxXGLMForCausalLM&quot;,&quot;title&quot;:&quot;FlaxXGLMForCausalLM&quot;}],&quot;title&quot;:&quot;XGLM&quot;}" data-svelte="svelte-1phssyn"> <link rel="stylesheet" href="/docs/transformers/pr_16143/en/_app/assets/pages/__layout.svelte-a5c8879b.css"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/vendor-4833417e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/paths-4b3c6e7e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/model_doc/xglm.mdx-b5ed5977.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Tip-fffd6df1.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Docstring-4f315ed9.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/IconCopyLink-4b81c553.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CodeBlock-6a3d1b46.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CopyButton-dacfbfaf.js"> <h1 class="relative group"><a id="xglm" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#xglm"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>XGLM </span></h1> <h2 class="relative group"><a id="overview" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#overview"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Overview </span></h2> <p>The XGLM model was proposed in <a href="https://arxiv.org/abs/2112.10668" rel="nofollow">Few-shot Learning with Multilingual Language Models</a> by Xi Victoria Lin, Todor Mihaylov, Mikel Artetxe, Tianlu Wang, Shuohui Chen, Daniel Simig, Myle Ott, Naman Goyal, Shruti Bhosale, Jingfei Du, Ramakanth Pasunuru, Sam Shleifer, Punit Singh Koura, Vishrav Chaudhary, Brian O’Horo, Jeff Wang, Luke Zettlemoyer, Zornitsa Kozareva, Mona Diab, Veselin Stoyanov, Xian Li.</p> <p>The abstract from the paper is the following:</p> <p><em>Large-scale autoregressive language models such as GPT-3 are few-shot learners that can perform a wide range of language tasks without fine-tuning. While these models are known to be able to jointly represent many different languages, their training data is dominated by English, potentially limiting their cross-lingual generalization. In this work, we train multilingual autoregressive language models on a balanced corpus covering a diverse set of languages, and study their few- and zero-shot learning capabilities in a wide range of tasks. Our largest model with 7.5 billion parameters sets new state of the art in few-shot learning in more than 20 representative languages, outperforming GPT-3 of comparable size in multilingual commonsense reasoning (with +7.4% absolute accuracy improvement in 0-shot settings and +9.4% in 4-shot settings) and natural language inference (+5.4% in each of 0-shot and 4-shot settings). On the FLORES-101 machine translation benchmark, our model outperforms GPT-3 on 171 out of 182 translation directions with 32 training examples, while surpassing the official supervised baseline in 45 directions. We present a detailed analysis of where the model succeeds and fails, showing in particular that it enables cross-lingual in-context learning on some tasks, while there is still room for improvement on surface form robustness and adaptation to tasks that do not have a natural cloze form. Finally, we evaluate our models in social value tasks such as hate speech detection in five languages and find it has limitations similar to comparable sized GPT-3 models.</em></p> <p>This model was contributed by <a href="https://huggingface.co/valhalla" rel="nofollow">Suraj</a>. 
The original code can be found <a href="https://github.com/pytorch/fairseq/tree/main/examples/xglm" rel="nofollow">here</a>.</p> <h2 class="relative group"><a id="transformers.XGLMConfig" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XGLMConfig"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>XGLMConfig </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.XGLMConfig"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">XGLMConfig</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.XGLMConfig" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.XGLMConfig"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" 
href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/xglm/configuration_xglm.py#L29" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">vocab_size<span class="opacity-60"> = 256008</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_position_embeddings<span class="opacity-60"> = 2048</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">d_model<span class="opacity-60"> = 1024</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">ffn_dim<span class="opacity-60"> = 4096</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_layers<span class="opacity-60"> = 24</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_heads<span class="opacity-60"> = 16</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">activation_function<span class="opacity-60"> = &#39;gelu&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dropout<span class="opacity-60"> = 0.1</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_dropout<span class="opacity-60"> = 0.1</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">activation_dropout<span class="opacity-60"> = 0.0</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">layerdrop<span class="opacity-60"> = 0.0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">init_std<span class="opacity-60"> = 0.02</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scale_embedding<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_cache<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_start_token_id<span class="opacity-60"> = 2</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_token_id<span class="opacity-60"> = 1</span></span> </span><span class="comma cursor-default"><span class="rounded 
hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">bos_token_id<span class="opacity-60"> = 0</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eos_token_id<span class="opacity-60"> = 2</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XGLMConfig.vocab_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XGLMConfig.vocab_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 256008) &#x2014; Vocabulary size of the XGLM model. 
Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed when calling <a href="/docs/transformers/pr_16143/en/model_doc/xglm#transformers.XGLMModel">XGLMModel</a> or <a href="/docs/transformers/pr_16143/en/model_doc/xglm#transformers.FlaxXGLMModel">FlaxXGLMModel</a>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XGLMConfig.max_position_embeddings" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XGLMConfig.max_position_embeddings"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_position_embeddings</strong> (<code>int</code>, <em>optional</em>, defaults to 2048) &#x2014; The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XGLMConfig.d_model" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XGLMConfig.d_model"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>d_model</strong> (<code>int</code>, <em>optional</em>, defaults to 1024) &#x2014; Dimension of the layers and the pooler layer.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XGLMConfig.ffn_dim" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XGLMConfig.ffn_dim"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>ffn_dim</strong> (<code>int</code>, <em>optional</em>, defaults to 4096) &#x2014; Dimension of the &#x201C;intermediate&#x201D; (often named feed-forward) layer in decoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XGLMConfig.num_layers" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XGLMConfig.num_layers"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 24) &#x2014; Number of hidden layers Transformer decoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XGLMConfig.attention_heads" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XGLMConfig.attention_heads"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 16) &#x2014; Number of 
attention heads for each attention layer in the Transformer decoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XGLMConfig.activation_function" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XGLMConfig.activation_function"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>activation_function</strong> (<code>str</code> or <code>function</code>, <em>optional</em>, defaults to <code>&quot;gelu&quot;</code>) &#x2014; The non-linear activation function (function or string) in the encoder and pooler. If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;silu&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XGLMConfig.dropout" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XGLMConfig.dropout"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability for all fully connected layers in the embeddings, decoder, and pooler.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XGLMConfig.attention_dropout" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XGLMConfig.attention_dropout"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true"
role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout ratio for the attention probabilities.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XGLMConfig.activation_dropout" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XGLMConfig.activation_dropout"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>activation_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout ratio for activations inside the fully connected layer. layerdrop &#x2014; (<code>float</code>, <em>optional</em>, defaults to 0.0): The LayerDrop probability for the encoder. 
See the <a href="https://arxiv.org/abs/1909.11556" rel="nofollow">LayerDrop paper</a> for more details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XGLMConfig.init_std" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XGLMConfig.init_std"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>init_std</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XGLMConfig.scale_embedding" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XGLMConfig.scale_embedding"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>scale_embedding</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Scale embeddings by dividing by sqrt(d_model).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XGLMConfig.use_cache" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XGLMConfig.use_cache"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1
1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not the model should return the last key/values attentions (not used by all models).<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>This is the configuration class to store the configuration of a <a href="/docs/transformers/pr_16143/en/model_doc/xglm#transformers.XGLMModel">XGLMModel</a>. It is used to instantiate an XGLM model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the XGLM <a href="https://huggingface.co/facebook/xglm-564M" rel="nofollow">facebook/xglm-564M</a> architecture.</p> <p>Configuration objects inherit from <a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a> and can be used to control the model outputs. Read the documentation from <a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a> for more information.</p> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> XGLMModel, XGLMConfig <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a XGLM facebook/xglm-564M style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = XGLMConfig() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a model from the facebook/xglm-564M style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = XGLMModel(configuration) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model 
configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = model.config<!-- HTML_TAG_END --></pre></div></div> <h2 class="relative group"><a id="transformers.XGLMTokenizer" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XGLMTokenizer"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>XGLMTokenizer </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.XGLMTokenizer"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">XGLMTokenizer</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.XGLMTokenizer" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.XGLMTokenizer"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" 
href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/xglm/tokenization_xglm.py#L43" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">vocab_file<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">bos_token<span class="opacity-60"> = &#39;&lt;s&gt;&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eos_token<span class="opacity-60"> = &#39;&lt;/s&gt;&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">sep_token<span class="opacity-60"> = &#39;&lt;/s&gt;&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cls_token<span class="opacity-60"> = &#39;&lt;s&gt;&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">unk_token<span class="opacity-60"> = &#39;&lt;unk&gt;&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_token<span class="opacity-60"> = &#39;&lt;pad&gt;&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">sp_model_kwargs<span class="opacity-60">: typing.Union[typing.Dict[str, typing.Any], NoneType] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XGLMTokenizer.vocab_file" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XGLMTokenizer.vocab_file"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>vocab_file</strong> (<code>str</code>) &#x2014; Path to the vocabulary file.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XGLMTokenizer.bos_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XGLMTokenizer.bos_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>bos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;s&gt;&quot;</code>) &#x2014; The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. 
The token used is the <code>cls_token</code>.</p> </div><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XGLMTokenizer.eos_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XGLMTokenizer.eos_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>eos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;/s&gt;&quot;</code>) &#x2014; The end of sequence token.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the <code>sep_token</code>.</p> </div><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XGLMTokenizer.sep_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XGLMTokenizer.sep_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>sep_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;/s&gt;&quot;</code>) &#x2014; The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. 
It is also used as the last token of a sequence built with special tokens.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XGLMTokenizer.cls_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XGLMTokenizer.cls_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cls_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;s&gt;&quot;</code>) &#x2014; The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XGLMTokenizer.unk_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XGLMTokenizer.unk_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;unk&gt;&quot;</code>) &#x2014; The unknown token. 
A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XGLMTokenizer.pad_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XGLMTokenizer.pad_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pad_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;pad&gt;&quot;</code>) &#x2014; The token used for padding, for example when batching sequences of different lengths.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XGLMTokenizer.mask_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XGLMTokenizer.mask_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>mask_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;mask&gt;&quot;</code>) &#x2014; The token used for masking values. This is the token used when training this model with masked language modeling. 
This is the token which the model will try to predict.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XGLMTokenizer.additional_special_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XGLMTokenizer.additional_special_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>additional_special_tokens</strong> (<code>List[str]</code>, <em>optional</em>, defaults to <code>[&quot;&lt;s&gt;NOTUSED&quot;, &quot;&lt;/s&gt;NOTUSED&quot;]</code>) &#x2014; Additional special tokens used by the tokenizer.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XGLMTokenizer.sp_model_kwargs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XGLMTokenizer.sp_model_kwargs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>sp_model_kwargs</strong> (<code>dict</code>, <em>optional</em>) &#x2014; Will be passed to the <code>SentencePieceProcessor.__init__()</code> method. The <a href="https://github.com/google/sentencepiece/tree/master/python" rel="nofollow">Python wrapper for SentencePiece</a> can be used, among other things, to set:</p> <ul> <li> <p><code>enable_sampling</code>: Enable subword regularization.</p> </li> <li> <p><code>nbest_size</code>: Sampling parameters for unigram. 
Invalid for BPE-Dropout.</p> <ul> <li><code>nbest_size = {0,1}</code>: No sampling is performed.</li> <li><code>nbest_size &gt; 1</code>: samples from the nbest_size results.</li> <li><code>nbest_size &lt; 0</code>: assuming that nbest_size is infinite and samples from the all hypothesis (lattice) using forward-filtering-and-backward-sampling algorithm.</li> </ul> </li> <li> <p><code>alpha</code>: Smoothing parameter for unigram sampling, and dropout probability of merge operations for BPE-dropout.</p> </li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XGLMTokenizer.sp_model" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XGLMTokenizer.sp_model"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>sp_model</strong> (<code>SentencePieceProcessor</code>) &#x2014; The <em>SentencePiece</em> processor that is used for every conversion (string, tokens and IDs).<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Adapted from <a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaTokenizer">RobertaTokenizer</a> and <a href="/docs/transformers/pr_16143/en/model_doc/xlnet#transformers.XLNetTokenizer">XLNetTokenizer</a>. Based on <a href="https://github.com/google/sentencepiece" rel="nofollow">SentencePiece</a>.</p> <p>This tokenizer inherits from <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a> which contains most of the main methods. 
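</p> <p>A minimal usage sketch (not part of the original docstring): the <code>sp_model_kwargs</code> described above can be forwarded through <code>from_pretrained</code> to enable SentencePiece subword-regularization sampling; the checkpoint name below is an assumption used only for illustration.</p> <pre><code class="language-python">from transformers import XGLMTokenizer

# Hypothetical example: enable unigram sampling (subword regularization) via sp_model_kwargs.
# "facebook/xglm-564M" is an assumed checkpoint name, used here purely for illustration.
tokenizer = XGLMTokenizer.from_pretrained(
    "facebook/xglm-564M",
    sp_model_kwargs={"enable_sampling": True, "nbest_size": -1, "alpha": 0.1},
)

# With sampling enabled, repeated calls may segment the same string differently.
print(tokenizer.tokenize("Hello world"))
</code></pre> <p>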
Users should refer to this superclass for more information regarding those methods.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.XGLMTokenizer.build_inputs_with_special_tokens"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>build_inputs_with_special_tokens</span></h4><!-- HTML_TAG_END --> <a id="transformers.XGLMTokenizer.build_inputs_with_special_tokens" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.XGLMTokenizer.build_inputs_with_special_tokens"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/xglm/tokenization_xglm.py#L189" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_0<span class="opacity-60">: typing.List[int]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_1<span class="opacity-60">: typing.Optional[typing.List[int]] = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> 
<span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>List[int]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XGLMTokenizer.build_inputs_with_special_tokens.token_ids_0" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XGLMTokenizer.build_inputs_with_special_tokens.token_ids_0"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs to which the special tokens will be added.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XGLMTokenizer.build_inputs_with_special_tokens.token_ids_1" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XGLMTokenizer.build_inputs_with_special_tokens.token_ids_1"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.XGLMTokenizer.build_inputs_with_special_tokens.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>List[int]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START 
--> <p>List of <a href="../glossary#input-ids">input IDs</a> with the appropriate special tokens.</p> <!-- HTML_TAG_END --></p></div></div> <p>Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. An XLM-RoBERTa sequence has the following format:</p> <ul><li>single sequence: <code>&lt;s&gt; X &lt;/s&gt;</code></li> <li>pair of sequences: <code>&lt;s&gt; A &lt;/s&gt;&lt;/s&gt; B &lt;/s&gt;</code></li></ul></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.XGLMTokenizer.get_special_tokens_mask"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>get_special_tokens_mask</span></h4><!-- HTML_TAG_END --> <a id="transformers.XGLMTokenizer.get_special_tokens_mask" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.XGLMTokenizer.get_special_tokens_mask"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/xglm/tokenization_xglm.py#L214" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">token_ids_0<span class="opacity-60">: typing.List[int]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_1<span class="opacity-60">: typing.Optional[typing.List[int]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">already_has_special_tokens<span class="opacity-60">: bool = False</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>List[int]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XGLMTokenizer.get_special_tokens_mask.token_ids_0" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XGLMTokenizer.get_special_tokens_mask.token_ids_0"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XGLMTokenizer.get_special_tokens_mask.token_ids_1" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XGLMTokenizer.get_special_tokens_mask.token_ids_1"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_1</strong> (<code>List[int]</code>, 
<em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XGLMTokenizer.get_special_tokens_mask.already_has_special_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XGLMTokenizer.get_special_tokens_mask.already_has_special_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>already_has_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the token list is already formatted with special tokens for the model.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.XGLMTokenizer.get_special_tokens_mask.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>List[int]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.</p> <!-- HTML_TAG_END --></p></div></div> <p>Retrieve sequence ids from a token list that has no special tokens added. 
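</p> <p>A short sketch of how this method pairs with <code>build_inputs_with_special_tokens</code> (the checkpoint name is an assumption; no specific output values are implied):</p> <pre><code class="language-python">from transformers import XGLMTokenizer

tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")  # assumed checkpoint name

# Add the model's special tokens around a plain list of token IDs ...
ids = tokenizer.build_inputs_with_special_tokens([100, 200, 300])

# ... then mark which positions hold special tokens (1) versus sequence tokens (0).
mask = tokenizer.get_special_tokens_mask(ids, already_has_special_tokens=True)
print(list(zip(ids, mask)))
</code></pre> <p>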
This method is called when adding special tokens using the tokenizer <code>prepare_for_model</code> method.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.XGLMTokenizer.create_token_type_ids_from_sequences"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>create_token_type_ids_from_sequences</span></h4><!-- HTML_TAG_END --> <a id="transformers.XGLMTokenizer.create_token_type_ids_from_sequences" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.XGLMTokenizer.create_token_type_ids_from_sequences"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/xglm/tokenization_xglm.py#L242" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_0<span class="opacity-60">: typing.List[int]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_1<span class="opacity-60">: typing.Optional[typing.List[int]] = None</span></span> </span> 
<span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>List[int]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XGLMTokenizer.create_token_type_ids_from_sequences.token_ids_0" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XGLMTokenizer.create_token_type_ids_from_sequences.token_ids_0"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XGLMTokenizer.create_token_type_ids_from_sequences.token_ids_1" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XGLMTokenizer.create_token_type_ids_from_sequences.token_ids_1"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.XGLMTokenizer.create_token_type_ids_from_sequences.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>List[int]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p 
class="text-base"><!-- HTML_TAG_START --> <p>List of zeros.</p> <!-- HTML_TAG_END --></p></div></div> <p>Create a mask from the two sequences passed to be used in a sequence-pair classification task. XLM-RoBERTa does not make use of token type ids, therefore a list of zeros is returned.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.XGLMTokenizer.save_vocabulary"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>save_vocabulary</span></h4><!-- HTML_TAG_END --> <a id="transformers.XGLMTokenizer.save_vocabulary" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.XGLMTokenizer.save_vocabulary"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/xglm/tokenization_xglm.py#L298" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">save_directory<span class="opacity-60">: str</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">filename_prefix<span class="opacity-60">: typing.Optional[str] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div></div></div> <h2 class="relative group"><a id="transformers.XGLMTokenizerFast" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XGLMTokenizerFast"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>XGLMTokenizerFast </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.XGLMTokenizerFast"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">XGLMTokenizerFast</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.XGLMTokenizerFast" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.XGLMTokenizerFast"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/xglm/tokenization_xglm_fast.py#L50" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">vocab_file<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tokenizer_file<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">bos_token<span class="opacity-60"> = &#39;&lt;s&gt;&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eos_token<span class="opacity-60"> = &#39;&lt;/s&gt;&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">sep_token<span class="opacity-60"> = &#39;&lt;/s&gt;&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cls_token<span class="opacity-60"> = &#39;&lt;s&gt;&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">unk_token<span class="opacity-60"> = &#39;&lt;unk&gt;&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_token<span class="opacity-60"> = &#39;&lt;pad&gt;&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XGLMTokenizerFast.vocab_file" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XGLMTokenizerFast.vocab_file"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 
28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>vocab_file</strong> (<code>str</code>) &#x2014; Path to the vocabulary file.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XGLMTokenizerFast.bos_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XGLMTokenizerFast.bos_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>bos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;s&gt;&quot;</code>) &#x2014; The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. 
The token used is the <code>cls_token</code>.</p> </div><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XGLMTokenizerFast.eos_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XGLMTokenizerFast.eos_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>eos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;/s&gt;&quot;</code>) &#x2014; The end of sequence token.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the <code>sep_token</code>.</p> </div><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XGLMTokenizerFast.sep_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XGLMTokenizerFast.sep_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>sep_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;/s&gt;&quot;</code>) &#x2014; The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. 
It is also used as the last token of a sequence built with special tokens.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XGLMTokenizerFast.cls_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XGLMTokenizerFast.cls_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cls_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;s&gt;&quot;</code>) &#x2014; The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XGLMTokenizerFast.unk_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XGLMTokenizerFast.unk_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;unk&gt;&quot;</code>) &#x2014; The unknown token. 
A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XGLMTokenizerFast.pad_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XGLMTokenizerFast.pad_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pad_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;pad&gt;&quot;</code>) &#x2014; The token used for padding, for example when batching sequences of different lengths.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XGLMTokenizerFast.additional_special_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XGLMTokenizerFast.additional_special_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>additional_special_tokens</strong> (<code>List[str]</code>, <em>optional</em>, defaults to <code>[&quot;&lt;s&gt;NOTUSED&quot;, &quot;&lt;/s&gt;NOTUSED&quot;]</code>) &#x2014; Additional special tokens used by the tokenizer.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Construct a “fast” XGLM tokenizer (backed by HuggingFace’s <em>tokenizers</em> library). Adapted from <a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaTokenizer">RobertaTokenizer</a> and <a href="/docs/transformers/pr_16143/en/model_doc/xlnet#transformers.XLNetTokenizer">XLNetTokenizer</a>. 
Based on <a href="https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=BPE#models" rel="nofollow">BPE</a>.</p> <p>This tokenizer inherits from <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast">PreTrainedTokenizerFast</a> which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.XGLMTokenizerFast.build_inputs_with_special_tokens"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>build_inputs_with_special_tokens</span></h4><!-- HTML_TAG_END --> <a id="transformers.XGLMTokenizerFast.build_inputs_with_special_tokens" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.XGLMTokenizerFast.build_inputs_with_special_tokens"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/xglm/tokenization_xglm_fast.py#L140" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">token_ids_0<span class="opacity-60">: typing.List[int]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_1<span class="opacity-60">: typing.Optional[typing.List[int]] = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>List[int]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XGLMTokenizerFast.build_inputs_with_special_tokens.token_ids_0" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XGLMTokenizerFast.build_inputs_with_special_tokens.token_ids_0"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs to which the special tokens will be added.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XGLMTokenizerFast.build_inputs_with_special_tokens.token_ids_1" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XGLMTokenizerFast.build_inputs_with_special_tokens.token_ids_1"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.<!-- HTML_TAG_END --> </span></span> </li></ul> <div 
class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.XGLMTokenizerFast.build_inputs_with_special_tokens.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>List[int]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>List of <a href="../glossary#input-ids">input IDs</a> with the appropriate special tokens.</p> <!-- HTML_TAG_END --></p></div></div> <p>Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. An XLM-RoBERTa sequence has the following format:</p> <ul><li>single sequence: <code>&lt;s&gt; X &lt;/s&gt;</code></li> <li>pair of sequences: <code>&lt;s&gt; A &lt;/s&gt;&lt;/s&gt; B &lt;/s&gt;</code></li></ul></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.XGLMTokenizerFast.create_token_type_ids_from_sequences"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>create_token_type_ids_from_sequences</span></h4><!-- HTML_TAG_END --> <a id="transformers.XGLMTokenizerFast.create_token_type_ids_from_sequences" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.XGLMTokenizerFast.create_token_type_ids_from_sequences"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm 
flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/xglm/tokenization_xglm_fast.py#L165" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_0<span class="opacity-60">: typing.List[int]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_1<span class="opacity-60">: typing.Optional[typing.List[int]] = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>List[int]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XGLMTokenizerFast.create_token_type_ids_from_sequences.token_ids_0" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XGLMTokenizerFast.create_token_type_ids_from_sequences.token_ids_0"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XGLMTokenizerFast.create_token_type_ids_from_sequences.token_ids_1" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XGLMTokenizerFast.create_token_type_ids_from_sequences.token_ids_1"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 
56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.XGLMTokenizerFast.create_token_type_ids_from_sequences.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>List[int]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>List of zeros.</p> <!-- HTML_TAG_END --></p></div></div> <p>Create a mask from the two sequences passed to be used in a sequence-pair classification task. XLM-RoBERTa does not make use of token type ids, therefore a list of zeros is returned.</p></div></div> <h2 class="relative group"><a id="transformers.XGLMModel" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XGLMModel"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>XGLMModel </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.XGLMModel"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">XGLMModel</span></span></h3><!-- HTML_TAG_END --> <a 
id="transformers.XGLMModel" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.XGLMModel"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/xglm/modeling_xglm.py#L531" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60">: XGLMConfig</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">embed_tokens<span class="opacity-60">: typing.Optional[torch.nn.modules.sparse.Embedding] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XGLMModel.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XGLMModel.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/xglm#transformers.XGLMConfig">XGLMConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XGLMModel.embed_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XGLMModel.embed_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>embed_tokens</strong> (nn.Embedding) &#x2014; output embedding<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>The bare XGLM Model transformer outputting raw hidden-states without any specific head on top. This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads etc.)</p> <p>This model is also a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and behavior.</p> <p>Transformer decoder consisting of <em>config.num_layers</em> layers. 
Each layer is a <code>XGLMDecoderLayer</code></p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.XGLMModel.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.XGLMModel.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.XGLMModel.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/xglm/modeling_xglm.py#L589" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span 
class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cross_attn_head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_cache<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPastAndCrossAttentions" >transformers.modeling_outputs.BaseModelOutputWithPastAndCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XGLMModel.forward.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XGLMModel.forward.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> 
<span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/xglm#transformers.XGLMTokenizer">XGLMTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XGLMModel.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XGLMModel.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XGLMModel.forward.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XGLMModel.forward.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>torch.Tensor</code> of shape <code>(encoder_layers, encoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XGLMModel.forward.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XGLMModel.forward.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, 
embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>input_ids</code> of shape <code>(batch_size, sequence_length)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XGLMModel.forward.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XGLMModel.forward.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, target_sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. If <code>past_key_values</code> is used, optionally only the last <code>inputs_embeds</code> have to be input (see <code>past_key_values</code>). 
This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XGLMModel.forward.use_cache" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XGLMModel.forward.use_cache"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XGLMModel.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XGLMModel.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XGLMModel.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XGLMModel.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XGLMModel.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XGLMModel.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XGLMModel.forward.encoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XGLMModel.forward.encoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_hidden_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, encoder_sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XGLMModel.forward.encoder_attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XGLMModel.forward.encoder_attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, encoder_sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.XGLMModel.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPastAndCrossAttentions" >transformers.modeling_outputs.BaseModelOutputWithPastAndCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPastAndCrossAttentions" >transformers.modeling_outputs.BaseModelOutputWithPastAndCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/xglm#transformers.XGLMConfig" >XGLMConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) — Sequence of hidden-states at the output of the last layer of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) — Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and optionally if <code>config.is_encoder_decoder=True</code> 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if <code>config.is_encoder_decoder=True</code> in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, 
num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> and <code>config.add_cross_attention=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder’s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/xglm#transformers.XGLMModel">XGLMModel</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> XGLMTokenizer, XGLMModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = XGLMTokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/xglm-564M&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = XGLMModel.from_pretrained(<span class="hljs-string">&quot;facebook/xglm-564M&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; 
</span>last_hidden_states = outputs.last_hidden_state<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.XGLMForCausalLM" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XGLMForCausalLM"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>XGLMForCausalLM </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.XGLMForCausalLM"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">XGLMForCausalLM</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.XGLMForCausalLM" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.XGLMForCausalLM"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" 
href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/xglm/modeling_xglm.py#L817" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XGLMForCausalLM.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XGLMForCausalLM.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/xglm#transformers.XGLMConfig">XGLMConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>The XGLM Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings).</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)</p> <p>This model is also a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.XGLMForCausalLM.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.XGLMForCausalLM.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.XGLMForCausalLM.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/xglm/modeling_xglm.py#L847" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">encoder_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cross_attn_head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_cache<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.CausalLMOutputWithCrossAttentions" >transformers.modeling_outputs.CausalLMOutputWithCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XGLMForCausalLM.forward.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XGLMForCausalLM.forward.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 
28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/xglm#transformers.XGLMTokenizer">XGLMTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XGLMForCausalLM.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XGLMForCausalLM.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XGLMForCausalLM.forward.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XGLMForCausalLM.forward.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>torch.Tensor</code> of shape <code>(encoder_layers, encoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XGLMForCausalLM.forward.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XGLMForCausalLM.forward.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, 
encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>input_ids</code> of shape <code>(batch_size, sequence_length)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XGLMForCausalLM.forward.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XGLMForCausalLM.forward.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, target_sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. If <code>past_key_values</code> is used, optionally only the last <code>inputs_embeds</code> have to be input (see <code>past_key_values</code>).
This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XGLMForCausalLM.forward.use_cache" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XGLMForCausalLM.forward.use_cache"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XGLMForCausalLM.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XGLMForCausalLM.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XGLMForCausalLM.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XGLMForCausalLM.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XGLMForCausalLM.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XGLMForCausalLM.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XGLMForCausalLM.forward.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XGLMForCausalLM.forward.labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. Indices should either be in <code>[0, ..., config.vocab_size]</code> or -100 (see <code>input_ids</code> docstring). Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.XGLMForCausalLM.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.CausalLMOutputWithCrossAttentions" >transformers.modeling_outputs.CausalLMOutputWithCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.CausalLMOutputWithCrossAttentions" >transformers.modeling_outputs.CausalLMOutputWithCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/xglm#transformers.XGLMConfig" >XGLMConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) — Language modeling loss (for next-token prediction).</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) — Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, 
sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Cross attentions weights after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) — Tuple of <code>torch.FloatTensor</code> tuples of length <code>config.n_layers</code>, with each tuple containing the cached key, value states of the self-attention and the cross-attention layers if model is used in encoder-decoder setting. Only relevant if <code>config.is_decoder = True</code>.</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/xglm#transformers.XGLMForCausalLM">XGLMForCausalLM</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> XGLMTokenizer, 
XGLMForCausalLM <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = XGLMTokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/xglm-564M&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = XGLMForCausalLM.from_pretrained(<span class="hljs-string">&quot;facebook/xglm-564M&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=inputs[<span class="hljs-string">&quot;input_ids&quot;</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.FlaxXGLMModel" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxXGLMModel"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>FlaxXGLMModel </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxXGLMModel"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">FlaxXGLMModel</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.FlaxXGLMModel" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxXGLMModel"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" 
height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/xglm/modeling_flax_xglm.py#L687" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60">: XGLMConfig</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_shape<span class="opacity-60">: typing.Tuple[int] = (1, 1)</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">seed<span class="opacity-60">: int = 0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dtype<span class="opacity-60">: dtype = &lt;class &#39;jax._src.numpy.lax_numpy.float32&#39;&gt;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxXGLMModel.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxXGLMModel.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a 
href="/docs/transformers/pr_16143/en/model_doc/xglm#transformers.XGLMConfig">XGLMConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxXGLMModel.dtype" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxXGLMModel.dtype"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>dtype</strong> (<code>jax.numpy.dtype</code>, <em>optional</em>, defaults to <code>jax.numpy.float32</code>) &#x2014; The data type of the computation. Can be one of <code>jax.numpy.float32</code>, <code>jax.numpy.float16</code> (on GPUs) and <code>jax.numpy.bfloat16</code> (on TPUs).</p> <p>This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If specified all the computation will be performed with the given <code>dtype</code>.</p> <p><strong>Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.</strong></p> <p>If you wish to change the dtype of the model parameters, see <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.to_fp16">to_fp16()</a> and <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.to_bf16">to_bf16()</a>.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>The bare XGLM Model transformer outputting raw hidden-states without any specific head on top. This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel">FlaxPreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)</p> <p>This model is also a Flax Linen <a href="https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html" rel="nofollow">flax.nn.Module</a> subclass. 
Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.</p> <p>Finally, this model supports inherent JAX features such as:</p> <ul><li><a href="https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit" rel="nofollow">Just-In-Time (JIT) compilation</a></li> <li><a href="https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation" rel="nofollow">Automatic Differentiation</a></li> <li><a href="https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap" rel="nofollow">Vectorization</a></li> <li><a href="https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap" rel="nofollow">Parallelization</a></li></ul> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxXGLMPreTrainedModel.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.FlaxXGLMPreTrainedModel.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxXGLMPreTrainedModel.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/xglm/modeling_flax_xglm.py#L609" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> 
<span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: ndarray</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60">: typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60">: typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_hidden_states<span class="opacity-60">: typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_attention_mask<span class="opacity-60">: typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">train<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">params<span class="opacity-60">: dict = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60">: dict = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dropout_rng<span class="opacity-60">: PRNGKey = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions" >transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a 
id="transformers.FlaxXGLMPreTrainedModel.__call__.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxXGLMPreTrainedModel.__call__.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/xglm#transformers.XGLMTokenizer">~XGLMTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxXGLMPreTrainedModel.__call__.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxXGLMPreTrainedModel.__call__.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxXGLMPreTrainedModel.__call__.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxXGLMPreTrainedModel.__call__.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxXGLMPreTrainedModel.__call__.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxXGLMPreTrainedModel.__call__.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxXGLMPreTrainedModel.__call__.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxXGLMPreTrainedModel.__call__.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxXGLMPreTrainedModel.__call__.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxXGLMPreTrainedModel.__call__.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.FlaxXGLMPreTrainedModel.__call__.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions" >transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- 
HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions" >transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/xglm#transformers.XGLMConfig" >XGLMConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) — Sequence of hidden-states at the output of the last layer of the model.</p> <p>If <code>past_key_values</code> is used, only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(jnp.ndarray))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) — Tuple of <code>tuple(jnp.ndarray)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and optionally if <code>config.is_encoder_decoder=True</code> 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if <code>config.is_encoder_decoder=True</code> in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> and <code>config.add_cross_attention=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder’s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <code>FlaxXGLMPreTrainedModel</code> forward
method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> XGLMTokenizer, FlaxXGLMModel <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = XGLMTokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/xglm-564M&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxXGLMModel.from_pretrained(<span class="hljs-string">&quot;facebook/xglm-564M&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;jax&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.FlaxXGLMForCausalLM" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxXGLMForCausalLM"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 
40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>FlaxXGLMForCausalLM </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxXGLMForCausalLM"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">FlaxXGLMForCausalLM</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.FlaxXGLMForCausalLM" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxXGLMForCausalLM"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/xglm/modeling_flax_xglm.py#L766" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60">: XGLMConfig</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_shape<span class="opacity-60">: typing.Tuple[int] = (1, 1)</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">seed<span class="opacity-60">: int = 0</span></span> </span><span class="comma cursor-pointer"><span class="rounded 
hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dtype<span class="opacity-60">: dtype = &lt;class &#39;jax._src.numpy.lax_numpy.float32&#39;&gt;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxXGLMForCausalLM.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxXGLMForCausalLM.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/xglm#transformers.XGLMConfig">XGLMConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxXGLMForCausalLM.dtype" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxXGLMForCausalLM.dtype"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>dtype</strong> (<code>jax.numpy.dtype</code>, <em>optional</em>, defaults to <code>jax.numpy.float32</code>) &#x2014; The data type of the computation. Can be one of <code>jax.numpy.float32</code>, <code>jax.numpy.float16</code> (on GPUs) and <code>jax.numpy.bfloat16</code> (on TPUs).</p> <p>This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If specified all the computation will be performed with the given <code>dtype</code>.</p> <p><strong>Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.</strong></p> <p>If you wish to change the dtype of the model parameters, see <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.to_fp16">to_fp16()</a> and <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.to_bf16">to_bf16()</a>.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>The XGLM Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings).</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel">FlaxPreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)</p> <p>This model is also a Flax Linen <a href="https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html" rel="nofollow">flax.nn.Module</a> subclass. 
Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.</p> <p>Finally, this model supports inherent JAX features such as:</p> <ul><li><a href="https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit" rel="nofollow">Just-In-Time (JIT) compilation</a></li> <li><a href="https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation" rel="nofollow">Automatic Differentiation</a></li> <li><a href="https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap" rel="nofollow">Vectorization</a></li> <li><a href="https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap" rel="nofollow">Parallelization</a></li></ul> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxXGLMPreTrainedModel.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.FlaxXGLMPreTrainedModel.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxXGLMPreTrainedModel.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/xglm/modeling_flax_xglm.py#L609" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> 
<span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: ndarray</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60">: typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60">: typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_hidden_states<span class="opacity-60">: typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_attention_mask<span class="opacity-60">: typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">train<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">params<span class="opacity-60">: dict = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60">: dict = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dropout_rng<span class="opacity-60">: PRNGKey = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions" >transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a 
id="transformers.FlaxXGLMPreTrainedModel.__call__.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxXGLMPreTrainedModel.__call__.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/xglm#transformers.XGLMTokenizer">~XGLMTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxXGLMPreTrainedModel.__call__.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxXGLMPreTrainedModel.__call__.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxXGLMPreTrainedModel.__call__.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxXGLMPreTrainedModel.__call__.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxXGLMPreTrainedModel.__call__.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxXGLMPreTrainedModel.__call__.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxXGLMPreTrainedModel.__call__.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxXGLMPreTrainedModel.__call__.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxXGLMPreTrainedModel.__call__.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxXGLMPreTrainedModel.__call__.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.FlaxXGLMPreTrainedModel.__call__.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions" >transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span 
class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions" >transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/xglm#transformers.XGLMConfig" >XGLMConfig</a>) and inputs.</p> <ul> <li> <p><strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) — Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Cross attentions weights after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(jnp.ndarray))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) — Tuple of <code>jnp.ndarray</code> tuples of length <code>config.n_layers</code>, with each tuple containing the cached key, value states of the self-attention and the cross-attention layers if model is used in encoder-decoder setting. 
Only relevant if <code>config.is_decoder = True</code>.</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <code>FlaxXGLMPreTrainedModel</code> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> XGLMTokenizer, FlaxXGLMForCausalLM <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = XGLMTokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/xglm-564M&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxXGLMForCausalLM.from_pretrained(<span class="hljs-string">&quot;facebook/xglm-564M&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;np&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># retrieve logits for next token</span> <span class="hljs-meta">&gt;&gt;&gt; </span>next_token_logits = outputs.logits[:, -<span class="hljs-number">1</span>]<!-- HTML_TAG_END --></pre></div></div></div> <script type="module" data-hydrate="1tsazdn"> import { start } from "/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"; start({ target: document.querySelector('[data-hydrate="1tsazdn"]').parentNode, paths: {"base":"/docs/transformers/pr_16143/en","assets":"/docs/transformers/pr_16143/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [
import("/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"), import("/docs/transformers/pr_16143/en/_app/pages/model_doc/xglm.mdx-b5ed5977.js") ], params: {} } }); </script>
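<p>Because <code>FlaxXGLMForCausalLM</code> adds a language modeling head, the same checkpoint can also be used for autoregressive text generation through the generic Flax <code>generate()</code> method. The snippet below is a minimal, illustrative sketch rather than an official example: the prompt text and the <code>max_length=20</code> value are arbitrary choices.</p> <pre>&gt;&gt;&gt; import numpy as np
&gt;&gt;&gt; from transformers import XGLMTokenizer, FlaxXGLMForCausalLM

&gt;&gt;&gt; tokenizer = XGLMTokenizer.from_pretrained(&quot;facebook/xglm-564M&quot;)
&gt;&gt;&gt; model = FlaxXGLMForCausalLM.from_pretrained(&quot;facebook/xglm-564M&quot;)

&gt;&gt;&gt; # encode a short prompt and greedily generate a continuation
&gt;&gt;&gt; inputs = tokenizer(&quot;Hello, my dog is cute&quot;, return_tensors=&quot;np&quot;)
&gt;&gt;&gt; generated = model.generate(inputs[&quot;input_ids&quot;], max_length=20)

&gt;&gt;&gt; # decode the generated token ids back into text
&gt;&gt;&gt; tokenizer.batch_decode(np.asarray(generated.sequences), skip_special_tokens=True)</pre>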
204
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en
hf_public_repos/doc-build-dev/transformers/pr_16143/en/model_doc/led.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;led&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;overview&quot;,&quot;title&quot;:&quot;Overview&quot;},{&quot;local&quot;:&quot;transformers.LEDConfig&quot;,&quot;title&quot;:&quot;LEDConfig&quot;},{&quot;local&quot;:&quot;transformers.LEDTokenizer&quot;,&quot;title&quot;:&quot;LEDTokenizer&quot;},{&quot;local&quot;:&quot;transformers.LEDTokenizerFast&quot;,&quot;title&quot;:&quot;LEDTokenizerFast&quot;},{&quot;local&quot;:&quot;transformers.models.led.modeling_led.LEDEncoderBaseModelOutput&quot;,&quot;title&quot;:&quot;LED specific outputs&quot;},{&quot;local&quot;:&quot;transformers.LEDModel&quot;,&quot;title&quot;:&quot;LEDModel&quot;},{&quot;local&quot;:&quot;transformers.LEDForConditionalGeneration&quot;,&quot;title&quot;:&quot;LEDForConditionalGeneration&quot;},{&quot;local&quot;:&quot;transformers.LEDForSequenceClassification&quot;,&quot;title&quot;:&quot;LEDForSequenceClassification&quot;},{&quot;local&quot;:&quot;transformers.LEDForQuestionAnswering&quot;,&quot;title&quot;:&quot;LEDForQuestionAnswering&quot;},{&quot;local&quot;:&quot;transformers.TFLEDModel&quot;,&quot;title&quot;:&quot;TFLEDModel&quot;},{&quot;local&quot;:&quot;transformers.TFLEDForConditionalGeneration&quot;,&quot;title&quot;:&quot;TFLEDForConditionalGeneration&quot;}],&quot;title&quot;:&quot;LED&quot;}" data-svelte="svelte-1phssyn"> <link rel="stylesheet" href="/docs/transformers/pr_16143/en/_app/assets/pages/__layout.svelte-a5c8879b.css"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/vendor-4833417e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/paths-4b3c6e7e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/model_doc/led.mdx-d4a9a769.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Tip-fffd6df1.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Docstring-4f315ed9.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/IconCopyLink-4b81c553.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CodeBlock-6a3d1b46.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CopyButton-dacfbfaf.js"> <h1 class="relative group"><a id="led" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#led"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></span></a> <span>LED </span></h1> <h2 class="relative group"><a id="overview" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#overview"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Overview </span></h2> <p>The LED model was proposed in <a href="https://arxiv.org/abs/2004.05150" rel="nofollow">Longformer: The Long-Document Transformer</a> by Iz Beltagy, Matthew E. Peters, Arman Cohan.</p> <p>The abstract from the paper is the following:</p> <p><em>Transformer-based models are unable to process long sequences due to their self-attention operation, which scales quadratically with the sequence length. To address this limitation, we introduce the Longformer with an attention mechanism that scales linearly with sequence length, making it easy to process documents of thousands of tokens or longer. Longformer’s attention mechanism is a drop-in replacement for the standard self-attention and combines a local windowed attention with a task motivated global attention. Following prior work on long-sequence transformers, we evaluate Longformer on character-level language modeling and achieve state-of-the-art results on text8 and enwik8. In contrast to most prior work, we also pretrain Longformer and finetune it on a variety of downstream tasks. Our pretrained Longformer consistently outperforms RoBERTa on long document tasks and sets new state-of-the-art results on WikiHop and TriviaQA. We finally introduce the Longformer-Encoder-Decoder (LED), a Longformer variant for supporting long document generative sequence-to-sequence tasks, and demonstrate its effectiveness on the arXiv summarization dataset.</em></p> <p>Tips:</p> <ul><li><a href="/docs/transformers/pr_16143/en/model_doc/led#transformers.LEDForConditionalGeneration">LEDForConditionalGeneration</a> is an extension of <a href="/docs/transformers/pr_16143/en/model_doc/bart#transformers.BartForConditionalGeneration">BartForConditionalGeneration</a> exchanging the traditional <em>self-attention</em> layer with <em>Longformer</em>’s <em>chunked self-attention</em> layer. <a href="/docs/transformers/pr_16143/en/model_doc/led#transformers.LEDTokenizer">LEDTokenizer</a> is an alias of <a href="/docs/transformers/pr_16143/en/model_doc/bart#transformers.BartTokenizer">BartTokenizer</a>.</li> <li>LED works very well on long-range <em>sequence-to-sequence</em> tasks where the <code>input_ids</code> largely exceed a length of 1024 tokens.</li> <li>LED pads the <code>input_ids</code> to be a multiple of <code>config.attention_window</code> if required. 
Therefore, a small speed-up is gained when <a href="/docs/transformers/pr_16143/en/model_doc/led#transformers.LEDTokenizer">LEDTokenizer</a> is used with the <code>pad_to_multiple_of</code> argument.</li> <li>LED makes use of <em>global attention</em> by means of the <code>global_attention_mask</code> (see <a href="/docs/transformers/pr_16143/en/model_doc/longformer#transformers.LongformerModel">LongformerModel</a>). For summarization, it is advised to put <em>global attention</em> only on the first <code>&lt;s&gt;</code> token. For question answering, it is advised to put <em>global attention</em> on all tokens of the question.</li> <li>To fine-tune LED on all 16384 tokens, it is necessary to enable <em>gradient checkpointing</em> by executing <code>model.gradient_checkpointing_enable()</code>.</li> <li>A notebook showing how to evaluate LED can be accessed <a href="https://colab.research.google.com/drive/12INTTR6n64TzS4RrXZxMSXfrOd9Xzamo?usp=sharing" rel="nofollow">here</a>.</li> <li>A notebook showing how to fine-tune LED can be accessed <a href="https://colab.research.google.com/drive/12LjJazBl7Gam0XBPy_y0CTOJZeZ34c2v?usp=sharing" rel="nofollow">here</a>.</li></ul> <p>This model was contributed by <a href="https://huggingface.co/patrickvonplaten" rel="nofollow">patrickvonplaten</a>.</p> <h2 class="relative group"><a id="transformers.LEDConfig" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LEDConfig"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>LEDConfig </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.LEDConfig"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z"
fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">LEDConfig</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.LEDConfig" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.LEDConfig"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/led/configuration_led.py#L31" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">vocab_size<span class="opacity-60"> = 50265</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_encoder_position_embeddings<span class="opacity-60"> = 16384</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_decoder_position_embeddings<span class="opacity-60"> = 1024</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_layers<span class="opacity-60"> = 12</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_ffn_dim<span class="opacity-60"> = 4096</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_attention_heads<span class="opacity-60"> = 16</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_layers<span class="opacity-60"> = 12</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_ffn_dim<span class="opacity-60"> = 4096</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_attention_heads<span class="opacity-60"> = 16</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_layerdrop<span class="opacity-60"> = 0.0</span></span> </span><span class="comma 
cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_layerdrop<span class="opacity-60"> = 0.0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_cache<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">is_encoder_decoder<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">activation_function<span class="opacity-60"> = &#39;gelu&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">d_model<span class="opacity-60"> = 1024</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dropout<span class="opacity-60"> = 0.1</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_dropout<span class="opacity-60"> = 0.0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">activation_dropout<span class="opacity-60"> = 0.0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">init_std<span class="opacity-60"> = 0.02</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_start_token_id<span class="opacity-60"> = 2</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">classifier_dropout<span class="opacity-60"> = 0.0</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_token_id<span class="opacity-60"> = 1</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">bos_token_id<span class="opacity-60"> = 0</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eos_token_id<span class="opacity-60"> = 2</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_window<span class="opacity-60">: typing.Union[typing.List[int], int] = 512</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a 
id="transformers.LEDConfig.vocab_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LEDConfig.vocab_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 50265) &#x2014; Vocabulary size of the LED model. Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed when calling <a href="/docs/transformers/pr_16143/en/model_doc/led#transformers.LEDModel">LEDModel</a> or <a href="/docs/transformers/pr_16143/en/model_doc/led#transformers.TFLEDModel">TFLEDModel</a>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LEDConfig.d_model" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LEDConfig.d_model"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>d_model</strong> (<code>int</code>, <em>optional</em>, defaults to 1024) &#x2014; Dimensionality of the layers and the pooler layer.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LEDConfig.encoder_layers" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LEDConfig.encoder_layers"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 
84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of encoder layers.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LEDConfig.decoder_layers" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LEDConfig.decoder_layers"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of decoder layers.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LEDConfig.encoder_attention_heads" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LEDConfig.encoder_attention_heads"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 16) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LEDConfig.decoder_attention_heads" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 
with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LEDConfig.decoder_attention_heads"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 16) &#x2014; Number of attention heads for each attention layer in the Transformer decoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LEDConfig.decoder_ffn_dim" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LEDConfig.decoder_ffn_dim"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_ffn_dim</strong> (<code>int</code>, <em>optional</em>, defaults to 4096) &#x2014; Dimensionality of the &#x201C;intermediate&#x201D; (often named feed-forward) layer in decoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LEDConfig.encoder_ffn_dim" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LEDConfig.encoder_ffn_dim"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 
28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_ffn_dim</strong> (<code>int</code>, <em>optional</em>, defaults to 4096) &#x2014; Dimensionality of the &#x201C;intermediate&#x201D; (often named feed-forward) layer in encoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LEDConfig.activation_function" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LEDConfig.activation_function"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>activation_function</strong> (<code>str</code> or <code>function</code>, <em>optional</em>, defaults to <code>&quot;gelu&quot;</code>) &#x2014; The non-linear activation function (function or string) in the encoder and pooler. 
If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;silu&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LEDConfig.dropout" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LEDConfig.dropout"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LEDConfig.attention_dropout" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LEDConfig.attention_dropout"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout ratio for the attention probabilities.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LEDConfig.activation_dropout" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LEDConfig.activation_dropout"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 
0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>activation_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout ratio for activations inside the fully connected layer.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LEDConfig.classifier_dropout" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LEDConfig.classifier_dropout"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>classifier_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout ratio for classifier.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LEDConfig.max_encoder_position_embeddings" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LEDConfig.max_encoder_position_embeddings"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_encoder_position_embeddings</strong> (<code>int</code>, <em>optional</em>, defaults to 16384) &#x2014; The maximum sequence length that the encoder might ever be used with.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 
items-start"><a id="transformers.LEDConfig.max_decoder_position_embeddings" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LEDConfig.max_decoder_position_embeddings"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_decoder_position_embeddings</strong> (<code>int</code>, <em>optional</em>, defaults to 16384) &#x2014; The maximum sequence length that the decoder might ever be used with.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LEDConfig.init_std" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LEDConfig.init_std"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>init_std</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices. encoder_layerdrop &#x2014; (<code>float</code>, <em>optional</em>, defaults to 0.0): The LayerDrop probability for the encoder. See the <a href="https://arxiv.org/abs/1909.11556" rel="nofollow">LayerDrop paper</a> for more details. decoder_layerdrop &#x2014; (<code>float</code>, <em>optional</em>, defaults to 0.0): The LayerDrop probability for the decoder. 
See the <a href="https://arxiv.org/abs/1909.11556" rel="nofollow">LayerDrop paper</a> for more details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LEDConfig.use_cache" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LEDConfig.use_cache"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not the model should return the last key/values attentions (not used by all models)<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>This is the configuration class to store the configuration of a <a href="/docs/transformers/pr_16143/en/model_doc/led#transformers.LEDModel">LEDModel</a>. It is used to instantiate an LED model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the LED <a href="https://huggingface.co/allenai/led-base-16384" rel="nofollow">allenai/led-base-16384</a> architecture.</p> <p>Configuration objects inherit from <a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a> and can be used to control the model outputs. 
Read the documentation from <a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a> for more information.</p> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->&gt;&gt;&gt; from transformers import LEDModel, LEDConfig

&gt;&gt;&gt; # Initializing a LED allenai/led-base-16384 style configuration
&gt;&gt;&gt; configuration = LEDConfig()

&gt;&gt;&gt; # Initializing a model from the allenai/led-base-16384 style configuration
&gt;&gt;&gt; model = LEDModel(configuration)

&gt;&gt;&gt; # Accessing the model configuration
&gt;&gt;&gt; configuration = model.config
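
&gt;&gt;&gt; # Illustrative extension (not part of the original example): a configuration can also be
&gt;&gt;&gt; # saved to disk and reloaded through the standard PretrainedConfig API
&gt;&gt;&gt; configuration.save_pretrained(&quot;./led-base-16384-config&quot;)
&gt;&gt;&gt; configuration = LEDConfig.from_pretrained(&quot;./led-base-16384-config&quot;)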
<!-- HTML_TAG_END --></pre></div></div> <h2 class="relative group"><a id="transformers.LEDTokenizer" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LEDTokenizer"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>LEDTokenizer </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.LEDTokenizer"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5"
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">LEDTokenizer</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.LEDTokenizer" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.LEDTokenizer"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/led/tokenization_led.py#L39" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">vocab_file<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">merges_file<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">errors<span class="opacity-60"> = &#39;replace&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">bos_token<span class="opacity-60"> = &#39;&lt;s&gt;&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eos_token<span class="opacity-60"> = &#39;&lt;/s&gt;&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">sep_token<span class="opacity-60"> = &#39;&lt;/s&gt;&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">cls_token<span class="opacity-60"> = &#39;&lt;s&gt;&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">unk_token<span class="opacity-60"> = &#39;&lt;unk&gt;&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_token<span class="opacity-60"> = &#39;&lt;pad&gt;&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">mask_token<span class="opacity-60"> = &#39;&lt;mask&gt;&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">add_prefix_space<span class="opacity-60"> = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Construct a LED tokenizer.</p> <p><a href="/docs/transformers/pr_16143/en/model_doc/led#transformers.LEDTokenizer">LEDTokenizer</a> is identical to <a href="/docs/transformers/pr_16143/en/model_doc/bart#transformers.BartTokenizer">BartTokenizer</a> and runs end-to-end tokenization: punctuation splitting and wordpiece.</p> <p>Refer to superclass <a href="/docs/transformers/pr_16143/en/model_doc/bart#transformers.BartTokenizer">BartTokenizer</a> for usage examples and documentation concerning parameters.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.BartTokenizer.build_inputs_with_special_tokens"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>build_inputs_with_special_tokens</span></h4><!-- HTML_TAG_END --> <a id="transformers.BartTokenizer.build_inputs_with_special_tokens" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.BartTokenizer.build_inputs_with_special_tokens"><svg 
class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/bart/tokenization_bart.py#L338" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_0<span class="opacity-60">: typing.List[int]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_1<span class="opacity-60">: typing.Optional[typing.List[int]] = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>List[int]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BartTokenizer.build_inputs_with_special_tokens.token_ids_0" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BartTokenizer.build_inputs_with_special_tokens.token_ids_0"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs to which the special tokens will be added.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a 
id="transformers.BartTokenizer.build_inputs_with_special_tokens.token_ids_1" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BartTokenizer.build_inputs_with_special_tokens.token_ids_1"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.BartTokenizer.build_inputs_with_special_tokens.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>List[int]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>List of <a href="../glossary#input-ids">input IDs</a> with the appropriate special tokens.</p> <!-- HTML_TAG_END --></p></div></div> <p>Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and adding special tokens. 
A BART sequence has the following format:</p> <ul><li>single sequence: <code>&lt;s&gt; X &lt;/s&gt;</code></li> <li>pair of sequences: <code>&lt;s&gt; A &lt;/s&gt;&lt;/s&gt; B &lt;/s&gt;</code></li></ul></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.BartTokenizer.get_special_tokens_mask"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>get_special_tokens_mask</span></h4><!-- HTML_TAG_END --> <a id="transformers.BartTokenizer.get_special_tokens_mask" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.BartTokenizer.get_special_tokens_mask"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/bart/tokenization_bart.py#L363" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_0<span class="opacity-60">: typing.List[int]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_1<span class="opacity-60">: 
typing.Optional[typing.List[int]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">already_has_special_tokens<span class="opacity-60">: bool = False</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>List[int]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BartTokenizer.get_special_tokens_mask.token_ids_0" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BartTokenizer.get_special_tokens_mask.token_ids_0"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BartTokenizer.get_special_tokens_mask.token_ids_1" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BartTokenizer.get_special_tokens_mask.token_ids_1"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a 
id="transformers.BartTokenizer.get_special_tokens_mask.already_has_special_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BartTokenizer.get_special_tokens_mask.already_has_special_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>already_has_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the token list is already formatted with special tokens for the model.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.BartTokenizer.get_special_tokens_mask.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>List[int]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.</p> <!-- HTML_TAG_END --></p></div></div> <p>Retrieve sequence ids from a token list that has no special tokens added. 
This method is called when adding special tokens using the tokenizer <code>prepare_for_model</code> method.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.BartTokenizer.create_token_type_ids_from_sequences"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>create_token_type_ids_from_sequences</span></h4><!-- HTML_TAG_END --> <a id="transformers.BartTokenizer.create_token_type_ids_from_sequences" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.BartTokenizer.create_token_type_ids_from_sequences"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/bart/tokenization_bart.py#L390" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_0<span class="opacity-60">: typing.List[int]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_1<span class="opacity-60">: typing.Optional[typing.List[int]] = None</span></span> </span> 
<span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>List[int]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BartTokenizer.create_token_type_ids_from_sequences.token_ids_0" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BartTokenizer.create_token_type_ids_from_sequences.token_ids_0"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BartTokenizer.create_token_type_ids_from_sequences.token_ids_1" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BartTokenizer.create_token_type_ids_from_sequences.token_ids_1"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.BartTokenizer.create_token_type_ids_from_sequences.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>List[int]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p 
class="text-base"><!-- HTML_TAG_START --> <p>List of zeros.</p> <!-- HTML_TAG_END --></p></div></div> <p>Create a mask from the two sequences passed to be used in a sequence-pair classification task. BART does not make use of token type ids, therefore a list of zeros is returned.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.BartTokenizer.save_vocabulary"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>save_vocabulary</span></h4><!-- HTML_TAG_END --> <a id="transformers.BartTokenizer.save_vocabulary" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.BartTokenizer.save_vocabulary"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/bart/tokenization_bart.py#L309" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">save_directory<span class="opacity-60">: str</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">filename_prefix<span 
class="opacity-60">: typing.Optional[str] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div></div></div> <h2 class="relative group"><a id="transformers.LEDTokenizerFast" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LEDTokenizerFast"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>LEDTokenizerFast </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.LEDTokenizerFast"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">LEDTokenizerFast</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.LEDTokenizerFast" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.LEDTokenizerFast"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto 
!text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/led/tokenization_led_fast.py#L40" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">vocab_file<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">merges_file<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tokenizer_file<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">errors<span class="opacity-60"> = &#39;replace&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">bos_token<span class="opacity-60"> = &#39;&lt;s&gt;&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eos_token<span class="opacity-60"> = &#39;&lt;/s&gt;&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">sep_token<span class="opacity-60"> = &#39;&lt;/s&gt;&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cls_token<span class="opacity-60"> = &#39;&lt;s&gt;&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">unk_token<span class="opacity-60"> = &#39;&lt;unk&gt;&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_token<span class="opacity-60"> = &#39;&lt;pad&gt;&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">mask_token<span class="opacity-60"> = &#39;&lt;mask&gt;&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">add_prefix_space<span class="opacity-60"> = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">trim_offsets<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Construct a “fast” LED tokenizer (backed by HuggingFace’s <em>tokenizers</em> library).</p> <p><a href="/docs/transformers/pr_16143/en/model_doc/led#transformers.LEDTokenizerFast">LEDTokenizerFast</a> is identical to <a 
href="/docs/transformers/pr_16143/en/model_doc/bart#transformers.BartTokenizerFast">BartTokenizerFast</a> and runs end-to-end tokenization: punctuation splitting and wordpiece.</p> <p>Refer to superclass <a href="/docs/transformers/pr_16143/en/model_doc/bart#transformers.BartTokenizerFast">BartTokenizerFast</a> for usage examples and documentation concerning parameters.</p></div> <h2 class="relative group"><a id="transformers.models.led.modeling_led.LEDEncoderBaseModelOutput" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.led.modeling_led.LEDEncoderBaseModelOutput"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>LED specific outputs </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.models.led.modeling_led.LEDEncoderBaseModelOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.models.led.modeling_led.</span><span class="font-semibold">LEDEncoderBaseModelOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.models.led.modeling_led.LEDEncoderBaseModelOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.models.led.modeling_led.LEDEncoderBaseModelOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 
28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/led/modeling_led.py#L1126" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">last_hidden_state<span class="opacity-60">: FloatTensor</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">global_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.led.modeling_led.LEDEncoderBaseModelOutput.last_hidden_state" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.led.modeling_led.LEDEncoderBaseModelOutput.last_hidden_state"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) &#x2014; Sequence of hidden-states at the output of the last layer of the model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex 
space-x-1.5 items-start"><a id="transformers.models.led.modeling_led.LEDEncoderBaseModelOutput.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.led.modeling_led.LEDEncoderBaseModelOutput.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.led.modeling_led.LEDEncoderBaseModelOutput.attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.led.modeling_led.LEDEncoderBaseModelOutput.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, x + attention_window + 1)</code>, where <code>x</code> is the number of tokens with global attention mask.</p> <p>Local attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
Those are the attention weights from every token in the sequence to every token with global attention (first <code>x</code> values) and to every token in the attention window (remaining <code>attention_window + 1</code> values). Note that the first <code>x</code> values refer to tokens with fixed positions in the text, but the remaining <code>attention_window + 1</code> values refer to tokens with relative positions: the attention weight of a token to itself is located at index <code>x + attention_window / 2</code> and the <code>attention_window / 2</code> preceding (succeeding) values are the attention weights to the <code>attention_window / 2</code> preceding (succeeding) tokens. If the attention window contains a token with global attention, the attention weight at the corresponding index is set to 0; the value should be accessed from the first <code>x</code> attention weights. If a token has global attention, the attention weights to all other tokens in <code>attentions</code> is set to 0, the values should be accessed from <code>global_attentions</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.led.modeling_led.LEDEncoderBaseModelOutput.global_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.led.modeling_led.LEDEncoderBaseModelOutput.global_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>global_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, x)</code>, where <code>x</code> is the number of tokens with global attention mask.</p> <p>Global attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
Those are the attention weights from every token with global attention to every token in the sequence.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for LEDEncoder’s outputs, with potential hidden states, local and global attentions.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.models.led.modeling_led.LEDSeq2SeqModelOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.models.led.modeling_led.</span><span class="font-semibold">LEDSeq2SeqModelOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.models.led.modeling_led.LEDSeq2SeqModelOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.models.led.modeling_led.LEDSeq2SeqModelOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/led/modeling_led.py#L1169" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">last_hidden_state<span class="opacity-60">: FloatTensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60">: typing.Optional[typing.List[torch.FloatTensor]] = None</span></span> </span><span class="comma 
cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cross_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_last_hidden_state<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_global_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.led.modeling_led.LEDSeq2SeqModelOutput.last_hidden_state" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.led.modeling_led.LEDSeq2SeqModelOutput.last_hidden_state"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) &#x2014; Sequence of hidden-states at the output of the last layer of the decoder of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the 
sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.led.modeling_led.LEDSeq2SeqModelOutput.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.led.modeling_led.LEDSeq2SeqModelOutput.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>List[torch.FloatTensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; List of <code>torch.FloatTensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.led.modeling_led.LEDSeq2SeqModelOutput.decoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.led.modeling_led.LEDSeq2SeqModelOutput.decoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, 
sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.led.modeling_led.LEDSeq2SeqModelOutput.decoder_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.led.modeling_led.LEDSeq2SeqModelOutput.decoder_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.led.modeling_led.LEDSeq2SeqModelOutput.cross_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.led.modeling_led.LEDSeq2SeqModelOutput.cross_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights 
of the decoder&#x2019;s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.led.modeling_led.LEDSeq2SeqModelOutput.encoder_last_hidden_state" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.led.modeling_led.LEDSeq2SeqModelOutput.encoder_last_hidden_state"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder of the model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.led.modeling_led.LEDSeq2SeqModelOutput.encoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.led.modeling_led.LEDSeq2SeqModelOutput.encoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span 
class="group flex space-x-1.5 items-start"><a id="transformers.models.led.modeling_led.LEDSeq2SeqModelOutput.encoder_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.led.modeling_led.LEDSeq2SeqModelOutput.encoder_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.led.modeling_led.LEDSeq2SeqModelOutput.encoder_global_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.led.modeling_led.LEDSeq2SeqModelOutput.encoder_global_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_global_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, x)</code>, where <code>x</code> is the number of tokens with global attention mask.</p> <p>Global attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
Those are the attention weights from every token with global attention to every token in the sequence.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for model encoder’s outputs that also contains : pre-computed hidden states that can speed up sequential decoding.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.models.led.modeling_led.LEDSeq2SeqLMOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.models.led.modeling_led.</span><span class="font-semibold">LEDSeq2SeqLMOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.models.led.modeling_led.LEDSeq2SeqLMOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.models.led.modeling_led.LEDSeq2SeqLMOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/led/modeling_led.py#L1237" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">loss<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logits<span class="opacity-60">: FloatTensor = None</span></span> </span><span class="comma cursor-pointer"><span 
class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60">: typing.Optional[typing.List[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cross_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_last_hidden_state<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_global_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.led.modeling_led.LEDSeq2SeqLMOutput.loss" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.led.modeling_led.LEDSeq2SeqLMOutput.loss"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when 
<code>labels</code> is provided) &#x2014; Language modeling loss.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.led.modeling_led.LEDSeq2SeqLMOutput.logits" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.led.modeling_led.LEDSeq2SeqLMOutput.logits"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) &#x2014; Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.led.modeling_led.LEDSeq2SeqLMOutput.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.led.modeling_led.LEDSeq2SeqLMOutput.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>List[torch.FloatTensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; List of <code>torch.FloatTensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a 
id="transformers.models.led.modeling_led.LEDSeq2SeqLMOutput.decoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.led.modeling_led.LEDSeq2SeqLMOutput.decoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.led.modeling_led.LEDSeq2SeqLMOutput.decoder_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.led.modeling_led.LEDSeq2SeqLMOutput.decoder_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a 
id="transformers.models.led.modeling_led.LEDSeq2SeqLMOutput.cross_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.led.modeling_led.LEDSeq2SeqLMOutput.cross_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder&#x2019;s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.led.modeling_led.LEDSeq2SeqLMOutput.encoder_last_hidden_state" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.led.modeling_led.LEDSeq2SeqLMOutput.encoder_last_hidden_state"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder of the model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.led.modeling_led.LEDSeq2SeqLMOutput.encoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full" href="#transformers.models.led.modeling_led.LEDSeq2SeqLMOutput.encoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.led.modeling_led.LEDSeq2SeqLMOutput.encoder_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.led.modeling_led.LEDSeq2SeqLMOutput.encoder_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.led.modeling_led.LEDSeq2SeqLMOutput.encoder_global_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full" href="#transformers.models.led.modeling_led.LEDSeq2SeqLMOutput.encoder_global_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_global_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, x)</code>, where <code>x</code> is the number of tokens with global attention mask.</p> <p>Global attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token with global attention to every token in the sequence.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for sequence-to-sequence language models outputs.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.models.led.modeling_led.LEDSeq2SeqSequenceClassifierOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.models.led.modeling_led.</span><span class="font-semibold">LEDSeq2SeqSequenceClassifierOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.models.led.modeling_led.LEDSeq2SeqSequenceClassifierOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.models.led.modeling_led.LEDSeq2SeqSequenceClassifierOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/led/modeling_led.py#L1304" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">loss<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logits<span class="opacity-60">: FloatTensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60">: typing.Optional[typing.List[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cross_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_last_hidden_state<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_global_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex 
items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.led.modeling_led.LEDSeq2SeqSequenceClassifierOutput.loss" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.led.modeling_led.LEDSeq2SeqSequenceClassifierOutput.loss"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>label</code> is provided) &#x2014; Classification (or regression if config.num_labels==1) loss.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.led.modeling_led.LEDSeq2SeqSequenceClassifierOutput.logits" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.led.modeling_led.LEDSeq2SeqSequenceClassifierOutput.logits"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) &#x2014; Classification (or regression if config.num_labels==1) scores (before SoftMax).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.led.modeling_led.LEDSeq2SeqSequenceClassifierOutput.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.models.led.modeling_led.LEDSeq2SeqSequenceClassifierOutput.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>List[torch.FloatTensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; List of <code>torch.FloatTensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.led.modeling_led.LEDSeq2SeqSequenceClassifierOutput.decoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.led.modeling_led.LEDSeq2SeqSequenceClassifierOutput.decoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.led.modeling_led.LEDSeq2SeqSequenceClassifierOutput.decoder_attentions" class="header-link block pr-0.5 text-lg 
no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.led.modeling_led.LEDSeq2SeqSequenceClassifierOutput.decoder_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.led.modeling_led.LEDSeq2SeqSequenceClassifierOutput.cross_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.led.modeling_led.LEDSeq2SeqSequenceClassifierOutput.cross_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder&#x2019;s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a 
id="transformers.models.led.modeling_led.LEDSeq2SeqSequenceClassifierOutput.encoder_last_hidden_state" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.led.modeling_led.LEDSeq2SeqSequenceClassifierOutput.encoder_last_hidden_state"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder of the model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.led.modeling_led.LEDSeq2SeqSequenceClassifierOutput.encoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.led.modeling_led.LEDSeq2SeqSequenceClassifierOutput.encoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.led.modeling_led.LEDSeq2SeqSequenceClassifierOutput.encoder_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute 
with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.led.modeling_led.LEDSeq2SeqSequenceClassifierOutput.encoder_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.led.modeling_led.LEDSeq2SeqSequenceClassifierOutput.encoder_global_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.led.modeling_led.LEDSeq2SeqSequenceClassifierOutput.encoder_global_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_global_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, x)</code>, where <code>x</code> is the number of tokens with global attention mask.</p> <p>Global attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
Those are the attention weights from every token with global attention to every token in the sequence.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for outputs of sequence-to-sequence sentence classification models.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.models.led.modeling_led.LEDSeq2SeqQuestionAnsweringModelOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.models.led.modeling_led.</span><span class="font-semibold">LEDSeq2SeqQuestionAnsweringModelOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.models.led.modeling_led.LEDSeq2SeqQuestionAnsweringModelOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.models.led.modeling_led.LEDSeq2SeqQuestionAnsweringModelOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/led/modeling_led.py#L1371" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">loss<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">start_logits<span class="opacity-60">: FloatTensor = None</span></span> 
</span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">end_logits<span class="opacity-60">: FloatTensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60">: typing.Optional[typing.List[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cross_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_last_hidden_state<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_global_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.led.modeling_led.LEDSeq2SeqQuestionAnsweringModelOutput.loss" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.led.modeling_led.LEDSeq2SeqQuestionAnsweringModelOutput.loss"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 
0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) &#x2014; Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.led.modeling_led.LEDSeq2SeqQuestionAnsweringModelOutput.start_logits" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.led.modeling_led.LEDSeq2SeqQuestionAnsweringModelOutput.start_logits"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>start_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Span-start scores (before SoftMax).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.led.modeling_led.LEDSeq2SeqQuestionAnsweringModelOutput.end_logits" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.led.modeling_led.LEDSeq2SeqQuestionAnsweringModelOutput.end_logits"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>end_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Span-end scores (before SoftMax).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.led.modeling_led.LEDSeq2SeqQuestionAnsweringModelOutput.past_key_values" 
class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.led.modeling_led.LEDSeq2SeqQuestionAnsweringModelOutput.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>List[torch.FloatTensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; List of <code>torch.FloatTensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.led.modeling_led.LEDSeq2SeqQuestionAnsweringModelOutput.decoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.led.modeling_led.LEDSeq2SeqQuestionAnsweringModelOutput.decoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span 
class="group flex space-x-1.5 items-start"><a id="transformers.models.led.modeling_led.LEDSeq2SeqQuestionAnsweringModelOutput.decoder_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.led.modeling_led.LEDSeq2SeqQuestionAnsweringModelOutput.decoder_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.led.modeling_led.LEDSeq2SeqQuestionAnsweringModelOutput.cross_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.led.modeling_led.LEDSeq2SeqQuestionAnsweringModelOutput.cross_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder&#x2019;s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.<!-- 
HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.led.modeling_led.LEDSeq2SeqQuestionAnsweringModelOutput.encoder_last_hidden_state" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.led.modeling_led.LEDSeq2SeqQuestionAnsweringModelOutput.encoder_last_hidden_state"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder of the model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.led.modeling_led.LEDSeq2SeqQuestionAnsweringModelOutput.encoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.led.modeling_led.LEDSeq2SeqQuestionAnsweringModelOutput.encoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a 
id="transformers.models.led.modeling_led.LEDSeq2SeqQuestionAnsweringModelOutput.encoder_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.led.modeling_led.LEDSeq2SeqQuestionAnsweringModelOutput.encoder_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.led.modeling_led.LEDSeq2SeqQuestionAnsweringModelOutput.encoder_global_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.led.modeling_led.LEDSeq2SeqQuestionAnsweringModelOutput.encoder_global_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_global_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, x)</code>, where <code>x</code> is the number of tokens with global attention mask.</p> <p>Global attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
Those are the attention weights from every token with global attention to every token in the sequence.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for outputs of sequence-to-sequence question answering models.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.models.led.modeling_tf_led.TFLEDEncoderBaseModelOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.models.led.modeling_tf_led.</span><span class="font-semibold">TFLEDEncoderBaseModelOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.models.led.modeling_tf_led.TFLEDEncoderBaseModelOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.models.led.modeling_tf_led.TFLEDEncoderBaseModelOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/led/modeling_tf_led.py#L1330" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">last_hidden_state<span class="opacity-60">: Tensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> 
</span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">global_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.led.modeling_tf_led.TFLEDEncoderBaseModelOutput.last_hidden_state" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.led.modeling_tf_led.TFLEDEncoderBaseModelOutput.last_hidden_state"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) &#x2014; Sequence of hidden-states at the output of the last layer of the model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.led.modeling_tf_led.TFLEDEncoderBaseModelOutput.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.led.modeling_tf_led.TFLEDEncoderBaseModelOutput.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START 
--><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.led.modeling_tf_led.TFLEDEncoderBaseModelOutput.attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.led.modeling_tf_led.TFLEDEncoderBaseModelOutput.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, x + attention_window + 1)</code>, where <code>x</code> is the number of tokens with global attention mask.</p> <p>Local attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token in the sequence to every token with global attention (first <code>x</code> values) and to every token in the attention window (remaining <code>attention_window + 1</code> values). Note that the first <code>x</code> values refer to tokens with fixed positions in the text, but the remaining <code>attention_window + 1</code> values refer to tokens with relative positions: the attention weight of a token to itself is located at index <code>x + attention_window / 2</code> and the <code>attention_window / 2</code> preceding (succeeding) values are the attention weights to the <code>attention_window / 2</code> preceding (succeeding) tokens. If the attention window contains a token with global attention, the attention weight at the corresponding index is set to 0; the value should be accessed from the first <code>x</code> attention weights.
If a token has global attention, the attention weights to all other tokens in <code>attentions</code> are set to 0; the values should be accessed from <code>global_attentions</code>.</p><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.led.modeling_tf_led.TFLEDEncoderBaseModelOutput.global_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.led.modeling_tf_led.TFLEDEncoderBaseModelOutput.global_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>global_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, x)</code>, where <code>x</code> is the number of tokens with global attention mask.</p> <p>Global attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Those are the attention weights from every token with global attention to every token in the sequence.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for Longformer’s outputs, with potential hidden states, local and global attentions.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.models.led.modeling_tf_led.TFLEDSeq2SeqModelOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.models.led.modeling_tf_led.</span><span class="font-semibold">TFLEDSeq2SeqModelOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.models.led.modeling_tf_led.TFLEDSeq2SeqModelOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.models.led.modeling_tf_led.TFLEDSeq2SeqModelOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/led/modeling_tf_led.py#L1373" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">last_hidden_state<span class="opacity-60">: Tensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60">: typing.Optional[typing.List[tensorflow.python.framework.ops.Tensor]] = 
None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cross_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_last_hidden_state<span class="opacity-60">: typing.Optional[tensorflow.python.framework.ops.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_global_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.led.modeling_tf_led.TFLEDSeq2SeqModelOutput.last_hidden_state" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.led.modeling_tf_led.TFLEDSeq2SeqModelOutput.last_hidden_state"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, 
hidden_size)</code>) &#x2014; Sequence of hidden-states at the output of the last layer of the decoder of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.led.modeling_tf_led.TFLEDSeq2SeqModelOutput.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.led.modeling_tf_led.TFLEDSeq2SeqModelOutput.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>List[tf.Tensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; List of <code>tf.Tensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.led.modeling_tf_led.TFLEDSeq2SeqModelOutput.decoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.led.modeling_tf_led.TFLEDSeq2SeqModelOutput.decoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when 
<code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.led.modeling_tf_led.TFLEDSeq2SeqModelOutput.decoder_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.led.modeling_tf_led.TFLEDSeq2SeqModelOutput.decoder_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.led.modeling_tf_led.TFLEDSeq2SeqModelOutput.cross_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.led.modeling_tf_led.TFLEDSeq2SeqModelOutput.cross_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cross_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when 
<code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder&#x2019;s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.led.modeling_tf_led.TFLEDSeq2SeqModelOutput.encoder_last_hidden_state" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.led.modeling_tf_led.TFLEDSeq2SeqModelOutput.encoder_last_hidden_state"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder of the model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.led.modeling_tf_led.TFLEDSeq2SeqModelOutput.encoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.led.modeling_tf_led.TFLEDSeq2SeqModelOutput.encoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, 
sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.led.modeling_tf_led.TFLEDSeq2SeqModelOutput.encoder_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.led.modeling_tf_led.TFLEDSeq2SeqModelOutput.encoder_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.led.modeling_tf_led.TFLEDSeq2SeqModelOutput.encoder_global_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.led.modeling_tf_led.TFLEDSeq2SeqModelOutput.encoder_global_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_global_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, x)</code>, where <code>x</code> is 
the number of tokens with global attention mask.</p> <p>Global attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token with global attention to every token in the sequence.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for model encoder’s outputs that also contains pre-computed hidden states that can speed up sequential decoding.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.models.led.modeling_tf_led.TFLEDSeq2SeqLMOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.models.led.modeling_tf_led.</span><span class="font-semibold">TFLEDSeq2SeqLMOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.models.led.modeling_tf_led.TFLEDSeq2SeqLMOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.models.led.modeling_tf_led.TFLEDSeq2SeqLMOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/led/modeling_tf_led.py#L1441" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">loss<span class="opacity-60">: typing.Optional[tensorflow.python.framework.ops.Tensor] = None</span></span> </span><span class="comma 
cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logits<span class="opacity-60">: Tensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60">: typing.Optional[typing.List[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cross_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_last_hidden_state<span class="opacity-60">: typing.Optional[tensorflow.python.framework.ops.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_global_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.led.modeling_tf_led.TFLEDSeq2SeqLMOutput.loss" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.led.modeling_tf_led.TFLEDSeq2SeqLMOutput.loss"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 
0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) &#x2014; Language modeling loss.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.led.modeling_tf_led.TFLEDSeq2SeqLMOutput.logits" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.led.modeling_tf_led.TFLEDSeq2SeqLMOutput.logits"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) &#x2014; Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.led.modeling_tf_led.TFLEDSeq2SeqLMOutput.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.led.modeling_tf_led.TFLEDSeq2SeqLMOutput.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>List[tf.Tensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; List of <code>tf.Tensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, 
embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.led.modeling_tf_led.TFLEDSeq2SeqLMOutput.decoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.led.modeling_tf_led.TFLEDSeq2SeqLMOutput.decoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.led.modeling_tf_led.TFLEDSeq2SeqLMOutput.decoder_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.led.modeling_tf_led.TFLEDSeq2SeqLMOutput.decoder_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape 
<code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.led.modeling_tf_led.TFLEDSeq2SeqLMOutput.cross_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.led.modeling_tf_led.TFLEDSeq2SeqLMOutput.cross_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cross_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder&#x2019;s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.led.modeling_tf_led.TFLEDSeq2SeqLMOutput.encoder_last_hidden_state" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.led.modeling_tf_led.TFLEDSeq2SeqLMOutput.encoder_last_hidden_state"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder of the model.<!-- HTML_TAG_END --> 
</span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.led.modeling_tf_led.TFLEDSeq2SeqLMOutput.encoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.led.modeling_tf_led.TFLEDSeq2SeqLMOutput.encoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.led.modeling_tf_led.TFLEDSeq2SeqLMOutput.encoder_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.led.modeling_tf_led.TFLEDSeq2SeqLMOutput.encoder_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li 
class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.led.modeling_tf_led.TFLEDSeq2SeqLMOutput.encoder_global_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.led.modeling_tf_led.TFLEDSeq2SeqLMOutput.encoder_global_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_global_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, x)</code>, where <code>x</code> is the number of tokens with global attention mask.</p> <p>Global attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
Those are the attention weights from every token with global attention to every token in the sequence.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for sequence-to-sequence language models outputs.</p></div> <h2 class="relative group"><a id="transformers.LEDModel" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LEDModel"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>LEDModel </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.LEDModel"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">LEDModel</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.LEDModel" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.LEDModel"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a 
class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/led/modeling_led.py#L2154" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60">: LEDConfig</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LEDModel.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LEDModel.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/led#transformers.LEDConfig">LEDConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>The bare LED Model outputting raw hidden-states without any specific head on top. This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)</p> <p>This model is also a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and behavior.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.LEDModel.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.LEDModel.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.LEDModel.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/led/modeling_led.py#L2181" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_input_ids<span 
class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cross_attn_head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_outputs<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">global_attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_cache<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.Seq2SeqModelOutput" >transformers.modeling_outputs.Seq2SeqModelOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LEDModel.forward.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full" href="#transformers.LEDModel.forward.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/led#transformers.LEDTokenizer">LEDTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LEDModel.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LEDModel.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LEDModel.forward.decoder_input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LEDModel.forward.decoder_input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <code>LedTokenizer</code>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a></p> <p>LED uses the <code>eos_token_id</code> as the starting token for <code>decoder_input_ids</code> generation. 
If <code>past_key_values</code> is used, optionally only the last <code>decoder_input_ids</code> have to be input (see <code>past_key_values</code>).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LEDModel.forward.decoder_attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LEDModel.forward.decoder_attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.</p> <p>If you want to change padding behavior, you should read <code>modeling_led._prepare_decoder_inputs</code> and modify it to your needs. See diagram 1 in <a href="https://arxiv.org/abs/1910.13461" rel="nofollow">the paper</a> for more information on the default strategy.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LEDModel.forward.global_attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LEDModel.forward.global_attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>global_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to decide the attention given to each token in the encoder, either local attention or global attention. Tokens with global attention attend to all other tokens, and all other tokens attend to them. 
This is important for task-specific finetuning because it makes the model more flexible at representing the task. For example, for classification, the &lt;s&gt; token should be given global attention. For QA, all question tokens should also have global attention. Please refer to the <a href="https://arxiv.org/abs/2004.05150" rel="nofollow">Longformer paper</a> for more details. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>0 for local attention (a sliding window attention),</li> <li>1 for global attention (tokens that attend to all other tokens, and all other tokens attend to them).</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LEDModel.forward.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LEDModel.forward.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>torch.Tensor</code> of shape <code>(encoder_layers, encoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the encoder. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LEDModel.forward.decoder_head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LEDModel.forward.decoder_head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LEDModel.forward.cross_attn_head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LEDModel.forward.cross_attn_head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cross_attn_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the cross-attention modules in the decoder. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LEDModel.forward.encoder_outputs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LEDModel.forward.encoder_outputs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_outputs</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>) &#x2014; Tuple consists of (<code>last_hidden_state</code>, <em>optional</em>: <code>hidden_states</code>, <em>optional</em>: <code>attentions</code>). <code>last_hidden_state</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>, is a sequence of hidden-states at the output of the last layer of the encoder. 
Used in the cross-attention of the decoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LEDModel.forward.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LEDModel.forward.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code> and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.</p> <p><strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LEDModel.forward.decoder_inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LEDModel.forward.decoder_inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, target_sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>decoder_input_ids</code> you can choose to directly pass an embedded representation. If <code>past_key_values</code> is used, optionally only the last <code>decoder_inputs_embeds</code> have to be input (see <code>past_key_values</code>). 
This is useful if you want more control over how to convert <code>decoder_input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.</p> <p>If <code>decoder_input_ids</code> and <code>decoder_inputs_embeds</code> are both unset, <code>decoder_inputs_embeds</code> takes the value of <code>inputs_embeds</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LEDModel.forward.use_cache" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LEDModel.forward.use_cache"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LEDModel.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LEDModel.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LEDModel.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LEDModel.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LEDModel.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LEDModel.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.LEDModel.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.Seq2SeqModelOutput" >transformers.modeling_outputs.Seq2SeqModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a 
href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.Seq2SeqModelOutput" >transformers.modeling_outputs.Seq2SeqModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/led#transformers.LEDConfig" >LEDConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) — Sequence of hidden-states at the output of the last layer of the decoder of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) — Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder’s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) — Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned 
when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/led#transformers.LEDModel">LEDModel</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> LEDTokenizer, LEDModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = LEDTokenizer.from_pretrained(<span class="hljs-string">&quot;allenai/led-base-16384&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = LEDModel.from_pretrained(<span class="hljs-string">&quot;allenai/led-base-16384&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span 
class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.LEDForConditionalGeneration" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LEDForConditionalGeneration"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>LEDForConditionalGeneration </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.LEDForConditionalGeneration"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">LEDForConditionalGeneration</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.LEDForConditionalGeneration" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.LEDForConditionalGeneration"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 
56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/led/modeling_led.py#L2277" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60">: LEDConfig</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LEDForConditionalGeneration.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LEDForConditionalGeneration.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/led#transformers.LEDConfig">LEDConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>The LED Model with a language modeling head. Can be used for summarization. This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)</p> <p>This model is also a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.LEDForConditionalGeneration.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.LEDForConditionalGeneration.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.LEDForConditionalGeneration.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/led/modeling_led.py#L2321" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">decoder_input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cross_attn_head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_outputs<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">global_attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_cache<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.Seq2SeqLMOutput" >transformers.modeling_outputs.Seq2SeqLMOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span 
class="group flex space-x-1.5 items-start"><a id="transformers.LEDForConditionalGeneration.forward.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LEDForConditionalGeneration.forward.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/led#transformers.LEDTokenizer">LEDTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LEDForConditionalGeneration.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LEDForConditionalGeneration.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LEDForConditionalGeneration.forward.decoder_input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LEDForConditionalGeneration.forward.decoder_input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <code>LedTokenizer</code>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a></p> <p>LED uses the <code>eos_token_id</code> as the starting token for <code>decoder_input_ids</code> generation. 
If <code>past_key_values</code> is used, optionally only the last <code>decoder_input_ids</code> have to be input (see <code>past_key_values</code>).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LEDForConditionalGeneration.forward.decoder_attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LEDForConditionalGeneration.forward.decoder_attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.</p> <p>If you want to change padding behavior, you should read <code>modeling_led._prepare_decoder_inputs</code> and modify to your needs. See diagram 1 in <a href="https://arxiv.org/abs/1910.13461" rel="nofollow">the paper</a> for more information on the default strategy.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LEDForConditionalGeneration.forward.global_attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LEDForConditionalGeneration.forward.global_attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>global_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to decide the attention given on each token, local attention or global attention for the encoder. 
Tokens with global attention attend to all other tokens, and all other tokens attend to them. This is important for task-specific finetuning because it makes the model more flexible at representing the task. For example, for classification, the <code>&lt;s&gt;</code> token should be given global attention. For QA, all question tokens should also have global attention. Please refer to the <a href="https://arxiv.org/abs/2004.05150" rel="nofollow">Longformer paper</a> for more details. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>0 for local attention (a sliding window attention),</li> <li>1 for global attention (tokens that attend to all other tokens, and all other tokens attend to them).</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LEDForConditionalGeneration.forward.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LEDForConditionalGeneration.forward.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>torch.Tensor</code> of shape <code>(encoder_layers, encoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the encoder.
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LEDForConditionalGeneration.forward.decoder_head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LEDForConditionalGeneration.forward.decoder_head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LEDForConditionalGeneration.forward.cross_attn_head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LEDForConditionalGeneration.forward.cross_attn_head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cross_attn_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the cross-attention modules in the decoder. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LEDForConditionalGeneration.forward.encoder_outputs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LEDForConditionalGeneration.forward.encoder_outputs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_outputs</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>) &#x2014; Tuple consists of (<code>last_hidden_state</code>, <em>optional</em>: <code>hidden_states</code>, <em>optional</em>: <code>attentions</code>). <code>last_hidden_state</code> of shape <code>(batch_size, sequence_length, hidden_size)</code> is a sequence of hidden-states at the output of the last layer of the encoder.
Used in the cross-attention of the decoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LEDForConditionalGeneration.forward.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LEDForConditionalGeneration.forward.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code> and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.</p> <p><strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LEDForConditionalGeneration.forward.decoder_inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LEDForConditionalGeneration.forward.decoder_inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, target_sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>decoder_input_ids</code> you can choose to directly pass an embedded representation. If <code>past_key_values</code> is used, optionally only the last <code>decoder_inputs_embeds</code> have to be input (see <code>past_key_values</code>).
This is useful if you want more control over how to convert <code>decoder_input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.</p> <p>If <code>decoder_input_ids</code> and <code>decoder_inputs_embeds</code> are both unset, <code>decoder_inputs_embeds</code> takes the value of <code>inputs_embeds</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LEDForConditionalGeneration.forward.use_cache" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LEDForConditionalGeneration.forward.use_cache"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LEDForConditionalGeneration.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LEDForConditionalGeneration.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LEDForConditionalGeneration.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LEDForConditionalGeneration.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LEDForConditionalGeneration.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LEDForConditionalGeneration.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LEDForConditionalGeneration.forward.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LEDForConditionalGeneration.forward.labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" 
aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. Indices should either be in <code>[0, ..., config.vocab_size]</code> or -100 (see <code>input_ids</code> docstring). Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.LEDForConditionalGeneration.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.Seq2SeqLMOutput" >transformers.modeling_outputs.Seq2SeqLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.Seq2SeqLMOutput" >transformers.modeling_outputs.Seq2SeqLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/led#transformers.LEDConfig" >LEDConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) — Language modeling loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) — Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) — Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, 
<em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder’s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) — Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/led#transformers.LEDForConditionalGeneration">LEDForConditionalGeneration</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Conditional generation 
example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> LEDTokenizer, LEDForConditionalGeneration <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = LEDTokenizer.from_pretrained(<span class="hljs-string">&quot;allenai/led-base-16384&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>TXT = <span class="hljs-string">&quot;My friends are &lt;mask&gt; but they eat too many carbs.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = LEDForConditionalGeneration.from_pretrained(<span class="hljs-string">&quot;allenai/led-base-16384&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = tokenizer([TXT], return_tensors=<span class="hljs-string">&quot;pt&quot;</span>)[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>prediction = model.generate(input_ids)[<span class="hljs-number">0</span>] <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(tokenizer.decode(prediction, skip_special_tokens=<span class="hljs-literal">True</span>))<!-- HTML_TAG_END --></pre></div> <p>Summarization example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> 
Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> LEDTokenizer, LEDForConditionalGeneration <span class="hljs-meta">&gt;&gt;&gt; </span>model = LEDForConditionalGeneration.from_pretrained(<span class="hljs-string">&quot;allenai/led-large-16384-arxiv&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = LEDTokenizer.from_pretrained(<span class="hljs-string">&quot;allenai/led-large-16384-arxiv&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>ARTICLE_TO_SUMMARIZE = <span class="hljs-string">&#x27;&#x27;&#x27;Transformers (Vaswani et al., 2017) have achieved state-of-the-art <span class="hljs-meta">... </span> results in a wide range of natural language tasks including generative language modeling <span class="hljs-meta">... </span> (Dai et al., 2019; Radford et al., 2019) and discriminative ... language understanding (Devlin et al., 2019). <span class="hljs-meta">... </span> This success is partly due to the self-attention component which enables the network to capture contextual <span class="hljs-meta">... </span> information from the entire sequence. While powerful, the memory and computational requirements of <span class="hljs-meta">... </span> self-attention grow quadratically with sequence length, making it infeasible (or very expensive) to <span class="hljs-meta">... </span> process long sequences. To address this limitation, we present Longformer, a modified Transformer <span class="hljs-meta">... </span> architecture with a self-attention operation that scales linearly with the sequence length, making it <span class="hljs-meta">... </span> versatile for processing long documents (Fig 1). This is an advantage for natural language tasks such as <span class="hljs-meta">... </span> long document classification, question answering (QA), and coreference resolution, where existing approaches <span class="hljs-meta">... </span> partition or shorten the long context into smaller sequences that fall within the typical 512 token limit <span class="hljs-meta">... </span> of BERT-style pretrained models. Such partitioning could potentially result in loss of important <span class="hljs-meta">... </span> cross-partition information, and to mitigate this problem, existing methods often rely on complex <span class="hljs-meta">... </span> architectures to address such interactions. On the other hand, our proposed Longformer is able to build <span class="hljs-meta">... </span> contextual representations of the entire context using multiple layers of attention, reducing the need for <span class="hljs-meta">... </span> task-specific architectures.&#x27;&#x27;&#x27;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer.encode(ARTICLE_TO_SUMMARIZE, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Global attention on the first token (cf. Beltagy et al. 
2020)</span> <span class="hljs-meta">&gt;&gt;&gt; </span>global_attention_mask = torch.zeros_like(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>global_attention_mask[:, <span class="hljs-number">0</span>] = <span class="hljs-number">1</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Generate Summary</span> <span class="hljs-meta">&gt;&gt;&gt; </span>summary_ids = model.generate(inputs, global_attention_mask=global_attention_mask, num_beams=<span class="hljs-number">3</span>, max_length=<span class="hljs-number">32</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(tokenizer.decode(summary_ids[<span class="hljs-number">0</span>], skip_special_tokens=<span class="hljs-literal">True</span>, clean_up_tokenization_spaces=<span class="hljs-literal">True</span>))<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.LEDForSequenceClassification" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LEDForSequenceClassification"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>LEDForSequenceClassification </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.LEDForSequenceClassification"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">LEDForSequenceClassification</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.LEDForSequenceClassification" class="header-link invisible with-hover:group-hover:visible pr-2" 
href="#transformers.LEDForSequenceClassification"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/led/modeling_led.py#L2468" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60">: LEDConfig</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LEDForSequenceClassification.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LEDForSequenceClassification.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/led#transformers.LEDConfig">LEDConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>LED model with a sequence classification head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads etc.)</p> <p>This model is also a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and behavior.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.LEDForSequenceClassification.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.LEDForSequenceClassification.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.LEDForSequenceClassification.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex
items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/led/modeling_led.py#L2481" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cross_attn_head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_outputs<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">global_attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_cache<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span> <span>)</span> 
<span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.Seq2SeqSequenceClassifierOutput" >transformers.modeling_outputs.Seq2SeqSequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LEDForSequenceClassification.forward.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LEDForSequenceClassification.forward.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/led#transformers.LEDTokenizer">LEDTokenizer</a>. 
See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LEDForSequenceClassification.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LEDForSequenceClassification.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LEDForSequenceClassification.forward.decoder_input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LEDForSequenceClassification.forward.decoder_input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <code>LedTokenizer</code>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a></p> <p>LED uses the <code>eos_token_id</code> as the starting token for <code>decoder_input_ids</code> generation. 
If <code>past_key_values</code> is used, optionally only the last <code>decoder_input_ids</code> have to be input (see <code>past_key_values</code>).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LEDForSequenceClassification.forward.decoder_attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LEDForSequenceClassification.forward.decoder_attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.</p> <p>If you want to change padding behavior, you should read <code>modeling_led._prepare_decoder_inputs</code> and modify to your needs. See diagram 1 in <a href="https://arxiv.org/abs/1910.13461" rel="nofollow">the paper</a> for more information on the default strategy.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LEDForSequenceClassification.forward.global_attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LEDForSequenceClassification.forward.global_attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>global_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to decide the attention given on each token, local attention or global attention for the encoder. 
Tokens with global attention attend to all other tokens, and all other tokens attend to them. This is important for task-specific finetuning because it makes the model more flexible at representing the task. For example, for classification, the <code>&lt;s&gt;</code> token should be given global attention. For QA, all question tokens should also have global attention. Please refer to the <a href="https://arxiv.org/abs/2004.05150" rel="nofollow">Longformer paper</a> for more details. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>0 for local attention (a sliding window attention),</li> <li>1 for global attention (tokens that attend to all other tokens, and all other tokens attend to them).</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LEDForSequenceClassification.forward.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LEDForSequenceClassification.forward.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>torch.Tensor</code> of shape <code>(encoder_layers, encoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the encoder.
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LEDForSequenceClassification.forward.decoder_head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LEDForSequenceClassification.forward.decoder_head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LEDForSequenceClassification.forward.cross_attn_head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LEDForSequenceClassification.forward.cross_attn_head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cross_attn_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the cross-attention modules in the decoder. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LEDForSequenceClassification.forward.encoder_outputs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LEDForSequenceClassification.forward.encoder_outputs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_outputs</strong> (<code>tuple(tuple(torch.FloatTensor)</code>, <em>optional</em>) &#x2014; Tuple consists of (<code>last_hidden_state</code>, <em>optional</em>: <code>hidden_states</code>, <em>optional</em>: <code>attentions</code>) <code>last_hidden_state</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) is a sequence of hidden-states at the output of the last layer of the encoder. 
Used in the cross-attention of the decoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LEDForSequenceClassification.forward.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LEDForSequenceClassification.forward.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code> and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.</p> <p><strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>): Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LEDForSequenceClassification.forward.decoder_inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LEDForSequenceClassification.forward.decoder_inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, target_sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>decoder_input_ids</code> you can choose to directly pass an embedded representation. If <code>past_key_values</code> is used, optionally only the last <code>decoder_inputs_embeds</code> have to be input (see <code>past_key_values</code>).
This is useful if you want more control over how to convert <code>decoder_input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.</p> <p>If <code>decoder_input_ids</code> and <code>decoder_inputs_embeds</code> are both unset, <code>decoder_inputs_embeds</code> takes the value of <code>inputs_embeds</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LEDForSequenceClassification.forward.use_cache" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LEDForSequenceClassification.forward.use_cache"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LEDForSequenceClassification.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LEDForSequenceClassification.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LEDForSequenceClassification.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LEDForSequenceClassification.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LEDForSequenceClassification.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LEDForSequenceClassification.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LEDForSequenceClassification.forward.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LEDForSequenceClassification.forward.labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" 
aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.LEDForSequenceClassification.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.Seq2SeqSequenceClassifierOutput" >transformers.modeling_outputs.Seq2SeqSequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.Seq2SeqSequenceClassifierOutput" >transformers.modeling_outputs.Seq2SeqSequenceClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/led#transformers.LEDConfig" >LEDConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>label</code> is provided) — Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) — Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) — Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is 
passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder’s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) — Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/led#transformers.LEDForSequenceClassification">LEDForSequenceClassification</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example of single-label classification:</p> <div class="code-block relative"><div class="absolute top-2.5 
right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> LEDTokenizer, LEDForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span>torch.manual_seed(<span class="hljs-number">0</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = LEDTokenizer.from_pretrained(<span class="hljs-string">&quot;allenai/led-base-16384&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = LEDForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;allenai/led-base-16384&quot;</span>, num_labels=<span class="hljs-number">2</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([<span class="hljs-number">1</span>]).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">list</span>(logits.shape) <!-- HTML_TAG_END --></pre></div> <p>Example of multi-label classification:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 
top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> LEDTokenizer, LEDForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span>torch.manual_seed(<span class="hljs-number">0</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = LEDTokenizer.from_pretrained(<span class="hljs-string">&quot;allenai/led-base-16384&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = LEDForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;allenai/led-base-16384&quot;</span>, problem_type=<span class="hljs-string">&quot;multi_label_classification&quot;</span>, num_labels=<span class="hljs-number">2</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([[<span class="hljs-number">1</span>, <span class="hljs-number">1</span>]], dtype=torch.<span class="hljs-built_in">float</span>) <span class="hljs-comment"># need dtype=float for BCEWithLogitsLoss</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">list</span>(logits.shape) <!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.LEDForQuestionAnswering" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LEDForQuestionAnswering"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>LEDForQuestionAnswering </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.LEDForQuestionAnswering"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" 
aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">LEDForQuestionAnswering</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.LEDForQuestionAnswering" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.LEDForQuestionAnswering"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/led/modeling_led.py#L2596" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LEDForQuestionAnswering.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LEDForQuestionAnswering.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 
11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/led#transformers.LEDConfig">LEDConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>LED Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layer on top of the hidden-states output to compute <code>span start logits</code> and <code>span end logits</code>).</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)</p> <p>This model is also a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.LEDForQuestionAnswering.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.LEDForQuestionAnswering.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.LEDForQuestionAnswering.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path 
d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/led/modeling_led.py#L2608" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cross_attn_head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_outputs<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">global_attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">start_positions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">end_positions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_inputs_embeds<span class="opacity-60"> = None</span></span> 
</span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_cache<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput" >transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LEDForQuestionAnswering.forward.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LEDForQuestionAnswering.forward.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/led#transformers.LEDTokenizer">LEDTokenizer</a>. 
See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LEDForQuestionAnswering.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LEDForQuestionAnswering.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LEDForQuestionAnswering.forward.decoder_input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LEDForQuestionAnswering.forward.decoder_input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <code>LedTokenizer</code>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a></p> <p>LED uses the <code>eos_token_id</code> as the starting token for <code>decoder_input_ids</code> generation. 
If <code>past_key_values</code> is used, optionally only the last <code>decoder_input_ids</code> have to be input (see <code>past_key_values</code>).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LEDForQuestionAnswering.forward.decoder_attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LEDForQuestionAnswering.forward.decoder_attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.</p> <p>If you want to change padding behavior, you should read <code>modeling_led._prepare_decoder_inputs</code> and modify to your needs. See diagram 1 in <a href="https://arxiv.org/abs/1910.13461" rel="nofollow">the paper</a> for more information on the default strategy.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LEDForQuestionAnswering.forward.global_attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LEDForQuestionAnswering.forward.global_attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>global_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to decide the attention given on each token, local attention or global attention for the encoder. 
Tokens with global attention attend to all other tokens, and all other tokens attend to them. This is important for task-specific finetuning because it makes the model more flexible at representing the task. For example, for classification, the <code>&lt;s&gt;</code> token should be given global attention. For QA, all question tokens should also have global attention. Please refer to the <a href="https://arxiv.org/abs/2004.05150" rel="nofollow">Longformer paper</a> for more details. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>0 for local attention (a sliding window attention),</li> <li>1 for global attention (tokens that attend to all other tokens, and all other tokens attend to them).</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LEDForQuestionAnswering.forward.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LEDForQuestionAnswering.forward.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>torch.Tensor</code> of shape <code>(encoder_layers, encoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the encoder.
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LEDForQuestionAnswering.forward.decoder_head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LEDForQuestionAnswering.forward.decoder_head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LEDForQuestionAnswering.forward.cross_attn_head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LEDForQuestionAnswering.forward.cross_attn_head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cross_attn_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the cross-attention modules in the decoder. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LEDForQuestionAnswering.forward.encoder_outputs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LEDForQuestionAnswering.forward.encoder_outputs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_outputs</strong> (<code>tuple(tuple(torch.FloatTensor)</code>, <em>optional</em>) &#x2014; Tuple consists of (<code>last_hidden_state</code>, <em>optional</em>: <code>hidden_states</code>, <em>optional</em>: <code>attentions</code>) <code>last_hidden_state</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) is a sequence of hidden-states at the output of the last layer of the encoder. 
Used in the cross-attention of the decoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LEDForQuestionAnswering.forward.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LEDForQuestionAnswering.forward.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code> and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LEDForQuestionAnswering.forward.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LEDForQuestionAnswering.forward.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LEDForQuestionAnswering.forward.decoder_inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LEDForQuestionAnswering.forward.decoder_inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, target_sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>decoder_input_ids</code> you can choose to directly pass an embedded representation. If <code>past_key_values</code> is used, optionally only the last <code>decoder_inputs_embeds</code> have to be input (see <code>past_key_values</code>). 
This is useful if you want more control over how to convert <code>decoder_input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.</p> <p>If <code>decoder_input_ids</code> and <code>decoder_inputs_embeds</code> are both unset, <code>decoder_inputs_embeds</code> takes the value of <code>inputs_embeds</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LEDForQuestionAnswering.forward.use_cache" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LEDForQuestionAnswering.forward.use_cache"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LEDForQuestionAnswering.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LEDForQuestionAnswering.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LEDForQuestionAnswering.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LEDForQuestionAnswering.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LEDForQuestionAnswering.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LEDForQuestionAnswering.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LEDForQuestionAnswering.forward.start_positions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LEDForQuestionAnswering.forward.start_positions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" 
aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>start_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<em>sequence_length</em>). Position outside of the sequence are not taken into account for computing the loss.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LEDForQuestionAnswering.forward.end_positions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LEDForQuestionAnswering.forward.end_positions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>end_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<em>sequence_length</em>). 
Position outside of the sequence are not taken into account for computing the loss.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.LEDForQuestionAnswering.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput" >transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput" >transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/led#transformers.LEDConfig" >LEDConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) — Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.</p> </li> <li> <p><strong>start_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) — Span-start scores (before SoftMax).</p> </li> <li> <p><strong>end_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) — Span-end scores (before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) — Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to 
compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder’s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) — Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/led#transformers.LEDForQuestionAnswering">LEDForQuestionAnswering</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 
top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> LEDTokenizer, LEDForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>torch.manual_seed(<span class="hljs-number">0</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = LEDTokenizer.from_pretrained(<span class="hljs-string">&quot;allenai/led-base-16384&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = LEDForQuestionAnswering.from_pretrained(<span class="hljs-string">&quot;allenai/led-base-16384&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>question, text = <span class="hljs-string">&quot;Who was Jim Henson?&quot;</span>, <span class="hljs-string">&quot;Jim Henson was a nice puppet&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(question, text, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>start_positions = torch.tensor([<span class="hljs-number">1</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>end_positions = torch.tensor([<span class="hljs-number">3</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, start_positions=start_positions, end_positions=end_positions) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">round</span>(loss.item(), <span class="hljs-number">2</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>start_scores = outputs.start_logits <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">list</span>(start_scores.shape) <span class="hljs-meta">&gt;&gt;&gt; </span>end_scores = outputs.end_logits <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">list</span>(end_scores.shape) <!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.TFLEDModel" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFLEDModel"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TFLEDModel </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFLEDModel"><!-- HTML_TAG_START 
--><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TFLEDModel</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TFLEDModel" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFLEDModel"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/led/modeling_tf_led.py#L2256" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFLEDModel.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFLEDModel.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" 
width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/led#transformers.LEDConfig">LEDConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>The bare LED Model outputting raw hidden-states without any specific head on top. This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)</p> <p>This model is also a <a href="https://www.tensorflow.org/api_docs/python/tf/keras/Model" rel="nofollow">tf.keras.Model</a> subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>TF 2.0 models accepts two formats as inputs:</p> <ul><li>having all inputs as keyword arguments (like PyTorch models), or</li> <li>having all inputs as a list, tuple or dict in the first positional arguments.</li></ul> <p>This second option is useful when using <code>tf.keras.Model.fit</code> method which currently requires having all the tensors in the first argument of the model call function: <code>model(inputs)</code>.</p> <p>If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :</p> <ul><li>a single Tensor with <code>input_ids</code> only and nothing else: <code>model(input_ids)</code></li> <li>a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: <code>model([input_ids, attention_mask])</code> or <code>model([input_ids, attention_mask, token_type_ids])</code></li> <li>a dictionary with one or several input Tensors associated to the input names given in the docstring: <code>model({&quot;input_ids&quot;: input_ids, &quot;token_type_ids&quot;: token_type_ids})</code></li></ul></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFLEDModel.call"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r 
from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>call</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFLEDModel.call" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFLEDModel.call"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/led/modeling_tf_led.py#L2268" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = 
None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_outputs<span class="opacity-60">: typing.Union[typing.Tuple, transformers.models.led.modeling_tf_led.TFLEDEncoderBaseModelOutput, NoneType] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">global_attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_cache<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">training<span class="opacity-60"> = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/model_doc/led#transformers.models.led.modeling_tf_led.TFLEDSeq2SeqModelOutput" >transformers.models.led.modeling_tf_led.TFLEDSeq2SeqModelOutput</a> or <code>tuple(tf.Tensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFLEDModel.call.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFLEDModel.call.input_ids"><span><svg class="text-smd" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFLEDModel.call.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFLEDModel.call.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFLEDModel.call.decoder_input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFLEDModel.call.decoder_input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_input_ids</strong> (<code>tf.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <code>LedTokenizer</code>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a></p> <p>LED uses the <code>eos_token_id</code> as the starting token for <code>decoder_input_ids</code> generation. 
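For instance, a minimal sketch of seeding the decoder with that start token (assuming TensorFlow weights are available for the <code>allenai/led-base-16384</code> checkpoint; otherwise <code>from_pretrained</code> can be called with <code>from_pt=True</code>):</p> <pre>&gt;&gt;&gt; import tensorflow as tf
&gt;&gt;&gt; from transformers import LEDTokenizer, TFLEDModel
&gt;&gt;&gt; tokenizer = LEDTokenizer.from_pretrained(&quot;allenai/led-base-16384&quot;)
&gt;&gt;&gt; model = TFLEDModel.from_pretrained(&quot;allenai/led-base-16384&quot;)
&gt;&gt;&gt; inputs = tokenizer(&quot;My friends are cool but they eat too many carbs.&quot;, return_tensors=&quot;tf&quot;)
&gt;&gt;&gt; # LED starts decoding from the end-of-sequence token
&gt;&gt;&gt; decoder_input_ids = tf.fill((1, 1), model.config.decoder_start_token_id)
&gt;&gt;&gt; outputs = model(input_ids=inputs[&quot;input_ids&quot;], decoder_input_ids=decoder_input_ids)
&gt;&gt;&gt; list(outputs.last_hidden_state.shape)</pre> <p>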
If <code>past_key_values</code> is used, optionally only the last <code>decoder_input_ids</code> have to be input (see <code>past_key_values</code>).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFLEDModel.call.decoder_attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFLEDModel.call.decoder_attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_attention_mask</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; will be made by default and ignore pad tokens. It is not recommended to set this for most use cases.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFLEDModel.call.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFLEDModel.call.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>tf.Tensor</code> of shape <code>(encoder_layers, encoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the encoder. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFLEDModel.call.decoder_head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFLEDModel.call.decoder_head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_head_mask</strong> (<code>tf.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFLEDModel.call.encoder_outputs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFLEDModel.call.encoder_outputs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_outputs</strong> (<code>tf.FloatTensor</code>, <em>optional</em>) &#x2014; hidden states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. 
The expected shape is <code>(batch_size, sequence_length, hidden_size)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFLEDModel.call.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFLEDModel.call.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>Tuple[Tuple[tf.Tensor]]</code> of length <code>config.n_layers</code>) &#x2014; Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFLEDModel.call.use_cache" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFLEDModel.call.use_cache"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>). 
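<p>A rough sketch of that pattern, reusing the <code>model</code>, <code>inputs</code> and <code>decoder_input_ids</code> from the sketch under <code>decoder_input_ids</code> above (the token id fed in the second step is purely hypothetical; <code>TFLEDForConditionalGeneration.generate()</code> handles this caching automatically):</p> <pre>&gt;&gt;&gt; outputs = model(input_ids=inputs[&quot;input_ids&quot;], decoder_input_ids=decoder_input_ids, use_cache=True)
&gt;&gt;&gt; past = outputs.past_key_values
&gt;&gt;&gt; next_token = tf.fill((1, 1), 713)  # hypothetical id of the next decoder token
&gt;&gt;&gt; # feed only the new token; the cached states stand in for the earlier decoder positions
&gt;&gt;&gt; outputs = model(input_ids=inputs[&quot;input_ids&quot;], decoder_input_ids=next_token, past_key_values=past, use_cache=True)</pre> <p>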
Set to <code>False</code> during training, <code>True</code> during generation<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFLEDModel.call.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFLEDModel.call.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFLEDModel.call.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFLEDModel.call.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFLEDModel.call.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFLEDModel.call.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFLEDModel.call.training" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFLEDModel.call.training"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.TFLEDModel.call.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/model_doc/led#transformers.models.led.modeling_tf_led.TFLEDSeq2SeqModelOutput" >transformers.models.led.modeling_tf_led.TFLEDSeq2SeqModelOutput</a> or <code>tuple(tf.Tensor)</code></p> <!-- 
HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/model_doc/led#transformers.models.led.modeling_tf_led.TFLEDSeq2SeqModelOutput" >transformers.models.led.modeling_tf_led.TFLEDSeq2SeqModelOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/led#transformers.LEDConfig" >LEDConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) — Sequence of hidden-states at the output of the last layer of the decoder of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.</p> </li> <li> <p><strong>past_key_values</strong> (<code>List[tf.Tensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) — List of <code>tf.Tensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder’s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) — Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when 
<code>config.output_hidden_states=True</code>) — Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>encoder_global_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, x)</code>, where <code>x</code> is the number of tokens with global attention mask.</p> <p>Global attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token with global attention to every token in the sequence.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/led#transformers.TFLEDModel">TFLEDModel</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> 
LEDTokenizer, TFLEDModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = LEDTokenizer.from_pretrained(<span class="hljs-string">&quot;allenai/led-base-16384&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFLEDModel.from_pretrained(<span class="hljs-string">&quot;allenai/led-base-16384&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.TFLEDForConditionalGeneration" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFLEDForConditionalGeneration"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TFLEDForConditionalGeneration </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFLEDForConditionalGeneration"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TFLEDForConditionalGeneration</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TFLEDForConditionalGeneration" class="header-link invisible with-hover:group-hover:visible pr-2" 
href="#transformers.TFLEDForConditionalGeneration"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/led/modeling_tf_led.py#L2363" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFLEDForConditionalGeneration.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFLEDForConditionalGeneration.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/led#transformers.LEDConfig">LEDConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>The LED Model with a language modeling head. Can be used for summarization. This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)</p> <p>This model is also a <a href="https://www.tensorflow.org/api_docs/python/tf/keras/Model" rel="nofollow">tf.keras.Model</a> subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>TF 2.0 models accepts two formats as inputs:</p> <ul><li>having all inputs as keyword arguments (like PyTorch models), or</li> <li>having all inputs as a list, tuple or dict in the first positional arguments.</li></ul> <p>This second option is useful when using <code>tf.keras.Model.fit</code> method which currently requires having all the tensors in the first argument of the model call function: <code>model(inputs)</code>.</p> <p>If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :</p> <ul><li>a single Tensor with <code>input_ids</code> only and nothing else: <code>model(input_ids)</code></li> <li>a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: <code>model([input_ids, attention_mask])</code> or <code>model([input_ids, attention_mask, token_type_ids])</code></li> <li>a dictionary with one or several input Tensors associated to the input names given in the docstring: <code>model({&quot;input_ids&quot;: input_ids, &quot;token_type_ids&quot;: token_type_ids})</code></li></ul></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFLEDForConditionalGeneration.call"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 
7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>call</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFLEDForConditionalGeneration.call" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFLEDForConditionalGeneration.call"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/led/modeling_tf_led.py#L2396" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_outputs<span class="opacity-60">: typing.Optional[transformers.models.led.modeling_tf_led.TFLEDEncoderBaseModelOutput] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">global_attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60"> = None</span></span> </span><span class="comma 
cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_cache<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">training<span class="opacity-60"> = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/model_doc/led#transformers.models.led.modeling_tf_led.TFLEDSeq2SeqLMOutput" >transformers.models.led.modeling_tf_led.TFLEDSeq2SeqLMOutput</a> or <code>tuple(tf.Tensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFLEDForConditionalGeneration.call.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFLEDForConditionalGeneration.call.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- 
HTML_TAG_START --><strong>input_ids</strong> (<code>tf.Tensor</code> of shape <code>({0})</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFLEDForConditionalGeneration.call.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFLEDForConditionalGeneration.call.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>tf.Tensor</code> of shape <code>({0})</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFLEDForConditionalGeneration.call.decoder_input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFLEDForConditionalGeneration.call.decoder_input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_input_ids</strong> (<code>tf.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <code>LedTokenizer</code>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a></p> <p>LED uses the <code>eos_token_id</code> as the starting token for <code>decoder_input_ids</code> generation. 
If <code>past_key_values</code> is used, optionally only the last <code>decoder_input_ids</code> have to be input (see <code>past_key_values</code>).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFLEDForConditionalGeneration.call.decoder_attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFLEDForConditionalGeneration.call.decoder_attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_attention_mask</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; will be made by default and ignore pad tokens. It is not recommended to set this for most use cases.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFLEDForConditionalGeneration.call.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFLEDForConditionalGeneration.call.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>tf.Tensor</code> of shape <code>(encoder_layers, encoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the encoder. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFLEDForConditionalGeneration.call.decoder_head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFLEDForConditionalGeneration.call.decoder_head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_head_mask</strong> (<code>tf.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFLEDForConditionalGeneration.call.encoder_outputs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFLEDForConditionalGeneration.call.encoder_outputs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_outputs</strong> (<code>tf.FloatTensor</code>, <em>optional</em>) &#x2014; hidden states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. 
These hidden states should be of shape <code>(batch_size, sequence_length, hidden_size)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFLEDForConditionalGeneration.call.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFLEDForConditionalGeneration.call.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>Tuple[Tuple[tf.Tensor]]</code> of length <code>config.n_layers</code>) &#x2014; Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFLEDForConditionalGeneration.call.use_cache" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFLEDForConditionalGeneration.call.use_cache"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>). 
Set to <code>False</code> during training, <code>True</code> during generation<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFLEDForConditionalGeneration.call.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFLEDForConditionalGeneration.call.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFLEDForConditionalGeneration.call.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFLEDForConditionalGeneration.call.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFLEDForConditionalGeneration.call.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFLEDForConditionalGeneration.call.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFLEDForConditionalGeneration.call.training" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFLEDForConditionalGeneration.call.training"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.TFLEDForConditionalGeneration.call.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/model_doc/led#transformers.models.led.modeling_tf_led.TFLEDSeq2SeqLMOutput" 
>transformers.models.led.modeling_tf_led.TFLEDSeq2SeqLMOutput</a> or <code>tuple(tf.Tensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/model_doc/led#transformers.models.led.modeling_tf_led.TFLEDSeq2SeqLMOutput" >transformers.models.led.modeling_tf_led.TFLEDSeq2SeqLMOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/led#transformers.LEDConfig" >LEDConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) — Language modeling loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) — Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>List[tf.Tensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) — List of <code>tf.Tensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder’s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) — Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> 
(<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>encoder_global_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, x)</code>, where <code>x</code> is the number of tokens with global attention mask.</p> <p>Global attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token with global attention to every token in the sequence.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/led#transformers.TFLEDForConditionalGeneration">TFLEDForConditionalGeneration</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> 
<pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> LEDTokenizer, TFLEDForConditionalGeneration <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>mname = <span class="hljs-string">&quot;allenai/led-base-16384&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = LEDTokenizer.from_pretrained(mname) <span class="hljs-meta">&gt;&gt;&gt; </span>TXT = <span class="hljs-string">&quot;My friends are &lt;mask&gt; but they eat too many carbs.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFLEDForConditionalGeneration.from_pretrained(mname) <span class="hljs-meta">&gt;&gt;&gt; </span>batch = tokenizer([TXT], return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = model(inputs=batch.input_ids).logits <span class="hljs-meta">&gt;&gt;&gt; </span>probs = tf.nn.softmax(logits[<span class="hljs-number">0</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># probs[5] is associated with the mask token</span><!-- HTML_TAG_END --></pre></div></div></div> <script type="module" data-hydrate="1bdnuzz"> import { start } from "/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"; start({ target: document.querySelector('[data-hydrate="1bdnuzz"]').parentNode, paths: {"base":"/docs/transformers/pr_16143/en","assets":"/docs/transformers/pr_16143/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"), import("/docs/transformers/pr_16143/en/_app/pages/model_doc/led.mdx-d4a9a769.js") ], params: {} } }); </script>
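<p>The example above stops at inspecting mask-token probabilities. As a hedged sketch that is not part of the generated API reference, the same class can also be driven through the generic <code>generate()</code> method for summarization-style decoding. The input text and the <code>max_length</code>/<code>num_beams</code> values below are illustrative placeholders, and the pretrained <code>allenai/led-base-16384</code> checkpoint used in the examples is not fine-tuned for summarization, so a summarization-tuned LED checkpoint would likely be needed for useful output.</p> <pre>
# Hedged sketch: summarization-style generation with TFLEDForConditionalGeneration.
# Assumes the allenai/led-base-16384 checkpoint shown in the examples above;
# the article text and generation settings are placeholders.
from transformers import LEDTokenizer, TFLEDForConditionalGeneration

tokenizer = LEDTokenizer.from_pretrained("allenai/led-base-16384")
model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")

article = "Replace this placeholder with a long document (this checkpoint accepts up to 16384 tokens)."
inputs = tokenizer(article, return_tensors="tf")

# generate() runs autoregressive decoding with the LED decoder.
summary_ids = model.generate(inputs.input_ids, max_length=64, num_beams=2)
print(tokenizer.batch_decode(summary_ids, skip_special_tokens=True))
</pre>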
205
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en
hf_public_repos/doc-build-dev/transformers/pr_16143/en/model_doc/blenderbot.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;blenderbot&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;overview&quot;,&quot;title&quot;:&quot;Overview&quot;},{&quot;local&quot;:&quot;implementation-notes&quot;,&quot;title&quot;:&quot;Implementation Notes&quot;},{&quot;local&quot;:&quot;usage&quot;,&quot;title&quot;:&quot;Usage&quot;},{&quot;local&quot;:&quot;transformers.BlenderbotConfig&quot;,&quot;title&quot;:&quot;BlenderbotConfig&quot;},{&quot;local&quot;:&quot;transformers.BlenderbotTokenizer&quot;,&quot;title&quot;:&quot;BlenderbotTokenizer&quot;},{&quot;local&quot;:&quot;transformers.BlenderbotTokenizerFast&quot;,&quot;title&quot;:&quot;BlenderbotTokenizerFast&quot;},{&quot;local&quot;:&quot;transformers.BlenderbotModel&quot;,&quot;title&quot;:&quot;BlenderbotModel&quot;},{&quot;local&quot;:&quot;transformers.BlenderbotForConditionalGeneration&quot;,&quot;title&quot;:&quot;BlenderbotForConditionalGeneration&quot;},{&quot;local&quot;:&quot;transformers.BlenderbotForCausalLM&quot;,&quot;title&quot;:&quot;BlenderbotForCausalLM&quot;},{&quot;local&quot;:&quot;transformers.TFBlenderbotModel&quot;,&quot;title&quot;:&quot;TFBlenderbotModel&quot;},{&quot;local&quot;:&quot;transformers.TFBlenderbotForConditionalGeneration&quot;,&quot;title&quot;:&quot;TFBlenderbotForConditionalGeneration&quot;},{&quot;local&quot;:&quot;transformers.FlaxBlenderbotModel&quot;,&quot;title&quot;:&quot;FlaxBlenderbotModel&quot;},{&quot;local&quot;:&quot;transformers.FlaxBlenderbotForConditionalGeneration&quot;,&quot;title&quot;:&quot;FlaxBlenderbotForConditionalGeneration&quot;}],&quot;title&quot;:&quot;Blenderbot&quot;}" data-svelte="svelte-1phssyn"> <link rel="stylesheet" href="/docs/transformers/pr_16143/en/_app/assets/pages/__layout.svelte-a5c8879b.css"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/vendor-4833417e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/paths-4b3c6e7e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/model_doc/blenderbot.mdx-2f4c855d.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Tip-fffd6df1.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Docstring-4f315ed9.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/IconCopyLink-4b81c553.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CodeBlock-6a3d1b46.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CopyButton-dacfbfaf.js"> <h1 class="relative group"><a id="blenderbot" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#blenderbot"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 
0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Blenderbot </span></h1> <p><strong>DISCLAIMER:</strong> If you see something strange, file a <a href="https://github.com/huggingface/transformers/issues/new?assignees=&labels=&template=bug-report.md&title" rel="nofollow">GitHub Issue</a>.</p> <h2 class="relative group"><a id="overview" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#overview"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Overview </span></h2> <p>The Blender chatbot model was proposed in <a href="https://arxiv.org/pdf/2004.13637.pdf" rel="nofollow">Recipes for building an open-domain chatbot</a> by Stephen Roller, Emily Dinan, Naman Goyal, Da Ju, Mary Williamson, Yinhan Liu, Jing Xu, Myle Ott, Kurt Shuster, Eric M. Smith, Y-Lan Boureau and Jason Weston on 30 Apr 2020.</p> <p>The abstract of the paper is the following:</p> <p><em>Building open-domain chatbots is a challenging area for machine learning research. While prior work has shown that scaling neural models in the number of parameters and the size of the data they are trained on gives improved results, we show that other ingredients are important for a high-performing chatbot. Good conversation requires a number of skills that an expert conversationalist blends in a seamless way: providing engaging talking points and listening to their partners, and displaying knowledge, empathy and personality appropriately, while maintaining a consistent persona. We show that large scale models can learn these skills when given appropriate training data and choice of generation strategy. We build variants of these recipes with 90M, 2.7B and 9.4B parameter models, and make our models and code publicly available. Human evaluations show our best models are superior to existing approaches in multi-turn dialogue in terms of engagingness and humanness measurements. We then discuss the limitations of this work by analyzing failure cases of our models.</em></p> <p>This model was contributed by <a href="https://huggingface.co/sshleifer" rel="nofollow">sshleifer</a>. 
The authors’ code can be found <a href="https://github.com/facebookresearch/ParlAI" rel="nofollow">here</a>.</p> <h2 class="relative group"><a id="implementation-notes" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#implementation-notes"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Implementation Notes </span></h2> <ul><li>Blenderbot uses a standard <a href="https://arxiv.org/pdf/1706.03762.pdf" rel="nofollow">seq2seq model transformer</a>-based architecture.</li> <li>Available checkpoints can be found in the <a href="https://huggingface.co/models?search=blenderbot" rel="nofollow">model hub</a>.</li> <li>This is the <em>default</em> Blenderbot model class. However, some smaller checkpoints, such as <code>facebook/blenderbot_small_90M</code>, have a different architecture and consequently should be used with <a href="blenderbot-small">BlenderbotSmall</a>.</li></ul> <h2 class="relative group"><a id="usage" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#usage"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Usage </span></h2> <p>Here is an example of model usage:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div 
class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BlenderbotTokenizer, BlenderbotForConditionalGeneration <span class="hljs-meta">&gt;&gt;&gt; </span>mname = <span class="hljs-string">&quot;facebook/blenderbot-400M-distill&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = BlenderbotForConditionalGeneration.from_pretrained(mname) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BlenderbotTokenizer.from_pretrained(mname) <span class="hljs-meta">&gt;&gt;&gt; </span>UTTERANCE = <span class="hljs-string">&quot;My friends are cool but they eat too many carbs.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer([UTTERANCE], return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>reply_ids = model.generate(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(tokenizer.batch_decode(reply_ids)) [<span class="hljs-string">&quot;&lt;s&gt; That&#x27;s unfortunate. Are they trying to lose weight or are they just trying to be healthier?&lt;/s&gt;&quot;</span>]<!-- HTML_TAG_END --></pre></div> <h2 class="relative group"><a id="transformers.BlenderbotConfig" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BlenderbotConfig"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>BlenderbotConfig </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.BlenderbotConfig"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 
3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">BlenderbotConfig</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.BlenderbotConfig" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.BlenderbotConfig"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/blenderbot/configuration_blenderbot.py#L29" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">vocab_size<span class="opacity-60"> = 8008</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_position_embeddings<span class="opacity-60"> = 128</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_layers<span class="opacity-60"> = 2</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_ffn_dim<span class="opacity-60"> = 10240</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_attention_heads<span class="opacity-60"> = 32</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_layers<span class="opacity-60"> = 24</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_ffn_dim<span class="opacity-60"> = 10240</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_attention_heads<span class="opacity-60"> = 32</span></span> </span><span class="comma 
cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_layerdrop<span class="opacity-60"> = 0.0</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_layerdrop<span class="opacity-60"> = 0.0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_cache<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">is_encoder_decoder<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">activation_function<span class="opacity-60"> = &#39;gelu&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">d_model<span class="opacity-60"> = 2560</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dropout<span class="opacity-60"> = 0.1</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_dropout<span class="opacity-60"> = 0.0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">activation_dropout<span class="opacity-60"> = 0.0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">init_std<span class="opacity-60"> = 0.02</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_start_token_id<span class="opacity-60"> = 1</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">classifier_dropout<span class="opacity-60"> = 0.0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scale_embedding<span class="opacity-60"> = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_token_id<span class="opacity-60"> = 0</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">bos_token_id<span class="opacity-60"> = 1</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eos_token_id<span class="opacity-60"> = 2</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_no_repeat_ngram_size<span class="opacity-60"> = 3</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">forced_eos_token_id<span class="opacity-60"> = 
2</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BlenderbotConfig.vocab_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BlenderbotConfig.vocab_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 50265) &#x2014; Vocabulary size of the Blenderbot model. Defines the number of different tokens that can be represented by the <code>input_ids</code> passed when calling <a href="/docs/transformers/pr_16143/en/model_doc/blenderbot#transformers.BlenderbotModel">BlenderbotModel</a> or <a href="/docs/transformers/pr_16143/en/model_doc/blenderbot#transformers.TFBlenderbotModel">TFBlenderbotModel</a>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BlenderbotConfig.d_model" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BlenderbotConfig.d_model"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>d_model</strong> (<code>int</code>, <em>optional</em>, defaults to 1024) &#x2014; Dimensionality of the layers and the pooler layer.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 
items-start"><a id="transformers.BlenderbotConfig.encoder_layers" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BlenderbotConfig.encoder_layers"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of encoder layers.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BlenderbotConfig.decoder_layers" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BlenderbotConfig.decoder_layers"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of decoder layers.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BlenderbotConfig.encoder_attention_heads" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BlenderbotConfig.encoder_attention_heads"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 
0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 16) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BlenderbotConfig.decoder_attention_heads" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BlenderbotConfig.decoder_attention_heads"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 16) &#x2014; Number of attention heads for each attention layer in the Transformer decoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BlenderbotConfig.decoder_ffn_dim" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BlenderbotConfig.decoder_ffn_dim"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_ffn_dim</strong> (<code>int</code>, <em>optional</em>, defaults to 4096) &#x2014; Dimensionality of the &#x201C;intermediate&#x201D; (often named feed-forward) layer in decoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BlenderbotConfig.encoder_ffn_dim" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.BlenderbotConfig.encoder_ffn_dim"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_ffn_dim</strong> (<code>int</code>, <em>optional</em>, defaults to 4096) &#x2014; Dimensionality of the &#x201C;intermediate&#x201D; (often named feed-forward) layer in decoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BlenderbotConfig.activation_function" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BlenderbotConfig.activation_function"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>activation_function</strong> (<code>str</code> or <code>function</code>, <em>optional</em>, defaults to <code>&quot;gelu&quot;</code>) &#x2014; The non-linear activation function (function or string) in the encoder and pooler. 
If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;silu&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BlenderbotConfig.dropout" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BlenderbotConfig.dropout"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BlenderbotConfig.attention_dropout" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BlenderbotConfig.attention_dropout"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout ratio for the attention probabilities.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BlenderbotConfig.activation_dropout" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BlenderbotConfig.activation_dropout"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 
88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>activation_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout ratio for activations inside the fully connected layer.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BlenderbotConfig.classifier_dropout" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BlenderbotConfig.classifier_dropout"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>classifier_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout ratio for classifier.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BlenderbotConfig.max_position_embeddings" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BlenderbotConfig.max_position_embeddings"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_position_embeddings</strong> (<code>int</code>, <em>optional</em>, defaults to 128) &#x2014; The maximum sequence length that this model might ever be used with. 
Typically set this to something large just in case (e.g., 512 or 1024 or 2048).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BlenderbotConfig.init_std" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BlenderbotConfig.init_std"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>init_std</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices. encoder_layerdrop &#x2014; (<code>float</code>, <em>optional</em>, defaults to 0.0): The LayerDrop probability for the encoder. See the LayerDrop paper (<a href="https://arxiv.org/abs/1909.11556" rel="nofollow">https://arxiv.org/abs/1909.11556</a>) for more details. decoder_layerdrop &#x2014; (<code>float</code>, <em>optional</em>, defaults to 0.0): The LayerDrop probability for the decoder. 
See the LayerDrop paper (<a href="https://arxiv.org/abs/1909.11556" rel="nofollow">https://arxiv.org/abs/1909.11556</a>) for more details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BlenderbotConfig.scale_embedding" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BlenderbotConfig.scale_embedding"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>scale_embedding</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Scale embeddings by dividing by sqrt(d_model).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BlenderbotConfig.use_cache" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BlenderbotConfig.use_cache"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not the model should return the last key/values attentions (not used by all models).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BlenderbotConfig.forced_eos_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BlenderbotConfig.forced_eos_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 
88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>forced_eos_token_id</strong> (<code>int</code>, <em>optional</em>, defaults to 2) &#x2014; The id of the token to force as the last generated token when <code>max_length</code> is reached. Usually set to <code>eos_token_id</code>.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>This is the configuration class to store the configuration of a <a href="/docs/transformers/pr_16143/en/model_doc/blenderbot#transformers.BlenderbotModel">BlenderbotModel</a>. It is used to instantiate a Blenderbot model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Blenderbot <a href="https://huggingface.co/facebook/blenderbot-3B" rel="nofollow">facebook/blenderbot-3B</a> architecture.</p> <p>Configuration objects inherit from <a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a> and can be used to control the model outputs. Read the documentation from <a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a> for more information.</p> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BlenderbotModel, BlenderbotConfig <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a Blenderbot facebook/blenderbot-3B style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = BlenderbotConfig() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a model from the facebook/blenderbot-3B style configuration</span> <span 
class="hljs-meta">&gt;&gt;&gt; </span>model = BlenderbotModel(configuration) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = model.config<!-- HTML_TAG_END --></pre></div></div> <h2 class="relative group"><a id="transformers.BlenderbotTokenizer" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BlenderbotTokenizer"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>BlenderbotTokenizer </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.BlenderbotTokenizer"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">BlenderbotTokenizer</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.BlenderbotTokenizer" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.BlenderbotTokenizer"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 
56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/blenderbot/tokenization_blenderbot.py#L46" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">vocab_file<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">merges_file<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">errors<span class="opacity-60"> = &#39;replace&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">bos_token<span class="opacity-60"> = &#39;&lt;s&gt;&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eos_token<span class="opacity-60"> = &#39;&lt;/s&gt;&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">sep_token<span class="opacity-60"> = &#39;&lt;/s&gt;&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cls_token<span class="opacity-60"> = &#39;&lt;s&gt;&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">unk_token<span class="opacity-60"> = &#39;&lt;unk&gt;&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_token<span class="opacity-60"> = &#39;&lt;pad&gt;&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">mask_token<span class="opacity-60"> = &#39;&lt;mask&gt;&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">add_prefix_space<span class="opacity-60"> = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Construct a Blenderbot tokenizer.</p> <p><code>Blenderbot</code> is nearly identical to <a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaTokenizer">RobertaTokenizer</a> and runs end-to-end tokenization: punctuation splitting and byte-level Byte-Pair Encoding. 
The only difference is that it doesn’t add BOS token to the beginning of sequences.</p> <p>Refer to superclass <a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaTokenizer">RobertaTokenizer</a> for usage examples and documentation concerning parameters.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.BlenderbotTokenizer.build_inputs_with_special_tokens"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>build_inputs_with_special_tokens</span></h4><!-- HTML_TAG_END --> <a id="transformers.BlenderbotTokenizer.build_inputs_with_special_tokens" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.BlenderbotTokenizer.build_inputs_with_special_tokens"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/blenderbot/tokenization_blenderbot.py#L59" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_0<span class="opacity-60">: typing.List[int]</span></span> </span><span class="comma cursor-pointer"><span 
class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_1<span class="opacity-60">: typing.Optional[typing.List[int]] = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>List[int]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BlenderbotTokenizer.build_inputs_with_special_tokens.token_ids_0" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BlenderbotTokenizer.build_inputs_with_special_tokens.token_ids_0"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs to which the special tokens will be added<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BlenderbotTokenizer.build_inputs_with_special_tokens.token_ids_1" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BlenderbotTokenizer.build_inputs_with_special_tokens.token_ids_1"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Will be ignored<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" 
id="transformers.BlenderbotTokenizer.build_inputs_with_special_tokens.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>List[int]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>list of <a href="../glossary#input-ids">input IDs</a> with the appropriate special tokens.</p> <!-- HTML_TAG_END --></p></div></div> <p>Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A Blenderbot sequence has the following format:</p> <ul><li>single sequence: <code>X &lt;/s&gt;</code></li></ul></div></div> <h2 class="relative group"><a id="transformers.BlenderbotTokenizerFast" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BlenderbotTokenizerFast"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>BlenderbotTokenizerFast </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.BlenderbotTokenizerFast"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">BlenderbotTokenizerFast</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.BlenderbotTokenizerFast" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.BlenderbotTokenizerFast"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" 
viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/blenderbot/tokenization_blenderbot_fast.py#L47" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">vocab_file<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">merges_file<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tokenizer_file<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">errors<span class="opacity-60"> = &#39;replace&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">bos_token<span class="opacity-60"> = &#39;&lt;s&gt;&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eos_token<span class="opacity-60"> = &#39;&lt;/s&gt;&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">sep_token<span class="opacity-60"> = &#39;&lt;/s&gt;&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cls_token<span class="opacity-60"> = &#39;&lt;s&gt;&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">unk_token<span class="opacity-60"> = &#39;&lt;unk&gt;&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_token<span class="opacity-60"> = &#39;&lt;pad&gt;&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">mask_token<span class="opacity-60"> = &#39;&lt;mask&gt;&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">add_prefix_space<span class="opacity-60"> = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white 
dark:hover:bg-white dark:hover:text-black">trim_offsets<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Construct a “fast” Blenderbot tokenizer (backed by HuggingFace’s <em>tokenizers</em> library).</p> <p><code>BlenderbotFast</code> is nearly identical to <a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaTokenizerFast">RobertaTokenizerFast</a> and runs end-to-end tokenization: punctuation splitting and wordpiece. The only difference is that it doesn’t add BOS token to the beginning of sequences.</p> <p>Refer to superclass <a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaTokenizerFast">RobertaTokenizerFast</a> for usage examples and documentation concerning parameters.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.BlenderbotTokenizerFast.build_inputs_with_special_tokens"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>build_inputs_with_special_tokens</span></h4><!-- HTML_TAG_END --> <a id="transformers.BlenderbotTokenizerFast.build_inputs_with_special_tokens" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.BlenderbotTokenizerFast.build_inputs_with_special_tokens"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 
11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/blenderbot/tokenization_blenderbot_fast.py#L61" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_0<span class="opacity-60">: typing.List[int]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_1<span class="opacity-60">: typing.Optional[typing.List[int]] = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>List[int]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BlenderbotTokenizerFast.build_inputs_with_special_tokens.token_ids_0" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BlenderbotTokenizerFast.build_inputs_with_special_tokens.token_ids_0"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs to which the special tokens will be added<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BlenderbotTokenizerFast.build_inputs_with_special_tokens.token_ids_1" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BlenderbotTokenizerFast.build_inputs_with_special_tokens.token_ids_1"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 
11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Will be ignored<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.BlenderbotTokenizerFast.build_inputs_with_special_tokens.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>List[int]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>list of <a href="../glossary#input-ids">input IDs</a> with the appropriate special tokens.</p> <!-- HTML_TAG_END --></p></div></div> <p>Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A Blenderbot sequence has the following format:</p> <ul><li>single sequence: <code>X &lt;/s&gt;</code></li></ul></div></div> <h2 class="relative group"><a id="transformers.BlenderbotModel" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BlenderbotModel"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>BlenderbotModel </span></h2> <p>See <code>transformers.BartModel</code> for arguments to <em>forward</em> and <em>generate</em></p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.BlenderbotModel"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 
1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">BlenderbotModel</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.BlenderbotModel" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.BlenderbotModel"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/blenderbot/modeling_blenderbot.py#L1080" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60">: BlenderbotConfig</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BlenderbotModel.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BlenderbotModel.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/blenderbot#transformers.BlenderbotConfig">BlenderbotConfig</a>) &#x2014; Model configuration class with all the parameters of the 
model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>The bare Blenderbot Model outputting raw hidden-states without any specific head on top. This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)</p> <p>This model is also a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.BlenderbotModel.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.BlenderbotModel.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.BlenderbotModel.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto 
!text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/blenderbot/modeling_blenderbot.py#L1118" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cross_attn_head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_outputs<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_cache<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a 
href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.Seq2SeqModelOutput" >transformers.modeling_outputs.Seq2SeqModelOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BlenderbotModel.forward.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BlenderbotModel.forward.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/blenderbot#transformers.BlenderbotTokenizer">BlenderbotTokenizer</a>. 
See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BlenderbotModel.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BlenderbotModel.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BlenderbotModel.forward.decoder_input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BlenderbotModel.forward.decoder_input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/blenderbot#transformers.BlenderbotTokenizer">BlenderbotTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>Blenderbot uses the <code>bos_token_id</code> as the starting token for <code>decoder_input_ids</code> generation. 
If <code>past_key_values</code> is used, optionally only the last <code>decoder_input_ids</code> have to be input (see <code>past_key_values</code>).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BlenderbotModel.forward.decoder_attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BlenderbotModel.forward.decoder_attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BlenderbotModel.forward.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BlenderbotModel.forward.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>torch.Tensor</code> of shape <code>(encoder_layers, encoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the encoder. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BlenderbotModel.forward.decoder_head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BlenderbotModel.forward.decoder_head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BlenderbotModel.forward.cross_attn_head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BlenderbotModel.forward.cross_attn_head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cross_attn_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the cross-attention modules in the decoder. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BlenderbotModel.forward.encoder_outputs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BlenderbotModel.forward.encoder_outputs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_outputs</strong> (<code>tuple(tuple(torch.FloatTensor)</code>, <em>optional</em>) &#x2014; Tuple consists of (<code>last_hidden_state</code>, <em>optional</em>: <code>hidden_states</code>, <em>optional</em>: <code>attentions</code>) <code>last_hidden_state</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) is a sequence of hidden-states at the output of the last layer of the encoder. 
Used in the cross-attention of the decoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BlenderbotModel.forward.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BlenderbotModel.forward.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>. inputs_embeds (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>): Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BlenderbotModel.forward.decoder_inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BlenderbotModel.forward.decoder_inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, target_sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>decoder_input_ids</code> you can choose to directly pass an embedded representation. If <code>past_key_values</code> is used, optionally only the last <code>decoder_inputs_embeds</code> have to be input (see <code>past_key_values</code>). 
This is useful if you want more control over how to convert <code>decoder_input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.</p> <p>If <code>decoder_input_ids</code> and <code>decoder_inputs_embeds</code> are both unset, <code>decoder_inputs_embeds</code> takes the value of <code>inputs_embeds</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BlenderbotModel.forward.use_cache" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BlenderbotModel.forward.use_cache"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BlenderbotModel.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BlenderbotModel.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BlenderbotModel.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BlenderbotModel.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BlenderbotModel.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BlenderbotModel.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.BlenderbotModel.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.Seq2SeqModelOutput" >transformers.modeling_outputs.Seq2SeqModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p 
class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.Seq2SeqModelOutput" >transformers.modeling_outputs.Seq2SeqModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/blenderbot#transformers.BlenderbotConfig" >BlenderbotConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) — Sequence of hidden-states at the output of the last layer of the decoder of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) — Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder’s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) — Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> 
(<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/blenderbot#transformers.BlenderbotModel">BlenderbotModel</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BlenderbotTokenizer, BlenderbotModel <span class="hljs-meta">&gt;&gt;&gt; </span>model = BlenderbotModel.from_pretrained(<span class="hljs-string">&quot;facebook/blenderbot-400M-distill&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BlenderbotTokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/blenderbot-400M-distill&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Studies have 
been shown that owning a dog is good for you&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>decoder_input_ids = tokenizer(<span class="hljs-string">&quot;Studies show that&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_ids=inputs.input_ids, decoder_input_ids=decoder_input_ids) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">list</span>(last_hidden_states.shape) [<span class="hljs-number">1</span>, <span class="hljs-number">6</span>, <span class="hljs-number">1280</span>]<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.BlenderbotForConditionalGeneration" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BlenderbotForConditionalGeneration"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>BlenderbotForConditionalGeneration </span></h2> <p>See <a href="/docs/transformers/pr_16143/en/model_doc/bart#transformers.BartForConditionalGeneration">BartForConditionalGeneration</a> for arguments to <em>forward</em> and <em>generate</em></p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.BlenderbotForConditionalGeneration"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span 
class="font-semibold">BlenderbotForConditionalGeneration</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.BlenderbotForConditionalGeneration" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.BlenderbotForConditionalGeneration"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/blenderbot/modeling_blenderbot.py#L1216" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60">: BlenderbotConfig</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BlenderbotForConditionalGeneration.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BlenderbotForConditionalGeneration.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/blenderbot#transformers.BlenderbotConfig">BlenderbotConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>The Blenderbot Model with a language modeling head. It can be used for summarization. This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads, etc.).</p> <p>This model is also a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and behavior.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.BlenderbotForConditionalGeneration.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.BlenderbotForConditionalGeneration.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.BlenderbotForConditionalGeneration.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" 
href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/blenderbot/modeling_blenderbot.py#L1273" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cross_attn_head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_outputs<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_cache<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span> <span>)</span> <span 
class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.Seq2SeqLMOutput" >transformers.modeling_outputs.Seq2SeqLMOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BlenderbotForConditionalGeneration.forward.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BlenderbotForConditionalGeneration.forward.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/blenderbot#transformers.BlenderbotTokenizer">BlenderbotTokenizer</a>. 
See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.__call__()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BlenderbotForConditionalGeneration.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BlenderbotForConditionalGeneration.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BlenderbotForConditionalGeneration.forward.decoder_input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BlenderbotForConditionalGeneration.forward.decoder_input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/blenderbot#transformers.BlenderbotTokenizer">BlenderbotTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.__call__()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>Blenderbot uses the <code>bos_token_id</code> as the starting token for <code>decoder_input_ids</code> generation. 
If <code>past_key_values</code> is used, optionally only the last <code>decoder_input_ids</code> have to be input (see <code>past_key_values</code>).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BlenderbotForConditionalGeneration.forward.decoder_attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BlenderbotForConditionalGeneration.forward.decoder_attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BlenderbotForConditionalGeneration.forward.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BlenderbotForConditionalGeneration.forward.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>torch.Tensor</code> of shape <code>(encoder_layers, encoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the encoder. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BlenderbotForConditionalGeneration.forward.decoder_head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BlenderbotForConditionalGeneration.forward.decoder_head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BlenderbotForConditionalGeneration.forward.cross_attn_head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BlenderbotForConditionalGeneration.forward.cross_attn_head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cross_attn_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the cross-attention modules in the decoder. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BlenderbotForConditionalGeneration.forward.encoder_outputs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BlenderbotForConditionalGeneration.forward.encoder_outputs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_outputs</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>) &#x2014; Tuple consisting of (<code>last_hidden_state</code>, <em>optional</em>: <code>hidden_states</code>, <em>optional</em>: <code>attentions</code>). <code>last_hidden_state</code>, of shape <code>(batch_size, sequence_length, hidden_size)</code> and <em>optional</em>, is a sequence of hidden-states at the output of the last layer of the encoder. 
Used in the cross-attention of the decoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BlenderbotForConditionalGeneration.forward.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BlenderbotForConditionalGeneration.forward.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>. inputs_embeds (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>): Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BlenderbotForConditionalGeneration.forward.decoder_inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BlenderbotForConditionalGeneration.forward.decoder_inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, target_sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>decoder_input_ids</code> you can choose to directly pass an embedded representation. If <code>past_key_values</code> is used, optionally only the last <code>decoder_inputs_embeds</code> have to be input (see <code>past_key_values</code>). 
This is useful if you want more control over how to convert <code>decoder_input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.</p> <p>If <code>decoder_input_ids</code> and <code>decoder_inputs_embeds</code> are both unset, <code>decoder_inputs_embeds</code> takes the value of <code>inputs_embeds</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BlenderbotForConditionalGeneration.forward.use_cache" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BlenderbotForConditionalGeneration.forward.use_cache"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BlenderbotForConditionalGeneration.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BlenderbotForConditionalGeneration.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BlenderbotForConditionalGeneration.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BlenderbotForConditionalGeneration.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BlenderbotForConditionalGeneration.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BlenderbotForConditionalGeneration.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BlenderbotForConditionalGeneration.forward.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BlenderbotForConditionalGeneration.forward.labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. Indices should either be in <code>[0, ..., config.vocab_size]</code> or -100 (see <code>input_ids</code> docstring). Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.BlenderbotForConditionalGeneration.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.Seq2SeqLMOutput" >transformers.modeling_outputs.Seq2SeqLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.Seq2SeqLMOutput" >transformers.modeling_outputs.Seq2SeqLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/blenderbot#transformers.BlenderbotConfig" >BlenderbotConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) — Language modeling loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) — Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) — Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> 
<p><strong>decoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder’s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) — Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/blenderbot#transformers.BlenderbotForConditionalGeneration">BlenderbotForConditionalGeneration</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post 
processing steps while the latter silently ignores them.</p></div> <p>Conversation example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BlenderbotTokenizer, BlenderbotForConditionalGeneration <span class="hljs-meta">&gt;&gt;&gt; </span>mname = <span class="hljs-string">&quot;facebook/blenderbot-400M-distill&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = BlenderbotForConditionalGeneration.from_pretrained(mname) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BlenderbotTokenizer.from_pretrained(mname) <span class="hljs-meta">&gt;&gt;&gt; </span>UTTERANCE = <span class="hljs-string">&quot;My friends are cool but they eat too many carbs.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(<span class="hljs-string">&quot;Human: &quot;</span>, UTTERANCE) Human: My friends are cool but they eat too many carbs. <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer([UTTERANCE], return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>reply_ids = model.generate(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(<span class="hljs-string">&quot;Bot: &quot;</span>, tokenizer.batch_decode(reply_ids, skip_special_tokens=<span class="hljs-literal">True</span>)[<span class="hljs-number">0</span>]) Bot: That<span class="hljs-string">&#x27;s unfortunate. Are they trying to lose weight or are they just trying to be healthier? &gt;&gt;&gt; REPLY = &quot;I&#x27;</span>m <span class="hljs-keyword">not</span> sure<span class="hljs-string">&quot; &gt;&gt;&gt; print(&quot;</span>Human: <span class="hljs-string">&quot;, REPLY) Human: I&#x27;m not sure &gt;&gt;&gt; NEXT_UTTERANCE = ( ... &quot;</span>My friends are cool but they eat too many carbs.&lt;/s&gt; &lt;s&gt;That<span class="hljs-string">&#x27;s unfortunate. &quot; ... &quot;Are they trying to lose weight or are they just trying to be healthier?&lt;/s&gt; &quot; ... &quot;&lt;s&gt; I&#x27;</span>m <span class="hljs-keyword">not</span> sure.<span class="hljs-string">&quot; ... 
) &gt;&gt;&gt; inputs = tokenizer([NEXT_UTTERANCE], return_tensors=&quot;</span>pt<span class="hljs-string">&quot;) &gt;&gt;&gt; next_reply_ids = model.generate(**inputs) &gt;&gt;&gt; print(&quot;</span>Bot: <span class="hljs-string">&quot;, tokenizer.batch_decode(next_reply_ids, skip_special_tokens=True)[0]) Bot: That&#x27;s too bad. Have you tried encouraging them to change their eating habits?</span><!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.BlenderbotForCausalLM" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BlenderbotForCausalLM"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>BlenderbotForCausalLM </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.BlenderbotForCausalLM"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">BlenderbotForCausalLM</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.BlenderbotForCausalLM" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.BlenderbotForCausalLM"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 
79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/blenderbot/modeling_blenderbot.py#L1409" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.BlenderbotForCausalLM.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.BlenderbotForCausalLM.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.BlenderbotForCausalLM.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" 
href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/blenderbot/modeling_blenderbot.py#L1440" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cross_attn_head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_cache<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.CausalLMOutputWithCrossAttentions" >transformers.modeling_outputs.CausalLMOutputWithCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> 
<ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BlenderbotForCausalLM.forward.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BlenderbotForCausalLM.forward.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/blenderbot#transformers.BlenderbotTokenizer">BlenderbotTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BlenderbotForCausalLM.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BlenderbotForCausalLM.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BlenderbotForCausalLM.forward.encoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BlenderbotForCausalLM.forward.encoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_hidden_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BlenderbotForCausalLM.forward.encoder_attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BlenderbotForCausalLM.forward.encoder_attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BlenderbotForCausalLM.forward.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BlenderbotForCausalLM.forward.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BlenderbotForCausalLM.forward.cross_attn_head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BlenderbotForCausalLM.forward.cross_attn_head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cross_attn_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the cross-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BlenderbotForCausalLM.forward.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BlenderbotForCausalLM.forward.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>. 
The two additional tensors are only required when the model is used as a decoder in a Sequence to Sequence model.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BlenderbotForCausalLM.forward.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BlenderbotForCausalLM.forward.labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. Indices should either be in <code>[0, ..., config.vocab_size]</code> or -100 (see <code>input_ids</code> docstring). 
Tokens with indices set to <code>-100</code> are ignored (masked); the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BlenderbotForCausalLM.forward.use_cache" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BlenderbotForCausalLM.forward.use_cache"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BlenderbotForCausalLM.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BlenderbotForCausalLM.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attention tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BlenderbotForCausalLM.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BlenderbotForCausalLM.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BlenderbotForCausalLM.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BlenderbotForCausalLM.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.BlenderbotForCausalLM.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.CausalLMOutputWithCrossAttentions" >transformers.modeling_outputs.CausalLMOutputWithCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 
border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.CausalLMOutputWithCrossAttentions" >transformers.modeling_outputs.CausalLMOutputWithCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/blenderbot#transformers.BlenderbotConfig" >BlenderbotConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) — Language modeling loss (for next-token prediction).</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) — Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attention weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Cross-attention weights after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) — Tuple of <code>torch.FloatTensor</code> tuples of length <code>config.n_layers</code>, with each tuple containing the cached key and value states of the self-attention and the cross-attention layers if the model is used in an encoder-decoder setting. 
Only relevant if <code>config.is_decoder = True</code>.</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BlenderbotTokenizer, BlenderbotForCausalLM <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BlenderbotTokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/blenderbot-400M-distill&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = BlenderbotForCausalLM.from_pretrained( <span class="hljs-meta">... </span> <span class="hljs-string">&quot;facebook/blenderbot-400M-distill&quot;</span>, add_cross_attention=<span class="hljs-literal">False</span> <span class="hljs-meta">... 
</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">assert</span> model.config.is_decoder, <span class="hljs-string">f&quot;<span class="hljs-subst">{model.__class__}</span> has to be configured as a decoder.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits <span class="hljs-meta">&gt;&gt;&gt; </span>expected_shape = [<span class="hljs-number">1</span>, inputs.input_ids.shape[-<span class="hljs-number">1</span>], model.config.vocab_size] <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">list</span>(logits.shape) == expected_shape <span class="hljs-literal">True</span><!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.TFBlenderbotModel" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFBlenderbotModel"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TFBlenderbotModel </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFBlenderbotModel"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TFBlenderbotModel</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TFBlenderbotModel" class="header-link invisible with-hover:group-hover:visible pr-2" 
href="#transformers.TFBlenderbotModel"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/blenderbot/modeling_tf_blenderbot.py#L1166" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFBlenderbotModel.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFBlenderbotModel.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/blenderbot#transformers.BlenderbotConfig">BlenderbotConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>The bare BLENDERBOT Model outputting raw hidden-states without any specific head on top. This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads, etc.).</p> <p>This model is also a <a href="https://www.tensorflow.org/api_docs/python/tf/keras/Model" rel="nofollow">tf.keras.Model</a> subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and behavior.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>TF 2.0 models accept two formats as inputs:</p> <ul><li>having all inputs as keyword arguments (like PyTorch models), or</li> <li>having all inputs as a list, tuple or dict in the first positional argument.</li></ul> <p>This second option is useful when using the <code>tf.keras.Model.fit</code> method, which currently requires having all the tensors in the first argument of the model call function: <code>model(inputs)</code>.</p> <p>If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument:</p> <ul><li>a single Tensor with <code>input_ids</code> only and nothing else: <code>model(input_ids)</code></li> <li>a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: <code>model([input_ids, attention_mask])</code> or <code>model([input_ids, attention_mask, token_type_ids])</code></li> <li>a dictionary with one or several input Tensors associated with the input names given in the docstring: <code>model({&quot;input_ids&quot;: input_ids, &quot;token_type_ids&quot;: token_type_ids})</code></li></ul></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFBlenderbotModel.call"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 
#### call

[< source >](https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/blenderbot/modeling_tf_blenderbot.py#L1191)

`( input_ids = None, attention_mask = None, decoder_input_ids = None, decoder_attention_mask = None, head_mask = None, decoder_head_mask = None, cross_attn_head_mask = None, encoder_outputs: typing.Union[typing.Tuple, transformers.modeling_tf_outputs.TFBaseModelOutput, NoneType] = None, past_key_values = None, inputs_embeds = None, decoder_inputs_embeds = None, use_cache = None, output_attentions = None, output_hidden_states = None, return_dict = None, training = False, **kwargs )` → [transformers.modeling_tf_outputs.TFSeq2SeqModelOutput](/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFSeq2SeqModelOutput) or `tuple(tf.Tensor)`

**Parameters**

- **input_ids** (`tf.Tensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [BlenderbotTokenizer](/docs/transformers/pr_16143/en/model_doc/blenderbot#transformers.BlenderbotTokenizer). See [PreTrainedTokenizer.encode()](/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode) and [PreTrainedTokenizer.__call__()](/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__) for details. [What are input IDs?](../glossary#input-ids)
- **attention_mask** (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: 1 for tokens that are **not masked**, 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask)
- **decoder_input_ids** (`tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [BlenderbotTokenizer](/docs/transformers/pr_16143/en/model_doc/blenderbot#transformers.BlenderbotTokenizer). See `PreTrainedTokenizer.encode()` and `PreTrainedTokenizer.__call__()` for details. [What are decoder input IDs?](../glossary#decoder-input-ids) Blenderbot uses the `bos_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`).
- **decoder_attention_mask** (`tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*): Will be made by default and ignore pad tokens. It is not recommended to set this for most use cases.
- **head_mask** (`tf.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`: 1 indicates the head is **not masked**, 0 indicates the head is **masked**.
- **decoder_head_mask** (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`: 1 indicates the head is **not masked**, 0 indicates the head is **masked**.
- **cross_attn_head_mask** (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`: 1 indicates the head is **not masked**, 0 indicates the head is **masked**.
- **encoder_outputs** (`tf.FloatTensor`, *optional*): Sequence of hidden states at the output of the last layer of the encoder, of shape `(batch_size, sequence_length, hidden_size)`. Used in the cross-attention of the decoder.
- **past_key_values** (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers`): Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
- **use_cache** (`bool`, *optional*, defaults to `True`): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). Set to `False` during training, `True` during generation.
- **output_attentions** (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. This argument can be used only in eager mode; in graph mode the value in the config will be used instead.
- **output_hidden_states** (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. This argument can be used only in eager mode; in graph mode the value in the config will be used instead.
- **return_dict** (`bool`, *optional*): Whether or not to return a [ModelOutput](/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput) instead of a plain tuple. This argument can be used in eager mode; in graph mode the value will always be set to `True`.
- **training** (`bool`, *optional*, defaults to `False`): Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).

**Returns**

[transformers.modeling_tf_outputs.TFSeq2SeqModelOutput](/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFSeq2SeqModelOutput) or `tuple(tf.Tensor)`

A `TFSeq2SeqModelOutput` or a tuple of `tf.Tensor` (if `return_dict=False` is passed or when `config.return_dict=False`) comprising various elements depending on the configuration ([BlenderbotConfig](/docs/transformers/pr_16143/en/model_doc/blenderbot#transformers.BlenderbotConfig)) and inputs.

- **last_hidden_state** (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the decoder of the model. If `past_key_values` is used, only the last hidden-state of the sequences of shape `(batch_size, 1, hidden_size)` is output.
- **past_key_values** (`List[tf.Tensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): List of `tf.Tensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size, num_heads, sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be used (see the `past_key_values` input) to speed up sequential decoding.
- **decoder_hidden_states** (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.
- **decoder_attentions** (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attention weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.
- **cross_attentions** (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attention weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.
- **encoder_last_hidden_state** (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder of the model.
- **encoder_hidden_states** (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.
- **encoder_attentions** (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attention weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.

The [TFBlenderbotModel](/docs/transformers/pr_16143/en/model_doc/blenderbot#transformers.TFBlenderbotModel) forward method overrides the `__call__` special method.

Although the recipe for the forward pass needs to be defined within this function, one should call the `Module` instance afterwards instead of this, since the former takes care of running the pre- and post-processing steps while the latter silently ignores them.

Example:

```python
>>> from transformers import BlenderbotTokenizer, TFBlenderbotModel
>>> import tensorflow as tf

>>> tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-400M-distill")
>>> model = TFBlenderbotModel.from_pretrained("facebook/blenderbot-400M-distill")

>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="tf")
>>> outputs = model(inputs)

>>> last_hidden_states = outputs.last_hidden_state
```
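The `use_cache` and `past_key_values` arguments described above enable incremental decoding: after a first forward pass, only the newest decoder token needs to be fed together with the cached key/value states. The snippet below is a minimal sketch of that pattern; it is not part of the original example, and the "next token" fed in the second pass is only a placeholder, since the base model returns hidden states rather than logits.

```python
>>> import tensorflow as tf
>>> from transformers import BlenderbotTokenizer, TFBlenderbotModel

>>> tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-400M-distill")
>>> model = TFBlenderbotModel.from_pretrained("facebook/blenderbot-400M-distill")

>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="tf")
>>> # Blenderbot starts decoding from bos_token_id (see `decoder_input_ids` above)
>>> decoder_input_ids = tf.fill((1, 1), model.config.bos_token_id)

>>> # first pass: request the key/value cache with use_cache=True
>>> outputs = model(inputs.input_ids, decoder_input_ids=decoder_input_ids, use_cache=True)
>>> past = outputs.past_key_values

>>> # second pass: only the newest decoder token (shape (batch_size, 1)) is passed along with the cache
>>> next_token = tf.fill((1, 1), model.config.bos_token_id)  # placeholder choice of next token
>>> outputs = model(
...     inputs.input_ids,
...     decoder_input_ids=next_token,
...     past_key_values=past,
...     use_cache=True,
... )
```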
## TFBlenderbotForConditionalGeneration

### class transformers.TFBlenderbotForConditionalGeneration

[< source >](https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/blenderbot/modeling_tf_blenderbot.py#L1286)

`( *args, **kwargs )`

**Parameters**

- **config** ([BlenderbotConfig](/docs/transformers/pr_16143/en/model_doc/blenderbot#transformers.BlenderbotConfig)): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [from_pretrained()](/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel.from_pretrained) method to load the model weights.

The BLENDERBOT Model with a language modeling head. Can be used for summarization. This model inherits from [TFPreTrainedModel](/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel). Check the superclass documentation for the generic methods the library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads, etc.).

This model is also a [tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and behavior.

TF 2.0 models accept two formats as inputs:

- having all inputs as keyword arguments (like PyTorch models), or
- having all inputs as a list, tuple or dict in the first positional argument.

This second option is useful when using the `tf.keras.Model.fit` method, which currently requires having all the tensors in the first argument of the model call function: `model(inputs)`.

If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument (a short sketch of each follows below):

- a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
- a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
- a dictionary with one or several input Tensors associated to the input names given in the docstring: `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
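Since Blenderbot has no `token_type_ids`, a hedged sketch of the three options with this model's actual inputs might look like the following; the checkpoint name is only an example, and `decoder_input_ids` is passed explicitly because Blenderbot is an encoder-decoder model.

```python
>>> import tensorflow as tf
>>> from transformers import BlenderbotTokenizer, TFBlenderbotForConditionalGeneration

>>> tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-400M-distill")
>>> model = TFBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-400M-distill")

>>> encoded = tokenizer("Hello, my dog is cute", return_tensors="tf")
>>> # decoding starts from bos_token_id (see `decoder_input_ids` in the call documentation below)
>>> decoder_input_ids = tf.fill((1, 1), model.config.bos_token_id)

>>> # 1. a single tensor with `input_ids` only in the first positional argument
>>> outputs = model(encoded.input_ids, decoder_input_ids=decoder_input_ids)

>>> # 2. a list of input tensors, in the order given in the docstring
>>> #    (input_ids, attention_mask, decoder_input_ids, ...)
>>> outputs = model([encoded.input_ids, encoded.attention_mask, decoder_input_ids])

>>> # 3. a dictionary mapping input names to tensors
>>> outputs = model(
...     {
...         "input_ids": encoded.input_ids,
...         "attention_mask": encoded.attention_mask,
...         "decoder_input_ids": decoder_input_ids,
...     }
... )
```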
8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/blenderbot/modeling_tf_blenderbot.py#L1332" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cross_attn_head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_outputs<span class="opacity-60">: typing.Optional[transformers.modeling_tf_outputs.TFBaseModelOutput] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_cache<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = 
None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">training<span class="opacity-60"> = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFSeq2SeqLMOutput" >transformers.modeling_tf_outputs.TFSeq2SeqLMOutput</a> or <code>tuple(tf.Tensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFBlenderbotForConditionalGeneration.call.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFBlenderbotForConditionalGeneration.call.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>tf.Tensor</code> of shape <code>({0})</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/blenderbot#transformers.BlenderbotTokenizer">BlenderbotTokenizer</a>. 
See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFBlenderbotForConditionalGeneration.call.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFBlenderbotForConditionalGeneration.call.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>tf.Tensor</code> of shape <code>({0})</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFBlenderbotForConditionalGeneration.call.decoder_input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFBlenderbotForConditionalGeneration.call.decoder_input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_input_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/blenderbot#transformers.BlenderbotTokenizer">BlenderbotTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>Blenderbot uses the <code>bos_token_id</code> as the starting token for <code>decoder_input_ids</code> generation. 
If <code>past_key_values</code> is used, optionally only the last <code>decoder_input_ids</code> have to be input (see <code>past_key_values</code>).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFBlenderbotForConditionalGeneration.call.decoder_attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFBlenderbotForConditionalGeneration.call.decoder_attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_attention_mask</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; will be made by default and ignore pad tokens. It is not recommended to set this for most use cases.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFBlenderbotForConditionalGeneration.call.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFBlenderbotForConditionalGeneration.call.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>tf.Tensor</code> of shape <code>(encoder_layers, encoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the encoder. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFBlenderbotForConditionalGeneration.call.decoder_head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFBlenderbotForConditionalGeneration.call.decoder_head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_head_mask</strong> (<code>tf.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFBlenderbotForConditionalGeneration.call.cross_attn_head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFBlenderbotForConditionalGeneration.call.cross_attn_head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cross_attn_head_mask</strong> (<code>tf.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the cross-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFBlenderbotForConditionalGeneration.call.encoder_outputs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFBlenderbotForConditionalGeneration.call.encoder_outputs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_outputs</strong> (<code>tf.FloatTensor</code>, <em>optional</em>) &#x2014; Sequence of hidden states of shape <code>(batch_size, sequence_length, hidden_size)</code> at the output of the last layer of the encoder. Used in the cross-attention of the decoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFBlenderbotForConditionalGeneration.call.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFBlenderbotForConditionalGeneration.call.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>Tuple[Tuple[tf.Tensor]]</code> of length <code>config.n_layers</code>) &#x2014; Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFBlenderbotForConditionalGeneration.call.use_cache" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFBlenderbotForConditionalGeneration.call.use_cache"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>). Set to <code>False</code> during training, <code>True</code> during generation<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFBlenderbotForConditionalGeneration.call.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFBlenderbotForConditionalGeneration.call.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFBlenderbotForConditionalGeneration.call.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFBlenderbotForConditionalGeneration.call.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFBlenderbotForConditionalGeneration.call.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFBlenderbotForConditionalGeneration.call.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. 
This argument can be used in eager mode, in graph mode the value will always be set to True.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFBlenderbotForConditionalGeneration.call.training" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFBlenderbotForConditionalGeneration.call.training"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFBlenderbotForConditionalGeneration.call.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFBlenderbotForConditionalGeneration.call.labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>tf.tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. Indices should either be in <code>[0, ..., config.vocab_size]</code> or -100 (see <code>input_ids</code> docstring). 
Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.TFBlenderbotForConditionalGeneration.call.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFSeq2SeqLMOutput" >transformers.modeling_tf_outputs.TFSeq2SeqLMOutput</a> or <code>tuple(tf.Tensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFSeq2SeqLMOutput" >transformers.modeling_tf_outputs.TFSeq2SeqLMOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/blenderbot#transformers.BlenderbotConfig" >BlenderbotConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(n,)</code>, <em>optional</em>, where n is the number of non-masked labels, returned when <code>labels</code> is provided) — Language modeling loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) — Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>List[tf.Tensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) — List of <code>tf.Tensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>tf.Tensor</code> (one 
for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder’s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) — Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/blenderbot#transformers.TFBlenderbotForConditionalGeneration">TFBlenderbotForConditionalGeneration</a> forward method overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Conversation example:</p> <div class="code-block relative"><pre><!-- HTML_TAG_START -->&gt;&gt;&gt; from transformers import BlenderbotTokenizer, TFBlenderbotForConditionalGeneration
&gt;&gt;&gt; mname = 'facebook/blenderbot-400M-distill'
&gt;&gt;&gt; model = TFBlenderbotForConditionalGeneration.from_pretrained(mname)
&gt;&gt;&gt; tokenizer = BlenderbotTokenizer.from_pretrained(mname)
&gt;&gt;&gt; UTTERANCE = "My friends are cool but they eat too many carbs."
&gt;&gt;&gt; print("Human: ", UTTERANCE)
&gt;&gt;&gt; inputs = tokenizer([UTTERANCE], return_tensors='tf')
&gt;&gt;&gt; reply_ids = model.generate(**inputs)
&gt;&gt;&gt; print("Bot: ", tokenizer.batch_decode(reply_ids, skip_special_tokens=True)[0])

&gt;&gt;&gt; REPLY = "I'm not sure"
&gt;&gt;&gt; print("Human: ", REPLY)
&gt;&gt;&gt; NEXT_UTTERANCE = (
...     "My friends are cool but they eat too many carbs.&lt;/s&gt; &lt;s&gt;That's unfortunate. "
...     "Are they trying to lose weight or are they just trying to be healthier?&lt;/s&gt; "
...     "&lt;s&gt; I'm not sure."
... )
&gt;&gt;&gt; inputs = tokenizer([NEXT_UTTERANCE], return_tensors='tf')
&gt;&gt;&gt; next_reply_ids = model.generate(**inputs)
&gt;&gt;&gt; print("Bot: ", tokenizer.batch_decode(next_reply_ids, skip_special_tokens=True)[0])<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.FlaxBlenderbotModel" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxBlenderbotModel"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>FlaxBlenderbotModel </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxBlenderbotModel"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">FlaxBlenderbotModel</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.FlaxBlenderbotModel" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxBlenderbotModel"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 
0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/blenderbot/modeling_flax_blenderbot.py#L1193" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60">: BlenderbotConfig</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_shape<span class="opacity-60">: typing.Tuple[int] = (1, 1)</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">seed<span class="opacity-60">: int = 0</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dtype<span class="opacity-60">: dtype = &lt;class &#39;jax._src.numpy.lax_numpy.float32&#39;&gt;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxBlenderbotModel.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxBlenderbotModel.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/blenderbot#transformers.BlenderbotConfig">BlenderbotConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>The bare Blenderbot Model transformer outputting raw hidden-states without any specific head on top. This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel">FlaxPreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads etc.).</p> <p>This model is also a Flax Linen <a href="https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html" rel="nofollow">flax.nn.Module</a> subclass. Use it as a regular Flax Module and refer to the Flax documentation for all matters related to general usage and behavior.</p> <p>Finally, this model supports inherent JAX features such as:</p> <ul><li><a href="https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit" rel="nofollow">Just-In-Time (JIT) compilation</a></li> <li><a href="https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation" rel="nofollow">Automatic Differentiation</a></li> <li><a href="https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap" rel="nofollow">Vectorization</a></li> <li><a href="https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap" rel="nofollow">Parallelization</a></li></ul> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxBlenderbotPreTrainedModel.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.FlaxBlenderbotPreTrainedModel.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxBlenderbotPreTrainedModel.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 
88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/blenderbot/modeling_flax_blenderbot.py#L1128" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: ndarray</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60">: typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_input_ids<span class="opacity-60">: typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_attention_mask<span class="opacity-60">: typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60">: typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_position_ids<span class="opacity-60">: typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">train<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">params<span class="opacity-60">: dict = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black 
hover:text-white dark:hover:bg-white dark:hover:text-black">dropout_rng<span class="opacity-60">: PRNGKey = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput" >transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxBlenderbotPreTrainedModel.__call__.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxBlenderbotPreTrainedModel.__call__.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/blenderbot#transformers.BlenderbotTokenizer">BlenderbotTokenizer</a>. 
See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxBlenderbotPreTrainedModel.__call__.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxBlenderbotPreTrainedModel.__call__.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxBlenderbotPreTrainedModel.__call__.decoder_input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxBlenderbotPreTrainedModel.__call__.decoder_input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/blenderbot#transformers.BlenderbotTokenizer">BlenderbotTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>For translation and summarization training, <code>decoder_input_ids</code> should be provided. 
If no <code>decoder_input_ids</code> is provided, the model will create this tensor by shifting the <code>input_ids</code> to the right for denoising pre-training following the paper.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxBlenderbotPreTrainedModel.__call__.decoder_attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxBlenderbotPreTrainedModel.__call__.decoder_attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.</p> <p>If you want to change padding behavior, you should modify to your needs. See diagram 1 in <a href="https://arxiv.org/abs/1910.13461" rel="nofollow">the paper</a> for more information on the default strategy.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxBlenderbotPreTrainedModel.__call__.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxBlenderbotPreTrainedModel.__call__.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxBlenderbotPreTrainedModel.__call__.decoder_position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxBlenderbotPreTrainedModel.__call__.decoder_position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxBlenderbotPreTrainedModel.__call__.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxBlenderbotPreTrainedModel.__call__.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxBlenderbotPreTrainedModel.__call__.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxBlenderbotPreTrainedModel.__call__.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxBlenderbotPreTrainedModel.__call__.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxBlenderbotPreTrainedModel.__call__.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.FlaxBlenderbotPreTrainedModel.__call__.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput" >transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span 
class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput" >transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/blenderbot#transformers.BlenderbotConfig" >BlenderbotConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) — Sequence of hidden-states at the output of the last layer of the decoder of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(jnp.ndarray))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) — Tuple of <code>tuple(jnp.ndarray)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder’s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) — Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> 
<p><strong>encoder_hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <code>FlaxBlenderbotPreTrainedModel</code>forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BlenderbotTokenizer, FlaxBlenderbotModel <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BlenderbotTokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/blenderbot-400M-distill&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxBlenderbotModel.from_pretrained(<span class="hljs-string">&quot;facebook/blenderbot-400M-distill&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span 
class="hljs-string">&quot;jax&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state<!-- HTML_TAG_END --></pre></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxBlenderbotPreTrainedModel.encode"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>encode</span></h4><!-- HTML_TAG_END --> <a id="transformers.FlaxBlenderbotPreTrainedModel.encode" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxBlenderbotPreTrainedModel.encode"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/blenderbot/modeling_flax_blenderbot.py#L949" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: ndarray</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60">: 
typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60">: typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">train<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">params<span class="opacity-60">: dict = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dropout_rng<span class="opacity-60">: PRNGKey = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutput" >transformers.modeling_flax_outputs.FlaxBaseModelOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxBlenderbotPreTrainedModel.encode.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxBlenderbotPreTrainedModel.encode.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input 
sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/blenderbot#transformers.BlenderbotTokenizer">BlenderbotTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxBlenderbotPreTrainedModel.encode.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxBlenderbotPreTrainedModel.encode.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxBlenderbotPreTrainedModel.encode.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxBlenderbotPreTrainedModel.encode.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxBlenderbotPreTrainedModel.encode.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxBlenderbotPreTrainedModel.encode.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxBlenderbotPreTrainedModel.encode.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxBlenderbotPreTrainedModel.encode.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxBlenderbotPreTrainedModel.encode.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxBlenderbotPreTrainedModel.encode.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.FlaxBlenderbotPreTrainedModel.encode.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutput" >transformers.modeling_flax_outputs.FlaxBaseModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto 
border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutput" >transformers.modeling_flax_outputs.FlaxBaseModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<code>&lt;class 'transformers.models.blenderbot.configuration_blenderbot.BlenderbotConfig'&gt;</code>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) — Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BlenderbotTokenizer, FlaxBlenderbotForConditionalGeneration <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxBlenderbotForConditionalGeneration.from_pretrained(<span class="hljs-string">&quot;facebook/blenderbot-400M-distill&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BlenderbotTokenizer.from_pretrained(<span 
class="hljs-string">&quot;facebook/blenderbot-400M-distill&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>text = <span class="hljs-string">&quot;My friends are cool but they eat too many carbs.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(text, max_length=<span class="hljs-number">1024</span>, return_tensors=<span class="hljs-string">&quot;jax&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>encoder_outputs = model.encode(**inputs)<!-- HTML_TAG_END --></pre></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxBlenderbotPreTrainedModel.decode"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>decode</span></h4><!-- HTML_TAG_END --> <a id="transformers.FlaxBlenderbotPreTrainedModel.decode" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxBlenderbotPreTrainedModel.decode"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/blenderbot/modeling_flax_blenderbot.py#L1012" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white 
dark:hover:bg-white dark:hover:text-black">decoder_input_ids<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_outputs<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_attention_mask<span class="opacity-60">: typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_attention_mask<span class="opacity-60">: typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_position_ids<span class="opacity-60">: typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60">: dict = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">train<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">params<span class="opacity-60">: dict = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dropout_rng<span class="opacity-60">: PRNGKey = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions" >transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxBlenderbotPreTrainedModel.decode.decoder_input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.FlaxBlenderbotPreTrainedModel.decode.decoder_input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/blenderbot#transformers.BlenderbotTokenizer">BlenderbotTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>For translation and summarization training, <code>decoder_input_ids</code> should be provided. If no <code>decoder_input_ids</code> is provided, the model will create this tensor by shifting the <code>input_ids</code> to the right for denoising pre-training following the paper.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxBlenderbotPreTrainedModel.decode.encoder_outputs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxBlenderbotPreTrainedModel.decode.encoder_outputs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_outputs</strong> (<code>tuple(tuple(jnp.ndarray)</code>) &#x2014; Tuple consists of (<code>last_hidden_state</code>, <em>optional</em>: <code>hidden_states</code>, <em>optional</em>: <code>attentions</code>) <code>last_hidden_state</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) is a sequence of hidden-states at the 
output of the last layer of the encoder. Used in the cross-attention of the decoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxBlenderbotPreTrainedModel.decode.encoder_attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxBlenderbotPreTrainedModel.decode.encoder_attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxBlenderbotPreTrainedModel.decode.decoder_attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxBlenderbotPreTrainedModel.decode.decoder_attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.</p> <p>If you want to change padding behavior, you should modify to your needs. 
See diagram 1 in <a href="https://arxiv.org/abs/1910.13461" rel="nofollow">the paper</a> for more information on the default strategy.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxBlenderbotPreTrainedModel.decode.decoder_position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxBlenderbotPreTrainedModel.decode.decoder_position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxBlenderbotPreTrainedModel.decode.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxBlenderbotPreTrainedModel.decode.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>Dict[str, np.ndarray]</code>, <em>optional</em>, returned by <code>init_cache</code> or when passing previous <code>past_key_values</code>) &#x2014; Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast auto-regressive decoding. 
Pre-computed key and value hidden-states are of shape <em>[batch_size, max_length]</em>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxBlenderbotPreTrainedModel.decode.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxBlenderbotPreTrainedModel.decode.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxBlenderbotPreTrainedModel.decode.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxBlenderbotPreTrainedModel.decode.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxBlenderbotPreTrainedModel.decode.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxBlenderbotPreTrainedModel.decode.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.FlaxBlenderbotPreTrainedModel.decode.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions" >transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions" >transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<code>&lt;class 'transformers.models.blenderbot.configuration_blenderbot.BlenderbotConfig'&gt;</code>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) — Sequence of hidden-states at the output of the last layer of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(jnp.ndarray))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) — Tuple of <code>tuple(jnp.ndarray)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, 
embed_size_per_head)</code>) and optionally if <code>config.is_encoder_decoder=True</code> 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if <code>config.is_encoder_decoder=True</code> in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> and <code>config.add_cross_attention=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder’s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> jax.numpy <span class="hljs-keyword">as</span> jnp <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> 
BlenderbotTokenizer, FlaxBlenderbotForConditionalGeneration <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxBlenderbotForConditionalGeneration.from_pretrained(<span class="hljs-string">&quot;facebook/blenderbot-400M-distill&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BlenderbotTokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/blenderbot-400M-distill&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>text = <span class="hljs-string">&quot;My friends are cool but they eat too many carbs.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(text, max_length=<span class="hljs-number">1024</span>, return_tensors=<span class="hljs-string">&quot;jax&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>encoder_outputs = model.encode(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>decoder_start_token_id = model.config.decoder_start_token_id <span class="hljs-meta">&gt;&gt;&gt; </span>decoder_input_ids = jnp.ones((inputs.input_ids.shape[<span class="hljs-number">0</span>], <span class="hljs-number">1</span>), dtype=<span class="hljs-string">&quot;i4&quot;</span>) * decoder_start_token_id <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model.decode(decoder_input_ids, encoder_outputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_decoder_hidden_states = outputs.last_hidden_state<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.FlaxBlenderbotForConditionalGeneration" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxBlenderbotForConditionalGeneration"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>FlaxBlenderbotForConditionalGeneration </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxBlenderbotForConditionalGeneration"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 
1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">FlaxBlenderbotForConditionalGeneration</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.FlaxBlenderbotForConditionalGeneration" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxBlenderbotForConditionalGeneration"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/blenderbot/modeling_flax_blenderbot.py#L1280" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60">: BlenderbotConfig</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_shape<span class="opacity-60">: typing.Tuple[int] = (1, 1)</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">seed<span class="opacity-60">: int = 0</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dtype<span class="opacity-60">: dtype = &lt;class &#39;jax._src.numpy.lax_numpy.float32&#39;&gt;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxBlenderbotForConditionalGeneration.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxBlenderbotForConditionalGeneration.config"><span><svg class="text-smd" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/blenderbot#transformers.BlenderbotConfig">BlenderbotConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>The Blenderbot Model with a language modeling head. Can be used for summarization. This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel">FlaxPreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads, etc.).</p> <p>This model is also a Flax Linen <a href="https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html" rel="nofollow">flax.nn.Module</a> subclass.
Use it as a regular Flax Module and refer to the Flax documentation for all matters related to general usage and behavior.</p> <p>Finally, this model supports inherent JAX features such as:</p> <ul><li><a href="https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit" rel="nofollow">Just-In-Time (JIT) compilation</a></li> <li><a href="https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation" rel="nofollow">Automatic Differentiation</a></li> <li><a href="https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap" rel="nofollow">Vectorization</a></li> <li><a href="https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap" rel="nofollow">Parallelization</a></li></ul> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxBlenderbotPreTrainedModel.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.FlaxBlenderbotPreTrainedModel.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxBlenderbotPreTrainedModel.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/blenderbot/modeling_flax_blenderbot.py#L1128" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5
hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: ndarray</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60">: typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_input_ids<span class="opacity-60">: typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_attention_mask<span class="opacity-60">: typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60">: typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_position_ids<span class="opacity-60">: typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">train<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">params<span class="opacity-60">: dict = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dropout_rng<span class="opacity-60">: PRNGKey = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput" >transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex 
space-x-1.5 items-start"><a id="transformers.FlaxBlenderbotPreTrainedModel.__call__.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxBlenderbotPreTrainedModel.__call__.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/blenderbot#transformers.BlenderbotTokenizer">BlenderbotTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxBlenderbotPreTrainedModel.__call__.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxBlenderbotPreTrainedModel.__call__.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxBlenderbotPreTrainedModel.__call__.decoder_input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxBlenderbotPreTrainedModel.__call__.decoder_input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/blenderbot#transformers.BlenderbotTokenizer">BlenderbotTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>For translation and summarization training, <code>decoder_input_ids</code> should be provided. 
If no <code>decoder_input_ids</code> is provided, the model will create this tensor by shifting the <code>input_ids</code> to the right for denoising pre-training following the paper.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxBlenderbotPreTrainedModel.__call__.decoder_attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxBlenderbotPreTrainedModel.__call__.decoder_attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.</p> <p>If you want to change padding behavior, you should modify to your needs. See diagram 1 in <a href="https://arxiv.org/abs/1910.13461" rel="nofollow">the paper</a> for more information on the default strategy.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxBlenderbotPreTrainedModel.__call__.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxBlenderbotPreTrainedModel.__call__.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxBlenderbotPreTrainedModel.__call__.decoder_position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxBlenderbotPreTrainedModel.__call__.decoder_position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxBlenderbotPreTrainedModel.__call__.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxBlenderbotPreTrainedModel.__call__.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxBlenderbotPreTrainedModel.__call__.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxBlenderbotPreTrainedModel.__call__.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxBlenderbotPreTrainedModel.__call__.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxBlenderbotPreTrainedModel.__call__.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.FlaxBlenderbotPreTrainedModel.__call__.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput" >transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span 
class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput" >transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/blenderbot#transformers.BlenderbotConfig" >BlenderbotConfig</a>) and inputs.</p> <ul> <li> <p><strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) — Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(jnp.ndarray))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) — Tuple of <code>tuple(jnp.ndarray)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder’s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) — Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is 
passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <code>FlaxBlenderbotPreTrainedModel</code> forward method overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for the forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre- and post-processing steps while the latter silently ignores them.</p></div> <p>Conversation example:</p> <div class="code-block relative"><pre><!-- HTML_TAG_START -->&gt;&gt;&gt; from transformers import BlenderbotTokenizer, FlaxBlenderbotForConditionalGeneration, BlenderbotConfig

&gt;&gt;&gt; model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-400M-distill")
&gt;&gt;&gt; tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-400M-distill")

&gt;&gt;&gt; UTTERANCE = "My friends are cool but they eat too many carbs."
&gt;&gt;&gt; inputs = tokenizer([UTTERANCE], max_length=1024, return_tensors="np")

&gt;&gt;&gt; # Generate Reply
&gt;&gt;&gt; reply_ids = model.generate(inputs["input_ids"], num_beams=4, max_length=5, early_stopping=True).sequences
&gt;&gt;&gt; print([tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=False) for g in reply_ids])<!-- HTML_TAG_END --></pre></div>
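<p>The JAX features listed above apply to this class as-is; in particular, the forward pass can be compiled with <code>jax.jit</code>. The snippet below is a minimal, illustrative sketch rather than a documented API pattern: the <code>jitted_forward</code> wrapper is a hypothetical helper defined only for this example, and it relies on the behavior described above where <code>decoder_input_ids</code> are created by shifting <code>input_ids</code> to the right when they are not provided.</p> <div class="code-block relative"><pre><!-- HTML_TAG_START -->import jax

from transformers import BlenderbotTokenizer, FlaxBlenderbotForConditionalGeneration

model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-400M-distill")
tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-400M-distill")

inputs = tokenizer(["My friends are cool but they eat too many carbs."], return_tensors="jax")


@jax.jit
def jitted_forward(input_ids, attention_mask, params):
    # Hypothetical helper (not part of the library): runs the full
    # encoder-decoder forward pass under JIT; params are passed explicitly
    # so the weights are traced as an argument rather than baked in.
    return model(input_ids=input_ids, attention_mask=attention_mask, params=params)


outputs = jitted_forward(inputs["input_ids"], inputs["attention_mask"], model.params)
logits = outputs.logits  # shape (batch_size, sequence_length, config.vocab_size)<!-- HTML_TAG_END --></pre></div>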
</div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxBlenderbotPreTrainedModel.encode"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>encode</span></h4><!-- HTML_TAG_END --> <a id="transformers.FlaxBlenderbotPreTrainedModel.encode" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxBlenderbotPreTrainedModel.encode"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/blenderbot/modeling_flax_blenderbot.py#L949" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: ndarray</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60">:
typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60">: typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">train<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">params<span class="opacity-60">: dict = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dropout_rng<span class="opacity-60">: PRNGKey = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutput" >transformers.modeling_flax_outputs.FlaxBaseModelOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxBlenderbotPreTrainedModel.encode.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxBlenderbotPreTrainedModel.encode.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input 
sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/blenderbot#transformers.BlenderbotTokenizer">BlenderbotTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxBlenderbotPreTrainedModel.encode.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxBlenderbotPreTrainedModel.encode.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxBlenderbotPreTrainedModel.encode.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxBlenderbotPreTrainedModel.encode.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxBlenderbotPreTrainedModel.encode.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxBlenderbotPreTrainedModel.encode.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxBlenderbotPreTrainedModel.encode.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxBlenderbotPreTrainedModel.encode.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxBlenderbotPreTrainedModel.encode.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxBlenderbotPreTrainedModel.encode.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.FlaxBlenderbotPreTrainedModel.encode.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutput" >transformers.modeling_flax_outputs.FlaxBaseModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto 
border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutput" >transformers.modeling_flax_outputs.FlaxBaseModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<code>&lt;class 'transformers.models.blenderbot.configuration_blenderbot.BlenderbotConfig'&gt;</code>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) — Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BlenderbotTokenizer, FlaxBlenderbotForConditionalGeneration <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxBlenderbotForConditionalGeneration.from_pretrained(<span class="hljs-string">&quot;facebook/blenderbot-400M-distill&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BlenderbotTokenizer.from_pretrained(<span 
class="hljs-string">&quot;facebook/blenderbot-400M-distill&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>text = <span class="hljs-string">&quot;My friends are cool but they eat too many carbs.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(text, max_length=<span class="hljs-number">1024</span>, return_tensors=<span class="hljs-string">&quot;jax&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>encoder_outputs = model.encode(**inputs)<!-- HTML_TAG_END --></pre></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxBlenderbotForConditionalGeneration.decode"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>decode</span></h4><!-- HTML_TAG_END --> <a id="transformers.FlaxBlenderbotForConditionalGeneration.decode" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxBlenderbotForConditionalGeneration.decode"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/blenderbot/modeling_flax_blenderbot.py#L1284" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded 
hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_input_ids<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_outputs<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_attention_mask<span class="opacity-60">: typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_attention_mask<span class="opacity-60">: typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_position_ids<span class="opacity-60">: typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60">: dict = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">train<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">params<span class="opacity-60">: dict = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dropout_rng<span class="opacity-60">: PRNGKey = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions" >transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxBlenderbotForConditionalGeneration.decode.decoder_input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 
with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxBlenderbotForConditionalGeneration.decode.decoder_input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/blenderbot#transformers.BlenderbotTokenizer">BlenderbotTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>For translation and summarization training, <code>decoder_input_ids</code> should be provided. 
If no <code>decoder_input_ids</code> is provided, the model will create this tensor by shifting the <code>input_ids</code> to the right for denoising pre-training following the paper.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxBlenderbotForConditionalGeneration.decode.encoder_outputs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxBlenderbotForConditionalGeneration.decode.encoder_outputs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_outputs</strong> (<code>tuple(tuple(jnp.ndarray)</code>) &#x2014; Tuple consists of (<code>last_hidden_state</code>, <em>optional</em>: <code>hidden_states</code>, <em>optional</em>: <code>attentions</code>) <code>last_hidden_state</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxBlenderbotForConditionalGeneration.decode.encoder_attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxBlenderbotForConditionalGeneration.decode.encoder_attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxBlenderbotForConditionalGeneration.decode.decoder_attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxBlenderbotForConditionalGeneration.decode.decoder_attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.</p> <p>If you want to change padding behavior, you should modify to your needs. See diagram 1 in <a href="https://arxiv.org/abs/1910.13461" rel="nofollow">the paper</a> for more information on the default strategy.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxBlenderbotForConditionalGeneration.decode.decoder_position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxBlenderbotForConditionalGeneration.decode.decoder_position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each decoder input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxBlenderbotForConditionalGeneration.decode.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxBlenderbotForConditionalGeneration.decode.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>Dict[str, np.ndarray]</code>, <em>optional</em>, returned by <code>init_cache</code> or when passing previous <code>past_key_values</code>) &#x2014; Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast auto-regressive decoding. Pre-computed key and value hidden-states are of shape <em>[batch_size, max_length]</em>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxBlenderbotForConditionalGeneration.decode.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxBlenderbotForConditionalGeneration.decode.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxBlenderbotForConditionalGeneration.decode.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxBlenderbotForConditionalGeneration.decode.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxBlenderbotForConditionalGeneration.decode.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxBlenderbotForConditionalGeneration.decode.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.FlaxBlenderbotForConditionalGeneration.decode.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions" >transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions</a> or 
<code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions" >transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<code>&lt;class 'transformers.models.blenderbot.configuration_blenderbot.BlenderbotConfig'&gt;</code>) and inputs.</p> <ul> <li> <p><strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) — Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Cross attentions weights after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(jnp.ndarray))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) — Tuple of <code>jnp.ndarray</code> tuples of length <code>config.n_layers</code>, with each tuple containing the cached key, value states of the self-attention and the cross-attention layers if model is used in encoder-decoder setting. 
Only relevant if <code>config.is_decoder = True</code>.</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> jax.numpy <span class="hljs-keyword">as</span> jnp <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BlenderbotTokenizer, FlaxBlenderbotForConditionalGeneration <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxBlenderbotForConditionalGeneration.from_pretrained(<span class="hljs-string">&quot;facebook/blenderbot-400M-distill&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BlenderbotTokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/blenderbot-400M-distill&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>text = <span class="hljs-string">&quot;My friends are cool but they eat too many carbs.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(text, max_length=<span class="hljs-number">1024</span>, return_tensors=<span class="hljs-string">&quot;jax&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>encoder_outputs = model.encode(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>decoder_start_token_id = model.config.decoder_start_token_id <span class="hljs-meta">&gt;&gt;&gt; </span>decoder_input_ids = jnp.ones((inputs.input_ids.shape[<span class="hljs-number">0</span>], <span class="hljs-number">1</span>), dtype=<span class="hljs-string">&quot;i4&quot;</span>) * decoder_start_token_id <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model.decode(decoder_input_ids, encoder_outputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits<!-- HTML_TAG_END --></pre></div></div></div> <script type="module" data-hydrate="1pf30sk"> import { start } from "/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"; start({ target: document.querySelector('[data-hydrate="1pf30sk"]').parentNode, paths: {"base":"/docs/transformers/pr_16143/en","assets":"/docs/transformers/pr_16143/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 
200, error: null, nodes: [ import("/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"), import("/docs/transformers/pr_16143/en/_app/pages/model_doc/blenderbot.mdx-2f4c855d.js") ], params: {} } }); </script>
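<p>The rendered page above ends with the single-step <code>decode()</code> example. As a hedged follow-up sketch (not part of the original page), the snippet below shows one way to pick a next token from those logits and to let <code>generate()</code> drive the full cached decoding loop described under <code>past_key_values</code>; the greedy argmax step and <code>max_length=40</code> are illustrative assumptions, not values taken from the documentation.</p> <pre>
# Continuing from the decode() example above (model, tokenizer, inputs, outputs are already defined).
import jax.numpy as jnp

# Greedy choice of the next token from the first decoding step's logits (illustrative only).
next_token_id = jnp.argmax(outputs.logits[:, -1, :], axis=-1)

# In practice the full loop, including the past_key_values cache documented above,
# is usually delegated to generate(); max_length=40 is an assumed example value.
generated = model.generate(inputs.input_ids, max_length=40)
print(tokenizer.batch_decode(generated.sequences, skip_special_tokens=True))
</pre>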
206
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en
hf_public_repos/doc-build-dev/transformers/pr_16143/en/model_doc/vision-encoder-decoder.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;vision-encoder-decoder-models&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;transformers.VisionEncoderDecoderConfig&quot;,&quot;title&quot;:&quot;VisionEncoderDecoderConfig&quot;},{&quot;local&quot;:&quot;transformers.VisionEncoderDecoderModel&quot;,&quot;title&quot;:&quot;VisionEncoderDecoderModel&quot;},{&quot;local&quot;:&quot;transformers.TFVisionEncoderDecoderModel&quot;,&quot;title&quot;:&quot;TFVisionEncoderDecoderModel&quot;},{&quot;local&quot;:&quot;transformers.FlaxVisionEncoderDecoderModel&quot;,&quot;title&quot;:&quot;FlaxVisionEncoderDecoderModel&quot;}],&quot;title&quot;:&quot;Vision Encoder Decoder Models&quot;}" data-svelte="svelte-1phssyn"> <link rel="stylesheet" href="/docs/transformers/pr_16143/en/_app/assets/pages/__layout.svelte-a5c8879b.css"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/vendor-4833417e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/paths-4b3c6e7e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/model_doc/vision-encoder-decoder.mdx-b5cd5518.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Tip-fffd6df1.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Docstring-4f315ed9.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/IconCopyLink-4b81c553.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CodeBlock-6a3d1b46.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CopyButton-dacfbfaf.js"> <h1 class="relative group"><a id="vision-encoder-decoder-models" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#vision-encoder-decoder-models"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Vision Encoder Decoder Models </span></h1> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/vision-encoder-decoder#transformers.VisionEncoderDecoderModel">VisionEncoderDecoderModel</a> can be used to initialize an image-to-text-sequence model with any pretrained Transformer-based vision autoencoding model as the encoder (<em>e.g.</em> <a href="vit">ViT</a>, <a href="beit">BEiT</a>, <a href="deit">DeiT</a>, <a href="swin">Swin</a>) and any pretrained language model as the decoder (<em>e.g.</em> <a href="roberta">RoBERTa</a>, <a href="gpt2">GPT2</a>, <a 
href="bert">BERT</a>, <a href="distilbert">DistilBERT</a>).</p> <p>The effectiveness of initializing image-to-text-sequence models with pretrained checkpoints has been shown in (for example) <a href="https://arxiv.org/abs/2109.10282" rel="nofollow">TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models</a> by Minghao Li, Tengchao Lv, Lei Cui, Yijuan Lu, Dinei Florencio, Cha Zhang, Zhoujun Li, Furu Wei.</p> <p>An example of how to use a <a href="/docs/transformers/pr_16143/en/model_doc/vision-encoder-decoder#transformers.VisionEncoderDecoderModel">VisionEncoderDecoderModel</a> for inference can be seen in <a href="trocr">TrOCR</a>.</p> <h2 class="relative group"><a id="transformers.VisionEncoderDecoderConfig" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.VisionEncoderDecoderConfig"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>VisionEncoderDecoderConfig </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.VisionEncoderDecoderConfig"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">VisionEncoderDecoderConfig</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.VisionEncoderDecoderConfig" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.VisionEncoderDecoderConfig"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path 
d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/vision_encoder_decoder/configuration_vision_encoder_decoder.py#L27" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.VisionEncoderDecoderConfig.kwargs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.VisionEncoderDecoderConfig.kwargs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>kwargs</strong> (<em>optional</em>) &#x2014; Dictionary of keyword arguments. 
Notably:</p> <ul> <li><strong>encoder</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a>, <em>optional</em>) &#x2014; An instance of a configuration object that defines the encoder config.</li> <li><strong>decoder</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a>, <em>optional</em>) &#x2014; An instance of a configuration object that defines the decoder config.</li> </ul><!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p><a href="/docs/transformers/pr_16143/en/model_doc/vision-encoder-decoder#transformers.VisionEncoderDecoderConfig">VisionEncoderDecoderConfig</a> is the configuration class to store the configuration of a <a href="/docs/transformers/pr_16143/en/model_doc/vision-encoder-decoder#transformers.VisionEncoderDecoderModel">VisionEncoderDecoderModel</a>. It is used to instantiate a Vision-Encoder-Text-Decoder model according to the specified arguments, defining the encoder and decoder configs.</p> <p>Configuration objects inherit from <a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a> and can be used to control the model outputs. Read the documentation from <a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a> for more information.</p> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BertConfig, ViTConfig, VisionEncoderDecoderConfig, VisionEncoderDecoderModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a ViT &amp; BERT style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config_encoder = ViTConfig() <span class="hljs-meta">&gt;&gt;&gt; </span>config_decoder = BertConfig() <span class="hljs-meta">&gt;&gt;&gt; </span>config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(config_encoder, config_decoder) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a ViTBert model from a ViT &amp; bert-base-uncased style configurations</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = 
VisionEncoderDecoderModel(config=config) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config_encoder = model.config.encoder <span class="hljs-meta">&gt;&gt;&gt; </span>config_decoder = model.config.decoder <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># set decoder config to causal lm</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config_decoder.is_decoder = <span class="hljs-literal">True</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config_decoder.add_cross_attention = <span class="hljs-literal">True</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Saving the model, including its configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model.save_pretrained(<span class="hljs-string">&quot;my-model&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># loading model and config from pretrained folder</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoder_decoder_config = VisionEncoderDecoderConfig.from_pretrained(<span class="hljs-string">&quot;my-model&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = VisionEncoderDecoderModel.from_pretrained(<span class="hljs-string">&quot;my-model&quot;</span>, config=encoder_decoder_config)<!-- HTML_TAG_END --></pre></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.VisionEncoderDecoderConfig.from_encoder_decoder_configs"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>from_encoder_decoder_configs</span></h4><!-- HTML_TAG_END --> <a id="transformers.VisionEncoderDecoderConfig.from_encoder_decoder_configs" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.VisionEncoderDecoderConfig.from_encoder_decoder_configs"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 
1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/vision_encoder_decoder/configuration_vision_encoder_decoder.py#L93" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_config<span class="opacity-60">: PretrainedConfig</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_config<span class="opacity-60">: PretrainedConfig</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/model_doc/vision-encoder-decoder#transformers.VisionEncoderDecoderConfig" >VisionEncoderDecoderConfig</a></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.VisionEncoderDecoderConfig.from_encoder_decoder_configs.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/model_doc/vision-encoder-decoder#transformers.VisionEncoderDecoderConfig" >VisionEncoderDecoderConfig</a></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>An instance of a configuration object</p> <!-- HTML_TAG_END --></p></div></div> <p>Instantiate a <a href="/docs/transformers/pr_16143/en/model_doc/vision-encoder-decoder#transformers.VisionEncoderDecoderConfig">VisionEncoderDecoderConfig</a> (or a derived class) from a pre-trained encoder model configuration and decoder model configuration.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.VisionEncoderDecoderConfig.to_dict"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 
15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>to_dict</span></h4><!-- HTML_TAG_END --> <a id="transformers.VisionEncoderDecoderConfig.to_dict" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.VisionEncoderDecoderConfig.to_dict"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/vision_encoder_decoder/configuration_vision_encoder_decoder.py#L110" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>Dict[str, any]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.VisionEncoderDecoderConfig.to_dict.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>Dict[str, any]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>Dictionary of all the attributes that make up this configuration instance,</p> <!-- HTML_TAG_END --></p></div></div> <p>Serializes this instance to a Python dictionary. 
Override the default <em>to_dict()</em> from <em>PretrainedConfig</em>.</p></div></div> <h2 class="relative group"><a id="transformers.VisionEncoderDecoderModel" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.VisionEncoderDecoderModel"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>VisionEncoderDecoderModel </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.VisionEncoderDecoderModel"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">VisionEncoderDecoderModel</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.VisionEncoderDecoderModel" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.VisionEncoderDecoderModel"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 
!no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/vision_encoder_decoder/modeling_vision_encoder_decoder.py#L149" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60">: typing.Optional[transformers.configuration_utils.PretrainedConfig] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder<span class="opacity-60">: typing.Optional[transformers.modeling_utils.PreTrainedModel] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder<span class="opacity-60">: typing.Optional[transformers.modeling_utils.PreTrainedModel] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.VisionEncoderDecoderModel.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.VisionEncoderDecoderModel.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/vision-encoder-decoder#transformers.VisionEncoderDecoderConfig">VisionEncoderDecoderConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>This class can be used to initialize an image-to-text-sequence model with any pretrained vision autoencoding model as the encoder and any pretrained text autoregressive model as the decoder. The encoder is loaded via <code>from_pretrained()</code>function and the decoder is loaded via <code>from_pretrained()</code>function. 
Cross-attention layers are automatically added to the decoder and should be fine-tuned on a downstream generative task, like image captioning.</p> <p>The effectiveness of initializing sequence-to-sequence models with pretrained checkpoints for sequence generation tasks was shown in <a href="https://arxiv.org/abs/1907.12461" rel="nofollow">Leveraging Pre-trained Checkpoints for Sequence Generation Tasks</a> by Sascha Rothe, Shashi Narayan, Aliaksei Severyn. Michael Matena, Yanqi Zhou, Wei Li, Peter J. Liu.</p> <p>Additionally, in <a href="https://arxiv.org/abs/2109.10282" rel="nofollow">TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models</a> it is shown how leveraging large pretrained vision models for optical character recognition (OCR) yields a significant performance improvement.</p> <p>After such a Vision-Encoder-Text-Decoder model has been trained/fine-tuned, it can be saved/loaded just like any other models (see the examples for more information).</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)</p> <p>This model is also a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.</p> <p><a href="/docs/transformers/pr_16143/en/model_doc/vision-encoder-decoder#transformers.VisionEncoderDecoderModel">VisionEncoderDecoderModel</a> is a generic model class that will be instantiated as a transformer architecture with one of the base vision model classes of the library as encoder and another one as decoder when created with the :meth<em>~transformers.AutoModel.from_pretrained</em> class method for the encoder and :meth<em>~transformers.AutoModelForCausalLM.from_pretrained</em> class method for the decoder.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.VisionEncoderDecoderModel.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 
13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.VisionEncoderDecoderModel.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.VisionEncoderDecoderModel.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/vision_encoder_decoder/modeling_vision_encoder_decoder.py#L401" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pixel_values<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_outputs<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_cache<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span 
class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.Seq2SeqLMOutput" >transformers.modeling_outputs.Seq2SeqLMOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.VisionEncoderDecoderModel.forward.pixel_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.VisionEncoderDecoderModel.forward.pixel_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pixel_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_channels, height, width)</code>) &#x2014; Pixel values. Pixel values can be obtained using a feature extractor (e.g. if you use ViT as the encoder, you should use <a href="/docs/transformers/pr_16143/en/model_doc/vit#transformers.ViTFeatureExtractor">ViTFeatureExtractor</a>). 
See <a href="/docs/transformers/pr_16143/en/model_doc/vit#transformers.ViTFeatureExtractor.__call__">ViTFeatureExtractor.<strong>call</strong>()</a> for details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.VisionEncoderDecoderModel.forward.decoder_input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.VisionEncoderDecoderModel.forward.decoder_input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>. 
See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a></p> <p>If <code>past_key_values</code> is used, optionally only the last <code>decoder_input_ids</code> have to be input (see <code>past_key_values</code>).</p> <p>For training, <code>decoder_input_ids</code> are automatically created by the model by shifting the <code>labels</code> to the right, replacing -100 by the <code>pad_token_id</code> and prepending them with the <code>decoder_start_token_id</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.VisionEncoderDecoderModel.forward.decoder_attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.VisionEncoderDecoderModel.forward.decoder_attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_attention_mask</strong> (<code>torch.BoolTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. 
Causal mask will also be used by default.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.VisionEncoderDecoderModel.forward.encoder_outputs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.VisionEncoderDecoderModel.forward.encoder_outputs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_outputs</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>) &#x2014; This tuple must consist of (<code>last_hidden_state</code>, <em>optional</em>: <code>hidden_states</code>, <em>optional</em>: <code>attentions</code>) <code>last_hidden_state</code> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) is a tensor of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.VisionEncoderDecoderModel.forward.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.VisionEncoderDecoderModel.forward.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code> of length <code>config.n_layers</code> with each tuple having 4 tensors of shape <code>(batch_size, num_heads, sequence_length - 1, embed_size_per_head)</code>) &#x2014; Contains precomputed key and value hidden states of the attention blocks. 
Can be used to speed up decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.VisionEncoderDecoderModel.forward.decoder_inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.VisionEncoderDecoderModel.forward.decoder_inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, target_sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>decoder_input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>decoder_input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.VisionEncoderDecoderModel.forward.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.VisionEncoderDecoderModel.forward.labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss for the decoder. 
Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.VisionEncoderDecoderModel.forward.use_cache" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.VisionEncoderDecoderModel.forward.use_cache"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.VisionEncoderDecoderModel.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.VisionEncoderDecoderModel.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.VisionEncoderDecoderModel.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.VisionEncoderDecoderModel.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.VisionEncoderDecoderModel.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.VisionEncoderDecoderModel.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, the model will return a <code>Seq2SeqLMOutput</code> instead of a plain tuple. kwargs &#x2014; (<em>optional</em>) Remaining dictionary of keyword arguments. 
Keyword arguments come in two flavors:</p> <ul> <li>Without a prefix which will be input as <code>**encoder_kwargs</code> for the encoder forward function.</li> <li>With a <em>decoder_</em> prefix which will be input as <code>**decoder_kwargs</code> for the decoder forward function.</li> </ul><!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.VisionEncoderDecoderModel.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.Seq2SeqLMOutput" >transformers.modeling_outputs.Seq2SeqLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.Seq2SeqLMOutput" >transformers.modeling_outputs.Seq2SeqLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/vision-encoder-decoder#transformers.VisionEncoderDecoderConfig" >VisionEncoderDecoderConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) — Language modeling loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) — Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) — Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the 
attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder’s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) — Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/vision-encoder-decoder#transformers.VisionEncoderDecoderModel">VisionEncoderDecoderModel</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 
leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TrOCRProcessor, VisionEncoderDecoderModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> requests <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>processor = TrOCRProcessor.from_pretrained(<span class="hljs-string">&quot;microsoft/trocr-base-handwritten&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = VisionEncoderDecoderModel.from_pretrained(<span class="hljs-string">&quot;microsoft/trocr-base-handwritten&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># load image from the IAM dataset</span> <span class="hljs-meta">&gt;&gt;&gt; </span>url = <span class="hljs-string">&quot;https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>image = Image.<span class="hljs-built_in">open</span>(requests.get(url, stream=<span class="hljs-literal">True</span>).raw).convert(<span class="hljs-string">&quot;RGB&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># training</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model.config.decoder_start_token_id = processor.tokenizer.cls_token_id <span class="hljs-meta">&gt;&gt;&gt; </span>model.config.pad_token_id = processor.tokenizer.pad_token_id <span class="hljs-meta">&gt;&gt;&gt; </span>model.config.vocab_size = model.config.decoder.vocab_size <span class="hljs-meta">&gt;&gt;&gt; </span>pixel_values = processor(image, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).pixel_values <span class="hljs-meta">&gt;&gt;&gt; </span>text = <span class="hljs-string">&quot;hello world&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>labels = processor.tokenizer(text, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(pixel_values=pixel_values, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># inference (generation)</span> <span class="hljs-meta">&gt;&gt;&gt; </span>generated_ids = model.generate(pixel_values) <span class="hljs-meta">&gt;&gt;&gt; </span>generated_text = processor.batch_decode(generated_ids, skip_special_tokens=<span class="hljs-literal">True</span>)[<span class="hljs-number">0</span>]<!-- HTML_TAG_END --></pre></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.VisionEncoderDecoderModel.from_encoder_decoder_pretrained"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg 
width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>from_encoder_decoder_pretrained</span></h4><!-- HTML_TAG_END --> <a id="transformers.VisionEncoderDecoderModel.from_encoder_decoder_pretrained" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.VisionEncoderDecoderModel.from_encoder_decoder_pretrained"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/vision_encoder_decoder/modeling_vision_encoder_decoder.py#L246" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_pretrained_model_name_or_path<span class="opacity-60">: str = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_pretrained_model_name_or_path<span class="opacity-60">: str = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*model_args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 
text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.VisionEncoderDecoderModel.from_encoder_decoder_pretrained.encoder_pretrained_model_name_or_path" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.VisionEncoderDecoderModel.from_encoder_decoder_pretrained.encoder_pretrained_model_name_or_path"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_pretrained_model_name_or_path</strong> (<code>str</code>, <em>optional</em>) &#x2014; Information necessary to initiate the image encoder. Can be either:</p> <ul> <li>A string, the <em>model id</em> of a pretrained model hosted inside a model repo on huggingface.co. An example is <code>google/vit-base-patch16-224-in21k</code>.</li> <li>A path to a <em>directory</em> containing model weights saved using <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.save_pretrained">save_pretrained()</a>, e.g., <code>./my_model_directory/</code>.</li> <li>A path or url to a <em>tensorflow index checkpoint file</em> (e.g, <code>./tf_model/model.ckpt.index</code>). In this case, <code>from_tf</code> should be set to <code>True</code> and a configuration object should be provided as <code>config</code> argument. 
This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.VisionEncoderDecoderModel.from_encoder_decoder_pretrained.decoder_pretrained_model_name_or_path" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.VisionEncoderDecoderModel.from_encoder_decoder_pretrained.decoder_pretrained_model_name_or_path"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_pretrained_model_name_or_path</strong> (<code>str</code>, <em>optional</em>, defaults to <code>None</code>) &#x2014; Information necessary to initiate the text decoder. Can be either:</p> <ul> <li>A string, the <em>model id</em> of a pretrained model hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like <code>bert-base-uncased</code>, or namespaced under a user or organization name, like <code>dbmdz/bert-base-german-cased</code>.</li> <li>A path to a <em>directory</em> containing model weights saved using <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.save_pretrained">save_pretrained()</a>, e.g., <code>./my_model_directory/</code>.</li> <li>A path or url to a <em>tensorflow index checkpoint file</em> (e.g, <code>./tf_model/model.ckpt.index</code>). In this case, <code>from_tf</code> should be set to <code>True</code> and a configuration object should be provided as <code>config</code> argument. 
This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.VisionEncoderDecoderModel.from_encoder_decoder_pretrained.model_args" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.VisionEncoderDecoderModel.from_encoder_decoder_pretrained.model_args"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>model_args</strong> (remaining positional arguments, <em>optional</em>) &#x2014; All remaining positional arguments will be passed to the underlying model&#x2019;s <code>__init__</code> method.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.VisionEncoderDecoderModel.from_encoder_decoder_pretrained.kwargs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.VisionEncoderDecoderModel.from_encoder_decoder_pretrained.kwargs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>kwargs</strong> (remaining dictionary of keyword arguments, <em>optional</em>) &#x2014; Can be used to update the configuration object (after it has been loaded) and initiate the model (e.g., <code>output_attentions=True</code>).</p> <ul> <li>To update the encoder configuration, use the prefix <em>encoder_</em> for each configuration parameter.</li> <li>To update the decoder configuration, use the prefix <em>decoder_</em> for each configuration parameter.</li> <li>To update the parent model configuration, do not use a prefix for each configuration parameter.</li> </ul> <p>Behaves differently depending on 
whether a <code>config</code> is provided or automatically loaded.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Instantiate an encoder and a decoder from one or two base classes of the library from pretrained model checkpoints.</p> <p>The model is set in evaluation mode by default using <code>model.eval()</code> (Dropout modules are deactivated). To train the model, you need to first set it back in training mode with <code>model.train()</code>.</p> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> VisionEncoderDecoderModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># initialize a vit-bert from a pretrained ViT and a pretrained BERT model. Note that the cross-attention layers will be randomly initialized</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = VisionEncoderDecoderModel.from_encoder_decoder_pretrained( <span class="hljs-meta">... </span> <span class="hljs-string">&quot;google/vit-base-patch16-224-in21k&quot;</span>, <span class="hljs-string">&quot;bert-base-uncased&quot;</span> <span class="hljs-meta">... 
</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># saving model after fine-tuning</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model.save_pretrained(<span class="hljs-string">&quot;./vit-bert&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># load fine-tuned model</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = VisionEncoderDecoderModel.from_pretrained(<span class="hljs-string">&quot;./vit-bert&quot;</span>)<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.TFVisionEncoderDecoderModel" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFVisionEncoderDecoderModel"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TFVisionEncoderDecoderModel </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFVisionEncoderDecoderModel"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TFVisionEncoderDecoderModel</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TFVisionEncoderDecoderModel" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFVisionEncoderDecoderModel"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 
11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/vision_encoder_decoder/modeling_tf_vision_encoder_decoder.py#L176" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFVisionEncoderDecoderModel.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFVisionEncoderDecoderModel.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/vision-encoder-decoder#transformers.VisionEncoderDecoderConfig">VisionEncoderDecoderConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>This class can be used to initialize an image-to-text-sequence model with any pretrained vision autoencoding model as the encoder and any pretrained text autoregressive model as the decoder. 
The encoder is loaded via the <code>from_pretrained()</code> function and the decoder is loaded via the <code>from_pretrained()</code> function. Cross-attention layers are automatically added to the decoder and should be fine-tuned on a downstream generative task, like image captioning.</p> <p>The effectiveness of initializing sequence-to-sequence models with pretrained checkpoints for sequence generation tasks was shown in <a href="https://arxiv.org/abs/1907.12461" rel="nofollow">Leveraging Pre-trained Checkpoints for Sequence Generation Tasks</a> by Sascha Rothe, Shashi Narayan, Aliaksei Severyn.</p> <p>Additionally, in <a href="https://arxiv.org/abs/2109.10282" rel="nofollow">TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models</a> it is shown how leveraging large pretrained vision models for optical character recognition (OCR) yields a significant performance improvement.</p> <p>After such a Vision-Encoder-Text-Decoder model has been trained/fine-tuned, it can be saved/loaded just like any other model (see the examples for more information).</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads, etc.).</p> <p>This model is also a <a href="https://www.tensorflow.org/api_docs/python/tf/keras/Model" rel="nofollow">tf.keras.Model</a> subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and behavior.</p> <p><a href="/docs/transformers/pr_16143/en/model_doc/vision-encoder-decoder#transformers.TFVisionEncoderDecoderModel">TFVisionEncoderDecoderModel</a> is a generic model class that will be instantiated as a transformer architecture with one of the base vision model classes of the library as encoder and another one of the base model classes as decoder when created with the <code>from_pretrained()</code> class method for the encoder and the <code>from_pretrained()</code> class method for the decoder.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFVisionEncoderDecoderModel.call"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 
7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>call</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFVisionEncoderDecoderModel.call" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFVisionEncoderDecoderModel.call"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/vision_encoder_decoder/modeling_tf_vision_encoder_decoder.py#L513" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pixel_values<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_outputs<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_cache<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">training<span class="opacity-60"> = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFSeq2SeqLMOutput" >transformers.modeling_tf_outputs.TFSeq2SeqLMOutput</a> or <code>tuple(tf.Tensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFVisionEncoderDecoderModel.call.pixel_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFVisionEncoderDecoderModel.call.pixel_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pixel_values</strong> (<code>np.ndarray</code>, <code>tf.Tensor</code>, <code>List[tf.Tensor]</code> `<code>Dict[str, tf.Tensor]</code> or <code>Dict[str, np.ndarray]</code> and each example must have the shape <code>(batch_size, num_channels, height, width)</code>) &#x2014; Pixel values. Pixel values can be obtained using the vision&#x2019;s model&#x2019;s feature extractor. For example, using <a href="/docs/transformers/pr_16143/en/model_doc/vit#transformers.ViTFeatureExtractor">ViTFeatureExtractor</a>. 
See <a href="/docs/transformers/pr_16143/en/model_doc/vit#transformers.ViTFeatureExtractor.__call__">ViTFeatureExtractor.<strong>call</strong>()</a> for details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFVisionEncoderDecoderModel.call.decoder_input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFVisionEncoderDecoderModel.call.decoder_input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_input_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a></p> <p>If <code>past_key_values</code> is used, optionally only the last <code>decoder_input_ids</code> have to be input (see <code>past_key_values</code>).</p> <p>Provide for sequence to sequence training to the decoder. Indices can be obtained using <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>. 
See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFVisionEncoderDecoderModel.call.decoder_attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFVisionEncoderDecoderModel.call.decoder_attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_attention_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. 
Causal mask will also be used by default.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFVisionEncoderDecoderModel.call.encoder_outputs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFVisionEncoderDecoderModel.call.encoder_outputs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_outputs</strong> (<code>tuple(tuple(tf.Tensor)</code>, <em>optional</em>) &#x2014; This tuple must consist of (<code>last_hidden_state</code>, <em>optional</em>: <code>hidden_states</code>, <em>optional</em>: <code>attentions</code>) <code>last_hidden_state</code> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) is a tensor of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFVisionEncoderDecoderModel.call.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFVisionEncoderDecoderModel.call.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>tuple(tuple(tf.Tensor))</code> of length <code>config.n_layers</code> with each tuple having 4 tensors of shape <code>(batch_size, num_heads, sequence_length - 1, embed_size_per_head)</code>) &#x2014; Contains precomputed key and value hidden states of the attention blocks. 
Can be used to speed up decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFVisionEncoderDecoderModel.call.decoder_inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFVisionEncoderDecoderModel.call.decoder_inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_inputs_embeds</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, target_sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>decoder_input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <code>decoder_input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFVisionEncoderDecoderModel.call.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFVisionEncoderDecoderModel.call.labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss for the decoder. Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFVisionEncoderDecoderModel.call.use_cache" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFVisionEncoderDecoderModel.call.use_cache"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a 
id="transformers.TFVisionEncoderDecoderModel.call.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFVisionEncoderDecoderModel.call.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFVisionEncoderDecoderModel.call.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFVisionEncoderDecoderModel.call.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFVisionEncoderDecoderModel.call.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFVisionEncoderDecoderModel.call.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, the model will return a <code>Seq2SeqLMOutput</code> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFVisionEncoderDecoderModel.call.training" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFVisionEncoderDecoderModel.call.training"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation). kwargs &#x2014; (<em>optional</em>) Remaining dictionary of keyword arguments. 
Keyword arguments come in two flavors:</p> <ul> <li>Without a prefix which will be input as <code>**encoder_kwargs</code> for the encoder forward function.</li> <li>With a <em>decoder_</em> prefix which will be input as <code>**decoder_kwargs</code> for the decoder forward function.</li> </ul><!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.TFVisionEncoderDecoderModel.call.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFSeq2SeqLMOutput" >transformers.modeling_tf_outputs.TFSeq2SeqLMOutput</a> or <code>tuple(tf.Tensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFSeq2SeqLMOutput" >transformers.modeling_tf_outputs.TFSeq2SeqLMOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/vision-encoder-decoder#transformers.VisionEncoderDecoderConfig" >VisionEncoderDecoderConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(n,)</code>, <em>optional</em>, where n is the number of non-masked labels, returned when <code>labels</code> is provided) — Language modeling loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) — Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>List[tf.Tensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) — List of <code>tf.Tensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(tf.Tensor)</code>, 
<em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder’s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) — Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/vision-encoder-decoder#transformers.TFVisionEncoderDecoderModel">TFVisionEncoderDecoderModel</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" 
style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoFeatureExtractor, AutoTokenizer, TFVisionEncoderDecoderModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> requests <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = AutoFeatureExtractor.from_pretrained(<span class="hljs-string">&quot;google/vit-base-patch16-224-in21k&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>decoder_tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;gpt2&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># initialize a bert2gpt2 from a pretrained BERT and GPT2 models. Note that the cross-attention layers will be randomly initialized</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFVisionEncoderDecoderModel.from_encoder_decoder_pretrained( <span class="hljs-meta">... </span> <span class="hljs-string">&quot;google/vit-base-patch16-224-in21k&quot;</span>, <span class="hljs-string">&quot;gpt2&quot;</span> <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>url = <span class="hljs-string">&quot;http://images.cocodataset.org/val2017/000000039769.jpg&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>img = Image.<span class="hljs-built_in">open</span>(requests.get(url, stream=<span class="hljs-literal">True</span>).raw) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># forward</span> <span class="hljs-meta">&gt;&gt;&gt; </span>pixel_values = feature_extractor(images=img, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>).pixel_values <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>decoder_input_ids = decoder_tokenizer(<span class="hljs-string">&quot;Linda Davis&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>).input_ids <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># training</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids, labels=decoder_input_ids) <span class="hljs-meta">&gt;&gt;&gt; </span>loss, logits = outputs.loss, outputs.logits <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># save and load from pretrained</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model.save_pretrained(<span class="hljs-string">&quot;vit-gpt2&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFVisionEncoderDecoderModel.from_pretrained(<span class="hljs-string">&quot;vit-gpt2&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># generation</span> <span class="hljs-meta">&gt;&gt;&gt; </span>generated = model.generate(pixel_values, decoder_start_token_id=model.config.decoder.bos_token_id)<!-- HTML_TAG_END --></pre></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" 
id="transformers.TFVisionEncoderDecoderModel.from_encoder_decoder_pretrained"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>from_encoder_decoder_pretrained</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFVisionEncoderDecoderModel.from_encoder_decoder_pretrained" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFVisionEncoderDecoderModel.from_encoder_decoder_pretrained"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/vision_encoder_decoder/modeling_tf_vision_encoder_decoder.py#L348" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_pretrained_model_name_or_path<span class="opacity-60">: str = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_pretrained_model_name_or_path<span class="opacity-60">: str = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*model_args<span class="opacity-60"></span></span> </span><span 
class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFVisionEncoderDecoderModel.from_encoder_decoder_pretrained.encoder_pretrained_model_name_or_path" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFVisionEncoderDecoderModel.from_encoder_decoder_pretrained.encoder_pretrained_model_name_or_path"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_pretrained_model_name_or_path</strong> (<code>str</code>, <em>optional</em>) &#x2014; Information necessary to initiate the encoder. Can be either:</p> <ul> <li>A string, the <em>model id</em> of a pretrained model hosted inside a model repo on huggingface.co. An example is <code>google/vit-base-patch16-224-in21k</code>.</li> <li>A path to a <em>directory</em> containing model weights saved using <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel.save_pretrained">save_pretrained()</a>, e.g., <code>./my_model_directory/</code>.</li> <li>A path or url to a <em>pytorch index checkpoint file</em> (e.g, <code>./pt_model/</code>). 
In this case, <code>encoder_from_pt</code> should be set to <code>True</code>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFVisionEncoderDecoderModel.from_encoder_decoder_pretrained.decoder_pretrained_model_name_or_path" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFVisionEncoderDecoderModel.from_encoder_decoder_pretrained.decoder_pretrained_model_name_or_path"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_pretrained_model_name_or_path</strong> (<code>str</code>, <em>optional</em>, defaults to <em>None</em>) &#x2014; Information necessary to initiate the decoder. Can be either:</p> <ul> <li>A string, the <em>model id</em> of a pretrained model hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like <code>bert-base-uncased</code>, or namespaced under a user or organization name, like <code>dbmdz/bert-base-german-cased</code>.</li> <li>A path to a <em>directory</em> containing model weights saved using <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel.save_pretrained">save_pretrained()</a>, e.g., <code>./my_model_directory/</code>.</li> <li>A path or url to a <em>pytorch checkpoint file</em> (e.g, <code>./pt_model/</code>). 
In this case, <code>decoder_from_pt</code> should be set to <code>True</code>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFVisionEncoderDecoderModel.from_encoder_decoder_pretrained.model_args" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFVisionEncoderDecoderModel.from_encoder_decoder_pretrained.model_args"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>model_args</strong> (remaining positional arguments, <em>optional</em>) &#x2014; All remaining positional arguments will be passed to the underlying model&#x2019;s <code>__init__</code> method.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFVisionEncoderDecoderModel.from_encoder_decoder_pretrained.kwargs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFVisionEncoderDecoderModel.from_encoder_decoder_pretrained.kwargs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>kwargs</strong> (remaining dictionary of keyword arguments, <em>optional</em>) &#x2014; Can be used to update the configuration object (after it has been loaded) and initiate the model (e.g., <code>output_attentions=True</code>).</p> <ul> <li>To update the encoder configuration, use the prefix <em>encoder_</em> for each configuration parameter.</li> <li>To update the decoder configuration, use the prefix <em>decoder_</em> for each configuration parameter.</li> <li>To update the parent model configuration, do not use a prefix for each configuration parameter.</li> </ul> <p>Behaves differently depending on whether a <code>config</code> is provided or automatically loaded.<!-- HTML_TAG_END
--> </span></span> </li></ul> </div></div> <p>Instantiate an encoder and a decoder from one or two base classes of the library from pretrained model checkpoints.</p> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TFVisionEncoderDecoderModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># initialize a vit-bert from a pretrained ViT and a pretrained BERT model. Note that the cross-attention layers will be randomly initialized</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFVisionEncoderDecoderModel.from_encoder_decoder_pretrained( <span class="hljs-meta">... </span> <span class="hljs-string">&quot;google/vit-base-patch16-224-in21k&quot;</span>, <span class="hljs-string">&quot;bert-base-uncased&quot;</span> <span class="hljs-meta">... 
</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># saving model after fine-tuning</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model.save_pretrained(<span class="hljs-string">&quot;./vit-bert&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># load fine-tuned model</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFVisionEncoderDecoderModel.from_pretrained(<span class="hljs-string">&quot;./vit-bert&quot;</span>)<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.FlaxVisionEncoderDecoderModel" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxVisionEncoderDecoderModel"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>FlaxVisionEncoderDecoderModel </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxVisionEncoderDecoderModel"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">FlaxVisionEncoderDecoderModel</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.FlaxVisionEncoderDecoderModel" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxVisionEncoderDecoderModel"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 
8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/vision_encoder_decoder/modeling_flax_vision_encoder_decoder.py#L268" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60">: VisionEncoderDecoderConfig</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_shape<span class="opacity-60">: typing.Optional[typing.Tuple] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">seed<span class="opacity-60">: int = 0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dtype<span class="opacity-60">: dtype = &lt;class &#39;jax._src.numpy.lax_numpy.float32&#39;&gt;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxVisionEncoderDecoderModel.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxVisionEncoderDecoderModel.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/vision-encoder-decoder#transformers.VisionEncoderDecoderConfig">VisionEncoderDecoderConfig</a>) 
&#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxVisionEncoderDecoderModel.dtype" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxVisionEncoderDecoderModel.dtype"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>dtype</strong> (<code>jax.numpy.dtype</code>, <em>optional</em>, defaults to <code>jax.numpy.float32</code>) &#x2014; The data type of the computation. Can be one of <code>jax.numpy.float32</code>, <code>jax.numpy.float16</code> (on GPUs) and <code>jax.numpy.bfloat16</code> (on TPUs).</p> <p>This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If specified, all the computation will be performed with the given <code>dtype</code>.</p> <p><strong>Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.</strong></p> <p>If you wish to change the dtype of the model parameters, see <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.to_fp16">to_fp16()</a> and <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.to_bf16">to_bf16()</a>.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>This class can be used to initialize an image-to-text-sequence model with any pretrained vision autoencoding model as the encoder and any pretrained text autoregressive model as the decoder. The encoder is loaded via the <code>from_pretrained()</code> function and the decoder is loaded via the <code>from_pretrained()</code> function. Cross-attention layers are automatically added to the decoder and should be fine-tuned on a downstream generative task, like image captioning.</p> <p>The effectiveness of initializing sequence-to-sequence models with pretrained checkpoints for sequence generation tasks was shown in <a href="https://arxiv.org/abs/1907.12461" rel="nofollow">Leveraging Pre-trained Checkpoints for Sequence Generation Tasks</a> by Sascha Rothe, Shashi Narayan, Aliaksei Severyn.
</p> <p>Additionally, in <a href="https://arxiv.org/abs/2109.10282" rel="nofollow">TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models</a> it is shown how leveraging large pretrained vision models for optical character recognition (OCR) yields a significant performance improvement.</p> <p>After such a Vision-Encoder-Text-Decoder model has been trained/fine-tuned, it can be saved/loaded just like any other model (see the examples for more information).</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel">FlaxPreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads etc.)</p> <p>This model is also a Flax Linen <a href="https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html" rel="nofollow">flax.nn.Module</a> subclass. Use it as a regular Flax Module and refer to the Flax documentation for all matters related to general usage and behavior.</p> <p><a href="/docs/transformers/pr_16143/en/model_doc/vision-encoder-decoder#transformers.FlaxVisionEncoderDecoderModel">FlaxVisionEncoderDecoderModel</a> is a generic model class that will be instantiated as a transformer architecture with the module (flax.nn.Module) of one of the base vision model classes of the library as encoder module and another one as decoder module when created with the <code>FlaxAutoModel.from_pretrained()</code> class method for the encoder and the <code>FlaxAutoModelForCausalLM.from_pretrained()</code> class method for the decoder.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxVisionEncoderDecoderModel.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.FlaxVisionEncoderDecoderModel.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxVisionEncoderDecoderModel.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg"
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/vision_encoder_decoder/modeling_flax_vision_encoder_decoder.py#L583" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pixel_values<span class="opacity-60">: ndarray</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_input_ids<span class="opacity-60">: typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_attention_mask<span class="opacity-60">: typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_position_ids<span class="opacity-60">: typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">train<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">params<span class="opacity-60">: dict = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dropout_rng<span class="opacity-60">: PRNGKey = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a 
href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput" >transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxVisionEncoderDecoderModel.__call__.pixel_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxVisionEncoderDecoderModel.__call__.pixel_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pixel_values</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, num_channels, height, width)</code>) &#x2014; Pixel values. Pixel values can be obtained using the vision model&#x2019;s feature extractor. For example, using <a href="/docs/transformers/pr_16143/en/model_doc/vit#transformers.ViTFeatureExtractor">ViTFeatureExtractor</a>. 
See <a href="/docs/transformers/pr_16143/en/model_doc/vit#transformers.ViTFeatureExtractor.__call__">ViTFeatureExtractor.<strong>call</strong>()</a> for details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxVisionEncoderDecoderModel.__call__.decoder_input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxVisionEncoderDecoderModel.__call__.decoder_input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>. 
See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxVisionEncoderDecoderModel.__call__.decoder_attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxVisionEncoderDecoderModel.__call__.decoder_attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxVisionEncoderDecoderModel.__call__.decoder_position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxVisionEncoderDecoderModel.__call__.decoder_position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_position_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each decoder input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.decoder.max_position_embeddings - 1]</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxVisionEncoderDecoderModel.__call__.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxVisionEncoderDecoderModel.__call__.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxVisionEncoderDecoderModel.__call__.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxVisionEncoderDecoderModel.__call__.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxVisionEncoderDecoderModel.__call__.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxVisionEncoderDecoderModel.__call__.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, the model will return a <code>FlaxSeq2SeqLMOutput</code> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.FlaxVisionEncoderDecoderModel.__call__.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput" >transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput" >transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/vision-encoder-decoder#transformers.VisionEncoderDecoderConfig" >VisionEncoderDecoderConfig</a>) and inputs.</p> <ul> <li> <p><strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) — Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(jnp.ndarray))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) — Tuple of <code>tuple(jnp.ndarray)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the 
cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder’s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) — Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/vision-encoder-decoder#transformers.FlaxVisionEncoderDecoderModel">FlaxVisionEncoderDecoderModel</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards 
instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> FlaxVisionEncoderDecoderModel, ViTFeatureExtractor, GPT2Tokenizer <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> requests <span class="hljs-meta">&gt;&gt;&gt; </span>url = <span class="hljs-string">&quot;http://images.cocodataset.org/val2017/000000039769.jpg&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>image = Image.<span class="hljs-built_in">open</span>(requests.get(url, stream=<span class="hljs-literal">True</span>).raw) <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = ViTFeatureExtractor.from_pretrained(<span class="hljs-string">&quot;google/vit-base-patch16-224-in21k&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># load output tokenizer</span> <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer_output = GPT2Tokenizer.from_pretrained(<span class="hljs-string">&quot;gpt2&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># initialize a vit-gpt2 from pretrained ViT and GPT2 models. Note that the cross-attention layers will be randomly initialized</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained( <span class="hljs-meta">... </span> <span class="hljs-string">&quot;google/vit-base-patch16-224-in21k&quot;</span>, <span class="hljs-string">&quot;gpt2&quot;</span> <span class="hljs-meta">... 
</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>pixel_values = feature_extractor(images=image, return_tensors=<span class="hljs-string">&quot;np&quot;</span>).pixel_values <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># use GPT2&#x27;s eos_token as the pad as well as eos token</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model.config.eos_token_id = model.config.decoder.eos_token_id <span class="hljs-meta">&gt;&gt;&gt; </span>model.config.pad_token_id = model.config.eos_token_id <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># generation</span> <span class="hljs-meta">&gt;&gt;&gt; </span>sequences = model.generate(pixel_values, num_beams=<span class="hljs-number">4</span>, max_length=<span class="hljs-number">12</span>).sequences <span class="hljs-meta">&gt;&gt;&gt; </span>captions = tokenizer_output.batch_decode(sequences, skip_special_tokens=<span class="hljs-literal">True</span>)<!-- HTML_TAG_END --></pre></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>from_encoder_decoder_pretrained</span></h4><!-- HTML_TAG_END --> <a id="transformers.FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 
56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/vision_encoder_decoder/modeling_flax_vision_encoder_decoder.py#L708" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_pretrained_model_name_or_path<span class="opacity-60">: typing.Union[str, os.PathLike, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_pretrained_model_name_or_path<span class="opacity-60">: typing.Union[str, os.PathLike, NoneType] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*model_args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained.encoder_pretrained_model_name_or_path" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained.encoder_pretrained_model_name_or_path"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_pretrained_model_name_or_path</strong> (<code>Union[str, os.PathLike]</code>, <em>optional</em>) &#x2014; Information necessary to initiate the encoder. Can be either:</p> <ul> <li>A string, the <em>model id</em> of a pretrained model hosted inside a model repo on huggingface.co. 
An example is <code>google/vit-base-patch16-224-in21k</code>.</li> <li>A path to a <em>directory</em> containing model weights saved using <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.save_pretrained">save_pretrained()</a>, e.g., <code>./my_model_directory/</code>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained.decoder_pretrained_model_name_or_path" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained.decoder_pretrained_model_name_or_path"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_pretrained_model_name_or_path</strong> (<code>Union[str, os.PathLike]</code>, <em>optional</em>, defaults to <code>None</code>) &#x2014; Information necessary to initiate the decoder. Can be either:</p> <ul> <li>A string, the <em>model id</em> of a pretrained model hosted inside a model repo on huggingface.co. 
Valid model ids can be located at the root-level, like <code>bert-base-uncased</code>, or namespaced under a user or organization name, like <code>dbmdz/bert-base-german-cased</code>.</li> <li>A path to a <em>directory</em> containing model weights saved using <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.save_pretrained">save_pretrained()</a>, e.g., <code>./my_model_directory/</code>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained.model_args" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained.model_args"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>model_args</strong> (remaining positional arguments, <em>optional</em>) &#x2014; All remaining positional arguments will be passed to the underlying model&#x2019;s <code>__init__</code> method.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained.kwargs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained.kwargs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>kwargs</strong> (remaining dictionary of keyword arguments, <em>optional</em>) &#x2014; Can be used to update the configuration object (after it has been loaded) and initiate the model (e.g., <code>output_attentions=True</code>).</p> <ul> <li>To update the encoder configuration, use the prefix <em>encoder_</em> for each
configuration parameter.</li> <li>To update the decoder configuration, use the prefix <em>decoder_</em> for each configuration parameter.</li> <li>To update the parent model configuration, do not use a prefix for each configuration parameter.</li> </ul> <p>Behaves differently depending on whether a <code>config</code> is provided or automatically loaded.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Instantiate an encoder and a decoder from one or two base classes of the library from pretrained model checkpoints.</p> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> FlaxVisionEncoderDecoderModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># initialize a vit-gpt2 from a pretrained ViT and a pretrained GPT2 model. Note that the cross-attention layers will be randomly initialized</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained( <span class="hljs-meta">... </span> <span class="hljs-string">&quot;google/vit-base-patch16-224-in21k&quot;</span>, <span class="hljs-string">&quot;gpt2&quot;</span> <span class="hljs-meta">... 
</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># saving model after fine-tuning</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model.save_pretrained(<span class="hljs-string">&quot;./vit-gpt2&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># load fine-tuned model</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxVisionEncoderDecoderModel.from_pretrained(<span class="hljs-string">&quot;./vit-gpt2&quot;</span>)<!-- HTML_TAG_END --></pre></div></div></div> <script type="module" data-hydrate="e0pn4c"> import { start } from "/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"; start({ target: document.querySelector('[data-hydrate="e0pn4c"]').parentNode, paths: {"base":"/docs/transformers/pr_16143/en","assets":"/docs/transformers/pr_16143/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"), import("/docs/transformers/pr_16143/en/_app/pages/model_doc/vision-encoder-decoder.mdx-b5cd5518.js") ], params: {} } }); </script>
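<p>As a complement to the <code>dtype</code> note in the parameters above, the following minimal sketch (reusing the <code>./vit-gpt2</code> directory saved in the example above) shows the difference between running the computation in <code>bfloat16</code> and actually casting the parameters with <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.to_bf16">to_bf16()</a>; treat it as an illustration rather than a canonical recipe.</p> <div class="code-block relative"><pre>&gt;&gt;&gt; import jax.numpy as jnp
&gt;&gt;&gt; from transformers import FlaxVisionEncoderDecoderModel

&gt;&gt;&gt; # dtype only sets the dtype used for the computation; the parameters stay in float32
&gt;&gt;&gt; model = FlaxVisionEncoderDecoderModel.from_pretrained(&quot;./vit-gpt2&quot;, dtype=jnp.bfloat16)

&gt;&gt;&gt; # cast the parameters themselves to bfloat16 as well
&gt;&gt;&gt; model.params = model.to_bf16(model.params)</pre></div>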
207
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en
hf_public_repos/doc-build-dev/transformers/pr_16143/en/model_doc/mbart.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;mbart-and-mbart50&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;overview-of-mbart&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;training-of-mbart&quot;,&quot;title&quot;:&quot;Training of MBart&quot;}],&quot;title&quot;:&quot;Overview of MBart&quot;},{&quot;local&quot;:&quot;overview-of-mbart50&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;training-of-mbart50&quot;,&quot;title&quot;:&quot;Training of MBart-50&quot;}],&quot;title&quot;:&quot;Overview of MBart-50&quot;},{&quot;local&quot;:&quot;transformers.MBartConfig&quot;,&quot;title&quot;:&quot;MBartConfig&quot;},{&quot;local&quot;:&quot;transformers.MBartTokenizer&quot;,&quot;title&quot;:&quot;MBartTokenizer&quot;},{&quot;local&quot;:&quot;transformers.MBartTokenizerFast&quot;,&quot;title&quot;:&quot;MBartTokenizerFast&quot;},{&quot;local&quot;:&quot;transformers.MBart50Tokenizer&quot;,&quot;title&quot;:&quot;MBart50Tokenizer&quot;},{&quot;local&quot;:&quot;transformers.MBart50TokenizerFast&quot;,&quot;title&quot;:&quot;MBart50TokenizerFast&quot;},{&quot;local&quot;:&quot;transformers.MBartModel&quot;,&quot;title&quot;:&quot;MBartModel&quot;},{&quot;local&quot;:&quot;transformers.MBartForConditionalGeneration&quot;,&quot;title&quot;:&quot;MBartForConditionalGeneration&quot;},{&quot;local&quot;:&quot;transformers.MBartForQuestionAnswering&quot;,&quot;title&quot;:&quot;MBartForQuestionAnswering&quot;},{&quot;local&quot;:&quot;transformers.MBartForSequenceClassification&quot;,&quot;title&quot;:&quot;MBartForSequenceClassification&quot;},{&quot;local&quot;:&quot;transformers.MBartForCausalLM&quot;,&quot;title&quot;:&quot;MBartForCausalLM&quot;},{&quot;local&quot;:&quot;transformers.TFMBartModel&quot;,&quot;title&quot;:&quot;TFMBartModel&quot;},{&quot;local&quot;:&quot;transformers.TFMBartForConditionalGeneration&quot;,&quot;title&quot;:&quot;TFMBartForConditionalGeneration&quot;},{&quot;local&quot;:&quot;transformers.FlaxMBartModel&quot;,&quot;title&quot;:&quot;FlaxMBartModel&quot;},{&quot;local&quot;:&quot;transformers.FlaxMBartForConditionalGeneration&quot;,&quot;title&quot;:&quot;FlaxMBartForConditionalGeneration&quot;},{&quot;local&quot;:&quot;transformers.FlaxMBartForSequenceClassification&quot;,&quot;title&quot;:&quot;FlaxMBartForSequenceClassification&quot;},{&quot;local&quot;:&quot;transformers.FlaxMBartForQuestionAnswering&quot;,&quot;title&quot;:&quot;FlaxMBartForQuestionAnswering&quot;}],&quot;title&quot;:&quot;MBart and MBart-50&quot;}" data-svelte="svelte-1phssyn"> <link rel="stylesheet" href="/docs/transformers/pr_16143/en/_app/assets/pages/__layout.svelte-a5c8879b.css"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/vendor-4833417e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/paths-4b3c6e7e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/model_doc/mbart.mdx-4e986a75.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Tip-fffd6df1.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Docstring-4f315ed9.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/IconCopyLink-4b81c553.js"> <link rel="modulepreload" 
href="/docs/transformers/pr_16143/en/_app/chunks/CodeBlock-6a3d1b46.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CopyButton-dacfbfaf.js"> <h1 class="relative group"><a id="mbart-and-mbart50" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#mbart-and-mbart50"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>MBart and MBart-50 </span></h1> <p><strong>DISCLAIMER:</strong> If you see something strange, file a <a href="https://github.com/huggingface/transformers/issues/new?assignees=&labels=&template=bug-report.md&title" rel="nofollow">Github Issue</a> and assign @patrickvonplaten</p> <h2 class="relative group"><a id="overview-of-mbart" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#overview-of-mbart"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Overview of MBart </span></h2> <p>The MBart model was presented in <a href="https://arxiv.org/abs/2001.08210" rel="nofollow">Multilingual Denoising Pre-training for Neural Machine Translation</a> by Yinhan Liu, Jiatao Gu, Naman Goyal, Xian Li, Sergey Edunov Marjan Ghazvininejad, Mike Lewis, Luke Zettlemoyer.</p> <p>According to the abstract, MBART is a sequence-to-sequence denoising auto-encoder pretrained on large-scale monolingual corpora in many languages using the BART objective. mBART is one of the first methods for pretraining a complete sequence-to-sequence model by denoising full texts in multiple languages, while previous approaches have focused only on the encoder, decoder, or reconstructing parts of the text.</p> <p>This model was contributed by <a href="https://huggingface.co/valhalla" rel="nofollow">valhalla</a>. 
The Authors’ code can be found <a href="https://github.com/pytorch/fairseq/tree/master/examples/mbart" rel="nofollow">here</a></p> <h3 class="relative group"><a id="training-of-mbart" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#training-of-mbart"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Training of MBart </span></h3> <p>MBart is a multilingual encoder-decoder (sequence-to-sequence) model primarily intended for translation task. As the model is multilingual it expects the sequences in a different format. A special language id token is added in both the source and target text. The source text format is <code>X [eos, src_lang_code]</code> where <code>X</code> is the source text. The target text format is <code>[tgt_lang_code] X [eos]</code>. <code>bos</code> is never used.</p> <p>The regular <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__"><strong>call</strong>()</a> will encode source text format, and it should be wrapped inside the context manager <a href="/docs/transformers/pr_16143/en/model_doc/mbart#transformers.MBartTokenizer.as_target_tokenizer">as_target_tokenizer()</a> to encode target text format.</p> <ul><li>Supervised training</li></ul> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MBartForConditionalGeneration, MBartTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = 
MBartTokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/mbart-large-en-ro&quot;</span>, src_lang=<span class="hljs-string">&quot;en_XX&quot;</span>, tgt_lang=<span class="hljs-string">&quot;ro_RO&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>example_english_phrase = <span class="hljs-string">&quot;UN Chief Says There Is No Military Solution in Syria&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>expected_translation_romanian = <span class="hljs-string">&quot;Şeful ONU declară că nu există o soluţie militară în Siria&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(example_english_phrase, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">with</span> tokenizer.as_target_tokenizer(): <span class="hljs-meta">... </span> labels = tokenizer(expected_translation_romanian, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = MBartForConditionalGeneration.from_pretrained(<span class="hljs-string">&quot;facebook/mbart-large-en-ro&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># forward pass</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model(**inputs, labels=labels[<span class="hljs-string">&quot;input_ids&quot;</span>])<!-- HTML_TAG_END --></pre></div> <ul><li><p>Generation</p> <p>While generating the target text, set the <code>decoder_start_token_id</code> to the target language id. The following example shows how to translate English to Romanian using the <em>facebook/mbart-large-en-ro</em> model.</p></li></ul> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MBartForConditionalGeneration, MBartTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MBartTokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/mbart-large-en-ro&quot;</span>, src_lang=<span class="hljs-string">&quot;en_XX&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = MBartForConditionalGeneration.from_pretrained(<span class="hljs-string">&quot;facebook/mbart-large-en-ro&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>article = <span class="hljs-string">&quot;UN Chief Says There Is No Military Solution in Syria&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(article, return_tensors=<span
class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>translated_tokens = model.generate(**inputs, decoder_start_token_id=tokenizer.lang_code_to_id[<span class="hljs-string">&quot;ro_RO&quot;</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.batch_decode(translated_tokens, skip_special_tokens=<span class="hljs-literal">True</span>)[<span class="hljs-number">0</span>] <span class="hljs-string">&quot;Şeful ONU declară că nu există o soluţie militară în Siria&quot;</span><!-- HTML_TAG_END --></pre></div> <h2 class="relative group"><a id="overview-of-mbart50" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#overview-of-mbart50"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Overview of MBart-50 </span></h2> <p>MBart-50 was introduced in the <a href="https://arxiv.org/abs/2008.00401" rel="nofollow">Multilingual Translation with Extensible Multilingual Pretraining and Finetuning</a> paper by Yuqing Tang, Chau Tran, Xian Li, Peng-Jen Chen, Naman Goyal, Vishrav Chaudhary, Jiatao Gu, Angela Fan. MBart-50 is created using the original <em>mbart-large-cc25</em> checkpoint by extendeding its embedding layers with randomly initialized vectors for an extra set of 25 language tokens and then pretrained on 50 languages.</p> <p>According to the abstract</p> <p><em>Multilingual translation models can be created through multilingual finetuning. Instead of finetuning on one direction, a pretrained model is finetuned on many directions at the same time. It demonstrates that pretrained models can be extended to incorporate additional languages without loss of performance. 
Multilingual finetuning improves on average 1 BLEU over the strongest baselines (being either multilingual from scratch or bilingual finetuning) while improving 9.3 BLEU on average over bilingual baselines from scratch.</em></p> <h3 class="relative group"><a id="training-of-mbart50" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#training-of-mbart50"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Training of MBart-50 </span></h3> <p>The text format for MBart-50 is slightly different from mBART. For MBart-50 the language id token is used as a prefix for both source and target text i.e the text format is <code>[lang_code] X [eos]</code>, where <code>lang_code</code> is source language id for source text and target language id for target text, with <code>X</code> being the source or target text respectively.</p> <p>MBart-50 has its own tokenizer <a href="/docs/transformers/pr_16143/en/model_doc/mbart#transformers.MBart50Tokenizer">MBart50Tokenizer</a>.</p> <ul><li>Supervised training</li></ul> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MBartForConditionalGeneration, MBart50TokenizerFast model = MBartForConditionalGeneration.from_pretrained(<span class="hljs-string">&quot;facebook/mbart-large-50&quot;</span>) tokenizer = MBart50TokenizerFast.from_pretrained(<span class="hljs-string">&quot;facebook/mbart-large-50&quot;</span>, src_lang=<span class="hljs-string">&quot;en_XX&quot;</span>, tgt_lang=<span 
class="hljs-string">&quot;ro_RO&quot;</span>) src_text = <span class="hljs-string">&quot; UN Chief Says There Is No Military Solution in Syria&quot;</span> tgt_text = <span class="hljs-string">&quot;Şeful ONU declară că nu există o soluţie militară în Siria&quot;</span> model_inputs = tokenizer(src_text, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-keyword">with</span> tokenizer.as_target_tokenizer(): labels = tokenizer(tgt_text, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids model(**model_inputs, labels=labels) <span class="hljs-comment"># forward pass</span><!-- HTML_TAG_END --></pre></div> <ul><li><p>Generation</p> <p>To generate using the mBART-50 multilingual translation models, <code>eos_token_id</code> is used as the <code>decoder_start_token_id</code> and the target language id is forced as the first generated token. To force the target language id as the first generated token, pass the <em>forced_bos_token_id</em> parameter to the <em>generate</em> method. The following example shows how to translate between Hindi to French and Arabic to English using the <em>facebook/mbart-50-large-many-to-many</em> checkpoint.</p></li></ul> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MBartForConditionalGeneration, MBart50TokenizerFast article_hi = <span class="hljs-string">&quot;संयुक्त राष्ट्र के प्रमुख का कहना है कि सीरिया में कोई सैन्य समाधान नहीं है&quot;</span> article_ar = <span class="hljs-string">&quot;الأمين العام للأمم المتحدة يقول إنه لا يوجد حل عسكري في سوريا.&quot;</span> model = MBartForConditionalGeneration.from_pretrained(<span class="hljs-string">&quot;facebook/mbart-large-50-many-to-many-mmt&quot;</span>) tokenizer = MBart50TokenizerFast.from_pretrained(<span class="hljs-string">&quot;facebook/mbart-large-50-many-to-many-mmt&quot;</span>) <span class="hljs-comment"># translate Hindi to French</span> tokenizer.src_lang = <span class="hljs-string">&quot;hi_IN&quot;</span> encoded_hi = tokenizer(article_hi, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) generated_tokens = model.generate(**encoded_hi, forced_bos_token_id=tokenizer.lang_code_to_id[<span class="hljs-string">&quot;fr_XX&quot;</span>]) tokenizer.batch_decode(generated_tokens, skip_special_tokens=<span 
class="hljs-literal">True</span>) <span class="hljs-comment"># =&gt; &quot;Le chef de l &#x27;ONU affirme qu &#x27;il n &#x27;y a pas de solution militaire en Syria.&quot;</span> <span class="hljs-comment"># translate Arabic to English</span> tokenizer.src_lang = <span class="hljs-string">&quot;ar_AR&quot;</span> encoded_ar = tokenizer(article_ar, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) generated_tokens = model.generate(**encoded_ar, forced_bos_token_id=tokenizer.lang_code_to_id[<span class="hljs-string">&quot;en_XX&quot;</span>]) tokenizer.batch_decode(generated_tokens, skip_special_tokens=<span class="hljs-literal">True</span>) <span class="hljs-comment"># =&gt; &quot;The Secretary-General of the United Nations says there is no military solution in Syria.&quot;</span><!-- HTML_TAG_END --></pre></div> <h2 class="relative group"><a id="transformers.MBartConfig" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartConfig"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>MBartConfig </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.MBartConfig"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">MBartConfig</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.MBartConfig" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.MBartConfig"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/mbart/configuration_mbart.py#L35" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">vocab_size<span class="opacity-60"> = 50265</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_position_embeddings<span class="opacity-60"> = 1024</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_layers<span class="opacity-60"> = 12</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_ffn_dim<span class="opacity-60"> = 4096</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_attention_heads<span class="opacity-60"> = 16</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_layers<span class="opacity-60"> = 12</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_ffn_dim<span class="opacity-60"> = 4096</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_attention_heads<span class="opacity-60"> = 16</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_layerdrop<span class="opacity-60"> = 0.0</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_layerdrop<span class="opacity-60"> = 0.0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_cache<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">is_encoder_decoder<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">activation_function<span class="opacity-60"> = &#39;gelu&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">d_model<span class="opacity-60"> = 1024</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dropout<span class="opacity-60"> = 0.1</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_dropout<span class="opacity-60"> = 0.0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">activation_dropout<span class="opacity-60"> = 0.0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">init_std<span class="opacity-60"> = 0.02</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">classifier_dropout<span class="opacity-60"> = 0.0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scale_embedding<span class="opacity-60"> = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_token_id<span class="opacity-60"> = 1</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">bos_token_id<span class="opacity-60"> = 0</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eos_token_id<span class="opacity-60"> = 2</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">forced_eos_token_id<span class="opacity-60"> = 2</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartConfig.vocab_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartConfig.vocab_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 
79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 50265) &#x2014; Vocabulary size of the MBART model. Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed when calling <a href="/docs/transformers/pr_16143/en/model_doc/mbart#transformers.MBartModel">MBartModel</a> or <a href="/docs/transformers/pr_16143/en/model_doc/mbart#transformers.TFMBartModel">TFMBartModel</a>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartConfig.d_model" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartConfig.d_model"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>d_model</strong> (<code>int</code>, <em>optional</em>, defaults to 1024) &#x2014; Dimensionality of the layers and the pooler layer.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartConfig.encoder_layers" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartConfig.encoder_layers"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of encoder layers.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a 
id="transformers.MBartConfig.decoder_layers" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartConfig.decoder_layers"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of decoder layers.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartConfig.encoder_attention_heads" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartConfig.encoder_attention_heads"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 16) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartConfig.decoder_attention_heads" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartConfig.decoder_attention_heads"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 
0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 16) &#x2014; Number of attention heads for each attention layer in the Transformer decoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartConfig.decoder_ffn_dim" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartConfig.decoder_ffn_dim"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_ffn_dim</strong> (<code>int</code>, <em>optional</em>, defaults to 4096) &#x2014; Dimensionality of the &#x201C;intermediate&#x201D; (often named feed-forward) layer in decoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartConfig.encoder_ffn_dim" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartConfig.encoder_ffn_dim"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_ffn_dim</strong> (<code>int</code>, <em>optional</em>, defaults to 4096) &#x2014; Dimensionality of the &#x201C;intermediate&#x201D; (often named feed-forward) layer in encoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartConfig.activation_function" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full" href="#transformers.MBartConfig.activation_function"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>activation_function</strong> (<code>str</code> or <code>function</code>, <em>optional</em>, defaults to <code>&quot;gelu&quot;</code>) &#x2014; The non-linear activation function (function or string) in the encoder and pooler. If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;silu&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartConfig.dropout" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartConfig.dropout"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartConfig.attention_dropout" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartConfig.attention_dropout"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 
0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout ratio for the attention probabilities.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartConfig.activation_dropout" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartConfig.activation_dropout"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>activation_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout ratio for activations inside the fully connected layer.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartConfig.classifier_dropout" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartConfig.classifier_dropout"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>classifier_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout ratio for classifier.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartConfig.max_position_embeddings" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.MBartConfig.max_position_embeddings"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_position_embeddings</strong> (<code>int</code>, <em>optional</em>, defaults to 1024) &#x2014; The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartConfig.init_std" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartConfig.init_std"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>init_std</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices. encoder_layerdrop &#x2014; (<code>float</code>, <em>optional</em>, defaults to 0.0): The LayerDrop probability for the encoder. See the [LayerDrop paper](see <a href="https://arxiv.org/abs/1909.11556" rel="nofollow">https://arxiv.org/abs/1909.11556</a>) for more details. decoder_layerdrop &#x2014; (<code>float</code>, <em>optional</em>, defaults to 0.0): The LayerDrop probability for the decoder. 
See the <a href="https://arxiv.org/abs/1909.11556" rel="nofollow">LayerDrop paper</a> for more details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartConfig.scale_embedding" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartConfig.scale_embedding"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>scale_embedding</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Scale embeddings by dividing by sqrt(d_model).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartConfig.use_cache" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartConfig.use_cache"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not the model should return the last key/values attentions (not used by all models).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartConfig.forced_eos_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartConfig.forced_eos_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 
11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>forced_eos_token_id</strong> (<code>int</code>, <em>optional</em>, defaults to 2) &#x2014; The id of the token to force as the last generated token when <code>max_length</code> is reached. Usually set to <code>eos_token_id</code>.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>This is the configuration class to store the configuration of a <a href="/docs/transformers/pr_16143/en/model_doc/mbart#transformers.MBartModel">MBartModel</a>. It is used to instantiate an MBART model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the MBART <a href="https://huggingface.co/facebook/mbart-large-cc25" rel="nofollow">facebook/mbart-large-cc25</a> architecture.</p> <p>Configuration objects inherit from <a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a> and can be used to control the model outputs. Read the documentation from <a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a> for more information.</p> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MBartModel, MBartConfig <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a MBART facebook/mbart-large-cc25 style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = MBartConfig() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a model from the facebook/mbart-large-cc25 style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = MBartModel(configuration) <span 
class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = model.config<!-- HTML_TAG_END --></pre></div></div> <h2 class="relative group"><a id="transformers.MBartTokenizer" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartTokenizer"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>MBartTokenizer </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.MBartTokenizer"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">MBartTokenizer</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.MBartTokenizer" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.MBartTokenizer"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/mbart/tokenization_mbart.py#L50" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">vocab_file<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">bos_token<span class="opacity-60"> = &#39;&lt;s&gt;&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eos_token<span class="opacity-60"> = &#39;&lt;/s&gt;&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">sep_token<span class="opacity-60"> = &#39;&lt;/s&gt;&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cls_token<span class="opacity-60"> = &#39;&lt;s&gt;&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">unk_token<span class="opacity-60"> = &#39;&lt;unk&gt;&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_token<span class="opacity-60"> = &#39;&lt;pad&gt;&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">mask_token<span class="opacity-60"> = &#39;&lt;mask&gt;&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tokenizer_file<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">src_lang<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tgt_lang<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">sp_model_kwargs<span class="opacity-60">: typing.Union[typing.Dict[str, typing.Any], NoneType] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">additional_special_tokens<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Construct an MBART tokenizer.</p> <p>Adapted from <a 
href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaTokenizer">RobertaTokenizer</a> and <a href="/docs/transformers/pr_16143/en/model_doc/xlnet#transformers.XLNetTokenizer">XLNetTokenizer</a>. Based on <a href="https://github.com/google/sentencepiece" rel="nofollow">SentencePiece</a>.</p> <p>The tokenization method is <code>&lt;tokens&gt; &lt;eos&gt; &lt;language code&gt;</code> for source language documents, and “&lt;language code&gt; &lt;tokens&gt; &lt;eos&gt;``` for target language documents.</p> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MBartTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MBartTokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/mbart-large-en-ro&quot;</span>, src_lang=<span class="hljs-string">&quot;en_XX&quot;</span>, tgt_lang=<span class="hljs-string">&quot;ro_RO&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>example_english_phrase = <span class="hljs-string">&quot; UN Chief Says There Is No Military Solution in Syria&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>expected_translation_romanian = <span class="hljs-string">&quot;Şeful ONU declară că nu există o soluţie militară în Siria&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(example_english_phrase, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">with</span> tokenizer.as_target_tokenizer(): <span class="hljs-meta">... 
</span> labels = tokenizer(expected_translation_romanian, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs[<span class="hljs-string">&quot;labels&quot;</span>] = labels[<span class="hljs-string">&quot;input_ids&quot;</span>]<!-- HTML_TAG_END --></pre></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.MBartTokenizer.as_target_tokenizer"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>as_target_tokenizer</span></h4><!-- HTML_TAG_END --> <a id="transformers.MBartTokenizer.as_target_tokenizer" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.MBartTokenizer.as_target_tokenizer"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/mbart/tokenization_mbart.py#L339" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Temporarily sets the tokenizer for encoding the targets. 
Useful for tokenizers associated with sequence-to-sequence models that need slightly different processing for the labels.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.MBartTokenizer.build_inputs_with_special_tokens"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>build_inputs_with_special_tokens</span></h4><!-- HTML_TAG_END --> <a id="transformers.MBartTokenizer.build_inputs_with_special_tokens" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.MBartTokenizer.build_inputs_with_special_tokens"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/mbart/tokenization_mbart.py#L218" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_0<span class="opacity-60">: typing.List[int]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_1<span class="opacity-60">: typing.Optional[typing.List[int]] = None</span></span> </span> 
<span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>List[int]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartTokenizer.build_inputs_with_special_tokens.token_ids_0" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartTokenizer.build_inputs_with_special_tokens.token_ids_0"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs to which the special tokens will be added.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartTokenizer.build_inputs_with_special_tokens.token_ids_1" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartTokenizer.build_inputs_with_special_tokens.token_ids_1"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.MBartTokenizer.build_inputs_with_special_tokens.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>List[int]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 
dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>List of <a href="../glossary#input-ids">input IDs</a> with the appropriate special tokens.</p> <!-- HTML_TAG_END --></p></div></div> <p>Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and adding special tokens. An MBART sequence has the following format, where <code>X</code> represents the sequence:</p> <ul><li><code>input_ids</code> (for encoder) <code>X [eos, src_lang_code]</code></li> <li><code>decoder_input_ids</code> (for decoder) <code>X [eos, tgt_lang_code]</code></li></ul> <p>BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a separator.</p></div></div> <h2 class="relative group"><a id="transformers.MBartTokenizerFast" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartTokenizerFast"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>MBartTokenizerFast </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.MBartTokenizerFast"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">MBartTokenizerFast</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.MBartTokenizerFast" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.MBartTokenizerFast"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/mbart/tokenization_mbart_fast.py#L61" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">vocab_file<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tokenizer_file<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">bos_token<span class="opacity-60"> = &#39;&lt;s&gt;&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eos_token<span class="opacity-60"> = &#39;&lt;/s&gt;&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">sep_token<span class="opacity-60"> = &#39;&lt;/s&gt;&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cls_token<span class="opacity-60"> = &#39;&lt;s&gt;&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">unk_token<span class="opacity-60"> = &#39;&lt;unk&gt;&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_token<span class="opacity-60"> = &#39;&lt;pad&gt;&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">mask_token<span class="opacity-60"> = &#39;&lt;mask&gt;&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">src_lang<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tgt_lang<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">additional_special_tokens<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black 
hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Construct a “fast” MBART tokenizer (backed by HuggingFace’s <em>tokenizers</em> library). Based on <a href="https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=BPE#models" rel="nofollow">BPE</a>.</p> <p><a href="/docs/transformers/pr_16143/en/model_doc/mbart#transformers.MBartTokenizerFast">MBartTokenizerFast</a> is a subclass of <a href="/docs/transformers/pr_16143/en/model_doc/xlm-roberta#transformers.XLMRobertaTokenizerFast">XLMRobertaTokenizerFast</a>. Refer to superclass <a href="/docs/transformers/pr_16143/en/model_doc/xlm-roberta#transformers.XLMRobertaTokenizerFast">XLMRobertaTokenizerFast</a> for usage examples and documentation concerning the initialization parameters and other methods.</p> <p>The tokenization method is <code>&lt;tokens&gt; &lt;eos&gt; &lt;language code&gt;</code> for source language documents, and <code>&lt;language code&gt; &lt;tokens&gt; &lt;eos&gt;</code> for target language documents.</p> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MBartTokenizerFast <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MBartTokenizerFast.from_pretrained( <span class="hljs-meta">... </span> <span class="hljs-string">&quot;facebook/mbart-large-en-ro&quot;</span>, src_lang=<span class="hljs-string">&quot;en_XX&quot;</span>, tgt_lang=<span class="hljs-string">&quot;ro_RO&quot;</span> <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>example_english_phrase = <span class="hljs-string">&quot; UN Chief Says There Is No Military Solution in Syria&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>expected_translation_romanian = <span class="hljs-string">&quot;Şeful ONU declară că nu există o soluţie militară în Siria&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(example_english_phrase, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">with</span> tokenizer.as_target_tokenizer(): <span class="hljs-meta">... 
</span> labels = tokenizer(expected_translation_romanian, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs[<span class="hljs-string">&quot;labels&quot;</span>] = labels[<span class="hljs-string">&quot;input_ids&quot;</span>]<!-- HTML_TAG_END --></pre></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.MBartTokenizerFast.build_inputs_with_special_tokens"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>build_inputs_with_special_tokens</span></h4><!-- HTML_TAG_END --> <a id="transformers.MBartTokenizerFast.build_inputs_with_special_tokens" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.MBartTokenizerFast.build_inputs_with_special_tokens"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/mbart/tokenization_mbart_fast.py#L163" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_0<span class="opacity-60">: typing.List[int]</span></span> </span><span class="comma 
cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_1<span class="opacity-60">: typing.Optional[typing.List[int]] = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>List[int]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartTokenizerFast.build_inputs_with_special_tokens.token_ids_0" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartTokenizerFast.build_inputs_with_special_tokens.token_ids_0"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs to which the special tokens will be added.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartTokenizerFast.build_inputs_with_special_tokens.token_ids_1" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartTokenizerFast.build_inputs_with_special_tokens.token_ids_1"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" 
id="transformers.MBartTokenizerFast.build_inputs_with_special_tokens.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>List[int]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>List of <a href="../glossary#input-ids">input IDs</a> with the appropriate special tokens.</p> <!-- HTML_TAG_END --></p></div></div> <p>Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and adding special tokens. The special tokens depend on calling set_lang.</p> <p>An MBART sequence has the following format, where <code>X</code> represents the sequence:</p> <ul><li><code>input_ids</code> (for encoder) <code>X [eos, src_lang_code]</code></li> <li><code>decoder_input_ids</code> (for decoder) <code>X [eos, tgt_lang_code]</code></li></ul> <p>BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a separator.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.MBartTokenizerFast.create_token_type_ids_from_sequences"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>create_token_type_ids_from_sequences</span></h4><!-- HTML_TAG_END --> <a id="transformers.MBartTokenizerFast.create_token_type_ids_from_sequences" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.MBartTokenizerFast.create_token_type_ids_from_sequences"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 
0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/mbart/tokenization_mbart_fast.py#L192" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_0<span class="opacity-60">: typing.List[int]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_1<span class="opacity-60">: typing.Optional[typing.List[int]] = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>List[int]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartTokenizerFast.create_token_type_ids_from_sequences.token_ids_0" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartTokenizerFast.create_token_type_ids_from_sequences.token_ids_0"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartTokenizerFast.create_token_type_ids_from_sequences.token_ids_1" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartTokenizerFast.create_token_type_ids_from_sequences.token_ids_1"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 
0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.MBartTokenizerFast.create_token_type_ids_from_sequences.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>List[int]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>List of zeros.</p> <!-- HTML_TAG_END --></p></div></div> <p>Create a mask from the two sequences passed to be used in a sequence-pair classification task. mBART does not make use of token type ids, therefore a list of zeros is returned.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.MBartTokenizerFast.set_src_lang_special_tokens"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>set_src_lang_special_tokens</span></h4><!-- HTML_TAG_END --> <a id="transformers.MBartTokenizerFast.set_src_lang_special_tokens" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.MBartTokenizerFast.set_src_lang_special_tokens"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 
79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/mbart/tokenization_mbart_fast.py#L251" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">src_lang<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Reset the special tokens to the source lang setting. No prefix and suffix=[eos, src_lang_code].</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.MBartTokenizerFast.set_tgt_lang_special_tokens"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>set_tgt_lang_special_tokens</span></h4><!-- HTML_TAG_END --> <a id="transformers.MBartTokenizerFast.set_tgt_lang_special_tokens" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.MBartTokenizerFast.set_tgt_lang_special_tokens"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 
11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/mbart/tokenization_mbart_fast.py#L266" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">lang<span class="opacity-60">: str</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Reset the special tokens to the target language setting. No prefix and suffix=[eos, tgt_lang_code].</p></div></div> <h2 class="relative group"><a id="transformers.MBart50Tokenizer" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBart50Tokenizer"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>MBart50Tokenizer </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.MBart50Tokenizer"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">MBart50Tokenizer</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.MBart50Tokenizer" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.MBart50Tokenizer"><svg class="" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/mbart50/tokenization_mbart50.py#L48" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">vocab_file<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">src_lang<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tgt_lang<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eos_token<span class="opacity-60"> = &#39;&lt;/s&gt;&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">sep_token<span class="opacity-60"> = &#39;&lt;/s&gt;&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cls_token<span class="opacity-60"> = &#39;&lt;s&gt;&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">unk_token<span class="opacity-60"> = &#39;&lt;unk&gt;&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_token<span class="opacity-60"> = &#39;&lt;pad&gt;&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">mask_token<span class="opacity-60"> = &#39;&lt;mask&gt;&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">sp_model_kwargs<span class="opacity-60">: typing.Union[typing.Dict[str, typing.Any], NoneType] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span 
class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBart50Tokenizer.vocab_file" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBart50Tokenizer.vocab_file"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>vocab_file</strong> (<code>str</code>) &#x2014; Path to the vocabulary file.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBart50Tokenizer.src_lang" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBart50Tokenizer.src_lang"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>src_lang</strong> (<code>str</code>, <em>optional</em>) &#x2014; A string representing the source language.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBart50Tokenizer.tgt_lang" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBart50Tokenizer.tgt_lang"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 
0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tgt_lang</strong> (<code>str</code>, <em>optional</em>) &#x2014; A string representing the target language.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBart50Tokenizer.eos_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBart50Tokenizer.eos_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>eos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;/s&gt;&quot;</code>) &#x2014; The end of sequence token.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBart50Tokenizer.sep_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBart50Tokenizer.sep_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>sep_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;/s&gt;&quot;</code>) &#x2014; The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. 
It is also used as the last token of a sequence built with special tokens.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBart50Tokenizer.cls_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBart50Tokenizer.cls_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cls_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;s&gt;&quot;</code>) &#x2014; The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBart50Tokenizer.unk_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBart50Tokenizer.unk_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;unk&gt;&quot;</code>) &#x2014; The unknown token. 
A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBart50Tokenizer.pad_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBart50Tokenizer.pad_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pad_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;pad&gt;&quot;</code>) &#x2014; The token used for padding, for example when batching sequences of different lengths.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBart50Tokenizer.mask_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBart50Tokenizer.mask_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>mask_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;mask&gt;&quot;</code>) &#x2014; The token used for masking values. This is the token used when training this model with masked language modeling. 
This is the token which the model will try to predict.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBart50Tokenizer.sp_model_kwargs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBart50Tokenizer.sp_model_kwargs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>sp_model_kwargs</strong> (<code>dict</code>, <em>optional</em>) &#x2014; Will be passed to the <code>SentencePieceProcessor.__init__()</code> method. The <a href="https://github.com/google/sentencepiece/tree/master/python" rel="nofollow">Python wrapper for SentencePiece</a> can be used, among other things, to set:</p> <ul> <li> <p><code>enable_sampling</code>: Enable subword regularization.</p> </li> <li> <p><code>nbest_size</code>: Sampling parameters for unigram. Invalid for BPE-Dropout.</p> <ul> <li><code>nbest_size = {0,1}</code>: No sampling is performed.</li> <li><code>nbest_size &gt; 1</code>: samples from the nbest_size results.</li> <li><code>nbest_size &lt; 0</code>: assuming that nbest_size is infinite and samples from the all hypothesis (lattice) using forward-filtering-and-backward-sampling algorithm.</li> </ul> </li> <li> <p><code>alpha</code>: Smoothing parameter for unigram sampling, and dropout probability of merge operations for BPE-dropout.</p> </li> </ul><!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Construct a MBart50 tokenizer. Based on <a href="https://github.com/google/sentencepiece" rel="nofollow">SentencePiece</a>.</p> <p>This tokenizer inherits from <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a> which contains most of the main methods. 
Users should refer to this superclass for more information regarding those methods.</p> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MBart50Tokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MBart50Tokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/mbart-large-50&quot;</span>, src_lang=<span class="hljs-string">&quot;en_XX&quot;</span>, tgt_lang=<span class="hljs-string">&quot;ro_RO&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>src_text = <span class="hljs-string">&quot; UN Chief Says There Is No Military Solution in Syria&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>tgt_text = <span class="hljs-string">&quot;Şeful ONU declară că nu există o soluţie militară în Siria&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model_inputs = tokenizer(src_text, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">with</span> tokenizer.as_target_tokenizer(): <span class="hljs-meta">... 
</span> labels = tokenizer(tgt_text, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># model(**model_inputs, labels=labels) should work</span><!-- HTML_TAG_END --></pre></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.MBart50Tokenizer.build_inputs_with_special_tokens"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>build_inputs_with_special_tokens</span></h4><!-- HTML_TAG_END --> <a id="transformers.MBart50Tokenizer.build_inputs_with_special_tokens" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.MBart50Tokenizer.build_inputs_with_special_tokens"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/mbart50/tokenization_mbart50.py#L287" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_0<span class="opacity-60">: typing.List[int]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black 
hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_1<span class="opacity-60">: typing.Optional[typing.List[int]] = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>List[int]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBart50Tokenizer.build_inputs_with_special_tokens.token_ids_0" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBart50Tokenizer.build_inputs_with_special_tokens.token_ids_0"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs to which the special tokens will be added.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBart50Tokenizer.build_inputs_with_special_tokens.token_ids_1" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBart50Tokenizer.build_inputs_with_special_tokens.token_ids_1"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.MBart50Tokenizer.build_inputs_with_special_tokens.returns"><p 
class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>List[int]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>List of <a href="../glossary#input-ids">input IDs</a> with the appropriate special tokens.</p> <!-- HTML_TAG_END --></p></div></div> <p>Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. An MBART-50 sequence has the following format, where <code>X</code> represents the sequence:</p> <ul><li><code>input_ids</code> (for encoder) <code>[src_lang_code] X [eos]</code></li> <li><code>labels</code>: (for decoder) <code>[tgt_lang_code] X [eos]</code></li></ul> <p>BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a separator.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.MBart50Tokenizer.convert_tokens_to_string"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>convert_tokens_to_string</span></h4><!-- HTML_TAG_END --> <a id="transformers.MBart50Tokenizer.convert_tokens_to_string" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.MBart50Tokenizer.convert_tokens_to_string"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" 
href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/mbart50/tokenization_mbart50.py#L236" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tokens<span class="opacity-60">: typing.List[str]</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Converts a sequence of tokens (strings for sub-words) in a single string.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.MBart50Tokenizer.get_special_tokens_mask"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>get_special_tokens_mask</span></h4><!-- HTML_TAG_END --> <a id="transformers.MBart50Tokenizer.get_special_tokens_mask" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.MBart50Tokenizer.get_special_tokens_mask"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/mbart50/tokenization_mbart50.py#L257" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 
hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_0<span class="opacity-60">: typing.List[int]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_1<span class="opacity-60">: typing.Optional[typing.List[int]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">already_has_special_tokens<span class="opacity-60">: bool = False</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>List[int]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBart50Tokenizer.get_special_tokens_mask.token_ids_0" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBart50Tokenizer.get_special_tokens_mask.token_ids_0"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBart50Tokenizer.get_special_tokens_mask.token_ids_1" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBart50Tokenizer.get_special_tokens_mask.token_ids_1"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 
11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBart50Tokenizer.get_special_tokens_mask.already_has_special_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBart50Tokenizer.get_special_tokens_mask.already_has_special_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>already_has_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the token list is already formatted with special tokens for the model.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.MBart50Tokenizer.get_special_tokens_mask.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>List[int]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.</p> <!-- HTML_TAG_END --></p></div></div> <p>Retrieve sequence ids from a token list that has no special tokens added. 
This method is called when adding special tokens using the tokenizer <code>prepare_for_model</code> method.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.MBart50Tokenizer.set_src_lang_special_tokens"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>set_src_lang_special_tokens</span></h4><!-- HTML_TAG_END --> <a id="transformers.MBart50Tokenizer.set_src_lang_special_tokens" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.MBart50Tokenizer.set_src_lang_special_tokens"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/mbart50/tokenization_mbart50.py#L348" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">src_lang<span class="opacity-60">: str</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Reset the special tokens to the source lang setting. 
prefix=[src_lang_code] and suffix=[eos].</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.MBart50Tokenizer.set_tgt_lang_special_tokens"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>set_tgt_lang_special_tokens</span></h4><!-- HTML_TAG_END --> <a id="transformers.MBart50Tokenizer.set_tgt_lang_special_tokens" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.MBart50Tokenizer.set_tgt_lang_special_tokens"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/mbart50/tokenization_mbart50.py#L354" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tgt_lang<span class="opacity-60">: str</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Reset the special tokens to the target language setting. 
prefix=[tgt_lang_code] and suffix=[eos].</p></div></div> <h2 class="relative group"><a id="transformers.MBart50TokenizerFast" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBart50TokenizerFast"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>MBart50TokenizerFast </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.MBart50TokenizerFast"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">MBart50TokenizerFast</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.MBart50TokenizerFast" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.MBart50TokenizerFast"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" 
href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/mbart50/tokenization_mbart50_fast.py#L57" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">vocab_file<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">src_lang<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tgt_lang<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tokenizer_file<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eos_token<span class="opacity-60"> = &#39;&lt;/s&gt;&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">sep_token<span class="opacity-60"> = &#39;&lt;/s&gt;&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cls_token<span class="opacity-60"> = &#39;&lt;s&gt;&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">unk_token<span class="opacity-60"> = &#39;&lt;unk&gt;&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_token<span class="opacity-60"> = &#39;&lt;pad&gt;&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">mask_token<span class="opacity-60"> = &#39;&lt;mask&gt;&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBart50TokenizerFast.vocab_file" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBart50TokenizerFast.vocab_file"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 
84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>vocab_file</strong> (<code>str</code>) &#x2014; Path to the vocabulary file.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBart50TokenizerFast.src_lang" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBart50TokenizerFast.src_lang"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>src_lang</strong> (<code>str</code>, <em>optional</em>) &#x2014; A string representing the source language.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBart50TokenizerFast.tgt_lang" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBart50TokenizerFast.tgt_lang"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tgt_lang</strong> (<code>str</code>, <em>optional</em>) &#x2014; A string representing the target language.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBart50TokenizerFast.eos_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.MBart50TokenizerFast.eos_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>eos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;/s&gt;&quot;</code>) &#x2014; The end of sequence token.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBart50TokenizerFast.sep_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBart50TokenizerFast.sep_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>sep_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;/s&gt;&quot;</code>) &#x2014; The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. 
It is also used as the last token of a sequence built with special tokens.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBart50TokenizerFast.cls_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBart50TokenizerFast.cls_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cls_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;s&gt;&quot;</code>) &#x2014; The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBart50TokenizerFast.unk_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBart50TokenizerFast.unk_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;unk&gt;&quot;</code>) &#x2014; The unknown token. 
A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBart50TokenizerFast.pad_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBart50TokenizerFast.pad_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pad_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;pad&gt;&quot;</code>) &#x2014; The token used for padding, for example when batching sequences of different lengths.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBart50TokenizerFast.mask_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBart50TokenizerFast.mask_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>mask_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;mask&gt;&quot;</code>) &#x2014; The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Construct a “fast” MBART tokenizer for mBART-50 (backed by HuggingFace’s <em>tokenizers</em> library). Based on <a href="https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=BPE#models" rel="nofollow">BPE</a>.</p> <p>This tokenizer inherits from <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast">PreTrainedTokenizerFast</a> which contains most of the main methods. 
Users should refer to this superclass for more information regarding those methods.</p> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MBart50TokenizerFast <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MBart50TokenizerFast.from_pretrained(<span class="hljs-string">&quot;facebook/mbart-large-50&quot;</span>, src_lang=<span class="hljs-string">&quot;en_XX&quot;</span>, tgt_lang=<span class="hljs-string">&quot;ro_RO&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>src_text = <span class="hljs-string">&quot; UN Chief Says There Is No Military Solution in Syria&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>tgt_text = <span class="hljs-string">&quot;Şeful ONU declară că nu există o soluţie militară în Siria&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model_inputs = tokenizer(src_text, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">with</span> tokenizer.as_target_tokenizer(): <span class="hljs-meta">... 
</span> labels = tokenizer(tgt_text, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># model(**model_inputs, labels=labels) should work</span><!-- HTML_TAG_END --></pre></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.MBart50TokenizerFast.build_inputs_with_special_tokens"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>build_inputs_with_special_tokens</span></h4><!-- HTML_TAG_END --> <a id="transformers.MBart50TokenizerFast.build_inputs_with_special_tokens" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.MBart50TokenizerFast.build_inputs_with_special_tokens"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/mbart50/tokenization_mbart50_fast.py#L170" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_0<span class="opacity-60">: typing.List[int]</span></span> </span><span class="comma cursor-pointer"><span class="rounded 
hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_1<span class="opacity-60">: typing.Optional[typing.List[int]] = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>List[int]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBart50TokenizerFast.build_inputs_with_special_tokens.token_ids_0" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBart50TokenizerFast.build_inputs_with_special_tokens.token_ids_0"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs to which the special tokens will be added.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBart50TokenizerFast.build_inputs_with_special_tokens.token_ids_1" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBart50TokenizerFast.build_inputs_with_special_tokens.token_ids_1"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" 
id="transformers.MBart50TokenizerFast.build_inputs_with_special_tokens.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>List[int]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>List of <a href="../glossary#input-ids">input IDs</a> with the appropriate special tokens.</p> <!-- HTML_TAG_END --></p></div></div> <p>Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and adding special tokens. The special tokens depend on the source/target language currently set on the tokenizer (see <code>set_src_lang_special_tokens()</code> and <code>set_tgt_lang_special_tokens()</code> below).</p> <p>An MBART-50 sequence has the following format, where <code>X</code> represents the sequence:</p> <ul><li><code>input_ids</code> (for encoder) <code>[src_lang_code] X [eos]</code></li> <li><code>labels</code> (for decoder) <code>[tgt_lang_code] X [eos]</code></li></ul> <p>BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a separator.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.MBart50TokenizerFast.set_src_lang_special_tokens"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>set_src_lang_special_tokens</span></h4><!-- HTML_TAG_END --> <a id="transformers.MBart50TokenizerFast.set_src_lang_special_tokens" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.MBart50TokenizerFast.set_src_lang_special_tokens"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 
11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/mbart50/tokenization_mbart50_fast.py#L221" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">src_lang<span class="opacity-60">: str</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Reset the special tokens to the source lang setting. prefix=[src_lang_code] and suffix=[eos].</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.MBart50TokenizerFast.set_tgt_lang_special_tokens"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>set_tgt_lang_special_tokens</span></h4><!-- HTML_TAG_END --> <a id="transformers.MBart50TokenizerFast.set_tgt_lang_special_tokens" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.MBart50TokenizerFast.set_tgt_lang_special_tokens"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" 
href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/mbart50/tokenization_mbart50_fast.py#L236" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tgt_lang<span class="opacity-60">: str</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Reset the special tokens to the target language setting. prefix=[tgt_lang_code] and suffix=[eos].</p></div></div> <h2 class="relative group"><a id="transformers.MBartModel" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartModel"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>MBartModel </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.MBartModel"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">MBartModel</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.MBartModel" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.MBartModel"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 
11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/mbart/modeling_mbart.py#L1134" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60">: MBartConfig</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartModel.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartModel.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/mbart#transformers.MBartConfig">MBartConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>The bare MBART Model outputting raw hidden-states without any specific head on top. This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a>. 
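</p> <p>A minimal usage sketch (assuming the <code>facebook/mbart-large-cc25</code> checkpoint that also appears in the example further below; the sentence and the printed shape are illustrative only):</p> <div class="code-block relative"><pre>
from transformers import MBartTokenizer, MBartModel

# Assumption: the facebook/mbart-large-cc25 checkpoint is available (same one as in the example below).
tokenizer = MBartTokenizer.from_pretrained("facebook/mbart-large-cc25", src_lang="en_XX")
model = MBartModel.from_pretrained("facebook/mbart-large-cc25")

inputs = tokenizer("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")

# If decoder_input_ids is not passed, the forward pass derives it by shifting input_ids
# to the right (see the decoder_input_ids argument documented below).
outputs = model(**inputs)

# The bare model returns raw decoder hidden-states, e.g. of shape (batch, seq_len, d_model).
print(outputs.last_hidden_state.shape)
</pre></div> <p>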
Check the superclass documentation for the generic methods the library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads etc.)</p> <p>This model is also a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and behavior.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.MBartModel.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.MBartModel.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.MBartModel.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/mbart/modeling_mbart.py#L1161" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma 
cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cross_attn_head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_outputs<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_cache<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.Seq2SeqModelOutput" >transformers.modeling_outputs.Seq2SeqModelOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartModel.forward.input_ids" class="header-link 
block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartModel.forward.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/mbart#transformers.MBartTokenizer">MBartTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartModel.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartModel.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartModel.forward.decoder_input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartModel.forward.decoder_input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/mbart#transformers.MBartTokenizer">MBartTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>MBart uses a specific language id token as the starting token for <code>decoder_input_ids</code> generation that varies according to source and target language, <em>e.g.</em> 25004 for <em>en_XX</em>, and 25003 for <em>de_DE</em>. If <code>past_key_values</code> is used, optionally only the last <code>decoder_input_ids</code> have to be input (see <code>past_key_values</code>).</p> <p>For translation and summarization training, <code>decoder_input_ids</code> should be provided. 
If no <code>decoder_input_ids</code> is provided, the model will create this tensor by shifting the <code>input_ids</code> to the right for denoising pre-training following the paper.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartModel.forward.decoder_attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartModel.forward.decoder_attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartModel.forward.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartModel.forward.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>torch.Tensor</code> of shape <code>(encoder_layers, encoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the encoder. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartModel.forward.decoder_head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartModel.forward.decoder_head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartModel.forward.cross_attn_head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartModel.forward.cross_attn_head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cross_attn_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the cross-attention modules in the decoder. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartModel.forward.encoder_outputs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartModel.forward.encoder_outputs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_outputs</strong> (<code>tuple(tuple(torch.FloatTensor)</code>, <em>optional</em>) &#x2014; Tuple consists of (<code>last_hidden_state</code>, <em>optional</em>: <code>hidden_states</code>, <em>optional</em>: <code>attentions</code>) <code>last_hidden_state</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) is a sequence of hidden-states at the output of the last layer of the encoder. 
Used in the cross-attention of the decoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartModel.forward.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartModel.forward.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>. inputs_embeds (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>): Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartModel.forward.decoder_inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartModel.forward.decoder_inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, target_sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>decoder_input_ids</code> you can choose to directly pass an embedded representation. If <code>past_key_values</code> is used, optionally only the last <code>decoder_inputs_embeds</code> have to be input (see <code>past_key_values</code>). 
This is useful if you want more control over how to convert <code>decoder_input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.</p> <p>If <code>decoder_input_ids</code> and <code>decoder_inputs_embeds</code> are both unset, <code>decoder_inputs_embeds</code> takes the value of <code>inputs_embeds</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartModel.forward.use_cache" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartModel.forward.use_cache"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartModel.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartModel.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartModel.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartModel.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartModel.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartModel.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.MBartModel.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.Seq2SeqModelOutput" >transformers.modeling_outputs.Seq2SeqModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START 
--> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.Seq2SeqModelOutput" >transformers.modeling_outputs.Seq2SeqModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/mbart#transformers.MBartConfig" >MBartConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) — Sequence of hidden-states at the output of the last layer of the decoder of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) — Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder’s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) — Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, 
<em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/mbart#transformers.MBartModel">MBartModel</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MBartTokenizer, MBartModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MBartTokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/mbart-large-cc25&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = MBartModel.from_pretrained(<span class="hljs-string">&quot;facebook/mbart-large-cc25&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog 
is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.MBartForConditionalGeneration" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartForConditionalGeneration"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>MBartForConditionalGeneration </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.MBartForConditionalGeneration"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">MBartForConditionalGeneration</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.MBartForConditionalGeneration" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.MBartForConditionalGeneration"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 
28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/mbart/modeling_mbart.py#L1251" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60">: MBartConfig</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartForConditionalGeneration.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartForConditionalGeneration.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/mbart#transformers.MBartConfig">MBartConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>The MBART Model with a language modeling head. Can be used for summarization. This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)</p> <p>This model is also a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> subclass. 
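<p>The class description above notes that this model can be used for summarization; the following is an unofficial, minimal sketch of what that might look like. It is not part of the generated API reference: the checkpoint name is simply reused from the examples further down this page, the input sentence is invented, and the untuned <code>facebook/mbart-large-cc25</code> weights are not expected to produce a polished summary without task-specific fine-tuning.</p> <div class="code-block relative"><pre>&gt;&gt;&gt; from transformers import MBartForConditionalGeneration, MBartTokenizer

&gt;&gt;&gt; # Checkpoint reused from the examples below; purely illustrative.
&gt;&gt;&gt; tokenizer = MBartTokenizer.from_pretrained("facebook/mbart-large-cc25", src_lang="en_XX")
&gt;&gt;&gt; model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-cc25")

&gt;&gt;&gt; article = "The tower is 324 metres tall, about the same height as an 81-storey building."
&gt;&gt;&gt; inputs = tokenizer(article, return_tensors="pt")

&gt;&gt;&gt; # Ask generate() for a short output sequence, starting decoding with the en_XX language id.
&gt;&gt;&gt; summary_ids = model.generate(inputs["input_ids"], num_beams=4, max_length=32, decoder_start_token_id=tokenizer.lang_code_to_id["en_XX"])
&gt;&gt;&gt; summary = tokenizer.batch_decode(summary_ids, skip_special_tokens=True)[0]</pre></div> <p>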
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.MBartForConditionalGeneration.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.MBartForConditionalGeneration.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.MBartForConditionalGeneration.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/mbart/modeling_mbart.py#L1295" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white 
dark:hover:bg-white dark:hover:text-black">decoder_input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cross_attn_head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_outputs<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_cache<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.Seq2SeqLMOutput" >transformers.modeling_outputs.Seq2SeqLMOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartForConditionalGeneration.forward.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute 
with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartForConditionalGeneration.forward.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/mbart#transformers.MBartTokenizer">MBartTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartForConditionalGeneration.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartForConditionalGeneration.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartForConditionalGeneration.forward.decoder_input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartForConditionalGeneration.forward.decoder_input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/mbart#transformers.MBartTokenizer">MBartTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>MBart uses a specific language id token as the starting token for <code>decoder_input_ids</code> generation that varies according to source and target language, <em>e.g.</em> 25004 for <em>en_XX</em>, and 25003 for <em>de_DE</em>. If <code>past_key_values</code> is used, optionally only the last <code>decoder_input_ids</code> have to be input (see <code>past_key_values</code>).</p> <p>For translation and summarization training, <code>decoder_input_ids</code> should be provided. 
If no <code>decoder_input_ids</code> is provided, the model will create this tensor by shifting the <code>input_ids</code> to the right for denoising pre-training following the paper.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartForConditionalGeneration.forward.decoder_attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartForConditionalGeneration.forward.decoder_attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartForConditionalGeneration.forward.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartForConditionalGeneration.forward.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>torch.Tensor</code> of shape <code>(encoder_layers, encoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the encoder. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartForConditionalGeneration.forward.decoder_head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartForConditionalGeneration.forward.decoder_head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartForConditionalGeneration.forward.cross_attn_head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartForConditionalGeneration.forward.cross_attn_head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cross_attn_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the cross-attention modules in the decoder. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartForConditionalGeneration.forward.encoder_outputs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartForConditionalGeneration.forward.encoder_outputs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_outputs</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>) &#x2014; Tuple consisting of (<code>last_hidden_state</code>, <em>optional</em>: <code>hidden_states</code>, <em>optional</em>: <code>attentions</code>). <code>last_hidden_state</code>, of shape <code>(batch_size, sequence_length, hidden_size)</code>, is a sequence of hidden-states at the output of the last layer of the encoder. 
Used in the cross-attention of the decoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartForConditionalGeneration.forward.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartForConditionalGeneration.forward.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code> and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.</p> <p><strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>): Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartForConditionalGeneration.forward.decoder_inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartForConditionalGeneration.forward.decoder_inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, target_sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>decoder_input_ids</code> you can choose to directly pass an embedded representation. If <code>past_key_values</code> is used, optionally only the last <code>decoder_inputs_embeds</code> have to be input (see <code>past_key_values</code>). 
This is useful if you want more control over how to convert <code>decoder_input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.</p> <p>If <code>decoder_input_ids</code> and <code>decoder_inputs_embeds</code> are both unset, <code>decoder_inputs_embeds</code> takes the value of <code>inputs_embeds</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartForConditionalGeneration.forward.use_cache" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartForConditionalGeneration.forward.use_cache"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartForConditionalGeneration.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartForConditionalGeneration.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartForConditionalGeneration.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartForConditionalGeneration.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartForConditionalGeneration.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartForConditionalGeneration.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartForConditionalGeneration.forward.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartForConditionalGeneration.forward.labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. Indices should either be in <code>[0, ..., config.vocab_size]</code> or -100 (see <code>input_ids</code> docstring). Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.MBartForConditionalGeneration.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.Seq2SeqLMOutput" >transformers.modeling_outputs.Seq2SeqLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.Seq2SeqLMOutput" >transformers.modeling_outputs.Seq2SeqLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/mbart#transformers.MBartConfig" >MBartConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) — Language modeling loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) — Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) — Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> 
(<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder’s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) — Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/mbart#transformers.MBartForConditionalGeneration">MBartForConditionalGeneration</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores 
them.</p></div> <p>Translation example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MBartTokenizer, MBartForConditionalGeneration <span class="hljs-meta">&gt;&gt;&gt; </span>model = MBartForConditionalGeneration.from_pretrained(<span class="hljs-string">&quot;facebook/mbart-large-en-ro&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MBartTokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/mbart-large-en-ro&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>example_english_phrase = <span class="hljs-string">&quot;42 is the answer&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(example_english_phrase, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Translate</span> <span class="hljs-meta">&gt;&gt;&gt; </span>generated_ids = model.generate(inputs[<span class="hljs-string">&quot;input_ids&quot;</span>], num_beams=<span class="hljs-number">4</span>, max_length=<span class="hljs-number">5</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.batch_decode(generated_ids, skip_special_tokens=<span class="hljs-literal">True</span>, clean_up_tokenization_spaces=<span class="hljs-literal">False</span>)[<span class="hljs-number">0</span>] <span class="hljs-string">&#x27;42 este răspuns&#x27;</span><!-- HTML_TAG_END --></pre></div> <p>Mask filling example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black 
text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MBartTokenizer, MBartForConditionalGeneration <span class="hljs-meta">&gt;&gt;&gt; </span>model = MBartForConditionalGeneration.from_pretrained(<span class="hljs-string">&quot;facebook/mbart-large-cc25&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MBartTokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/mbart-large-cc25&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># de_DE is the language symbol id &lt;LID&gt; for German</span> <span class="hljs-meta">&gt;&gt;&gt; </span>TXT = <span class="hljs-string">&quot;&lt;/s&gt; Meine Freunde sind &lt;mask&gt; nett aber sie essen zu viel Kuchen. &lt;/s&gt; de_DE&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = tokenizer([TXT], add_special_tokens=<span class="hljs-literal">False</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>)[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>logits = model(input_ids).logits <span class="hljs-meta">&gt;&gt;&gt; </span>masked_index = (input_ids[<span class="hljs-number">0</span>] == tokenizer.mask_token_id).nonzero().item() <span class="hljs-meta">&gt;&gt;&gt; </span>probs = logits[<span class="hljs-number">0</span>, masked_index].softmax(dim=<span class="hljs-number">0</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>values, predictions = probs.topk(<span class="hljs-number">5</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.decode(predictions).split() [<span class="hljs-string">&#x27;nett&#x27;</span>, <span class="hljs-string">&#x27;sehr&#x27;</span>, <span class="hljs-string">&#x27;ganz&#x27;</span>, <span class="hljs-string">&#x27;nicht&#x27;</span>, <span class="hljs-string">&#x27;so&#x27;</span>]<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.MBartForQuestionAnswering" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartForQuestionAnswering"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>MBartForQuestionAnswering </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 
bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.MBartForQuestionAnswering"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">MBartForQuestionAnswering</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.MBartForQuestionAnswering" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.MBartForQuestionAnswering"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/mbart/modeling_mbart.py#L1551" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartForQuestionAnswering.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartForQuestionAnswering.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" 
role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/mbart#transformers.MBartConfig">MBartConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>MBART Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layer on top of the hidden-states output to compute <code>span start logits</code> and <code>span end logits</code>).</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)</p> <p>This model is also a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.MBartForQuestionAnswering.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.MBartForQuestionAnswering.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.MBartForQuestionAnswering.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/mbart/modeling_mbart.py#L1563" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">decoder_input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cross_attn_head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_outputs<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">start_positions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">end_positions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_cache<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput" >transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartForQuestionAnswering.forward.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden 
with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartForQuestionAnswering.forward.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/mbart#transformers.MBartTokenizer">MBartTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartForQuestionAnswering.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartForQuestionAnswering.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartForQuestionAnswering.forward.decoder_input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartForQuestionAnswering.forward.decoder_input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/mbart#transformers.MBartTokenizer">MBartTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>MBart uses a specific language id token as the starting token for <code>decoder_input_ids</code> generation that varies according to source and target language, <em>e.g.</em> 25004 for <em>en_XX</em>, and 25003 for <em>de_DE</em>. If <code>past_key_values</code> is used, optionally only the last <code>decoder_input_ids</code> have to be input (see <code>past_key_values</code>).</p> <p>For translation and summarization training, <code>decoder_input_ids</code> should be provided. 
If no <code>decoder_input_ids</code> is provided, the model will create this tensor by shifting the <code>input_ids</code> to the right for denoising pre-training following the paper.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartForQuestionAnswering.forward.decoder_attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartForQuestionAnswering.forward.decoder_attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartForQuestionAnswering.forward.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartForQuestionAnswering.forward.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>torch.Tensor</code> of shape <code>(encoder_layers, encoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the encoder. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartForQuestionAnswering.forward.decoder_head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartForQuestionAnswering.forward.decoder_head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartForQuestionAnswering.forward.cross_attn_head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartForQuestionAnswering.forward.cross_attn_head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cross_attn_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the cross-attention modules in the decoder. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartForQuestionAnswering.forward.encoder_outputs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartForQuestionAnswering.forward.encoder_outputs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_outputs</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>) &#x2014; Tuple consists of (<code>last_hidden_state</code>, <em>optional</em>: <code>hidden_states</code>, <em>optional</em>: <code>attentions</code>), where <code>last_hidden_state</code> of shape <code>(batch_size, sequence_length, hidden_size)</code> is a sequence of hidden-states at the output of the last layer of the encoder. 
Used in the cross-attention of the decoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartForQuestionAnswering.forward.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartForQuestionAnswering.forward.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code> and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.</p> <p><strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>): Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartForQuestionAnswering.forward.decoder_inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartForQuestionAnswering.forward.decoder_inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, target_sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>decoder_input_ids</code> you can choose to directly pass an embedded representation. If <code>past_key_values</code> is used, optionally only the last <code>decoder_inputs_embeds</code> have to be input (see <code>past_key_values</code>). 
This is useful if you want more control over how to convert <code>decoder_input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.</p> <p>If <code>decoder_input_ids</code> and <code>decoder_inputs_embeds</code> are both unset, <code>decoder_inputs_embeds</code> takes the value of <code>inputs_embeds</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartForQuestionAnswering.forward.use_cache" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartForQuestionAnswering.forward.use_cache"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartForQuestionAnswering.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartForQuestionAnswering.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartForQuestionAnswering.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartForQuestionAnswering.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartForQuestionAnswering.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartForQuestionAnswering.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartForQuestionAnswering.forward.start_positions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartForQuestionAnswering.forward.start_positions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" 
aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>start_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<em>sequence_length</em>). Position outside of the sequence are not taken into account for computing the loss.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartForQuestionAnswering.forward.end_positions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartForQuestionAnswering.forward.end_positions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>end_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<em>sequence_length</em>). 
Positions outside of the sequence are not taken into account for computing the loss.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.MBartForQuestionAnswering.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput" >transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput" >transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/mbart#transformers.MBartConfig" >MBartConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) — Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.</p> </li> <li> <p><strong>start_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) — Span-start scores (before SoftMax).</p> </li> <li> <p><strong>end_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) — Span-end scores (before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) — Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code> and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, 
used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder’s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) — Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/mbart#transformers.MBartForQuestionAnswering">MBartForQuestionAnswering</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow 
left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MBartTokenizer, MBartForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>torch.manual_seed(<span class="hljs-number">0</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MBartTokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/mbart-large-cc25&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = MBartForQuestionAnswering.from_pretrained(<span class="hljs-string">&quot;facebook/mbart-large-cc25&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>question, text = <span class="hljs-string">&quot;Who was Jim Henson?&quot;</span>, <span class="hljs-string">&quot;Jim Henson was a nice puppet&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(question, text, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>start_positions = torch.tensor([<span class="hljs-number">1</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>end_positions = torch.tensor([<span class="hljs-number">3</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, start_positions=start_positions, end_positions=end_positions) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">round</span>(loss.item(), <span class="hljs-number">2</span>) <span class="hljs-number">3.04</span> <span class="hljs-meta">&gt;&gt;&gt; </span>start_scores = outputs.start_logits <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">list</span>(start_scores.shape) [<span class="hljs-number">1</span>, <span class="hljs-number">16</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>end_scores = outputs.end_logits <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">list</span>(end_scores.shape) [<span class="hljs-number">1</span>, <span class="hljs-number">16</span>]<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.MBartForSequenceClassification" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartForSequenceClassification"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></span></a> <span>MBartForSequenceClassification </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.MBartForSequenceClassification"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">MBartForSequenceClassification</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.MBartForSequenceClassification" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.MBartForSequenceClassification"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/mbart/modeling_mbart.py#L1424" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60">: MBartConfig</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 
items-start"><a id="transformers.MBartForSequenceClassification.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartForSequenceClassification.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/mbart#transformers.MBartConfig">MBartConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>MBart model with a sequence classification/head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)</p> <p>This model is also a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and behavior.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.MBartForSequenceClassification.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.MBartForSequenceClassification.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.MBartForSequenceClassification.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/mbart/modeling_mbart.py#L1437" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white 
dark:hover:bg-white dark:hover:text-black">decoder_input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cross_attn_head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_outputs<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_cache<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.Seq2SeqSequenceClassifierOutput" >transformers.modeling_outputs.Seq2SeqSequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartForSequenceClassification.forward.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.MBartForSequenceClassification.forward.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/mbart#transformers.MBartTokenizer">MBartTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartForSequenceClassification.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartForSequenceClassification.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartForSequenceClassification.forward.decoder_input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartForSequenceClassification.forward.decoder_input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/mbart#transformers.MBartTokenizer">MBartTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>MBart uses a specific language id token as the starting token for <code>decoder_input_ids</code> generation that varies according to source and target language, <em>e.g.</em> 25004 for <em>en_XX</em>, and 25003 for <em>de_DE</em>. If <code>past_key_values</code> is used, optionally only the last <code>decoder_input_ids</code> have to be input (see <code>past_key_values</code>).</p> <p>For translation and summarization training, <code>decoder_input_ids</code> should be provided. 
If no <code>decoder_input_ids</code> is provided, the model will create this tensor by shifting the <code>input_ids</code> to the right for denoising pre-training following the paper.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartForSequenceClassification.forward.decoder_attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartForSequenceClassification.forward.decoder_attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartForSequenceClassification.forward.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartForSequenceClassification.forward.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>torch.Tensor</code> of shape <code>(encoder_layers, encoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the encoder. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartForSequenceClassification.forward.decoder_head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartForSequenceClassification.forward.decoder_head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartForSequenceClassification.forward.cross_attn_head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartForSequenceClassification.forward.cross_attn_head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cross_attn_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the cross-attention modules in the decoder. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartForSequenceClassification.forward.encoder_outputs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartForSequenceClassification.forward.encoder_outputs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_outputs</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>) &#x2014; Tuple consists of (<code>last_hidden_state</code>, <em>optional</em>: <code>hidden_states</code>, <em>optional</em>: <code>attentions</code>) <code>last_hidden_state</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>, is a sequence of hidden-states at the output of the last layer of the encoder. 
Used in the cross-attention of the decoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartForSequenceClassification.forward.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartForSequenceClassification.forward.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.</p> <p><strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartForSequenceClassification.forward.decoder_inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartForSequenceClassification.forward.decoder_inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, target_sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>decoder_input_ids</code> you can choose to directly pass an embedded representation. If <code>past_key_values</code> is used, optionally only the last <code>decoder_inputs_embeds</code> have to be input (see <code>past_key_values</code>). 
This is useful if you want more control over how to convert <code>decoder_input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.</p> <p>If <code>decoder_input_ids</code> and <code>decoder_inputs_embeds</code> are both unset, <code>decoder_inputs_embeds</code> takes the value of <code>inputs_embeds</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartForSequenceClassification.forward.use_cache" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartForSequenceClassification.forward.use_cache"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartForSequenceClassification.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartForSequenceClassification.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartForSequenceClassification.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartForSequenceClassification.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartForSequenceClassification.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartForSequenceClassification.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartForSequenceClassification.forward.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartForSequenceClassification.forward.labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.MBartForSequenceClassification.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.Seq2SeqSequenceClassifierOutput" >transformers.modeling_outputs.Seq2SeqSequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.Seq2SeqSequenceClassifierOutput" >transformers.modeling_outputs.Seq2SeqSequenceClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/mbart#transformers.MBartConfig" >MBartConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>label</code> is provided) — Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) — Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) — Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, 
returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder’s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) — Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/mbart#transformers.MBartForSequenceClassification">MBartForSequenceClassification</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example of single-label classification:</p> 
<div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MBartTokenizer, MBartForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span>torch.manual_seed(<span class="hljs-number">0</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MBartTokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/mbart-large-cc25&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = MBartForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;facebook/mbart-large-cc25&quot;</span>, num_labels=<span class="hljs-number">2</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([<span class="hljs-number">1</span>]).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">list</span>(logits.shape) [<span class="hljs-number">1</span>, <span class="hljs-number">2</span>]<!-- HTML_TAG_END --></pre></div> <p>Example of multi-label classification:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" 
height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MBartTokenizer, MBartForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span>torch.manual_seed(<span class="hljs-number">0</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MBartTokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/mbart-large-cc25&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = MBartForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;facebook/mbart-large-cc25&quot;</span>, problem_type=<span class="hljs-string">&quot;multi_label_classification&quot;</span>, num_labels=<span class="hljs-number">2</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([[<span class="hljs-number">1</span>, <span class="hljs-number">1</span>]], dtype=torch.<span class="hljs-built_in">float</span>) <span class="hljs-comment"># need dtype=float for BCEWithLogitsLoss</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">list</span>(logits.shape) [<span class="hljs-number">1</span>, <span class="hljs-number">2</span>]<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.MBartForCausalLM" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartForCausalLM"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>MBartForCausalLM </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.MBartForCausalLM"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white 
dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">MBartForCausalLM</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.MBartForCausalLM" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.MBartForCausalLM"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/mbart/modeling_mbart.py#L1684" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.MBartForCausalLM.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 
16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.MBartForCausalLM.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.MBartForCausalLM.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/mbart/modeling_mbart.py#L1715" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cross_attn_head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60"> = None</span></span> 
</span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_cache<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.CausalLMOutputWithCrossAttentions" >transformers.modeling_outputs.CausalLMOutputWithCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartForCausalLM.forward.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartForCausalLM.forward.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/mbart#transformers.MBartTokenizer">MBartTokenizer</a>. 
See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartForCausalLM.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartForCausalLM.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartForCausalLM.forward.encoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartForCausalLM.forward.encoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_hidden_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder. 
Used in the cross-attention if the model is configured as a decoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartForCausalLM.forward.encoder_attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartForCausalLM.forward.encoder_attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartForCausalLM.forward.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartForCausalLM.forward.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartForCausalLM.forward.cross_attn_head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartForCausalLM.forward.cross_attn_head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cross_attn_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the cross-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartForCausalLM.forward.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartForCausalLM.forward.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, 
embed_size_per_head)</code>. The two additional tensors are only required when the model is used as a decoder in a Sequence to Sequence model.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartForCausalLM.forward.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartForCausalLM.forward.labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. Indices should either be in <code>[0, ..., config.vocab_size]</code> or -100 (see <code>input_ids</code> docstring). 
Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartForCausalLM.forward.use_cache" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartForCausalLM.forward.use_cache"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartForCausalLM.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartForCausalLM.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartForCausalLM.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartForCausalLM.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MBartForCausalLM.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MBartForCausalLM.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.MBartForCausalLM.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.CausalLMOutputWithCrossAttentions" >transformers.modeling_outputs.CausalLMOutputWithCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 
dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.CausalLMOutputWithCrossAttentions" >transformers.modeling_outputs.CausalLMOutputWithCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/mbart#transformers.MBartConfig" >MBartConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) — Language modeling loss (for next-token prediction).</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) — Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Cross attentions weights after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) — Tuple of <code>torch.FloatTensor</code> tuples of length <code>config.n_layers</code>, with each tuple containing the cached key, value states of the self-attention and the cross-attention layers if model is used in encoder-decoder setting. 
Only relevant if <code>config.is_decoder = True</code>.</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MBartTokenizer, MBartForCausalLM <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MBartTokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/mbart-large-cc25&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = MBartForCausalLM.from_pretrained(<span class="hljs-string">&quot;facebook/mbart-large-cc25&quot;</span>, add_cross_attention=<span class="hljs-literal">False</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">assert</span> model.config.is_decoder, <span class="hljs-string">f&quot;<span class="hljs-subst">{model.__class__}</span> has to be configured as a decoder.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits <span class="hljs-meta">&gt;&gt;&gt; </span>expected_shape = [<span class="hljs-number">1</span>, inputs.input_ids.shape[-<span class="hljs-number">1</span>], model.config.vocab_size] <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">list</span>(logits.shape) == expected_shape <span class="hljs-literal">True</span><!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.TFMBartModel" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMBartModel"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 
1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TFMBartModel </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFMBartModel"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TFMBartModel</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TFMBartModel" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFMBartModel"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/mbart/modeling_tf_mbart.py#L1138" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> 
<span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMBartModel.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMBartModel.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/mbart#transformers.MBartConfig">MBartConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>The bare MBART Model outputting raw hidden-states without any specific head on top. This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)</p> <p>This model is also a <a href="https://www.tensorflow.org/api_docs/python/tf/keras/Model" rel="nofollow">tf.keras.Model</a> subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and behavior.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>TF 2.0 models accept two formats as inputs:</p> <ul><li>having all inputs as keyword arguments (like PyTorch models), or</li> <li>having all inputs as a list, tuple or dict in the first positional argument.</li></ul> <p>This second option is useful when using the <code>tf.keras.Model.fit</code> method, which currently requires having all the tensors in the first argument of the model call function: <code>model(inputs)</code>.</p> <p>If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument:</p> <ul><li>a single Tensor with <code>input_ids</code> only and nothing else: <code>model(input_ids)</code></li> <li>a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: <code>model([input_ids, attention_mask])</code> or <code>model([input_ids, attention_mask, token_type_ids])</code></li> <li>a dictionary with one or several input Tensors associated with the input names given in the docstring: <code>model({&quot;input_ids&quot;: input_ids, &quot;token_type_ids&quot;: token_type_ids})</code></li></ul></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFMBartModel.call"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>call</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFMBartModel.call" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFMBartModel.call"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 
84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/mbart/modeling_tf_mbart.py#L1150" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cross_attn_head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_outputs<span class="opacity-60">: typing.Union[typing.Tuple, transformers.modeling_tf_outputs.TFBaseModelOutput, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_cache<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span 
class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">training<span class="opacity-60"> = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFSeq2SeqModelOutput" >transformers.modeling_tf_outputs.TFSeq2SeqModelOutput</a> or <code>tuple(tf.Tensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMBartModel.call.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMBartModel.call.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/mbart#transformers.MBartTokenizer">MBartTokenizer</a>. 
See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMBartModel.call.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMBartModel.call.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMBartModel.call.decoder_input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMBartModel.call.decoder_input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_input_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/mbart#transformers.MBartTokenizer">MBartTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>MBart uses a specific language id token as the starting token for <code>decoder_input_ids</code> generation that varies according to source and target language, <em>e.g.</em> 25004 for <em>en_XX</em>, and 25003 for <em>de_DE</em>. If <code>past_key_values</code> is used, optionally only the last <code>decoder_input_ids</code> have to be input (see <code>past_key_values</code>).</p> <p>For translation and summarization training, <code>decoder_input_ids</code> should be provided. 
If no <code>decoder_input_ids</code> is provided, the model will create this tensor by shifting the <code>input_ids</code> to the right for denoising pre-training following the paper.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMBartModel.call.decoder_attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMBartModel.call.decoder_attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_attention_mask</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; If not provided, a mask that ignores pad tokens will be created by default. It is not recommended to set this for most use cases.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMBartModel.call.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMBartModel.call.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>tf.Tensor</code> of shape <code>(encoder_layers, encoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the encoder. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMBartModel.call.decoder_head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMBartModel.call.decoder_head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_head_mask</strong> (<code>tf.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMBartModel.call.cross_attn_head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMBartModel.call.cross_attn_head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cross_attn_head_mask</strong> (<code>tf.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the cross-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMBartModel.call.encoder_outputs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMBartModel.call.encoder_outputs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_outputs</strong> (<code>tf.FloatTensor</code>, <em>optional</em>) &#x2014; Sequence of hidden states at the output of the last layer of the encoder, of shape <code>(batch_size, sequence_length, hidden_size)</code>. Used in the cross-attention of the decoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMBartModel.call.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMBartModel.call.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>Tuple[Tuple[tf.Tensor]]</code> of length <code>config.n_layers</code>) &#x2014; Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. 
If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMBartModel.call.use_cache" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMBartModel.call.use_cache"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>). Set to <code>False</code> during training, <code>True</code> during generation<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMBartModel.call.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMBartModel.call.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMBartModel.call.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMBartModel.call.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMBartModel.call.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMBartModel.call.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. 
This argument can be used in eager mode, in graph mode the value will always be set to True.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMBartModel.call.training" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMBartModel.call.training"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.TFMBartModel.call.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFSeq2SeqModelOutput" >transformers.modeling_tf_outputs.TFSeq2SeqModelOutput</a> or <code>tuple(tf.Tensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFSeq2SeqModelOutput" >transformers.modeling_tf_outputs.TFSeq2SeqModelOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/mbart#transformers.MBartConfig" >MBartConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) — Sequence of hidden-states at the output of the last layer of the decoder of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.</p> </li> <li> <p><strong>past_key_values</strong> (<code>List[tf.Tensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) — List of <code>tf.Tensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be used (see <code>past_key_values</code> input) to speed up 
sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder’s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) — Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/mbart#transformers.TFMBartModel">TFMBartModel</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div 
class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MBartTokenizer, TFMBartModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MBartTokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/mbart-large-cc25&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFMBartModel.from_pretrained(<span class="hljs-string">&quot;facebook/mbart-large-cc25&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.TFMBartForConditionalGeneration" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMBartForConditionalGeneration"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TFMBartForConditionalGeneration </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFMBartForConditionalGeneration"><!-- HTML_TAG_START --><h3 
class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TFMBartForConditionalGeneration</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TFMBartForConditionalGeneration" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFMBartForConditionalGeneration"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/mbart/modeling_tf_mbart.py#L1225" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMBartForConditionalGeneration.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMBartForConditionalGeneration.config"><span><svg class="text-smd" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/mbart#transformers.MBartConfig">MBartConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>The MBART Model with a language modeling head. Can be used for summarization. This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)</p> <p>This model is also a <a href="https://www.tensorflow.org/api_docs/python/tf/keras/Model" rel="nofollow">tf.keras.Model</a> subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>TF 2.0 models accepts two formats as inputs:</p> <ul><li>having all inputs as keyword arguments (like PyTorch models), or</li> <li>having all inputs as a list, tuple or dict in the first positional arguments.</li></ul> <p>This second option is useful when using <code>tf.keras.Model.fit</code> method which currently requires having all the tensors in the first argument of the model call function: <code>model(inputs)</code>.</p> <p>If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :</p> <ul><li>a single Tensor with <code>input_ids</code> only and nothing else: <code>model(input_ids)</code></li> <li>a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: <code>model([input_ids, attention_mask])</code> or <code>model([input_ids, attention_mask, token_type_ids])</code></li> <li>a dictionary with one or several input Tensors associated to the input names given in the docstring: <code>model({&quot;input_ids&quot;: input_ids, &quot;token_type_ids&quot;: token_type_ids})</code></li></ul></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFMBartForConditionalGeneration.call"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>call</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFMBartForConditionalGeneration.call" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFMBartForConditionalGeneration.call"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 
1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/mbart/modeling_tf_mbart.py#L1258" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cross_attn_head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_outputs<span class="opacity-60">: typing.Optional[transformers.modeling_tf_outputs.TFBaseModelOutput] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_cache<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> 
</span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">training<span class="opacity-60"> = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFSeq2SeqLMOutput" >transformers.modeling_tf_outputs.TFSeq2SeqLMOutput</a> or <code>tuple(tf.Tensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMBartForConditionalGeneration.call.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMBartForConditionalGeneration.call.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/mbart#transformers.MBartTokenizer">MBartTokenizer</a>. 
See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMBartForConditionalGeneration.call.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMBartForConditionalGeneration.call.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMBartForConditionalGeneration.call.decoder_input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMBartForConditionalGeneration.call.decoder_input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_input_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/mbart#transformers.MBartTokenizer">MBartTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>MBart uses a specific language id token as the starting token for <code>decoder_input_ids</code> generation that varies according to source and target language, <em>e.g.</em> 25004 for <em>en_XX</em>, and 25003 for <em>de_DE</em>. If <code>past_key_values</code> is used, optionally only the last <code>decoder_input_ids</code> have to be input (see <code>past_key_values</code>).</p> <p>For translation and summarization training, <code>decoder_input_ids</code> should be provided. 
If no <code>decoder_input_ids</code> is provided, the model will create this tensor by shifting the <code>input_ids</code> to the right for denoising pre-training following the paper.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMBartForConditionalGeneration.call.decoder_attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMBartForConditionalGeneration.call.decoder_attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_attention_mask</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; will be made by default and ignore pad tokens. It is not recommended to set this for most use cases.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMBartForConditionalGeneration.call.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMBartForConditionalGeneration.call.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>tf.Tensor</code> of shape <code>(encoder_layers, encoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the encoder. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMBartForConditionalGeneration.call.decoder_head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMBartForConditionalGeneration.call.decoder_head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_head_mask</strong> (<code>tf.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMBartForConditionalGeneration.call.cross_attn_head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMBartForConditionalGeneration.call.cross_attn_head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cross_attn_head_mask</strong> (<code>tf.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the cross-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMBartForConditionalGeneration.call.encoder_outputs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMBartForConditionalGeneration.call.encoder_outputs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_outputs</strong> (<code>tf.FloatTensor</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder, of shape <code>(batch_size, sequence_length, hidden_size)</code>. Used in the cross-attention of the decoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMBartForConditionalGeneration.call.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMBartForConditionalGeneration.call.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>Tuple[Tuple[tf.Tensor]]</code> of length <code>config.n_layers</code>) &#x2014; contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. 
If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMBartForConditionalGeneration.call.use_cache" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMBartForConditionalGeneration.call.use_cache"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>). Set to <code>False</code> during training, <code>True</code> during generation<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMBartForConditionalGeneration.call.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMBartForConditionalGeneration.call.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMBartForConditionalGeneration.call.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMBartForConditionalGeneration.call.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMBartForConditionalGeneration.call.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMBartForConditionalGeneration.call.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. 
This argument can be used in eager mode, in graph mode the value will always be set to True.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMBartForConditionalGeneration.call.training" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMBartForConditionalGeneration.call.training"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMBartForConditionalGeneration.call.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMBartForConditionalGeneration.call.labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. Indices should either be in <code>[0, ..., config.vocab_size]</code> or -100 (see <code>input_ids</code> docstring). 
Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.TFMBartForConditionalGeneration.call.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFSeq2SeqLMOutput" >transformers.modeling_tf_outputs.TFSeq2SeqLMOutput</a> or <code>tuple(tf.Tensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFSeq2SeqLMOutput" >transformers.modeling_tf_outputs.TFSeq2SeqLMOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/mbart#transformers.MBartConfig" >MBartConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(n,)</code>, <em>optional</em>, where n is the number of non-masked labels, returned when <code>labels</code> is provided) — Language modeling loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) — Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>List[tf.Tensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) — List of <code>tf.Tensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>tf.Tensor</code> (one for each layer) of 
shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder’s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) — Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/mbart#transformers.TFMBartForConditionalGeneration">TFMBartForConditionalGeneration</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Summarization example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span 
class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MBartTokenizer, TFMBartForConditionalGeneration, MBartConfig <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFMBartForConditionalGeneration.from_pretrained(<span class="hljs-string">&quot;facebook/mbart-large-cc25&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MBartTokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/mbart-large-cc25&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>ARTICLE_TO_SUMMARIZE = <span class="hljs-string">&quot;Meine Freunde sind cool, aber sie essen zu viel Kuchen.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=<span class="hljs-number">1024</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Generate Summary</span> <span class="hljs-meta">&gt;&gt;&gt; </span>summary_ids = model.generate(inputs[<span class="hljs-string">&quot;input_ids&quot;</span>], num_beams=<span class="hljs-number">4</span>, max_length=<span class="hljs-number">5</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(tokenizer.batch_decode(summary_ids, skip_special_tokens=<span class="hljs-literal">True</span>, clean_up_tokenization_spaces=<span class="hljs-literal">False</span>))<!-- HTML_TAG_END --></pre></div> <p>Mask filling example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MBartTokenizer, TFMBartForConditionalGeneration <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFMBartForConditionalGeneration.from_pretrained(<span class="hljs-string">&quot;facebook/mbart-large-cc25&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MBartTokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/mbart-large-cc25&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># de_DE is the language symbol id &lt;LID&gt; for German</span> <span class="hljs-meta">&gt;&gt;&gt; </span>TXT = <span class="hljs-string">&quot;&lt;/s&gt; Meine Freunde sind &lt;mask&gt; nett aber sie essen zu viel Kuchen. 
&lt;/s&gt; de_DE&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = tokenizer([TXT], add_special_tokens=<span class="hljs-literal">False</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>)[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>logits = model(input_ids).logits <span class="hljs-meta">&gt;&gt;&gt; </span>probs = tf.nn.softmax(logits[<span class="hljs-number">0</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># probs[5] is associated with the mask token</span><!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.FlaxMBartModel" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartModel"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>FlaxMBartModel </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxMBartModel"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">FlaxMBartModel</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.FlaxMBartModel" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxMBartModel"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 
11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/mbart/modeling_flax_mbart.py#L1252" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60">: MBartConfig</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_shape<span class="opacity-60">: typing.Tuple[int] = (1, 1)</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">seed<span class="opacity-60">: int = 0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dtype<span class="opacity-60">: dtype = &lt;class &#39;jax._src.numpy.lax_numpy.float32&#39;&gt;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartModel.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartModel.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/mbart#transformers.MBartConfig">MBartConfig</a>) &#x2014; Model configuration class with all the parameters of the model. 
Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartModel.dtype" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartModel.dtype"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>dtype</strong> (<code>jax.numpy.dtype</code>, <em>optional</em>, defaults to <code>jax.numpy.float32</code>) &#x2014; The data type of the computation. Can be one of <code>jax.numpy.float32</code>, <code>jax.numpy.float16</code> (on GPUs) and <code>jax.numpy.bfloat16</code> (on TPUs).</p> <p>This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If specified, all the computation will be performed with the given <code>dtype</code>.</p> <p><strong>Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.</strong></p> <p>If you wish to change the dtype of the model parameters, see <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.to_fp16">to_fp16()</a> and <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.to_bf16">to_bf16()</a>.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>The bare MBart Model transformer outputting raw hidden-states without any specific head on top. This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel">FlaxPreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads etc.)</p> <p>This model is also a Flax Linen <a href="https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html" rel="nofollow">flax.nn.Module</a> subclass. 
Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.</p> <p>Finally, this model supports inherent JAX features such as:</p> <ul><li><a href="https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit" rel="nofollow">Just-In-Time (JIT) compilation</a></li> <li><a href="https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation" rel="nofollow">Automatic Differentiation</a></li> <li><a href="https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap" rel="nofollow">Vectorization</a></li> <li><a href="https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap" rel="nofollow">Parallelization</a></li></ul> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxMBartPreTrainedModel.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.FlaxMBartPreTrainedModel.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxMBartPreTrainedModel.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/mbart/modeling_flax_mbart.py#L1189" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> 
<span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: ndarray</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60">: typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_input_ids<span class="opacity-60">: typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_attention_mask<span class="opacity-60">: typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60">: typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_position_ids<span class="opacity-60">: typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">train<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">params<span class="opacity-60">: dict = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dropout_rng<span class="opacity-60">: PRNGKey = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput" >transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a 
id="transformers.FlaxMBartPreTrainedModel.__call__.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartPreTrainedModel.__call__.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/mbart#transformers.MBartTokenizer">MBartTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartPreTrainedModel.__call__.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartPreTrainedModel.__call__.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartPreTrainedModel.__call__.decoder_input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartPreTrainedModel.__call__.decoder_input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/mbart#transformers.MBartTokenizer">MBartTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>For translation and summarization training, <code>decoder_input_ids</code> should be provided. 
If no <code>decoder_input_ids</code> is provided, the model will create this tensor by shifting the <code>input_ids</code> to the right for denoising pre-training following the paper.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartPreTrainedModel.__call__.decoder_attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartPreTrainedModel.__call__.decoder_attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.</p> <p>If you want to change padding behavior, you should modify it to your needs. See diagram 1 in <a href="https://arxiv.org/abs/1910.13461" rel="nofollow">the paper</a> for more information on the default strategy.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartPreTrainedModel.__call__.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartPreTrainedModel.__call__.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartPreTrainedModel.__call__.decoder_position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartPreTrainedModel.__call__.decoder_position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartPreTrainedModel.__call__.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartPreTrainedModel.__call__.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartPreTrainedModel.__call__.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartPreTrainedModel.__call__.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartPreTrainedModel.__call__.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartPreTrainedModel.__call__.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.FlaxMBartPreTrainedModel.__call__.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput" >transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 
border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput" >transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/mbart#transformers.MBartConfig" >MBartConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) — Sequence of hidden-states at the output of the last layer of the decoder of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(jnp.ndarray))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) — Tuple of <code>tuple(jnp.ndarray)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder’s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) — Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> 
(<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <code>FlaxMBartPreTrainedModel</code> forward method overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for the forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this, since the former takes care of running the pre- and post-processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MBartTokenizer, FlaxMBartModel <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MBartTokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/mbart-large-cc25&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxMBartModel.from_pretrained(<span class="hljs-string">&quot;facebook/mbart-large-cc25&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;jax&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; 
</span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state<!-- HTML_TAG_END --></pre></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxMBartPreTrainedModel.encode"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>encode</span></h4><!-- HTML_TAG_END --> <a id="transformers.FlaxMBartPreTrainedModel.encode" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxMBartPreTrainedModel.encode"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/mbart/modeling_flax_mbart.py#L1013" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: ndarray</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60">: typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None</span></span> </span><span class="comma 
cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60">: typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">train<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">params<span class="opacity-60">: dict = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dropout_rng<span class="opacity-60">: PRNGKey = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutput" >transformers.modeling_flax_outputs.FlaxBaseModelOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartPreTrainedModel.encode.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartPreTrainedModel.encode.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. 
Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/mbart#transformers.MBartTokenizer">MBartTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartPreTrainedModel.encode.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartPreTrainedModel.encode.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartPreTrainedModel.encode.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartPreTrainedModel.encode.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartPreTrainedModel.encode.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartPreTrainedModel.encode.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartPreTrainedModel.encode.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartPreTrainedModel.encode.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartPreTrainedModel.encode.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartPreTrainedModel.encode.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.FlaxMBartPreTrainedModel.encode.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutput" >transformers.modeling_flax_outputs.FlaxBaseModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 
dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutput" >transformers.modeling_flax_outputs.FlaxBaseModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<code>&lt;class 'transformers.models.mbart.configuration_mbart.MBartConfig'&gt;</code>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) — Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MBartTokenizer, FlaxMBartForConditionalGeneration <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxMBartForConditionalGeneration.from_pretrained(<span class="hljs-string">&quot;facebook/mbart-large-cc25&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MBartTokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/mbart-large-cc25&quot;</span>) <span 
class="hljs-meta">&gt;&gt;&gt; </span>text = <span class="hljs-string">&quot;My friends are cool but they eat too many carbs.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(text, max_length=<span class="hljs-number">1024</span>, return_tensors=<span class="hljs-string">&quot;jax&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>encoder_outputs = model.encode(**inputs)<!-- HTML_TAG_END --></pre></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxMBartPreTrainedModel.decode"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>decode</span></h4><!-- HTML_TAG_END --> <a id="transformers.FlaxMBartPreTrainedModel.decode" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxMBartPreTrainedModel.decode"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/mbart/modeling_flax_mbart.py#L1076" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_input_ids<span class="opacity-60"></span></span> 
</span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_outputs<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_attention_mask<span class="opacity-60">: typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_attention_mask<span class="opacity-60">: typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_position_ids<span class="opacity-60">: typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60">: dict = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">train<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">params<span class="opacity-60">: dict = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dropout_rng<span class="opacity-60">: PRNGKey = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions" >transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartPreTrainedModel.decode.decoder_input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartPreTrainedModel.decode.decoder_input_ids"><span><svg class="text-smd" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/mbart#transformers.MBartTokenizer">MBartTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>For translation and summarization training, <code>decoder_input_ids</code> should be provided. If no <code>decoder_input_ids</code> is provided, the model will create this tensor by shifting the <code>input_ids</code> to the right for denoising pre-training following the paper.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartPreTrainedModel.decode.encoder_outputs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartPreTrainedModel.decode.encoder_outputs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_outputs</strong> (<code>tuple(tuple(jnp.ndarray)</code>) &#x2014; Tuple consists of (<code>last_hidden_state</code>, <em>optional</em>: <code>hidden_states</code>, <em>optional</em>: <code>attentions</code>) <code>last_hidden_state</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) is a sequence of hidden-states at the output of the last layer of the encoder. 
Used in the cross-attention of the decoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartPreTrainedModel.decode.encoder_attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartPreTrainedModel.decode.encoder_attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartPreTrainedModel.decode.decoder_attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartPreTrainedModel.decode.decoder_attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.</p> <p>If you want to change padding behavior, you should modify it to your needs.
See diagram 1 in <a href="https://arxiv.org/abs/1910.13461" rel="nofollow">the paper</a> for more information on the default strategy.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartPreTrainedModel.decode.decoder_position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartPreTrainedModel.decode.decoder_position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartPreTrainedModel.decode.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartPreTrainedModel.decode.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>Dict[str, np.ndarray]</code>, <em>optional</em>, returned by <code>init_cache</code> or when passing previous <code>past_key_values</code>) &#x2014; Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast auto-regressive decoding. 
Pre-computed key and value hidden-states are of shape <em>[batch_size, max_length]</em>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartPreTrainedModel.decode.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartPreTrainedModel.decode.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartPreTrainedModel.decode.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartPreTrainedModel.decode.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartPreTrainedModel.decode.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartPreTrainedModel.decode.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.FlaxMBartPreTrainedModel.decode.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions" >transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions" >transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<code>&lt;class 'transformers.models.mbart.configuration_mbart.MBartConfig'&gt;</code>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) — Sequence of hidden-states at the output of the last layer of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(jnp.ndarray))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) — Tuple of <code>tuple(jnp.ndarray)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and optionally if 
<code>config.is_encoder_decoder=True</code> 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if <code>config.is_encoder_decoder=True</code> in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> and <code>config.add_cross_attention=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder’s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MBartTokenizer, FlaxMBartForConditionalGeneration <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxMBartForConditionalGeneration.from_pretrained(<span 
class="hljs-string">&quot;facebook/mbart-large-cc25&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MBartTokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/mbart-large-cc25&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>text = <span class="hljs-string">&quot;My friends are cool but they eat too many carbs.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(text, max_length=<span class="hljs-number">1024</span>, return_tensors=<span class="hljs-string">&quot;jax&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>encoder_outputs = model.encode(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>decoder_start_token_id = model.config.decoder_start_token_id <span class="hljs-meta">&gt;&gt;&gt; </span>decoder_input_ids = jnp.ones((inputs.input_ids.shape[<span class="hljs-number">0</span>], <span class="hljs-number">1</span>), dtype=<span class="hljs-string">&quot;i4&quot;</span>) * decoder_start_token_id <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model.decode(decoder_input_ids, encoder_outputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_decoder_hidden_states = outputs.last_hidden_state<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.FlaxMBartForConditionalGeneration" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartForConditionalGeneration"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>FlaxMBartForConditionalGeneration </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxMBartForConditionalGeneration"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 
3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">FlaxMBartForConditionalGeneration</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.FlaxMBartForConditionalGeneration" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxMBartForConditionalGeneration"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/mbart/modeling_flax_mbart.py#L1339" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60">: MBartConfig</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_shape<span class="opacity-60">: typing.Tuple[int] = (1, 1)</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">seed<span class="opacity-60">: int = 0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dtype<span class="opacity-60">: dtype = &lt;class &#39;jax._src.numpy.lax_numpy.float32&#39;&gt;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartForConditionalGeneration.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartForConditionalGeneration.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 
8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/mbart#transformers.MBartConfig">MBartConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartForConditionalGeneration.dtype" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartForConditionalGeneration.dtype"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>dtype</strong> (<code>jax.numpy.dtype</code>, <em>optional</em>, defaults to <code>jax.numpy.float32</code>) &#x2014; The data type of the computation. Can be one of <code>jax.numpy.float32</code>, <code>jax.numpy.float16</code> (on GPUs) and <code>jax.numpy.bfloat16</code> (on TPUs).</p> <p>This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If specified, all the computation will be performed with the given <code>dtype</code>.</p> <p><strong>Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.</strong></p> <p>If you wish to change the dtype of the model parameters, see <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.to_fp16">to_fp16()</a> and <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.to_bf16">to_bf16()</a>.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>The MBART Model with a language modeling head. Can be used for summarization. This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel">FlaxPreTrainedModel</a>.
Check the superclass documentation for the generic methods the library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads etc.)</p> <p>This model is also a Flax Linen <a href="https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html" rel="nofollow">flax.nn.Module</a> subclass. Use it as a regular Flax Module and refer to the Flax documentation for all matters related to general usage and behavior.</p> <p>Finally, this model supports inherent JAX features such as:</p> <ul><li><a href="https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit" rel="nofollow">Just-In-Time (JIT) compilation</a></li> <li><a href="https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation" rel="nofollow">Automatic Differentiation</a></li> <li><a href="https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap" rel="nofollow">Vectorization</a></li> <li><a href="https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap" rel="nofollow">Parallelization</a></li></ul> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxMBartPreTrainedModel.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.FlaxMBartPreTrainedModel.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxMBartPreTrainedModel.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z"
fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/mbart/modeling_flax_mbart.py#L1189" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: ndarray</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60">: typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_input_ids<span class="opacity-60">: typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_attention_mask<span class="opacity-60">: typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60">: typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_position_ids<span class="opacity-60">: typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">train<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">params<span class="opacity-60">: dict = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dropout_rng<span class="opacity-60">: PRNGKey = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput" >transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END 
--></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartPreTrainedModel.__call__.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartPreTrainedModel.__call__.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/mbart#transformers.MBartTokenizer">MBartTokenizer</a>. 
See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartPreTrainedModel.__call__.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartPreTrainedModel.__call__.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartPreTrainedModel.__call__.decoder_input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartPreTrainedModel.__call__.decoder_input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/mbart#transformers.MBartTokenizer">MBartTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>For translation and summarization training, <code>decoder_input_ids</code> should be provided. 
If no <code>decoder_input_ids</code> is provided, the model will create this tensor by shifting the <code>input_ids</code> to the right for denoising pre-training following the paper.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartPreTrainedModel.__call__.decoder_attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartPreTrainedModel.__call__.decoder_attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.</p> <p>If you want to change padding behavior, you should modify it to your needs. See diagram 1 in <a href="https://arxiv.org/abs/1910.13461" rel="nofollow">the paper</a> for more information on the default strategy.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartPreTrainedModel.__call__.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartPreTrainedModel.__call__.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings.
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartPreTrainedModel.__call__.decoder_position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartPreTrainedModel.__call__.decoder_position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartPreTrainedModel.__call__.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartPreTrainedModel.__call__.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartPreTrainedModel.__call__.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartPreTrainedModel.__call__.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartPreTrainedModel.__call__.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartPreTrainedModel.__call__.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.FlaxMBartPreTrainedModel.__call__.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput" >transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 
border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput" >transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/mbart#transformers.MBartConfig" >MBartConfig</a>) and inputs.</p> <ul> <li> <p><strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) — Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(jnp.ndarray))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) — Tuple of <code>tuple(jnp.ndarray)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder’s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) — Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when 
<code>config.output_hidden_states=True</code>) — Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <code>FlaxMBartPreTrainedModel</code>forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Summarization example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MBartTokenizer, FlaxMBartForConditionalGeneration, MBartConfig <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxMBartForConditionalGeneration.from_pretrained(<span class="hljs-string">&quot;facebook/mbart-large-cc25&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MBartTokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/mbart-large-cc25&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>ARTICLE_TO_SUMMARIZE = <span class="hljs-string">&quot;Meine Freunde sind cool, aber sie essen zu viel Kuchen.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=<span 
class="hljs-number">1024</span>, return_tensors=<span class="hljs-string">&quot;np&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Generate Summary</span> <span class="hljs-meta">&gt;&gt;&gt; </span>summary_ids = model.generate(inputs[<span class="hljs-string">&quot;input_ids&quot;</span>], num_beams=<span class="hljs-number">4</span>, max_length=<span class="hljs-number">5</span>).sequences <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(tokenizer.batch_decode(summary_ids, skip_special_tokens=<span class="hljs-literal">True</span>, clean_up_tokenization_spaces=<span class="hljs-literal">False</span>))<!-- HTML_TAG_END --></pre></div> <p>Mask filling example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MBartTokenizer, FlaxMBartForConditionalGeneration <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxMBartForConditionalGeneration.from_pretrained(<span class="hljs-string">&quot;facebook/mbart-large-cc25&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MBartTokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/mbart-large-cc25&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># de_DE is the language symbol id &lt;LID&gt; for German</span> <span class="hljs-meta">&gt;&gt;&gt; </span>TXT = <span class="hljs-string">&quot;&lt;/s&gt; Meine Freunde sind &lt;mask&gt; nett aber sie essen zu viel Kuchen. 
&lt;/s&gt; de_DE&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> jax <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = tokenizer([TXT], add_special_tokens=<span class="hljs-literal">False</span>, return_tensors=<span class="hljs-string">&quot;np&quot;</span>)[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>logits = model(input_ids).logits <span class="hljs-meta">&gt;&gt;&gt; </span>masked_index = (input_ids[<span class="hljs-number">0</span>] == tokenizer.mask_token_id).nonzero()[<span class="hljs-number">0</span>].item() <span class="hljs-meta">&gt;&gt;&gt; </span>probs = jax.nn.softmax(logits[<span class="hljs-number">0</span>, masked_index], axis=<span class="hljs-number">0</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>values, predictions = jax.lax.top_k(probs, k=<span class="hljs-number">5</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.decode(predictions).split()<!-- HTML_TAG_END --></pre></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxMBartPreTrainedModel.encode"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>encode</span></h4><!-- HTML_TAG_END --> <a id="transformers.FlaxMBartPreTrainedModel.encode" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxMBartPreTrainedModel.encode"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" 
href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/mbart/modeling_flax_mbart.py#L1013" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: ndarray</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60">: typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60">: typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">train<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">params<span class="opacity-60">: dict = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dropout_rng<span class="opacity-60">: PRNGKey = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutput" >transformers.modeling_flax_outputs.FlaxBaseModelOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartPreTrainedModel.encode.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartPreTrainedModel.encode.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 
8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/mbart#transformers.MBartTokenizer">MBartTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartPreTrainedModel.encode.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartPreTrainedModel.encode.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartPreTrainedModel.encode.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartPreTrainedModel.encode.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartPreTrainedModel.encode.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartPreTrainedModel.encode.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartPreTrainedModel.encode.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartPreTrainedModel.encode.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartPreTrainedModel.encode.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartPreTrainedModel.encode.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.FlaxMBartPreTrainedModel.encode.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutput" >transformers.modeling_flax_outputs.FlaxBaseModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 
dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutput" >transformers.modeling_flax_outputs.FlaxBaseModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<code>&lt;class 'transformers.models.mbart.configuration_mbart.MBartConfig'&gt;</code>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) — Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MBartTokenizer, FlaxMBartForConditionalGeneration <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxMBartForConditionalGeneration.from_pretrained(<span class="hljs-string">&quot;facebook/mbart-large-cc25&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MBartTokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/mbart-large-cc25&quot;</span>) <span 
class="hljs-meta">&gt;&gt;&gt; </span>text = <span class="hljs-string">&quot;My friends are cool but they eat too many carbs.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(text, max_length=<span class="hljs-number">1024</span>, return_tensors=<span class="hljs-string">&quot;jax&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>encoder_outputs = model.encode(**inputs)<!-- HTML_TAG_END --></pre></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxMBartForConditionalGeneration.decode"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>decode</span></h4><!-- HTML_TAG_END --> <a id="transformers.FlaxMBartForConditionalGeneration.decode" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxMBartForConditionalGeneration.decode"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/mbart/modeling_flax_mbart.py#L1343" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_input_ids<span 
class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_outputs<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_attention_mask<span class="opacity-60">: typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_attention_mask<span class="opacity-60">: typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_position_ids<span class="opacity-60">: typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60">: dict = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">train<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">params<span class="opacity-60">: dict = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dropout_rng<span class="opacity-60">: PRNGKey = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions" >transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartForConditionalGeneration.decode.decoder_input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.FlaxMBartForConditionalGeneration.decode.decoder_input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/mbart#transformers.MBartTokenizer">MBartTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>For translation and summarization training, <code>decoder_input_ids</code> should be provided. If no <code>decoder_input_ids</code> is provided, the model will create this tensor by shifting the <code>input_ids</code> to the right for denoising pre-training following the paper.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartForConditionalGeneration.decode.encoder_outputs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartForConditionalGeneration.decode.encoder_outputs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_outputs</strong> (<code>tuple(tuple(jnp.ndarray)</code>) &#x2014; Tuple consists of (<code>last_hidden_state</code>, <em>optional</em>: <code>hidden_states</code>, <em>optional</em>: <code>attentions</code>) <code>last_hidden_state</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) is a sequence of hidden-states at the output 
of the last layer of the encoder. Used in the cross-attention of the decoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartForConditionalGeneration.decode.encoder_attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartForConditionalGeneration.decode.encoder_attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartForConditionalGeneration.decode.decoder_attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartForConditionalGeneration.decode.decoder_attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. A causal mask will also be used by default.</p> <p>If you want to change padding behavior, you should modify it to your needs. 
See diagram 1 in <a href="https://arxiv.org/abs/1910.13461" rel="nofollow">the paper</a> for more information on the default strategy.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartForConditionalGeneration.decode.decoder_position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartForConditionalGeneration.decode.decoder_position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartForConditionalGeneration.decode.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartForConditionalGeneration.decode.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>Dict[str, np.ndarray]</code>, <em>optional</em>, returned by <code>init_cache</code> or when passing previous <code>past_key_values</code>) &#x2014; Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast auto-regressive decoding. 
Pre-computed key and value hidden-states are of shape <em>[batch_size, max_length]</em>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartForConditionalGeneration.decode.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartForConditionalGeneration.decode.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartForConditionalGeneration.decode.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartForConditionalGeneration.decode.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartForConditionalGeneration.decode.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartForConditionalGeneration.decode.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.FlaxMBartForConditionalGeneration.decode.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions" >transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions" >transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<code>&lt;class 'transformers.models.mbart.configuration_mbart.MBartConfig'&gt;</code>) and inputs.</p> <ul> <li> <p><strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) — Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> 
(<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Cross attentions weights after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(jnp.ndarray))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) — Tuple of <code>jnp.ndarray</code> tuples of length <code>config.n_layers</code>, with each tuple containing the cached key, value states of the self-attention and the cross-attention layers if model is used in encoder-decoder setting. Only relevant if <code>config.is_decoder = True</code>.</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MBartTokenizer, FlaxMBartForConditionalGeneration <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxMBartForConditionalGeneration.from_pretrained(<span class="hljs-string">&quot;facebook/mbart-large-cc25&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MBartTokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/mbart-large-cc25&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>text = <span class="hljs-string">&quot;My friends are cool but they eat too many carbs.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; 
</span>inputs = tokenizer(text, max_length=<span class="hljs-number">1024</span>, return_tensors=<span class="hljs-string">&quot;jax&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>encoder_outputs = model.encode(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>decoder_start_token_id = model.config.decoder_start_token_id <span class="hljs-meta">&gt;&gt;&gt; </span>decoder_input_ids = jnp.ones((inputs.input_ids.shape[<span class="hljs-number">0</span>], <span class="hljs-number">1</span>), dtype=<span class="hljs-string">&quot;i4&quot;</span>) * decoder_start_token_id <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model.decode(decoder_input_ids, encoder_outputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.FlaxMBartForSequenceClassification" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartForSequenceClassification"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>FlaxMBartForSequenceClassification </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxMBartForSequenceClassification"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">FlaxMBartForSequenceClassification</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.FlaxMBartForSequenceClassification" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxMBartForSequenceClassification"><svg class="" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/mbart/modeling_flax_mbart.py#L1652" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60">: MBartConfig</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_shape<span class="opacity-60">: typing.Tuple[int] = (1, 1)</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">seed<span class="opacity-60">: int = 0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dtype<span class="opacity-60">: dtype = &lt;class &#39;jax._src.numpy.lax_numpy.float32&#39;&gt;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartForSequenceClassification.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartForSequenceClassification.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 
0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/mbart#transformers.MBartConfig">MBartConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartForSequenceClassification.dtype" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartForSequenceClassification.dtype"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>dtype</strong> (<code>jax.numpy.dtype</code>, <em>optional</em>, defaults to <code>jax.numpy.float32</code>) &#x2014; The data type of the computation. Can be one of <code>jax.numpy.float32</code>, <code>jax.numpy.float16</code> (on GPUs) and <code>jax.numpy.bfloat16</code> (on TPUs).</p> <p>This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If specified, all the computation will be performed with the given <code>dtype</code>.</p> <p><strong>Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.</strong></p> <p>If you wish to change the dtype of the model parameters, see <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.to_fp16">to_fp16()</a> and <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.to_bf16">to_bf16()</a>.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>MBart model with a sequence classification head on top (a linear layer on top of the pooled output), e.g. for GLUE tasks.</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel">FlaxPreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads etc.)</p> <p>This model is also a Flax Linen <a href="https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html" rel="nofollow">flax.nn.Module</a> subclass. 
Use it as a regular Flax Module and refer to the Flax documentation for all matters related to general usage and behavior.</p> <p>Finally, this model supports inherent JAX features such as:</p> <ul><li><a href="https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit" rel="nofollow">Just-In-Time (JIT) compilation</a></li> <li><a href="https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation" rel="nofollow">Automatic Differentiation</a></li> <li><a href="https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap" rel="nofollow">Vectorization</a></li> <li><a href="https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap" rel="nofollow">Parallelization</a></li></ul> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxMBartPreTrainedModel.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.FlaxMBartPreTrainedModel.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxMBartPreTrainedModel.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/mbart/modeling_flax_mbart.py#L1189" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> 
<span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: ndarray</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60">: typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_input_ids<span class="opacity-60">: typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_attention_mask<span class="opacity-60">: typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60">: typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_position_ids<span class="opacity-60">: typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">train<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">params<span class="opacity-60">: dict = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dropout_rng<span class="opacity-60">: PRNGKey = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSeq2SeqSequenceClassifierOutput" >transformers.modeling_flax_outputs.FlaxSeq2SeqSequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex 
space-x-1.5 items-start"><a id="transformers.FlaxMBartPreTrainedModel.__call__.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartPreTrainedModel.__call__.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/mbart#transformers.MBartTokenizer">MBartTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartPreTrainedModel.__call__.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartPreTrainedModel.__call__.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartPreTrainedModel.__call__.decoder_input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartPreTrainedModel.__call__.decoder_input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/mbart#transformers.MBartTokenizer">MBartTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>For translation and summarization training, <code>decoder_input_ids</code> should be provided. 
If no <code>decoder_input_ids</code> is provided, the model will create this tensor by shifting the <code>input_ids</code> to the right for denoising pre-training following the paper.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartPreTrainedModel.__call__.decoder_attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartPreTrainedModel.__call__.decoder_attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.</p> <p>If you want to change padding behavior, you should modify to your needs. See diagram 1 in <a href="https://arxiv.org/abs/1910.13461" rel="nofollow">the paper</a> for more information on the default strategy.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartPreTrainedModel.__call__.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartPreTrainedModel.__call__.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartPreTrainedModel.__call__.decoder_position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartPreTrainedModel.__call__.decoder_position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartPreTrainedModel.__call__.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartPreTrainedModel.__call__.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartPreTrainedModel.__call__.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartPreTrainedModel.__call__.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartPreTrainedModel.__call__.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartPreTrainedModel.__call__.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.FlaxMBartPreTrainedModel.__call__.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSeq2SeqSequenceClassifierOutput" >transformers.modeling_flax_outputs.FlaxSeq2SeqSequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span 
class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSeq2SeqSequenceClassifierOutput" >transformers.modeling_flax_outputs.FlaxSeq2SeqSequenceClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/mbart#transformers.MBartConfig" >MBartConfig</a>) and inputs.</p> <ul> <li> <p><strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, config.num_labels)</code>) — Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(jnp.ndarray))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) — Tuple of <code>tuple(jnp.ndarray)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder’s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) — Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when 
<code>config.output_hidden_states=True</code>) — Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <code>FlaxMBartPreTrainedModel</code> forward method overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for the forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre- and post-processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MBartTokenizer, FlaxMBartForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MBartTokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/mbart-large-cc25&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxMBartForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;facebook/mbart-large-cc25&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;jax&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = 
outputs.logits<!-- HTML_TAG_END --></pre></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxMBartPreTrainedModel.encode"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>encode</span></h4><!-- HTML_TAG_END --> <a id="transformers.FlaxMBartPreTrainedModel.encode" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxMBartPreTrainedModel.encode"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/mbart/modeling_flax_mbart.py#L1013" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: ndarray</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60">: typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">position_ids<span class="opacity-60">: typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">train<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">params<span class="opacity-60">: dict = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dropout_rng<span class="opacity-60">: PRNGKey = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutput" >transformers.modeling_flax_outputs.FlaxBaseModelOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartPreTrainedModel.encode.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartPreTrainedModel.encode.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. 
Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/mbart#transformers.MBartTokenizer">MBartTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartPreTrainedModel.encode.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartPreTrainedModel.encode.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartPreTrainedModel.encode.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartPreTrainedModel.encode.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartPreTrainedModel.encode.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartPreTrainedModel.encode.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartPreTrainedModel.encode.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartPreTrainedModel.encode.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartPreTrainedModel.encode.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartPreTrainedModel.encode.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.FlaxMBartPreTrainedModel.encode.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutput" >transformers.modeling_flax_outputs.FlaxBaseModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 
dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutput" >transformers.modeling_flax_outputs.FlaxBaseModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<code>&lt;class 'transformers.models.mbart.configuration_mbart.MBartConfig'&gt;</code>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) — Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MBartTokenizer, FlaxMBartForConditionalGeneration <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxMBartForConditionalGeneration.from_pretrained(<span class="hljs-string">&quot;facebook/mbart-large-cc25&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MBartTokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/mbart-large-cc25&quot;</span>) <span 
class="hljs-meta">&gt;&gt;&gt; </span>text = <span class="hljs-string">&quot;My friends are cool but they eat too many carbs.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(text, max_length=<span class="hljs-number">1024</span>, return_tensors=<span class="hljs-string">&quot;jax&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>encoder_outputs = model.encode(**inputs)<!-- HTML_TAG_END --></pre></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxMBartPreTrainedModel.decode"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>decode</span></h4><!-- HTML_TAG_END --> <a id="transformers.FlaxMBartPreTrainedModel.decode" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxMBartPreTrainedModel.decode"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/mbart/modeling_flax_mbart.py#L1076" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_input_ids<span class="opacity-60"></span></span> 
</span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_outputs<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_attention_mask<span class="opacity-60">: typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_attention_mask<span class="opacity-60">: typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_position_ids<span class="opacity-60">: typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60">: dict = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">train<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">params<span class="opacity-60">: dict = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dropout_rng<span class="opacity-60">: PRNGKey = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions" >transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartPreTrainedModel.decode.decoder_input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartPreTrainedModel.decode.decoder_input_ids"><span><svg class="text-smd" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/mbart#transformers.MBartTokenizer">MBartTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>For translation and summarization training, <code>decoder_input_ids</code> should be provided. If no <code>decoder_input_ids</code> is provided, the model will create this tensor by shifting the <code>input_ids</code> to the right for denoising pre-training following the paper.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartPreTrainedModel.decode.encoder_outputs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartPreTrainedModel.decode.encoder_outputs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_outputs</strong> (<code>tuple(tuple(jnp.ndarray)</code>) &#x2014; Tuple consists of (<code>last_hidden_state</code>, <em>optional</em>: <code>hidden_states</code>, <em>optional</em>: <code>attentions</code>) <code>last_hidden_state</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) is a sequence of hidden-states at the output of the last layer of the encoder. 
Used in the cross-attention of the decoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartPreTrainedModel.decode.encoder_attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartPreTrainedModel.decode.encoder_attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartPreTrainedModel.decode.decoder_attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartPreTrainedModel.decode.decoder_attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.</p> <p>If you want to change padding behavior, you should modify it to your needs.
See diagram 1 in <a href="https://arxiv.org/abs/1910.13461" rel="nofollow">the paper</a> for more information on the default strategy.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartPreTrainedModel.decode.decoder_position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartPreTrainedModel.decode.decoder_position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartPreTrainedModel.decode.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartPreTrainedModel.decode.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>Dict[str, np.ndarray]</code>, <em>optional</em>, returned by <code>init_cache</code> or when passing previous <code>past_key_values</code>) &#x2014; Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast auto-regressive decoding. 
Pre-computed key and value hidden-states are of shape <em>[batch_size, max_length]</em>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartPreTrainedModel.decode.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartPreTrainedModel.decode.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartPreTrainedModel.decode.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartPreTrainedModel.decode.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartPreTrainedModel.decode.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartPreTrainedModel.decode.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.FlaxMBartPreTrainedModel.decode.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions" >transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions" >transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<code>&lt;class 'transformers.models.mbart.configuration_mbart.MBartConfig'&gt;</code>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) — Sequence of hidden-states at the output of the last layer of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(jnp.ndarray))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) — Tuple of <code>tuple(jnp.ndarray)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and optionally if 
<code>config.is_encoder_decoder=True</code> 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if <code>config.is_encoder_decoder=True</code> in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> and <code>config.add_cross_attention=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder’s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MBartTokenizer, FlaxMBartForConditionalGeneration <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxMBartForConditionalGeneration.from_pretrained(<span 
class="hljs-string">&quot;facebook/mbart-large-cc25&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MBartTokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/mbart-large-cc25&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>text = <span class="hljs-string">&quot;My friends are cool but they eat too many carbs.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(text, max_length=<span class="hljs-number">1024</span>, return_tensors=<span class="hljs-string">&quot;jax&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>encoder_outputs = model.encode(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>decoder_start_token_id = model.config.decoder_start_token_id <span class="hljs-meta">&gt;&gt;&gt; </span>decoder_input_ids = jnp.ones((inputs.input_ids.shape[<span class="hljs-number">0</span>], <span class="hljs-number">1</span>), dtype=<span class="hljs-string">&quot;i4&quot;</span>) * decoder_start_token_id <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model.decode(decoder_input_ids, encoder_outputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_decoder_hidden_states = outputs.last_hidden_state<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.FlaxMBartForQuestionAnswering" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartForQuestionAnswering"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>FlaxMBartForQuestionAnswering </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxMBartForQuestionAnswering"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" 
fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">FlaxMBartForQuestionAnswering</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.FlaxMBartForQuestionAnswering" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxMBartForQuestionAnswering"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/mbart/modeling_flax_mbart.py#L1740" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60">: MBartConfig</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_shape<span class="opacity-60">: typing.Tuple[int] = (1, 1)</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">seed<span class="opacity-60">: int = 0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dtype<span class="opacity-60">: dtype = &lt;class &#39;jax._src.numpy.lax_numpy.float32&#39;&gt;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartForQuestionAnswering.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartForQuestionAnswering.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 
11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/mbart#transformers.MBartConfig">MBartConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartForQuestionAnswering.dtype" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartForQuestionAnswering.dtype"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>dtype</strong> (<code>jax.numpy.dtype</code>, <em>optional</em>, defaults to <code>jax.numpy.float32</code>) &#x2014; The data type of the computation. Can be one of <code>jax.numpy.float32</code>, <code>jax.numpy.float16</code> (on GPUs) and <code>jax.numpy.bfloat16</code> (on TPUs).</p> <p>This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If specified all the computation will be performed with the given <code>dtype</code>.</p> <p><strong>Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.</strong></p> <p>If you wish to change the dtype of the model parameters, see <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.to_fp16">to_fp16()</a> and <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.to_bf16">to_bf16()</a>.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>MBart Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layer on top of the hidden-states output to compute <code>span start logits</code> and <code>span end logits</code>).</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel">FlaxPreTrainedModel</a>. 
Check the superclass documentation for the generic methods the library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads etc.)</p> <p>This model is also a Flax Linen <a href="https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html" rel="nofollow">flax.nn.Module</a> subclass. Use it as a regular Flax Module and refer to the Flax documentation for all matters related to general usage and behavior.</p> <p>Finally, this model supports inherent JAX features such as:</p> <ul><li><a href="https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit" rel="nofollow">Just-In-Time (JIT) compilation</a></li> <li><a href="https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation" rel="nofollow">Automatic Differentiation</a></li> <li><a href="https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap" rel="nofollow">Vectorization</a></li> <li><a href="https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap" rel="nofollow">Parallelization</a></li></ul> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxMBartPreTrainedModel.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.FlaxMBartPreTrainedModel.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxMBartPreTrainedModel.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z"
fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/mbart/modeling_flax_mbart.py#L1189" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: ndarray</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60">: typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_input_ids<span class="opacity-60">: typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_attention_mask<span class="opacity-60">: typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60">: typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_position_ids<span class="opacity-60">: typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">train<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">params<span class="opacity-60">: dict = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dropout_rng<span class="opacity-60">: PRNGKey = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSeq2SeqQuestionAnsweringModelOutput" >transformers.modeling_flax_outputs.FlaxSeq2SeqQuestionAnsweringModelOutput</a> or 
<code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartPreTrainedModel.__call__.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartPreTrainedModel.__call__.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/mbart#transformers.MBartTokenizer">MBartTokenizer</a>. 
See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartPreTrainedModel.__call__.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartPreTrainedModel.__call__.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartPreTrainedModel.__call__.decoder_input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartPreTrainedModel.__call__.decoder_input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/mbart#transformers.MBartTokenizer">MBartTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>For translation and summarization training, <code>decoder_input_ids</code> should be provided. 
If no <code>decoder_input_ids</code> is provided, the model will create this tensor by shifting the <code>input_ids</code> to the right for denoising pre-training following the paper.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartPreTrainedModel.__call__.decoder_attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartPreTrainedModel.__call__.decoder_attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.</p> <p>If you want to change padding behavior, you should modify it to your needs. See diagram 1 in <a href="https://arxiv.org/abs/1910.13461" rel="nofollow">the paper</a> for more information on the default strategy.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartPreTrainedModel.__call__.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartPreTrainedModel.__call__.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence token in the position embeddings.
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartPreTrainedModel.__call__.decoder_position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartPreTrainedModel.__call__.decoder_position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartPreTrainedModel.__call__.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartPreTrainedModel.__call__.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartPreTrainedModel.__call__.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartPreTrainedModel.__call__.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartPreTrainedModel.__call__.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartPreTrainedModel.__call__.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.FlaxMBartPreTrainedModel.__call__.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSeq2SeqQuestionAnsweringModelOutput" >transformers.modeling_flax_outputs.FlaxSeq2SeqQuestionAnsweringModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> 
<span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSeq2SeqQuestionAnsweringModelOutput" >transformers.modeling_flax_outputs.FlaxSeq2SeqQuestionAnsweringModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/mbart#transformers.MBartConfig" >MBartConfig</a>) and inputs.</p> <ul> <li> <p><strong>start_logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) — Span-start scores (before SoftMax).</p> </li> <li> <p><strong>end_logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) — Span-end scores (before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(jnp.ndarray))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) — Tuple of <code>tuple(jnp.ndarray)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder’s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) — Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> 
<p><strong>encoder_hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <code>FlaxMBartPreTrainedModel</code> forward method overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for the forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this, since the former takes care of running the pre- and post-processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MBartTokenizer, FlaxMBartForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MBartTokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/mbart-large-cc25&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxMBartForQuestionAnswering.from_pretrained(<span class="hljs-string">&quot;facebook/mbart-large-cc25&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>question, text = <span class="hljs-string">&quot;Who was Jim Henson?&quot;</span>, <span class="hljs-string">&quot;Jim 
Henson was a nice puppet&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(question, text, return_tensors=<span class="hljs-string">&quot;jax&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>start_scores = outputs.start_logits <span class="hljs-meta">&gt;&gt;&gt; </span>end_scores = outputs.end_logits<!-- HTML_TAG_END --></pre></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxMBartPreTrainedModel.encode"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>encode</span></h4><!-- HTML_TAG_END --> <a id="transformers.FlaxMBartPreTrainedModel.encode" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxMBartPreTrainedModel.encode"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/mbart/modeling_flax_mbart.py#L1013" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: ndarray</span></span> 
</span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60">: typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60">: typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">train<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">params<span class="opacity-60">: dict = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dropout_rng<span class="opacity-60">: PRNGKey = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutput" >transformers.modeling_flax_outputs.FlaxBaseModelOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartPreTrainedModel.encode.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartPreTrainedModel.encode.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/mbart#transformers.MBartTokenizer">MBartTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartPreTrainedModel.encode.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartPreTrainedModel.encode.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartPreTrainedModel.encode.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartPreTrainedModel.encode.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartPreTrainedModel.encode.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartPreTrainedModel.encode.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartPreTrainedModel.encode.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartPreTrainedModel.encode.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartPreTrainedModel.encode.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartPreTrainedModel.encode.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.FlaxMBartPreTrainedModel.encode.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutput" >transformers.modeling_flax_outputs.FlaxBaseModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 
dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutput" >transformers.modeling_flax_outputs.FlaxBaseModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<code>&lt;class 'transformers.models.mbart.configuration_mbart.MBartConfig'&gt;</code>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) — Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MBartTokenizer, FlaxMBartForConditionalGeneration <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxMBartForConditionalGeneration.from_pretrained(<span class="hljs-string">&quot;facebook/mbart-large-cc25&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MBartTokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/mbart-large-cc25&quot;</span>) <span 
class="hljs-meta">&gt;&gt;&gt; </span>text = <span class="hljs-string">&quot;My friends are cool but they eat too many carbs.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(text, max_length=<span class="hljs-number">1024</span>, return_tensors=<span class="hljs-string">&quot;jax&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>encoder_outputs = model.encode(**inputs)<!-- HTML_TAG_END --></pre></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxMBartPreTrainedModel.decode"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>decode</span></h4><!-- HTML_TAG_END --> <a id="transformers.FlaxMBartPreTrainedModel.decode" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxMBartPreTrainedModel.decode"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/mbart/modeling_flax_mbart.py#L1076" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_input_ids<span class="opacity-60"></span></span> 
</span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_outputs<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_attention_mask<span class="opacity-60">: typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_attention_mask<span class="opacity-60">: typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_position_ids<span class="opacity-60">: typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60">: dict = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">train<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">params<span class="opacity-60">: dict = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dropout_rng<span class="opacity-60">: PRNGKey = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions" >transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartPreTrainedModel.decode.decoder_input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartPreTrainedModel.decode.decoder_input_ids"><span><svg class="text-smd" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/mbart#transformers.MBartTokenizer">MBartTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>For translation and summarization training, <code>decoder_input_ids</code> should be provided. If no <code>decoder_input_ids</code> is provided, the model will create this tensor by shifting the <code>input_ids</code> to the right for denoising pre-training following the paper.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartPreTrainedModel.decode.encoder_outputs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartPreTrainedModel.decode.encoder_outputs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_outputs</strong> (<code>tuple(tuple(jnp.ndarray)</code>) &#x2014; Tuple consists of (<code>last_hidden_state</code>, <em>optional</em>: <code>hidden_states</code>, <em>optional</em>: <code>attentions</code>) <code>last_hidden_state</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) is a sequence of hidden-states at the output of the last layer of the encoder. 
Used in the cross-attention of the decoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartPreTrainedModel.decode.encoder_attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartPreTrainedModel.decode.encoder_attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartPreTrainedModel.decode.decoder_attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartPreTrainedModel.decode.decoder_attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.</p> <p>If you want to change padding behavior, you should modify it to your needs. 
See diagram 1 in <a href="https://arxiv.org/abs/1910.13461" rel="nofollow">the paper</a> for more information on the default strategy.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartPreTrainedModel.decode.decoder_position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartPreTrainedModel.decode.decoder_position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartPreTrainedModel.decode.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartPreTrainedModel.decode.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>Dict[str, np.ndarray]</code>, <em>optional</em>, returned by <code>init_cache</code> or when passing previous <code>past_key_values</code>) &#x2014; Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast auto-regressive decoding. 
Pre-computed key and value hidden-states are of shape <em>[batch_size, max_length]</em>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartPreTrainedModel.decode.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartPreTrainedModel.decode.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartPreTrainedModel.decode.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartPreTrainedModel.decode.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMBartPreTrainedModel.decode.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMBartPreTrainedModel.decode.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.FlaxMBartPreTrainedModel.decode.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions" >transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions" >transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<code>&lt;class 'transformers.models.mbart.configuration_mbart.MBartConfig'&gt;</code>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) — Sequence of hidden-states at the output of the last layer of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(jnp.ndarray))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) — Tuple of <code>tuple(jnp.ndarray)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and optionally if 
<code>config.is_encoder_decoder=True</code> 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if <code>config.is_encoder_decoder=True</code> in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> and <code>config.add_cross_attention=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder’s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MBartTokenizer, FlaxMBartForConditionalGeneration <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxMBartForConditionalGeneration.from_pretrained(<span 
class="hljs-string">&quot;facebook/mbart-large-cc25&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MBartTokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/mbart-large-cc25&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>text = <span class="hljs-string">&quot;My friends are cool but they eat too many carbs.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(text, max_length=<span class="hljs-number">1024</span>, return_tensors=<span class="hljs-string">&quot;jax&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>encoder_outputs = model.encode(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>decoder_start_token_id = model.config.decoder_start_token_id <span class="hljs-meta">&gt;&gt;&gt; </span>decoder_input_ids = jnp.ones((inputs.input_ids.shape[<span class="hljs-number">0</span>], <span class="hljs-number">1</span>), dtype=<span class="hljs-string">&quot;i4&quot;</span>) * decoder_start_token_id <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model.decode(decoder_input_ids, encoder_outputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_decoder_hidden_states = outputs.last_hidden_state<!-- HTML_TAG_END --></pre></div></div></div> <script type="module" data-hydrate="1h4ke6b"> import { start } from "/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"; start({ target: document.querySelector('[data-hydrate="1h4ke6b"]').parentNode, paths: {"base":"/docs/transformers/pr_16143/en","assets":"/docs/transformers/pr_16143/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"), import("/docs/transformers/pr_16143/en/_app/pages/model_doc/mbart.mdx-4e986a75.js") ], params: {} } }); </script>
208
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en
hf_public_repos/doc-build-dev/transformers/pr_16143/en/model_doc/rembert.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;rembert&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;overview&quot;,&quot;title&quot;:&quot;Overview&quot;},{&quot;local&quot;:&quot;transformers.RemBertConfig&quot;,&quot;title&quot;:&quot;RemBertConfig&quot;},{&quot;local&quot;:&quot;transformers.RemBertTokenizer&quot;,&quot;title&quot;:&quot;RemBertTokenizer&quot;},{&quot;local&quot;:&quot;transformers.RemBertTokenizerFast&quot;,&quot;title&quot;:&quot;RemBertTokenizerFast&quot;},{&quot;local&quot;:&quot;transformers.RemBertModel&quot;,&quot;title&quot;:&quot;RemBertModel&quot;},{&quot;local&quot;:&quot;transformers.RemBertForCausalLM&quot;,&quot;title&quot;:&quot;RemBertForCausalLM&quot;},{&quot;local&quot;:&quot;transformers.RemBertForMaskedLM&quot;,&quot;title&quot;:&quot;RemBertForMaskedLM&quot;},{&quot;local&quot;:&quot;transformers.RemBertForSequenceClassification&quot;,&quot;title&quot;:&quot;RemBertForSequenceClassification&quot;},{&quot;local&quot;:&quot;transformers.RemBertForMultipleChoice&quot;,&quot;title&quot;:&quot;RemBertForMultipleChoice&quot;},{&quot;local&quot;:&quot;transformers.RemBertForTokenClassification&quot;,&quot;title&quot;:&quot;RemBertForTokenClassification&quot;},{&quot;local&quot;:&quot;transformers.RemBertForQuestionAnswering&quot;,&quot;title&quot;:&quot;RemBertForQuestionAnswering&quot;},{&quot;local&quot;:&quot;transformers.TFRemBertModel&quot;,&quot;title&quot;:&quot;TFRemBertModel&quot;},{&quot;local&quot;:&quot;transformers.TFRemBertForMaskedLM&quot;,&quot;title&quot;:&quot;TFRemBertForMaskedLM&quot;},{&quot;local&quot;:&quot;transformers.TFRemBertForCausalLM&quot;,&quot;title&quot;:&quot;TFRemBertForCausalLM&quot;},{&quot;local&quot;:&quot;transformers.TFRemBertForSequenceClassification&quot;,&quot;title&quot;:&quot;TFRemBertForSequenceClassification&quot;},{&quot;local&quot;:&quot;transformers.TFRemBertForMultipleChoice&quot;,&quot;title&quot;:&quot;TFRemBertForMultipleChoice&quot;},{&quot;local&quot;:&quot;transformers.TFRemBertForTokenClassification&quot;,&quot;title&quot;:&quot;TFRemBertForTokenClassification&quot;},{&quot;local&quot;:&quot;transformers.TFRemBertForQuestionAnswering&quot;,&quot;title&quot;:&quot;TFRemBertForQuestionAnswering&quot;}],&quot;title&quot;:&quot;RemBERT&quot;}" data-svelte="svelte-1phssyn"> <link rel="stylesheet" href="/docs/transformers/pr_16143/en/_app/assets/pages/__layout.svelte-a5c8879b.css"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/vendor-4833417e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/paths-4b3c6e7e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/model_doc/rembert.mdx-8524b264.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Tip-fffd6df1.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Docstring-4f315ed9.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/IconCopyLink-4b81c553.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CodeBlock-6a3d1b46.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CopyButton-dacfbfaf.js"> <h1 class="relative group"><a id="rembert" 
class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#rembert"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>RemBERT </span></h1> <h2 class="relative group"><a id="overview" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#overview"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Overview </span></h2> <p>The RemBERT model was proposed in <a href="https://arxiv.org/abs/2010.12821" rel="nofollow">Rethinking Embedding Coupling in Pre-trained Language Models</a> by Hyung Won Chung, Thibault Févry, Henry Tsai, Melvin Johnson, Sebastian Ruder.</p> <p>The abstract from the paper is the following:</p> <p><em>We re-evaluate the standard practice of sharing weights between input and output embeddings in state-of-the-art pre-trained language models. We show that decoupled embeddings provide increased modeling flexibility, allowing us to significantly improve the efficiency of parameter allocation in the input embedding of multilingual models. By reallocating the input embedding parameters in the Transformer layers, we achieve dramatically better performance on standard natural language understanding tasks with the same number of parameters during fine-tuning. We also show that allocating additional capacity to the output embedding provides benefits to the model that persist through the fine-tuning stage even though the output embedding is discarded after pre-training. Our analysis shows that larger output embeddings prevent the model’s last layers from overspecializing to the pre-training task and encourage Transformer representations to be more general and more transferable to other tasks and languages. 
Harnessing these findings, we are able to train models that achieve strong performance on the XTREME benchmark without increasing the number of parameters at the fine-tuning stage.</em></p> <p>Tips:</p> <p>For fine-tuning, RemBERT can be thought of as a bigger version of mBERT with an ALBERT-like factorization of the embedding layer. The embeddings are not tied in pre-training, in contrast with BERT, which enables smaller input embeddings (preserved during fine-tuning) and bigger output embeddings (discarded at fine-tuning). The tokenizer is also similar to the Albert one rather than the BERT one.</p> <h2 class="relative group"><a id="transformers.RemBertConfig" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertConfig"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>RemBertConfig </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.RemBertConfig"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">RemBertConfig</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.RemBertConfig" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.RemBertConfig"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 
28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/rembert/configuration_rembert.py#L29" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">vocab_size<span class="opacity-60"> = 250300</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_size<span class="opacity-60"> = 1152</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_hidden_layers<span class="opacity-60"> = 32</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_attention_heads<span class="opacity-60"> = 18</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_embedding_size<span class="opacity-60"> = 256</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_embedding_size<span class="opacity-60"> = 1664</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">intermediate_size<span class="opacity-60"> = 4608</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_act<span class="opacity-60"> = &#39;gelu&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_dropout_prob<span class="opacity-60"> = 0.0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_probs_dropout_prob<span class="opacity-60"> = 0.0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">classifier_dropout_prob<span class="opacity-60"> = 0.1</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_position_embeddings<span class="opacity-60"> = 512</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">type_vocab_size<span class="opacity-60"> = 2</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">initializer_range<span class="opacity-60"> = 0.02</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">layer_norm_eps<span class="opacity-60"> = 1e-12</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_cache<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">is_encoder_decoder<span class="opacity-60"> = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_token_id<span class="opacity-60"> = 0</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">bos_token_id<span class="opacity-60"> = 312</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eos_token_id<span class="opacity-60"> = 313</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertConfig.vocab_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertConfig.vocab_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 250300) &#x2014; Vocabulary size of the RemBERT model. Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed when calling <a href="/docs/transformers/pr_16143/en/model_doc/rembert#transformers.RemBertModel">RemBertModel</a> or <a href="/docs/transformers/pr_16143/en/model_doc/rembert#transformers.TFRemBertModel">TFRemBertModel</a>. Vocabulary size of the model. 
Defines the different tokens that can be represented by the <em>inputs_ids</em> passed to the forward method of <a href="/docs/transformers/pr_16143/en/model_doc/rembert#transformers.RemBertModel">RemBertModel</a>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertConfig.hidden_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertConfig.hidden_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_size</strong> (<code>int</code>, <em>optional</em>, defaults to 1152) &#x2014; Dimensionality of the encoder layers and the pooler layer.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertConfig.num_hidden_layers" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertConfig.num_hidden_layers"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_hidden_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 32) &#x2014; Number of hidden layers in the Transformer encoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertConfig.num_attention_heads" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertConfig.num_attention_heads"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path 
d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 18) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertConfig.input_embedding_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertConfig.input_embedding_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_embedding_size</strong> (<code>int</code>, <em>optional</em>, defaults to 256) &#x2014; Dimensionality of the input embeddings.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertConfig.output_embedding_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertConfig.output_embedding_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_embedding_size</strong> (<code>int</code>, <em>optional</em>, defaults to 1664) &#x2014; Dimensionality of the output embeddings.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span 
class="group flex space-x-1.5 items-start"><a id="transformers.RemBertConfig.intermediate_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertConfig.intermediate_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>intermediate_size</strong> (<code>int</code>, <em>optional</em>, defaults to 4608) &#x2014; Dimensionality of the &#x201C;intermediate&#x201D; (i.e., feed-forward) layer in the Transformer encoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertConfig.hidden_act" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertConfig.hidden_act"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_act</strong> (<code>str</code> or <code>function</code>, <em>optional</em>, defaults to <code>&quot;gelu&quot;</code>) &#x2014; The non-linear activation function (function or string) in the encoder and pooler. 
If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;selu&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertConfig.hidden_dropout_prob" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertConfig.hidden_dropout_prob"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0) &#x2014; The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertConfig.attention_probs_dropout_prob" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertConfig.attention_probs_dropout_prob"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_probs_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0) &#x2014; The dropout ratio for the attention probabilities.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertConfig.classifier_dropout_prob" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertConfig.classifier_dropout_prob"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>classifier_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout ratio for the classifier layer when fine-tuning.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertConfig.max_position_embeddings" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertConfig.max_position_embeddings"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_position_embeddings</strong> (<code>int</code>, <em>optional</em>, defaults to 512) &#x2014; The maximum sequence length that this model might ever be used with. 
Typically set this to something large just in case (e.g., 512 or 1024 or 2048).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertConfig.type_vocab_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertConfig.type_vocab_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>type_vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 2) &#x2014; The vocabulary size of the <code>token_type_ids</code> passed when calling <a href="/docs/transformers/pr_16143/en/model_doc/rembert#transformers.RemBertModel">RemBertModel</a> or <a href="/docs/transformers/pr_16143/en/model_doc/rembert#transformers.TFRemBertModel">TFRemBertModel</a>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertConfig.initializer_range" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertConfig.initializer_range"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>initializer_range</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertConfig.layer_norm_eps" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertConfig.layer_norm_eps"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>layer_norm_eps</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-12) &#x2014; The epsilon used by the layer normalization layers.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertConfig.use_cache" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertConfig.use_cache"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not the model should return the last key/values attentions (not used by all models). Only relevant if <code>config.is_decoder=True</code>.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>This is the configuration class to store the configuration of a <a href="/docs/transformers/pr_16143/en/model_doc/rembert#transformers.RemBertModel">RemBertModel</a>. It is used to instantiate an RemBERT model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the remert-large architecture.</p> <p>Configuration objects inherit from <a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a> and can be used to control the model outputs. 
Read the documentation from <a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a> for more information.</p> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RemBertModel, RemBertConfig

<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a RemBERT rembert style configuration</span>
<span class="hljs-meta">&gt;&gt;&gt; </span>configuration = RemBertConfig()

<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a model from the rembert style configuration</span>
<span class="hljs-meta">&gt;&gt;&gt; </span>model = RemBertModel(configuration)

<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span>
<span class="hljs-meta">&gt;&gt;&gt; </span>configuration = model.config<!-- HTML_TAG_END --></pre></div></div> <h2 class="relative group"><a id="transformers.RemBertTokenizer" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertTokenizer"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>RemBertTokenizer </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.RemBertTokenizer"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">RemBertTokenizer</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.RemBertTokenizer" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.RemBertTokenizer"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/rembert/tokenization_rembert.py#L43" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">vocab_file<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">do_lower_case<span class="opacity-60"> = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">remove_space<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">keep_accents<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">bos_token<span class="opacity-60"> = &#39;[CLS]&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eos_token<span class="opacity-60"> = &#39;[SEP]&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">unk_token<span class="opacity-60"> = &#39;[UNK]&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">sep_token<span class="opacity-60"> = &#39;[SEP]&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_token<span class="opacity-60"> = &#39;[PAD]&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cls_token<span class="opacity-60"> = &#39;[CLS]&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">mask_token<span class="opacity-60"> = &#39;[MASK]&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 
text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertTokenizer.vocab_file" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertTokenizer.vocab_file"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>vocab_file</strong> (<code>str</code>) &#x2014; <a href="https://github.com/google/sentencepiece" rel="nofollow">SentencePiece</a> file (generally has a <em>.spm</em> extension) that contains the vocabulary necessary to instantiate a tokenizer.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertTokenizer.bos_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertTokenizer.bos_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>bos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[CLS]&quot;</code>) &#x2014; The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. 
The token used is the <code>cls_token</code>.</p> </div><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertTokenizer.eos_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertTokenizer.eos_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>eos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[SEP]&quot;</code>) &#x2014; The end of sequence token.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the <code>sep_token</code>.</p> </div><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertTokenizer.unk_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertTokenizer.unk_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;unk&gt;&quot;</code>) &#x2014; The unknown token. 
A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertTokenizer.sep_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertTokenizer.sep_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>sep_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[SEP]&quot;</code>) &#x2014; The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertTokenizer.pad_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertTokenizer.pad_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pad_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;pad&gt;&quot;</code>) &#x2014; The token used for padding, for example when batching sequences of different lengths.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertTokenizer.cls_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertTokenizer.cls_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cls_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[CLS]&quot;</code>) &#x2014; The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertTokenizer.mask_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertTokenizer.mask_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>mask_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[MASK]&quot;</code>) &#x2014; The token used for masking values. This is the token used when training this model with masked language modeling. 
This is the token which the model will try to predict.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertTokenizer.sp_model" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertTokenizer.sp_model"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>sp_model</strong> (<code>SentencePieceProcessor</code>) &#x2014; The <em>SentencePiece</em> processor that is used for every conversion (string, tokens and IDs).<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Construct a RemBERT tokenizer. Based on <a href="https://github.com/google/sentencepiece" rel="nofollow">SentencePiece</a>.</p> <p>This tokenizer inherits from <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a> which contains most of the main methods. 
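</p> <p>A minimal usage sketch may help before the individual methods below (this example is not part of the original reference; the <code>google/rembert</code> checkpoint name is an assumption, adjust it to the checkpoint you actually use):</p> <div class="code-block relative"><pre>from transformers import RemBertTokenizer

# Checkpoint name assumed here for illustration.
tokenizer = RemBertTokenizer.from_pretrained("google/rembert")

encoding = tokenizer("Hello world", "How are you?")
print(encoding["input_ids"])       # ids for: [CLS] A [SEP] B [SEP]
print(encoding["token_type_ids"])  # 0s for the first sequence, 1s for the second</pre></div> <p>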
Users should refer to this superclass for more information regarding those methods.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.RemBertTokenizer.build_inputs_with_special_tokens"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>build_inputs_with_special_tokens</span></h4><!-- HTML_TAG_END --> <a id="transformers.RemBertTokenizer.build_inputs_with_special_tokens" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.RemBertTokenizer.build_inputs_with_special_tokens"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/rembert/tokenization_rembert.py#L172" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_0<span class="opacity-60">: typing.List[int]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_1<span class="opacity-60">: typing.Optional[typing.List[int]] = None</span></span> </span> <span>)</span> <span 
class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>List[int]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertTokenizer.build_inputs_with_special_tokens.token_ids_0" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertTokenizer.build_inputs_with_special_tokens.token_ids_0"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs to which the special tokens will be added.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertTokenizer.build_inputs_with_special_tokens.token_ids_1" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertTokenizer.build_inputs_with_special_tokens.token_ids_1"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.RemBertTokenizer.build_inputs_with_special_tokens.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>List[int]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 
dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>List of <a href="../glossary#input-ids">input IDs</a> with the appropriate special tokens.</p> <!-- HTML_TAG_END --></p></div></div> <p>Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A REMBERT sequence has the following format:</p> <ul><li>single sequence: <code>[CLS] X [SEP]</code></li> <li>pair of sequences: <code>[CLS] A [SEP] B [SEP]</code></li></ul></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.RemBertTokenizer.get_special_tokens_mask"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>get_special_tokens_mask</span></h4><!-- HTML_TAG_END --> <a id="transformers.RemBertTokenizer.get_special_tokens_mask" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.RemBertTokenizer.get_special_tokens_mask"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/rembert/tokenization_rembert.py#L197" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded 
hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_0<span class="opacity-60">: typing.List[int]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_1<span class="opacity-60">: typing.Optional[typing.List[int]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">already_has_special_tokens<span class="opacity-60">: bool = False</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>List[int]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertTokenizer.get_special_tokens_mask.token_ids_0" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertTokenizer.get_special_tokens_mask.token_ids_0"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertTokenizer.get_special_tokens_mask.token_ids_1" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertTokenizer.get_special_tokens_mask.token_ids_1"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- 
HTML_TAG_START --><strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertTokenizer.get_special_tokens_mask.already_has_special_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertTokenizer.get_special_tokens_mask.already_has_special_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>already_has_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the token list is already formatted with special tokens for the model.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.RemBertTokenizer.get_special_tokens_mask.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>List[int]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.</p> <!-- HTML_TAG_END --></p></div></div> <p>Retrieve sequence ids from a token list that has no special tokens added. 
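</p> <p>For instance, a minimal sketch (again assuming the <code>google/rembert</code> checkpoint) of the mask returned for a single sequence that carries no special tokens yet:</p> <div class="code-block relative"><pre>from transformers import RemBertTokenizer

tokenizer = RemBertTokenizer.from_pretrained("google/rembert")  # checkpoint name assumed

# Token ids without any special tokens added.
ids = tokenizer.encode("Hello world", add_special_tokens=False)

# 1 marks positions that receive a special token ([CLS]/[SEP]), 0 marks sequence tokens.
print(tokenizer.get_special_tokens_mask(ids))  # e.g. [1, 0, ..., 0, 1]</pre></div> <p>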
This method is called when adding special tokens using the tokenizer <code>prepare_for_model</code> method.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.RemBertTokenizer.create_token_type_ids_from_sequences"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>create_token_type_ids_from_sequences</span></h4><!-- HTML_TAG_END --> <a id="transformers.RemBertTokenizer.create_token_type_ids_from_sequences" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.RemBertTokenizer.create_token_type_ids_from_sequences"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/rembert/tokenization_rembert.py#L228" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_0<span class="opacity-60">: typing.List[int]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_1<span class="opacity-60">: typing.Optional[typing.List[int]] = 
None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>List[int]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertTokenizer.create_token_type_ids_from_sequences.token_ids_0" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertTokenizer.create_token_type_ids_from_sequences.token_ids_0"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertTokenizer.create_token_type_ids_from_sequences.token_ids_1" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertTokenizer.create_token_type_ids_from_sequences.token_ids_1"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.RemBertTokenizer.create_token_type_ids_from_sequences.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>List[int]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 
dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>List of <a href="../glossary#token-type-ids">token type IDs</a> according to the given sequence(s).</p> <!-- HTML_TAG_END --></p></div></div> <p>Create a mask from the two sequences passed to be used in a sequence-pair classification task. A RemBERT sequence pair mask has the following format:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 1 </span>1<span class="hljs-number"> 1 </span>1<span class="hljs-number"> 1 </span>1<span class="hljs-number"> 1 </span>1 1 | first sequence | second sequence |<!-- HTML_TAG_END --></pre></div> <p>If <code>token_ids_1</code> is <code>None</code>, this method only returns the first portion of the mask (0s).</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.RemBertTokenizer.save_vocabulary"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 
13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>save_vocabulary</span></h4><!-- HTML_TAG_END --> <a id="transformers.RemBertTokenizer.save_vocabulary" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.RemBertTokenizer.save_vocabulary"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/rembert/tokenization_rembert.py#L258" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">save_directory<span class="opacity-60">: str</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">filename_prefix<span class="opacity-60">: typing.Optional[str] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div></div></div> <h2 class="relative group"><a id="transformers.RemBertTokenizerFast" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertTokenizerFast"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>RemBertTokenizerFast </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.RemBertTokenizerFast"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 
text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">RemBertTokenizerFast</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.RemBertTokenizerFast" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.RemBertTokenizerFast"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/rembert/tokenization_rembert_fast.py#L52" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">vocab_file<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tokenizer_file<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">do_lower_case<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">remove_space<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">keep_accents<span class="opacity-60"> = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">bos_token<span class="opacity-60"> = &#39;[CLS]&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black 
hover:text-white dark:hover:bg-white dark:hover:text-black">eos_token<span class="opacity-60"> = &#39;[SEP]&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">unk_token<span class="opacity-60"> = &#39;&lt;unk&gt;&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">sep_token<span class="opacity-60"> = &#39;[SEP]&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_token<span class="opacity-60"> = &#39;&lt;pad&gt;&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cls_token<span class="opacity-60"> = &#39;[CLS]&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">mask_token<span class="opacity-60"> = &#39;[MASK]&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertTokenizerFast.vocab_file" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertTokenizerFast.vocab_file"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>vocab_file</strong> (<code>str</code>) &#x2014; <a href="https://github.com/google/sentencepiece" rel="nofollow">SentencePiece</a> file (generally has a <em>.spm</em> extension) that contains the vocabulary necessary to instantiate a tokenizer.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertTokenizerFast.do_lower_case" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertTokenizerFast.do_lower_case"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" 
aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>do_lower_case</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to lowercase the input when tokenizing.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertTokenizerFast.remove_space" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertTokenizerFast.remove_space"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>remove_space</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to strip the text when tokenizing (removing excess spaces before and after the string).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertTokenizerFast.keep_accents" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertTokenizerFast.keep_accents"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>keep_accents</strong> (<code>bool</code>, 
<em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to keep accents when tokenizing.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertTokenizerFast.bos_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertTokenizerFast.bos_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>bos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[CLS]&quot;</code>) &#x2014; The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the <code>cls_token</code>.</p> </div><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertTokenizerFast.eos_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertTokenizerFast.eos_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>eos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[SEP]&quot;</code>) &#x2014; The end of sequence token. When building a sequence using special tokens, this is not the token that is used for the end of sequence.
The token used is the <code>sep_token</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertTokenizerFast.unk_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertTokenizerFast.unk_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;unk&gt;&quot;</code>) &#x2014; The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertTokenizerFast.sep_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertTokenizerFast.sep_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>sep_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[SEP]&quot;</code>) &#x2014; The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. 
It is also used as the last token of a sequence built with special tokens.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertTokenizerFast.pad_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertTokenizerFast.pad_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pad_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;pad&gt;&quot;</code>) &#x2014; The token used for padding, for example when batching sequences of different lengths.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertTokenizerFast.cls_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertTokenizerFast.cls_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cls_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[CLS]&quot;</code>) &#x2014; The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). 
It is the first token of the sequence when built with special tokens.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertTokenizerFast.mask_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertTokenizerFast.mask_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>mask_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[MASK]&quot;</code>) &#x2014; The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Construct a “fast” RemBert tokenizer (backed by HuggingFace’s <em>tokenizers</em> library). Based on <a href="https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=unigram#models" rel="nofollow">Unigram</a>. This tokenizer inherits from <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast">PreTrainedTokenizerFast</a> which contains most of the main methods. 
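</p> <p>For example, the fast tokenizer can be loaded from a pretrained checkpoint and called directly (a minimal sketch, not part of the original reference; the <code>google/rembert</code> checkpoint name is an assumption):</p> <div class="code-block relative"><pre><!-- HTML_TAG_START -->from transformers import RemBertTokenizerFast

# Checkpoint name assumed for illustration; any RemBERT checkpoint works the same way.
tokenizer = RemBertTokenizerFast.from_pretrained("google/rembert")

# Encoding a pair of sequences adds the special tokens and fills token_type_ids.
encoded = tokenizer("How old are you?", "I am 6 years old.")
print(encoded["input_ids"])       # ids for "[CLS] A [SEP] B [SEP]"
print(encoded["token_type_ids"])  # 0s for the first sequence, 1s for the second<!-- HTML_TAG_END --></pre></div> <p>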
Users should refer to this superclass for more information regarding those methods</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.RemBertTokenizerFast.build_inputs_with_special_tokens"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>build_inputs_with_special_tokens</span></h4><!-- HTML_TAG_END --> <a id="transformers.RemBertTokenizerFast.build_inputs_with_special_tokens" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.RemBertTokenizerFast.build_inputs_with_special_tokens"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/rembert/tokenization_rembert_fast.py#L144" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_0<span class="opacity-60">: typing.List[int]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_1<span class="opacity-60">: typing.Optional[typing.List[int]] = None</span></span> </span> <span>)</span> 
<span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>List[int]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertTokenizerFast.build_inputs_with_special_tokens.token_ids_0" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertTokenizerFast.build_inputs_with_special_tokens.token_ids_0"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs to which the special tokens will be added<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertTokenizerFast.build_inputs_with_special_tokens.token_ids_1" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertTokenizerFast.build_inputs_with_special_tokens.token_ids_1"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>, defaults to <code>None</code>) &#x2014; Optional second list of IDs for sequence pairs.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.RemBertTokenizerFast.build_inputs_with_special_tokens.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>List[int]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto 
border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>list of <a href="../glossary#input-ids">input IDs</a> with the appropriate special tokens.</p> <!-- HTML_TAG_END --></p></div></div> <p>Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A RemBERT sequence has the following format:</p> <ul><li>single sequence: <code>[CLS] X [SEP]</code></li> <li>pair of sequences: <code>[CLS] A [SEP] B [SEP]</code></li></ul></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.RemBertTokenizerFast.get_special_tokens_mask"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>get_special_tokens_mask</span></h4><!-- HTML_TAG_END --> <a id="transformers.RemBertTokenizerFast.get_special_tokens_mask" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.RemBertTokenizerFast.get_special_tokens_mask"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/rembert/tokenization_rembert_fast.py#L169" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span 
class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_0<span class="opacity-60">: typing.List[int]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_1<span class="opacity-60">: typing.Optional[typing.List[int]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">already_has_special_tokens<span class="opacity-60">: bool = False</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>List[int]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertTokenizerFast.get_special_tokens_mask.token_ids_0" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertTokenizerFast.get_special_tokens_mask.token_ids_0"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of ids.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertTokenizerFast.get_special_tokens_mask.token_ids_1" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertTokenizerFast.get_special_tokens_mask.token_ids_1"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>, defaults to <code>None</code>) &#x2014; Optional second list of IDs for sequence pairs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertTokenizerFast.get_special_tokens_mask.already_has_special_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertTokenizerFast.get_special_tokens_mask.already_has_special_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>already_has_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Set to True if the token list is already formatted with special tokens for the model<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.RemBertTokenizerFast.get_special_tokens_mask.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>List[int]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.</p> <!-- HTML_TAG_END --></p></div></div> <p>Retrieves sequence ids from a token list that has no special tokens added. 
This method is called when adding special tokens using the tokenizer <code>prepare_for_model</code> method.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.RemBertTokenizerFast.create_token_type_ids_from_sequences"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>create_token_type_ids_from_sequences</span></h4><!-- HTML_TAG_END --> <a id="transformers.RemBertTokenizerFast.create_token_type_ids_from_sequences" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.RemBertTokenizerFast.create_token_type_ids_from_sequences"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/rembert/tokenization_rembert_fast.py#L200" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_0<span class="opacity-60">: typing.List[int]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_1<span class="opacity-60">: 
typing.Optional[typing.List[int]] = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>List[int]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertTokenizerFast.create_token_type_ids_from_sequences.token_ids_0" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertTokenizerFast.create_token_type_ids_from_sequences.token_ids_0"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of ids.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertTokenizerFast.create_token_type_ids_from_sequences.token_ids_1" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertTokenizerFast.create_token_type_ids_from_sequences.token_ids_1"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>, defaults to <code>None</code>) &#x2014; Optional second list of IDs for sequence pairs.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.RemBertTokenizerFast.create_token_type_ids_from_sequences.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> 
<p><code>List[int]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>List of <a href="../glossary#token-type-ids">token type IDs</a> according to the given sequence(s).</p> <!-- HTML_TAG_END --></p></div></div> <p>Creates a mask from the two sequences passed to be used in a sequence-pair classification task. A RemBERT sequence pair mask has the following format:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 1 </span>1<span class="hljs-number"> 1 </span>1<span class="hljs-number"> 1 </span>1<span class="hljs-number"> 1 </span>1 1 | first sequence | second sequence |<!-- HTML_TAG_END --></pre></div> <p>if token_ids_1 is None, only returns the first portion of the mask (0s).</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.RemBertTokenizerFast.save_vocabulary"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 
9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>save_vocabulary</span></h4><!-- HTML_TAG_END --> <a id="transformers.RemBertTokenizerFast.save_vocabulary" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.RemBertTokenizerFast.save_vocabulary"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/rembert/tokenization_rembert_fast.py#L230" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">save_directory<span class="opacity-60">: str</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">filename_prefix<span class="opacity-60">: typing.Optional[str] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div></div></div> <h2 class="relative group"><a id="transformers.RemBertModel" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertModel"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>RemBertModel </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.RemBertModel"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 
text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">RemBertModel</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.RemBertModel" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.RemBertModel"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/rembert/modeling_rembert.py#L743" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">add_pooling_layer<span class="opacity-60"> = True</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertModel.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertModel.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 
1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/rembert#transformers.RemBertConfig">RemBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>The bare RemBERT Model transformer outputting raw hidden-states without any specific head on top. This model is a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and behavior.</p> <p>The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of cross-attention is added between the self-attention layers, following the architecture described in <a href="https://arxiv.org/abs/1706.03762" rel="nofollow">Attention is all you need</a> by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.</p> <p>To behave as a decoder, the model needs to be initialized with the <code>is_decoder</code> argument of the configuration set to <code>True</code>. 
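</p> <p>For instance, a decoder-style RemBERT can be built by flipping these configuration flags before loading the weights (a minimal sketch, not part of the original reference; the <code>google/rembert</code> checkpoint name is an assumption):</p> <div class="code-block relative"><pre><!-- HTML_TAG_START -->from transformers import RemBertConfig, RemBertModel

# Checkpoint name assumed for illustration.
config = RemBertConfig.from_pretrained("google/rembert")
config.is_decoder = True           # use causal (decoder-style) self-attention
config.add_cross_attention = True  # additionally needed for Seq2Seq / cross-attention use

model = RemBertModel.from_pretrained("google/rembert", config=config)<!-- HTML_TAG_END --></pre></div> <p>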
To be used in a Seq2Seq model, the model needs to be initialized with both the <code>is_decoder</code> argument and <code>add_cross_attention</code> set to <code>True</code>; <code>encoder_hidden_states</code> is then expected as an input to the forward pass.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.RemBertModel.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.RemBertModel.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.RemBertModel.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/rembert/modeling_rembert.py#L782" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma 
cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_cache<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPastAndCrossAttentions" >transformers.modeling_outputs.BaseModelOutputWithPastAndCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertModel.forward.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertModel.forward.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 
84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/rembert#transformers.RemBertTokenizer">RemBertTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertModel.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertModel.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertModel.forward.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertModel.forward.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertModel.forward.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertModel.forward.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertModel.forward.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertModel.forward.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertModel.forward.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertModel.forward.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertModel.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertModel.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertModel.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertModel.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertModel.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertModel.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertModel.forward.encoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertModel.forward.encoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_hidden_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder. 
Used in the cross-attention if the model is configured as a decoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertModel.forward.encoder_attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertModel.forward.encoder_attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertModel.forward.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertModel.forward.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code> of length <code>config.n_layers</code> with each tuple having 4 tensors of shape <code>(batch_size, num_heads, sequence_length - 1, embed_size_per_head)</code>) &#x2014; Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. 
If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertModel.forward.use_cache" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertModel.forward.use_cache"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.RemBertModel.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPastAndCrossAttentions" >transformers.modeling_outputs.BaseModelOutputWithPastAndCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPastAndCrossAttentions" >transformers.modeling_outputs.BaseModelOutputWithPastAndCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/rembert#transformers.RemBertConfig" >RemBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) — Sequence of hidden-states at the output of the last layer of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) — Tuple 
of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code> and optionally if <code>config.is_encoder_decoder=True</code> 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if <code>config.is_encoder_decoder=True</code> in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> and <code>config.add_cross_attention=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder’s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/rembert#transformers.RemBertModel">RemBertModel</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path 
d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RemBertTokenizer, RemBertModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RemBertTokenizer.from_pretrained(<span class="hljs-string">&quot;google/rembert&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = RemBertModel.from_pretrained(<span class="hljs-string">&quot;google/rembert&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.RemBertForCausalLM" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertForCausalLM"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>RemBertForCausalLM </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.RemBertForCausalLM"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" 
opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">RemBertForCausalLM</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.RemBertForCausalLM" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.RemBertForCausalLM"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/rembert/modeling_rembert.py#L1016" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertForCausalLM.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertForCausalLM.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a 
href="/docs/transformers/pr_16143/en/model_doc/rembert#transformers.RemBertConfig">RemBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>RemBERT Model with a <code>language modeling</code> head on top for CLM fine-tuning. This model is a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.RemBertForCausalLM.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.RemBertForCausalLM.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.RemBertForCausalLM.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" 
href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/rembert/modeling_rembert.py#L1038" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_cache<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.CausalLMOutputWithCrossAttentions" >transformers.modeling_outputs.CausalLMOutputWithCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative 
docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertForCausalLM.forward.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertForCausalLM.forward.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/rembert#transformers.RemBertTokenizer">RemBertTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertForCausalLM.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertForCausalLM.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertForCausalLM.forward.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertForCausalLM.forward.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertForCausalLM.forward.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertForCausalLM.forward.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertForCausalLM.forward.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertForCausalLM.forward.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertForCausalLM.forward.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertForCausalLM.forward.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertForCausalLM.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertForCausalLM.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertForCausalLM.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertForCausalLM.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertForCausalLM.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertForCausalLM.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertForCausalLM.forward.encoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertForCausalLM.forward.encoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_hidden_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder. 
Used in the cross-attention if the model is configured as a decoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertForCausalLM.forward.encoder_attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertForCausalLM.forward.encoder_attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertForCausalLM.forward.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertForCausalLM.forward.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code> of length <code>config.n_layers</code> with each tuple having 4 tensors of shape <code>(batch_size, num_heads, sequence_length - 1, embed_size_per_head)</code>) &#x2014; Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. 
If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertForCausalLM.forward.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertForCausalLM.forward.labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring). Tokens with indices set to <code>-100</code> are ignored (masked); the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertForCausalLM.forward.use_cache" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertForCausalLM.forward.use_cache"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex 
items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.RemBertForCausalLM.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.CausalLMOutputWithCrossAttentions" >transformers.modeling_outputs.CausalLMOutputWithCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.CausalLMOutputWithCrossAttentions" >transformers.modeling_outputs.CausalLMOutputWithCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/rembert#transformers.RemBertConfig" >RemBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) — Language modeling loss (for next-token prediction).</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) — Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Cross attentions weights after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) — Tuple of <code>torch.FloatTensor</code> tuples of length <code>config.n_layers</code>, with each tuple containing the cached key, value states of the self-attention and the cross-attention layers if model is used in encoder-decoder setting. 
Only relevant if <code>config.is_decoder = True</code>.</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/rembert#transformers.RemBertForCausalLM">RemBertForCausalLM</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RemBertTokenizer, RemBertForCausalLM, RemBertConfig <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RemBertTokenizer.from_pretrained(<span class="hljs-string">&quot;google/rembert&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>config = RemBertConfig.from_pretrained(<span class="hljs-string">&quot;google/rembert&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>config.is_decoder = <span class="hljs-literal">True</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = RemBertForCausalLM.from_pretrained(<span class="hljs-string">&quot;google/rembert&quot;</span>, config=config) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>prediction_logits = outputs.logits<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.RemBertForMaskedLM" class="header-link block pr-1.5 text-lg 
no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertForMaskedLM"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>RemBertForMaskedLM </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.RemBertForMaskedLM"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">RemBertForMaskedLM</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.RemBertForMaskedLM" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.RemBertForMaskedLM"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/rembert/modeling_rembert.py#L913" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 
hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertForMaskedLM.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertForMaskedLM.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/rembert#transformers.RemBertConfig">RemBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>RemBERT Model with a <code>language modeling</code> head on top. This model is a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.RemBertForMaskedLM.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.RemBertForMaskedLM.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.RemBertForMaskedLM.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/rembert/modeling_rembert.py#L935" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">token_type_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.MaskedLMOutput" >transformers.modeling_outputs.MaskedLMOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertForMaskedLM.forward.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertForMaskedLM.forward.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 
11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/rembert#transformers.RemBertTokenizer">RemBertTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertForMaskedLM.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertForMaskedLM.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertForMaskedLM.forward.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertForMaskedLM.forward.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertForMaskedLM.forward.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertForMaskedLM.forward.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertForMaskedLM.forward.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertForMaskedLM.forward.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertForMaskedLM.forward.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertForMaskedLM.forward.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertForMaskedLM.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertForMaskedLM.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertForMaskedLM.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertForMaskedLM.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertForMaskedLM.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertForMaskedLM.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertForMaskedLM.forward.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertForMaskedLM.forward.labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. 
Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring). Tokens with indices set to <code>-100</code> are ignored (masked); the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.RemBertForMaskedLM.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.MaskedLMOutput" >transformers.modeling_outputs.MaskedLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.MaskedLMOutput" >transformers.modeling_outputs.MaskedLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/rembert#transformers.RemBertConfig" >RemBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) — Masked language modeling (MLM) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) — Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attention weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/rembert#transformers.RemBertForMaskedLM">RemBertForMaskedLM</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> 
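<p>As a complement to the example below, the following is a minimal sketch (an illustration, not part of the reference example) of using the <code>-100</code> convention described for <code>labels</code> above, so that the loss is computed only at the masked position. It assumes the <code>google/rembert</code> checkpoint and that the target word is encoded as a single sentencepiece token:</p> <div class="code-block relative"><pre>&gt;&gt;&gt; import torch
&gt;&gt;&gt; from transformers import RemBertTokenizer, RemBertForMaskedLM

&gt;&gt;&gt; tokenizer = RemBertTokenizer.from_pretrained("google/rembert")
&gt;&gt;&gt; model = RemBertForMaskedLM.from_pretrained("google/rembert")

&gt;&gt;&gt; inputs = tokenizer("The capital of France is [MASK].", return_tensors="pt")
&gt;&gt;&gt; labels = tokenizer("The capital of France is Paris.", return_tensors="pt")["input_ids"]
&gt;&gt;&gt; # ignore every position that is not the [MASK] token, so only it contributes to the loss
&gt;&gt;&gt; labels = labels.masked_fill(inputs.input_ids != tokenizer.mask_token_id, -100)

&gt;&gt;&gt; outputs = model(**inputs, labels=labels)
&gt;&gt;&gt; # read off the model's prediction at the masked position
&gt;&gt;&gt; mask_index = (inputs.input_ids == tokenizer.mask_token_id)[0].nonzero(as_tuple=True)[0]
&gt;&gt;&gt; predicted_id = outputs.logits[0, mask_index].argmax(dim=-1)
&gt;&gt;&gt; tokenizer.decode(predicted_id)</pre></div> <p>If the replacement word were split into several sentencepiece tokens, the two tokenizations above would no longer have the same length, and the label tensor would have to be aligned with the masked input before the <code>masked_fill</code> step.</p> 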
<p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RemBertTokenizer, RemBertForMaskedLM
<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch
<span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RemBertTokenizer.from_pretrained(<span class="hljs-string">&quot;google/rembert&quot;</span>)
<span class="hljs-meta">&gt;&gt;&gt; </span>model = RemBertForMaskedLM.from_pretrained(<span class="hljs-string">&quot;google/rembert&quot;</span>)
<span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;The capital of France is [MASK].&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>)
<span class="hljs-meta">&gt;&gt;&gt; </span>labels = tokenizer(<span class="hljs-string">&quot;The capital of France is Paris.&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>)[<span class="hljs-string">&quot;input_ids&quot;</span>]
<span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels)
<span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss
<span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.RemBertForSequenceClassification" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertForSequenceClassification"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></span></a> <span>RemBertForSequenceClassification </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.RemBertForSequenceClassification"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">RemBertForSequenceClassification</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.RemBertForSequenceClassification" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.RemBertForSequenceClassification"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/rembert/modeling_rembert.py#L1169" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertForSequenceClassification.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 
with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertForSequenceClassification.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/rembert#transformers.RemBertConfig">RemBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>RemBERT Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.</p> <p>This model is a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.RemBertForSequenceClassification.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.RemBertForSequenceClassification.forward" 
class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.RemBertForSequenceClassification.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/rembert/modeling_rembert.py#L1180" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" 
>transformers.modeling_outputs.SequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertForSequenceClassification.forward.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertForSequenceClassification.forward.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/rembert#transformers.RemBertTokenizer">RemBertTokenizer</a>. 
See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertForSequenceClassification.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertForSequenceClassification.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertForSequenceClassification.forward.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertForSequenceClassification.forward.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertForSequenceClassification.forward.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertForSequenceClassification.forward.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertForSequenceClassification.forward.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertForSequenceClassification.forward.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertForSequenceClassification.forward.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertForSequenceClassification.forward.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertForSequenceClassification.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertForSequenceClassification.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertForSequenceClassification.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertForSequenceClassification.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertForSequenceClassification.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertForSequenceClassification.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertForSequenceClassification.forward.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertForSequenceClassification.forward.labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. 
If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.RemBertForSequenceClassification.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/rembert#transformers.RemBertConfig" >RemBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) — Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) — Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/rembert#transformers.RemBertForSequenceClassification">RemBertForSequenceClassification</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example of 
single-label classification:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RemBertTokenizer, RemBertForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span>torch.manual_seed(<span class="hljs-number">0</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RemBertTokenizer.from_pretrained(<span class="hljs-string">&quot;rembert&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = RemBertForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;rembert&quot;</span>, num_labels=<span class="hljs-number">2</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([<span class="hljs-number">1</span>]).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">list</span>(logits.shape) <!-- HTML_TAG_END --></pre></div> <p>Example of multi-label classification:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity 
bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch
<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RemBertTokenizer, RemBertForSequenceClassification
<span class="hljs-meta">&gt;&gt;&gt; </span>torch.manual_seed(<span class="hljs-number">0</span>)
<span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RemBertTokenizer.from_pretrained(<span class="hljs-string">&quot;rembert&quot;</span>)
<span class="hljs-meta">&gt;&gt;&gt; </span>model = RemBertForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;rembert&quot;</span>, problem_type=<span class="hljs-string">&quot;multi_label_classification&quot;</span>, num_labels=<span class="hljs-number">2</span>)
<span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>)
<span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([[<span class="hljs-number">1</span>, <span class="hljs-number">1</span>]], dtype=torch.<span class="hljs-built_in">float</span>) <span class="hljs-comment"># need dtype=float for BCEWithLogitsLoss</span>
<span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels)
<span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss
<span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits
<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">list</span>(logits.shape) <!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.RemBertForMultipleChoice" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertForMultipleChoice"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>RemBertForMultipleChoice </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.RemBertForMultipleChoice"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">RemBertForMultipleChoice</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.RemBertForMultipleChoice" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.RemBertForMultipleChoice"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/rembert/modeling_rembert.py#L1266" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertForMultipleChoice.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertForMultipleChoice.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 
0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/rembert#transformers.RemBertConfig">RemBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>RemBERT Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.</p> <p>This model is a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.RemBertForMultipleChoice.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.RemBertForMultipleChoice.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.RemBertForMultipleChoice.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 
43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/rembert/modeling_rembert.py#L1277" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.MultipleChoiceModelOutput" >transformers.modeling_outputs.MultipleChoiceModelOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertForMultipleChoice.forward.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full" href="#transformers.RemBertForMultipleChoice.forward.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/rembert#transformers.RemBertTokenizer">RemBertTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertForMultipleChoice.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertForMultipleChoice.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertForMultipleChoice.forward.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertForMultipleChoice.forward.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertForMultipleChoice.forward.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertForMultipleChoice.forward.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertForMultipleChoice.forward.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertForMultipleChoice.forward.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertForMultipleChoice.forward.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertForMultipleChoice.forward.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertForMultipleChoice.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertForMultipleChoice.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertForMultipleChoice.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertForMultipleChoice.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertForMultipleChoice.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertForMultipleChoice.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertForMultipleChoice.forward.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertForMultipleChoice.forward.labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the multiple choice classification loss. Indices should be in <code>[0, ..., num_choices-1]</code> where <code>num_choices</code> is the size of the second dimension of the input tensors. 
(See <code>input_ids</code> above)<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.RemBertForMultipleChoice.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.MultipleChoiceModelOutput" >transformers.modeling_outputs.MultipleChoiceModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.MultipleChoiceModelOutput" >transformers.modeling_outputs.MultipleChoiceModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/rembert#transformers.RemBertConfig" >RemBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <em>(1,)</em>, <em>optional</em>, returned when <code>labels</code> is provided) — Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices)</code>) — <em>num_choices</em> is the second dimension of the input tensors. (see <em>input_ids</em> above).</p> <p>Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/rembert#transformers.RemBertForMultipleChoice">RemBertForMultipleChoice</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 
cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RemBertTokenizer, RemBertForMultipleChoice <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RemBertTokenizer.from_pretrained(<span class="hljs-string">&quot;rembert&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = RemBertForMultipleChoice.from_pretrained(<span class="hljs-string">&quot;rembert&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice0 = <span class="hljs-string">&quot;It is eaten with a fork and a knife.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice1 = <span class="hljs-string">&quot;It is eaten while held in the hand.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor(<span class="hljs-number">0</span>).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># choice0 is correct (according to Wikipedia ;)), batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors=<span class="hljs-string">&quot;pt&quot;</span>, padding=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**{k: v.unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-keyword">for</span> k, v <span class="hljs-keyword">in</span> encoding.items()}, labels=labels) <span class="hljs-comment"># batch size is 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># the linear classifier still needs to be trained</span> <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.RemBertForTokenClassification" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertForTokenClassification"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" 
height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>RemBertForTokenClassification </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.RemBertForTokenClassification"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">RemBertForTokenClassification</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.RemBertForTokenClassification" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.RemBertForTokenClassification"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/rembert/modeling_rembert.py#L1358" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">config<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertForTokenClassification.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertForTokenClassification.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/rembert#transformers.RemBertConfig">RemBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>RemBERT Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.</p> <p>This model is a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> sub-class. 
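</p> <p>The short sketch below is not part of the generated docstring; it only illustrates how token-level predictions are typically read off this head. The <code>google/rembert</code> checkpoint name and <code>num_labels=9</code> are assumptions chosen for illustration, and a freshly loaded classification head is randomly initialized, so its predictions are only meaningful after fine-tuning.</p> <div class="code-block relative"><pre>&gt;&gt;&gt; import torch
&gt;&gt;&gt; from transformers import RemBertTokenizer, RemBertForTokenClassification

&gt;&gt;&gt; # assumed checkpoint and label count; replace with your own fine-tuned model
&gt;&gt;&gt; tokenizer = RemBertTokenizer.from_pretrained(&quot;google/rembert&quot;)
&gt;&gt;&gt; model = RemBertForTokenClassification.from_pretrained(&quot;google/rembert&quot;, num_labels=9)

&gt;&gt;&gt; inputs = tokenizer(&quot;HuggingFace is based in New York City&quot;, return_tensors=&quot;pt&quot;)
&gt;&gt;&gt; with torch.no_grad():
...     logits = model(**inputs).logits  # shape: (batch_size, sequence_length, num_labels)

&gt;&gt;&gt; predicted_token_class_ids = logits.argmax(-1)  # one predicted label id per input token</pre></div> <p>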
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.RemBertForTokenClassification.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.RemBertForTokenClassification.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.RemBertForTokenClassification.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/rembert/modeling_rembert.py#L1370" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white 
dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertForTokenClassification.forward.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertForTokenClassification.forward.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a 
href="/docs/transformers/pr_16143/en/model_doc/rembert#transformers.RemBertTokenizer">RemBertTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertForTokenClassification.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertForTokenClassification.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertForTokenClassification.forward.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertForTokenClassification.forward.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertForTokenClassification.forward.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertForTokenClassification.forward.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertForTokenClassification.forward.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertForTokenClassification.forward.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertForTokenClassification.forward.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertForTokenClassification.forward.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertForTokenClassification.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertForTokenClassification.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertForTokenClassification.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertForTokenClassification.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertForTokenClassification.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertForTokenClassification.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertForTokenClassification.forward.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertForTokenClassification.forward.labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the token classification loss. 
Indices should be in <code>[0, ..., config.num_labels - 1]</code>.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.RemBertForTokenClassification.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/rembert#transformers.RemBertConfig" >RemBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) — Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.num_labels)</code>) — Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attention weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/rembert#transformers.RemBertForTokenClassification">RemBertForTokenClassification</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for the forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition 
duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RemBertTokenizer, RemBertForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RemBertTokenizer.from_pretrained(<span class="hljs-string">&quot;rembert&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = RemBertForTokenClassification.from_pretrained(<span class="hljs-string">&quot;rembert&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([<span class="hljs-number">1</span>] * inputs[<span class="hljs-string">&quot;input_ids&quot;</span>].size(<span class="hljs-number">1</span>)).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.RemBertForQuestionAnswering" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertForQuestionAnswering"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>RemBertForQuestionAnswering </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 
bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.RemBertForQuestionAnswering"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">RemBertForQuestionAnswering</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.RemBertForQuestionAnswering" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.RemBertForQuestionAnswering"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/rembert/modeling_rembert.py#L1437" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertForQuestionAnswering.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertForQuestionAnswering.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" 
aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/rembert#transformers.RemBertConfig">RemBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>RemBERT Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute <code>span start logits</code> and <code>span end logits</code>).</p> <p>This model is a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.RemBertForQuestionAnswering.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.RemBertForQuestionAnswering.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.RemBertForQuestionAnswering.forward"><svg class="" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/rembert/modeling_rembert.py#L1449" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">start_positions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">end_positions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a 
href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.QuestionAnsweringModelOutput" >transformers.modeling_outputs.QuestionAnsweringModelOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertForQuestionAnswering.forward.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertForQuestionAnswering.forward.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/rembert#transformers.RemBertTokenizer">RemBertTokenizer</a>. 
See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertForQuestionAnswering.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertForQuestionAnswering.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertForQuestionAnswering.forward.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertForQuestionAnswering.forward.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertForQuestionAnswering.forward.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertForQuestionAnswering.forward.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertForQuestionAnswering.forward.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertForQuestionAnswering.forward.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertForQuestionAnswering.forward.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertForQuestionAnswering.forward.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertForQuestionAnswering.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertForQuestionAnswering.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertForQuestionAnswering.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertForQuestionAnswering.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertForQuestionAnswering.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertForQuestionAnswering.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertForQuestionAnswering.forward.start_positions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertForQuestionAnswering.forward.start_positions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>start_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). Positions outside of the sequence are not taken into account for computing the loss.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RemBertForQuestionAnswering.forward.end_positions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RemBertForQuestionAnswering.forward.end_positions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>end_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). 
Positions outside of the sequence are not taken into account for computing the loss.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.RemBertForQuestionAnswering.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.QuestionAnsweringModelOutput" >transformers.modeling_outputs.QuestionAnsweringModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.QuestionAnsweringModelOutput" >transformers.modeling_outputs.QuestionAnsweringModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/rembert#transformers.RemBertConfig" >RemBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) — Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.</p> </li> <li> <p><strong>start_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) — Span-start scores (before SoftMax).</p> </li> <li> <p><strong>end_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) — Span-end scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attention weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/rembert#transformers.RemBertForQuestionAnswering">RemBertForQuestionAnswering</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for the forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the 
latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RemBertTokenizer, RemBertForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>torch.manual_seed(<span class="hljs-number">0</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RemBertTokenizer.from_pretrained(<span class="hljs-string">&quot;rembert&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = RemBertForQuestionAnswering.from_pretrained(<span class="hljs-string">&quot;rembert&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>question, text = <span class="hljs-string">&quot;Who was Jim Henson?&quot;</span>, <span class="hljs-string">&quot;Jim Henson was a nice puppet&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(question, text, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>start_positions = torch.tensor([<span class="hljs-number">1</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>end_positions = torch.tensor([<span class="hljs-number">3</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, start_positions=start_positions, end_positions=end_positions) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">round</span>(loss.item(), <span class="hljs-number">2</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>start_scores = outputs.start_logits <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">list</span>(start_scores.shape) <span class="hljs-meta">&gt;&gt;&gt; </span>end_scores = outputs.end_logits <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">list</span>(end_scores.shape) <!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.TFRemBertModel" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRemBertModel"><span><svg class="" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TFRemBertModel </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFRemBertModel"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TFRemBertModel</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TFRemBertModel" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFRemBertModel"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/rembert/modeling_tf_rembert.py#L932" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRemBertModel.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRemBertModel.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/rembert#transformers.RemBertConfig">RemBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>The bare RemBERT Model transformer outputting raw hidden-states without any specific head on top.</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads etc.)</p> <p>This model is also a <a href="https://www.tensorflow.org/api_docs/python/tf/keras/Model" rel="nofollow">tf.keras.Model</a> subclass.
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and behavior.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>TF 2.0 models accept two formats as inputs:</p> <ul><li>having all inputs as keyword arguments (like PyTorch models), or</li> <li>having all inputs as a list, tuple or dict in the first positional argument.</li></ul> <p>This second option is useful when using the <code>tf.keras.Model.fit</code> method, which currently requires having all the tensors in the first argument of the model call function: <code>model(inputs)</code>.</p> <p>If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument:</p> <ul><li>a single Tensor with <code>input_ids</code> only and nothing else: <code>model(input_ids)</code></li> <li>a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: <code>model([input_ids, attention_mask])</code> or <code>model([input_ids, attention_mask, token_type_ids])</code></li> <li>a dictionary with one or several input Tensors associated with the input names given in the docstring: <code>model({&quot;input_ids&quot;: input_ids, &quot;token_type_ids&quot;: token_type_ids})</code></li></ul></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFRemBertModel.call"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>call</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFRemBertModel.call" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFRemBertModel.call"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314
0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/rembert/modeling_tf_rembert.py#L938" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_hidden_states<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_attention_mask<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">past_key_values<span class="opacity-60">: typing.Union[typing.Tuple[typing.Tuple[typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor]]], NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_cache<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">training<span class="opacity-60">: typing.Optional[bool] = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFBaseModelOutputWithPoolingAndCrossAttentions" >transformers.modeling_tf_outputs.TFBaseModelOutputWithPoolingAndCrossAttentions</a> or <code>tuple(tf.Tensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRemBertModel.call.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRemBertModel.call.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>np.ndarray</code>, <code>tf.Tensor</code>, <code>List[tf.Tensor]</code> `<code>Dict[str, tf.Tensor]</code> or <code>Dict[str, np.ndarray]</code> and each example must have the shape 
<code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRemBertModel.call.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRemBertModel.call.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRemBertModel.call.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRemBertModel.call.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRemBertModel.call.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRemBertModel.call.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRemBertModel.call.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRemBertModel.call.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRemBertModel.call.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRemBertModel.call.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRemBertModel.call.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRemBertModel.call.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRemBertModel.call.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRemBertModel.call.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRemBertModel.call.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRemBertModel.call.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRemBertModel.call.training" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRemBertModel.call.training"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to `False&#x201C;) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRemBertModel.call.encoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRemBertModel.call.encoder_hidden_states"><span><svg class="text-smd" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_hidden_states</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRemBertModel.call.encoder_attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRemBertModel.call.encoder_attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_attention_mask</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRemBertModel.call.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRemBertModel.call.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>Tuple[Tuple[tf.Tensor]]</code> of length <code>config.n_layers</code>) &#x2014; contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRemBertModel.call.use_cache" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRemBertModel.call.use_cache"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>). 
Set to <code>False</code> during training, <code>True</code> during generation<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.TFRemBertModel.call.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFBaseModelOutputWithPoolingAndCrossAttentions" >transformers.modeling_tf_outputs.TFBaseModelOutputWithPoolingAndCrossAttentions</a> or <code>tuple(tf.Tensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFBaseModelOutputWithPoolingAndCrossAttentions" >transformers.modeling_tf_outputs.TFBaseModelOutputWithPoolingAndCrossAttentions</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/rembert#transformers.RemBertConfig" >RemBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) — Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>pooler_output</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, hidden_size)</code>) — Last layer hidden-state of the first token of the sequence (classification token) further processed by a Linear layer and a Tanh activation function. The Linear layer weights are trained from the next sentence prediction (classification) objective during pretraining.</p> <p>This output is usually <em>not</em> a good summary of the semantic content of the input, you’re often better with averaging or pooling the sequence of hidden-states for the whole input sequence.</p> </li> <li> <p><strong>past_key_values</strong> (<code>List[tf.Tensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) — List of <code>tf.Tensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the 
weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder’s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/rembert#transformers.TFRemBertModel">TFRemBertModel</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RemBertTokenizer, TFRemBertModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RemBertTokenizer.from_pretrained(<span class="hljs-string">&quot;rembert&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFRemBertModel.from_pretrained(<span class="hljs-string">&quot;rembert&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a 
id="transformers.TFRemBertForMaskedLM" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRemBertForMaskedLM"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TFRemBertForMaskedLM </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFRemBertForMaskedLM"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TFRemBertForMaskedLM</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TFRemBertForMaskedLM" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFRemBertForMaskedLM"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" 
href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/rembert/modeling_tf_rembert.py#L1026" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRemBertForMaskedLM.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRemBertForMaskedLM.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/rembert#transformers.RemBertConfig">RemBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>RemBERT Model with a <code>language modeling</code> head on top.</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads etc.)</p> <p>This model is also a <a href="https://www.tensorflow.org/api_docs/python/tf/keras/Model" rel="nofollow">tf.keras.Model</a> subclass.
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and behavior.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>TF 2.0 models accept two formats as inputs:</p> <ul><li>having all inputs as keyword arguments (like PyTorch models), or</li> <li>having all inputs as a list, tuple or dict in the first positional argument.</li></ul> <p>This second option is useful when using the <code>tf.keras.Model.fit</code> method, which currently requires having all the tensors in the first argument of the model call function: <code>model(inputs)</code>.</p> <p>If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument:</p> <ul><li>a single Tensor with <code>input_ids</code> only and nothing else: <code>model(input_ids)</code></li> <li>a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: <code>model([input_ids, attention_mask])</code> or <code>model([input_ids, attention_mask, token_type_ids])</code></li> <li>a dictionary with one or several input Tensors associated with the input names given in the docstring: <code>model({&quot;input_ids&quot;: input_ids, &quot;token_type_ids&quot;: token_type_ids})</code></li></ul></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFRemBertForMaskedLM.call"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>call</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFRemBertForMaskedLM.call" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFRemBertForMaskedLM.call"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003
0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/rembert/modeling_tf_rembert.py#L1042" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span 
class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">training<span class="opacity-60">: typing.Optional[bool] = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFMaskedLMOutput" >transformers.modeling_tf_outputs.TFMaskedLMOutput</a> or <code>tuple(tf.Tensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRemBertForMaskedLM.call.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRemBertForMaskedLM.call.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>np.ndarray</code>, <code>tf.Tensor</code>, <code>List[tf.Tensor]</code> `<code>Dict[str, tf.Tensor]</code> or <code>Dict[str, np.ndarray]</code> and each example must have the shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. 
See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRemBertForMaskedLM.call.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRemBertForMaskedLM.call.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRemBertForMaskedLM.call.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRemBertForMaskedLM.call.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRemBertForMaskedLM.call.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRemBertForMaskedLM.call.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRemBertForMaskedLM.call.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRemBertForMaskedLM.call.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRemBertForMaskedLM.call.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRemBertForMaskedLM.call.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRemBertForMaskedLM.call.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRemBertForMaskedLM.call.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRemBertForMaskedLM.call.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRemBertForMaskedLM.call.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRemBertForMaskedLM.call.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRemBertForMaskedLM.call.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. 
This argument can be used in eager mode, in graph mode the value will always be set to True.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRemBertForMaskedLM.call.training" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRemBertForMaskedLM.call.training"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to `False&#x201C;) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRemBertForMaskedLM.call.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRemBertForMaskedLM.call.labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>tf.Tensor</code> or <code>np.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. 
Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code><!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.TFRemBertForMaskedLM.call.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFMaskedLMOutput" >transformers.modeling_tf_outputs.TFMaskedLMOutput</a> or <code>tuple(tf.Tensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFMaskedLMOutput" >transformers.modeling_tf_outputs.TFMaskedLMOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/rembert#transformers.RemBertConfig" >RemBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(n,)</code>, <em>optional</em>, where n is the number of non-masked labels, returned when <code>labels</code> is provided) — Masked language modeling (MLM) loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) — Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/rembert#transformers.TFRemBertForMaskedLM">TFRemBertForMaskedLM</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> 
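<p>The three input formats described in the tip above are interchangeable. Below is a minimal sketch, assuming the same placeholder <code>&quot;rembert&quot;</code> checkpoint name used in the example that follows (any RemBERT checkpoint works the same way); it only illustrates how the same tensors can be passed as keyword arguments, as a list, or as a dictionary:</p> <div class="code-block relative"><pre>&gt;&gt;&gt; from transformers import RemBertTokenizer, TFRemBertForMaskedLM

&gt;&gt;&gt; tokenizer = RemBertTokenizer.from_pretrained(&quot;rembert&quot;)
&gt;&gt;&gt; model = TFRemBertForMaskedLM.from_pretrained(&quot;rembert&quot;)
&gt;&gt;&gt; encoded = tokenizer(&quot;The capital of France is [MASK].&quot;, return_tensors=&quot;tf&quot;)

&gt;&gt;&gt; # 1. all inputs as keyword arguments (like PyTorch models)
&gt;&gt;&gt; outputs = model(input_ids=encoded[&quot;input_ids&quot;], attention_mask=encoded[&quot;attention_mask&quot;])

&gt;&gt;&gt; # 2. all inputs as a list, in the order given in the docstring
&gt;&gt;&gt; outputs = model([encoded[&quot;input_ids&quot;], encoded[&quot;attention_mask&quot;]])

&gt;&gt;&gt; # 3. all inputs as a dictionary associating input names to tensors
&gt;&gt;&gt; outputs = model({&quot;input_ids&quot;: encoded[&quot;input_ids&quot;], &quot;attention_mask&quot;: encoded[&quot;attention_mask&quot;]})</pre></div>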
<p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RemBertTokenizer, TFRemBertForMaskedLM <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RemBertTokenizer.from_pretrained(<span class="hljs-string">&quot;rembert&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFRemBertForMaskedLM.from_pretrained(<span class="hljs-string">&quot;rembert&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;The capital of France is [MASK].&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs[<span class="hljs-string">&quot;labels&quot;</span>] = tokenizer(<span class="hljs-string">&quot;The capital of France is Paris.&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>)[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.TFRemBertForCausalLM" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRemBertForCausalLM"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 
11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TFRemBertForCausalLM </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFRemBertForCausalLM"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TFRemBertForCausalLM</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TFRemBertForCausalLM" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFRemBertForCausalLM"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/rembert/modeling_tf_rembert.py#L1108" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a 
id="transformers.TFRemBertForCausalLM.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRemBertForCausalLM.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/rembert#transformers.RemBertConfig">RemBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>RemBERT Model with a <code>language modeling</code> head on top for CLM fine-tuning.</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)</p> <p>This model is also a <a href="https://www.tensorflow.org/api_docs/python/tf/keras/Model" rel="nofollow">tf.keras.Model</a> subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and behavior.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>TF 2.0 models accept two formats as inputs:</p> <ul><li>having all inputs as keyword arguments (like PyTorch models), or</li> <li>having all inputs as a list, tuple or dict in the first positional argument.</li></ul> <p>This second option is useful when using the <code>tf.keras.Model.fit</code> method, which currently requires having all the tensors in the first argument of the model call function: <code>model(inputs)</code>.</p> <p>If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument:</p> <ul><li>a single Tensor with <code>input_ids</code> only and nothing else: <code>model(input_ids)</code></li> <li>a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: <code>model([input_ids, attention_mask])</code> or <code>model([input_ids, attention_mask, token_type_ids])</code></li> <li>a dictionary with one or several input Tensors associated with the input names given in the docstring: <code>model({&quot;input_ids&quot;: input_ids, &quot;token_type_ids&quot;: token_type_ids})</code></li></ul></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFRemBertForCausalLM.call"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>call</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFRemBertForCausalLM.call" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFRemBertForCausalLM.call"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 
0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/rembert/modeling_tf_rembert.py#L1134" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_hidden_states<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_attention_mask<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">past_key_values<span class="opacity-60">: typing.Union[typing.Tuple[typing.Tuple[typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor]]], NoneType] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_cache<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">training<span class="opacity-60">: typing.Optional[bool] = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFCausalLMOutputWithCrossAttentions" >transformers.modeling_tf_outputs.TFCausalLMOutputWithCrossAttentions</a> or <code>tuple(tf.Tensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.TFRemBertForCausalLM.call.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFCausalLMOutputWithCrossAttentions" >transformers.modeling_tf_outputs.TFCausalLMOutputWithCrossAttentions</a> or <code>tuple(tf.Tensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFCausalLMOutputWithCrossAttentions" >transformers.modeling_tf_outputs.TFCausalLMOutputWithCrossAttentions</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/rembert#transformers.RemBertConfig" >RemBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(n,)</code>, <em>optional</em>, where n is the number of non-masked labels, returned when <code>labels</code> is provided) — 
Language modeling loss (for next-token prediction).</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) — Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder’s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>past_key_values</strong> (<code>List[tf.Tensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) — List of <code>tf.Tensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>encoder_hidden_states (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul><li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li></ul> <p>past_key_values (<code>Tuple[Tuple[tf.Tensor]]</code> of length <code>config.n_layers</code>) contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. 
If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don’t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>. use_cache (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>): If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>). Set to <code>False</code> during training and to <code>True</code> during generation. labels (<code>tf.Tensor</code> or <code>np.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>): Labels for computing the cross entropy classification loss. Indices should be in <code>[0, ..., config.vocab_size - 1]</code>.</p> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RemBertTokenizer, TFRemBertForCausalLM
<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf

<span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RemBertTokenizer.from_pretrained(<span class="hljs-string">&quot;rembert&quot;</span>)
<span class="hljs-meta">&gt;&gt;&gt; </span>model = TFRemBertForCausalLM.from_pretrained(<span class="hljs-string">&quot;rembert&quot;</span>)

<span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>)
<span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs)
<span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.TFRemBertForSequenceClassification" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRemBertForSequenceClassification"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" 
height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TFRemBertForSequenceClassification </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFRemBertForSequenceClassification"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TFRemBertForSequenceClassification</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TFRemBertForSequenceClassification" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFRemBertForSequenceClassification"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/rembert/modeling_tf_rembert.py#L1251" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRemBertForSequenceClassification.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRemBertForSequenceClassification.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/rembert#transformers.RemBertConfig">RemBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>RemBERT Model transformer with a sequence classification/regression head on top e.g., for GLUE tasks.</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)</p> <p>This model is also a <a href="https://www.tensorflow.org/api_docs/python/tf/keras/Model" rel="nofollow">tf.keras.Model</a> subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and behavior.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>TF 2.0 models accept two formats as inputs:</p> <ul><li>having all inputs as keyword arguments (like PyTorch models), or</li> <li>having all inputs as a list, tuple or dict in the first positional argument.</li></ul> <p>This second option is useful when using the <code>tf.keras.Model.fit</code> method, which currently requires having all the tensors in the first argument of the model call function: <code>model(inputs)</code>.</p> <p>If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument:</p> <ul><li>a single Tensor with <code>input_ids</code> only and nothing else: <code>model(input_ids)</code></li> <li>a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: <code>model([input_ids, attention_mask])</code> or <code>model([input_ids, attention_mask, token_type_ids])</code></li> <li>a dictionary with one or several input Tensors associated with the input names given in the docstring: <code>model({&quot;input_ids&quot;: input_ids, &quot;token_type_ids&quot;: token_type_ids})</code></li></ul></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFRemBertForSequenceClassification.call"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>call</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFRemBertForSequenceClassification.call" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFRemBertForSequenceClassification.call"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 
1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/rembert/modeling_tf_rembert.py#L1265" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60">: 
typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">training<span class="opacity-60">: typing.Optional[bool] = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFSequenceClassifierOutput" >transformers.modeling_tf_outputs.TFSequenceClassifierOutput</a> or <code>tuple(tf.Tensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRemBertForSequenceClassification.call.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRemBertForSequenceClassification.call.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>np.ndarray</code>, <code>tf.Tensor</code>, <code>List[tf.Tensor]</code> `<code>Dict[str, tf.Tensor]</code> or <code>Dict[str, np.ndarray]</code> and each example must have the shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. 
See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRemBertForSequenceClassification.call.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRemBertForSequenceClassification.call.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRemBertForSequenceClassification.call.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRemBertForSequenceClassification.call.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRemBertForSequenceClassification.call.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRemBertForSequenceClassification.call.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRemBertForSequenceClassification.call.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRemBertForSequenceClassification.call.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRemBertForSequenceClassification.call.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRemBertForSequenceClassification.call.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRemBertForSequenceClassification.call.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRemBertForSequenceClassification.call.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRemBertForSequenceClassification.call.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRemBertForSequenceClassification.call.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. 
This argument can be used only in eager mode; in graph mode, the value in the config will be used instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRemBertForSequenceClassification.call.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRemBertForSequenceClassification.call.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode; in graph mode, the value will always be set to <code>True</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRemBertForSequenceClassification.call.training" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRemBertForSequenceClassification.call.training"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRemBertForSequenceClassification.call.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.TFRemBertForSequenceClassification.call.labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>tf.Tensor</code> or <code>np.ndarray</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.TFRemBertForSequenceClassification.call.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFSequenceClassifierOutput" >transformers.modeling_tf_outputs.TFSequenceClassifierOutput</a> or <code>tuple(tf.Tensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFSequenceClassifierOutput" >transformers.modeling_tf_outputs.TFSequenceClassifierOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/rembert#transformers.RemBertConfig" >RemBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, )</code>, <em>optional</em>, returned when <code>labels</code> is provided) — Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, config.num_labels)</code>) — Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when 
<code>config.output_attentions=True</code>) — Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attention weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/rembert#transformers.TFRemBertForSequenceClassification">TFRemBertForSequenceClassification</a> forward method overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for the forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this one, since the former takes care of running the pre- and post-processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RemBertTokenizer, TFRemBertForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RemBertTokenizer.from_pretrained(<span class="hljs-string">&quot;google/rembert&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFRemBertForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;google/rembert&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs[<span class="hljs-string">&quot;labels&quot;</span>] = tf.reshape(tf.constant(<span class="hljs-number">1</span>), (-<span class="hljs-number">1</span>, <span class="hljs-number">1</span>)) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss 
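<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># A sketch beyond the original example (assumes eager execution): read the predicted class id off the logits</span> <span class="hljs-meta">&gt;&gt;&gt; </span>predicted_class_id = <span class="hljs-built_in">int</span>(tf.math.argmax(outputs.logits, axis=-<span class="hljs-number">1</span>)[<span class="hljs-number">0</span>])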
<span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.TFRemBertForMultipleChoice" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRemBertForMultipleChoice"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TFRemBertForMultipleChoice </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFRemBertForMultipleChoice"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TFRemBertForMultipleChoice</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TFRemBertForMultipleChoice" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFRemBertForMultipleChoice"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a 
class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/rembert/modeling_tf_rembert.py#L1336" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRemBertForMultipleChoice.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRemBertForMultipleChoice.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/rembert#transformers.RemBertConfig">RemBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>RemBERT Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)</p> <p>This model is also a <a href="https://www.tensorflow.org/api_docs/python/tf/keras/Model" rel="nofollow">tf.keras.Model</a> subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and behavior.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>TF 2.0 models accept two formats as inputs:</p> <ul><li>having all inputs as keyword arguments (like PyTorch models), or</li> <li>having all inputs as a list, tuple or dict in the first positional argument.</li></ul> <p>This second option is useful when using the <code>tf.keras.Model.fit</code> method, which currently requires having all the tensors in the first argument of the model call function: <code>model(inputs)</code>.</p> <p>If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument:</p> <ul><li>a single Tensor with <code>input_ids</code> only and nothing else: <code>model(input_ids)</code></li> <li>a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: <code>model([input_ids, attention_mask])</code> or <code>model([input_ids, attention_mask, token_type_ids])</code></li> <li>a dictionary with one or several input Tensors associated with the input names given in the docstring: <code>model({&quot;input_ids&quot;: input_ids, &quot;token_type_ids&quot;: token_type_ids})</code></li></ul></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFRemBertForMultipleChoice.call"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>call</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFRemBertForMultipleChoice.call" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFRemBertForMultipleChoice.call"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 
1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/rembert/modeling_tf_rembert.py#L1356" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60">: 
typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">training<span class="opacity-60">: typing.Optional[bool] = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput" >transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput</a> or <code>tuple(tf.Tensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRemBertForMultipleChoice.call.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRemBertForMultipleChoice.call.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>np.ndarray</code>, <code>tf.Tensor</code>, <code>List[tf.Tensor]</code> `<code>Dict[str, tf.Tensor]</code> or <code>Dict[str, np.ndarray]</code> and each example must have the shape <code>(batch_size, num_choices, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. 
See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRemBertForMultipleChoice.call.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRemBertForMultipleChoice.call.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRemBertForMultipleChoice.call.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRemBertForMultipleChoice.call.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRemBertForMultipleChoice.call.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRemBertForMultipleChoice.call.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRemBertForMultipleChoice.call.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRemBertForMultipleChoice.call.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRemBertForMultipleChoice.call.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRemBertForMultipleChoice.call.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, num_choices, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRemBertForMultipleChoice.call.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRemBertForMultipleChoice.call.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRemBertForMultipleChoice.call.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRemBertForMultipleChoice.call.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. 
This argument can be used only in eager mode; in graph mode the value in the config will be used instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRemBertForMultipleChoice.call.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRemBertForMultipleChoice.call.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode; in graph mode the value will always be set to <code>True</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRemBertForMultipleChoice.call.training" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRemBertForMultipleChoice.call.training"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRemBertForMultipleChoice.call.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.TFRemBertForMultipleChoice.call.labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>tf.Tensor</code> or <code>np.ndarray</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the multiple choice classification loss. Indices should be in <code>[0, ..., num_choices]</code> where <code>num_choices</code> is the size of the second dimension of the input tensors. (See <code>input_ids</code> above)<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.TFRemBertForMultipleChoice.call.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput" >transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput</a> or <code>tuple(tf.Tensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput" >transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/rembert#transformers.RemBertConfig" >RemBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <em>(batch_size, )</em>, <em>optional</em>, returned when <code>labels</code> is provided) — Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, num_choices)</code>) — <em>num_choices</em> is the second dimension of the input tensors. 
(see <em>input_ids</em> above).</p> <p>Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/rembert#transformers.TFRemBertForMultipleChoice">TFRemBertForMultipleChoice</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RemBertTokenizer, TFRemBertForMultipleChoice <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RemBertTokenizer.from_pretrained(<span class="hljs-string">&quot;rembert&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = 
TFRemBertForMultipleChoice.from_pretrained(<span class="hljs-string">&quot;rembert&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice0 = <span class="hljs-string">&quot;It is eaten with a fork and a knife.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice1 = <span class="hljs-string">&quot;It is eaten while held in the hand.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors=<span class="hljs-string">&quot;tf&quot;</span>, padding=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = {k: tf.expand_dims(v, <span class="hljs-number">0</span>) <span class="hljs-keyword">for</span> k, v <span class="hljs-keyword">in</span> encoding.items()} <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-comment"># batch size is 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># the linear classifier still needs to be trained</span> <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.TFRemBertForTokenClassification" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRemBertForTokenClassification"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TFRemBertForTokenClassification </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFRemBertForTokenClassification"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 
8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TFRemBertForTokenClassification</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TFRemBertForTokenClassification" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFRemBertForTokenClassification"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/rembert/modeling_tf_rembert.py#L1464" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRemBertForTokenClassification.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRemBertForTokenClassification.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a 
href="/docs/transformers/pr_16143/en/model_doc/rembert#transformers.RemBertConfig">RemBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>RemBERT Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)</p> <p>This model is also a <a href="https://www.tensorflow.org/api_docs/python/tf/keras/Model" rel="nofollow">tf.keras.Model</a> subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>TF 2.0 models accepts two formats as inputs:</p> <ul><li>having all inputs as keyword arguments (like PyTorch models), or</li> <li>having all inputs as a list, tuple or dict in the first positional arguments.</li></ul> <p>This second option is useful when using <code>tf.keras.Model.fit</code> method which currently requires having all the tensors in the first argument of the model call function: <code>model(inputs)</code>.</p> <p>If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :</p> <ul><li>a single Tensor with <code>input_ids</code> only and nothing else: <code>model(inputs_ids)</code></li> <li>a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: <code>model([input_ids, attention_mask])</code> or <code>model([input_ids, attention_mask, token_type_ids])</code></li> <li>a dictionary with one or several input Tensors associated to the input names given in the docstring: <code>model({&quot;input_ids&quot;: input_ids, &quot;token_type_ids&quot;: token_type_ids})</code></li></ul></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFRemBertForTokenClassification.call"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" 
clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>call</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFRemBertForTokenClassification.call" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFRemBertForTokenClassification.call"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/rembert/modeling_tf_rembert.py#L1476" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma 
cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">training<span class="opacity-60">: typing.Optional[bool] = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFTokenClassifierOutput" >transformers.modeling_tf_outputs.TFTokenClassifierOutput</a> or <code>tuple(tf.Tensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRemBertForTokenClassification.call.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRemBertForTokenClassification.call.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 
0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>np.ndarray</code>, <code>tf.Tensor</code>, <code>List[tf.Tensor]</code> `<code>Dict[str, tf.Tensor]</code> or <code>Dict[str, np.ndarray]</code> and each example must have the shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRemBertForTokenClassification.call.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRemBertForTokenClassification.call.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRemBertForTokenClassification.call.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRemBertForTokenClassification.call.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRemBertForTokenClassification.call.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRemBertForTokenClassification.call.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRemBertForTokenClassification.call.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRemBertForTokenClassification.call.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRemBertForTokenClassification.call.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRemBertForTokenClassification.call.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRemBertForTokenClassification.call.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRemBertForTokenClassification.call.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRemBertForTokenClassification.call.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRemBertForTokenClassification.call.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. 
This argument can be used only in eager mode; in graph mode the value in the config will be used instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRemBertForTokenClassification.call.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRemBertForTokenClassification.call.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode; in graph mode the value will always be set to <code>True</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRemBertForTokenClassification.call.training" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRemBertForTokenClassification.call.training"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRemBertForTokenClassification.call.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.TFRemBertForTokenClassification.call.labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>tf.Tensor</code> or <code>np.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the token classification loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.TFRemBertForTokenClassification.call.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFTokenClassifierOutput" >transformers.modeling_tf_outputs.TFTokenClassifierOutput</a> or <code>tuple(tf.Tensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFTokenClassifierOutput" >transformers.modeling_tf_outputs.TFTokenClassifierOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/rembert#transformers.RemBertConfig" >RemBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(n,)</code>, <em>optional</em>, where n is the number of unmasked labels, returned when <code>labels</code> is provided) — Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.num_labels)</code>) — Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention 
softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/rembert#transformers.TFRemBertForTokenClassification">TFRemBertForTokenClassification</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RemBertTokenizer, TFRemBertForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RemBertTokenizer.from_pretrained(<span class="hljs-string">&quot;rembert&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFRemBertForTokenClassification.from_pretrained(<span class="hljs-string">&quot;rembert&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = inputs[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>inputs[<span class="hljs-string">&quot;labels&quot;</span>] = tf.reshape( <span class="hljs-meta">... 
</span> tf.constant([<span class="hljs-number">1</span>] * tf.size(input_ids).numpy()), (-<span class="hljs-number">1</span>, tf.size(input_ids)) <span class="hljs-meta">&gt;&gt;&gt; </span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.TFRemBertForQuestionAnswering" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRemBertForQuestionAnswering"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TFRemBertForQuestionAnswering </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFRemBertForQuestionAnswering"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TFRemBertForQuestionAnswering</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TFRemBertForQuestionAnswering" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFRemBertForQuestionAnswering"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 
0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/rembert/modeling_tf_rembert.py#L1545" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRemBertForQuestionAnswering.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRemBertForQuestionAnswering.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/rembert#transformers.RemBertConfig">RemBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>RemBERT Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layer on top of the hidden-states output to compute <code>span start logits</code> and <code>span end logits</code>).</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>. 
Check the superclass documentation for the generic methods the library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads, etc.).</p> <p>This model is also a <a href="https://www.tensorflow.org/api_docs/python/tf/keras/Model" rel="nofollow">tf.keras.Model</a> subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and behavior.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>TF 2.0 models accept two formats as inputs:</p> <ul><li>having all inputs as keyword arguments (like PyTorch models), or</li> <li>having all inputs as a list, tuple or dict in the first positional argument.</li></ul> <p>This second option is useful when using the <code>tf.keras.Model.fit</code> method, which currently requires having all the tensors in the first argument of the model call function: <code>model(inputs)</code>.</p> <p>If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument:</p> <ul><li>a single Tensor with <code>input_ids</code> only and nothing else: <code>model(input_ids)</code></li> <li>a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: <code>model([input_ids, attention_mask])</code> or <code>model([input_ids, attention_mask, token_type_ids])</code></li> <li>a dictionary with one or several input Tensors associated with the input names given in the docstring: <code>model({&quot;input_ids&quot;: input_ids, &quot;token_type_ids&quot;: token_type_ids})</code></li></ul> <p>A short sketch of these three call styles is included at the end of this page.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFRemBertForQuestionAnswering.call"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>call</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFRemBertForQuestionAnswering.call" class="header-link invisible with-hover:group-hover:visible pr-2"
href="#transformers.TFRemBertForQuestionAnswering.call"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/rembert/modeling_tf_rembert.py#L1556" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black 
hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">start_positions<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">end_positions<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">training<span class="opacity-60">: typing.Optional[bool] = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput" >transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput</a> or <code>tuple(tf.Tensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRemBertForQuestionAnswering.call.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRemBertForQuestionAnswering.call.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>np.ndarray</code>, <code>tf.Tensor</code>, <code>List[tf.Tensor]</code> `<code>Dict[str, tf.Tensor]</code> or <code>Dict[str, np.ndarray]</code> and each example must have the shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a 
href="/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRemBertForQuestionAnswering.call.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRemBertForQuestionAnswering.call.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRemBertForQuestionAnswering.call.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRemBertForQuestionAnswering.call.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRemBertForQuestionAnswering.call.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRemBertForQuestionAnswering.call.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRemBertForQuestionAnswering.call.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRemBertForQuestionAnswering.call.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRemBertForQuestionAnswering.call.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRemBertForQuestionAnswering.call.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRemBertForQuestionAnswering.call.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRemBertForQuestionAnswering.call.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRemBertForQuestionAnswering.call.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRemBertForQuestionAnswering.call.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRemBertForQuestionAnswering.call.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRemBertForQuestionAnswering.call.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRemBertForQuestionAnswering.call.training" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRemBertForQuestionAnswering.call.training"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to `False&#x201C;) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRemBertForQuestionAnswering.call.start_positions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.TFRemBertForQuestionAnswering.call.start_positions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>start_positions</strong> (<code>tf.Tensor</code> or <code>np.ndarray</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). Positions outside of the sequence are not taken into account for computing the loss.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRemBertForQuestionAnswering.call.end_positions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRemBertForQuestionAnswering.call.end_positions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>end_positions</strong> (<code>tf.Tensor</code> or <code>np.ndarray</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). Positions outside of the sequence are not taken into account for computing the loss. A short usage sketch with these labels is included at the end of this page.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.TFRemBertForQuestionAnswering.call.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput" >transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput</a> or <code>tuple(tf.Tensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput" >transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/rembert#transformers.RemBertConfig" >RemBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, )</code>, <em>optional</em>, returned when <code>start_positions</code> and <code>end_positions</code> are provided) — Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.</p> </li> <li> <p><strong>start_logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) — Span-start scores (before SoftMax).</p> </li> <li> <p><strong>end_logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) — Span-end scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attention weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/rembert#transformers.TFRemBertForQuestionAnswering">TFRemBertForQuestionAnswering</a> forward method overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for the forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre- and post-processing steps while the
latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RemBertTokenizer, TFRemBertForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RemBertTokenizer.from_pretrained(<span class="hljs-string">&quot;rembert&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFRemBertForQuestionAnswering.from_pretrained(<span class="hljs-string">&quot;rembert&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>question, text = <span class="hljs-string">&quot;Who was Jim Henson?&quot;</span>, <span class="hljs-string">&quot;Jim Henson was a nice puppet&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>input_dict = tokenizer(question, text, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_dict) <span class="hljs-meta">&gt;&gt;&gt; </span>start_logits = outputs.start_logits <span class="hljs-meta">&gt;&gt;&gt; </span>end_logits = outputs.end_logits <span class="hljs-meta">&gt;&gt;&gt; </span>all_tokens = tokenizer.convert_ids_to_tokens(input_dict[<span class="hljs-string">&quot;input_ids&quot;</span>].numpy()[<span class="hljs-number">0</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>answer = <span class="hljs-string">&quot; &quot;</span>.join(all_tokens[tf.math.argmax(start_logits, <span class="hljs-number">1</span>)[<span class="hljs-number">0</span>] : tf.math.argmax(end_logits, <span class="hljs-number">1</span>)[<span class="hljs-number">0</span>] + <span class="hljs-number">1</span>])<!-- HTML_TAG_END --></pre></div></div></div> <script type="module" data-hydrate="13iifyq"> import { start } from "/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"; start({ target: document.querySelector('[data-hydrate="13iifyq"]').parentNode, paths: {"base":"/docs/transformers/pr_16143/en","assets":"/docs/transformers/pr_16143/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"), 
import("/docs/transformers/pr_16143/en/_app/pages/model_doc/rembert.mdx-8524b264.js") ], params: {} } }); </script>
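<p>A minimal sketch of the three call styles described in the tip above (keyword arguments, a list in the first positional argument, or a dictionary keyed by input names). This snippet is an illustrative addition rather than part of the generated API reference, and it reuses the <code>"rembert"</code> checkpoint string from the example above.</p> <div class="code-block relative"><pre>
from transformers import RemBertTokenizer, TFRemBertForQuestionAnswering

tokenizer = RemBertTokenizer.from_pretrained("rembert")
model = TFRemBertForQuestionAnswering.from_pretrained("rembert")
enc = tokenizer("Who was Jim Henson?", "Jim Henson was a nice puppet", return_tensors="tf")

# 1. all inputs as keyword arguments (like PyTorch models)
outputs = model(input_ids=enc["input_ids"], attention_mask=enc["attention_mask"])

# 2. all inputs as a list in the first positional argument, in the order given in the docstring
outputs = model([enc["input_ids"], enc["attention_mask"]])

# 3. all inputs as a dictionary associating input names to tensors
outputs = model({"input_ids": enc["input_ids"], "attention_mask": enc["attention_mask"]})
</pre></div>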
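<p>Likewise, a hedged sketch of how the <code>start_positions</code> and <code>end_positions</code> labels documented above can be supplied to obtain the span-extraction loss. The gold span indices below are purely illustrative values, not taken from the original page.</p> <div class="code-block relative"><pre>
import tensorflow as tf
from transformers import RemBertTokenizer, TFRemBertForQuestionAnswering

tokenizer = RemBertTokenizer.from_pretrained("rembert")
model = TFRemBertForQuestionAnswering.from_pretrained("rembert")
inputs = tokenizer("Who was Jim Henson?", "Jim Henson was a nice puppet", return_tensors="tf")

# Illustrative gold start/end token indices of shape (batch_size,); positions are
# clamped to the sequence length and out-of-sequence positions are ignored in the loss.
start_positions = tf.constant([14])
end_positions = tf.constant([15])

outputs = model(**inputs, start_positions=start_positions, end_positions=end_positions)
loss = outputs.loss  # span-extraction loss of shape (batch_size,)
start_logits = outputs.start_logits
end_logits = outputs.end_logits
</pre></div>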
209
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en
hf_public_repos/doc-build-dev/transformers/pr_16143/en/model_doc/mpnet.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;mpnet&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;overview&quot;,&quot;title&quot;:&quot;Overview&quot;},{&quot;local&quot;:&quot;transformers.MPNetConfig&quot;,&quot;title&quot;:&quot;MPNetConfig&quot;},{&quot;local&quot;:&quot;transformers.MPNetTokenizer&quot;,&quot;title&quot;:&quot;MPNetTokenizer&quot;},{&quot;local&quot;:&quot;transformers.MPNetTokenizerFast&quot;,&quot;title&quot;:&quot;MPNetTokenizerFast&quot;},{&quot;local&quot;:&quot;transformers.MPNetModel&quot;,&quot;title&quot;:&quot;MPNetModel&quot;},{&quot;local&quot;:&quot;transformers.MPNetForMaskedLM&quot;,&quot;title&quot;:&quot;MPNetForMaskedLM&quot;},{&quot;local&quot;:&quot;transformers.MPNetForSequenceClassification&quot;,&quot;title&quot;:&quot;MPNetForSequenceClassification&quot;},{&quot;local&quot;:&quot;transformers.MPNetForMultipleChoice&quot;,&quot;title&quot;:&quot;MPNetForMultipleChoice&quot;},{&quot;local&quot;:&quot;transformers.MPNetForTokenClassification&quot;,&quot;title&quot;:&quot;MPNetForTokenClassification&quot;},{&quot;local&quot;:&quot;transformers.MPNetForQuestionAnswering&quot;,&quot;title&quot;:&quot;MPNetForQuestionAnswering&quot;},{&quot;local&quot;:&quot;transformers.TFMPNetModel&quot;,&quot;title&quot;:&quot;TFMPNetModel&quot;},{&quot;local&quot;:&quot;transformers.TFMPNetForMaskedLM&quot;,&quot;title&quot;:&quot;TFMPNetForMaskedLM&quot;},{&quot;local&quot;:&quot;transformers.TFMPNetForSequenceClassification&quot;,&quot;title&quot;:&quot;TFMPNetForSequenceClassification&quot;},{&quot;local&quot;:&quot;transformers.TFMPNetForMultipleChoice&quot;,&quot;title&quot;:&quot;TFMPNetForMultipleChoice&quot;},{&quot;local&quot;:&quot;transformers.TFMPNetForTokenClassification&quot;,&quot;title&quot;:&quot;TFMPNetForTokenClassification&quot;},{&quot;local&quot;:&quot;transformers.TFMPNetForQuestionAnswering&quot;,&quot;title&quot;:&quot;TFMPNetForQuestionAnswering&quot;}],&quot;title&quot;:&quot;MPNet&quot;}" data-svelte="svelte-1phssyn"> <link rel="stylesheet" href="/docs/transformers/pr_16143/en/_app/assets/pages/__layout.svelte-a5c8879b.css"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/vendor-4833417e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/paths-4b3c6e7e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/model_doc/mpnet.mdx-a2ac5620.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Tip-fffd6df1.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Docstring-4f315ed9.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/IconCopyLink-4b81c553.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CodeBlock-6a3d1b46.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CopyButton-dacfbfaf.js"> <h1 class="relative group"><a id="mpnet" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#mpnet"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" 
role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>MPNet </span></h1> <h2 class="relative group"><a id="overview" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#overview"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Overview </span></h2> <p>The MPNet model was proposed in <a href="https://arxiv.org/abs/2004.09297" rel="nofollow">MPNet: Masked and Permuted Pre-training for Language Understanding</a> by Kaitao Song, Xu Tan, Tao Qin, Jianfeng Lu, Tie-Yan Liu.</p> <p>MPNet adopts a novel pre-training method, named masked and permuted language modeling, to inherit the advantages of masked language modeling and permuted language modeling for natural language understanding.</p> <p>The abstract from the paper is the following:</p> <p><em>BERT adopts masked language modeling (MLM) for pre-training and is one of the most successful pre-training models. Since BERT neglects dependency among predicted tokens, XLNet introduces permuted language modeling (PLM) for pre-training to address this problem. However, XLNet does not leverage the full position information of a sentence and thus suffers from position discrepancy between pre-training and fine-tuning. In this paper, we propose MPNet, a novel pre-training method that inherits the advantages of BERT and XLNet and avoids their limitations. MPNet leverages the dependency among predicted tokens through permuted language modeling (vs. MLM in BERT), and takes auxiliary position information as input to make the model see a full sentence and thus reducing the position discrepancy (vs. PLM in XLNet). We pre-train MPNet on a large-scale dataset (over 160GB text corpora) and fine-tune on a variety of down-streaming tasks (GLUE, SQuAD, etc). Experimental results show that MPNet outperforms MLM and PLM by a large margin, and achieves better results on these tasks compared with previous state-of-the-art pre-trained methods (e.g., BERT, XLNet, RoBERTa) under the same model setting.</em></p> <p>Tips:</p> <ul><li>MPNet doesn’t have <code>token_type_ids</code>, you don’t need to indicate which token belongs to which segment. 
just separate your segments with the separation token <code>tokenizer.sep_token</code> (or <code>[sep]</code>).</li></ul> <p>The original code can be found <a href="https://github.com/microsoft/MPNet" rel="nofollow">here</a>.</p> <h2 class="relative group"><a id="transformers.MPNetConfig" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetConfig"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>MPNetConfig </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.MPNetConfig"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">MPNetConfig</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.MPNetConfig" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.MPNetConfig"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> 
<a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/mpnet/configuration_mpnet.py#L29" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">vocab_size<span class="opacity-60"> = 30527</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_size<span class="opacity-60"> = 768</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_hidden_layers<span class="opacity-60"> = 12</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_attention_heads<span class="opacity-60"> = 12</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">intermediate_size<span class="opacity-60"> = 3072</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_act<span class="opacity-60"> = &#39;gelu&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_dropout_prob<span class="opacity-60"> = 0.1</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_probs_dropout_prob<span class="opacity-60"> = 0.1</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_position_embeddings<span class="opacity-60"> = 512</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">initializer_range<span class="opacity-60"> = 0.02</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">layer_norm_eps<span class="opacity-60"> = 1e-12</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">relative_attention_num_buckets<span class="opacity-60"> = 32</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_token_id<span class="opacity-60"> = 1</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">bos_token_id<span class="opacity-60"> = 0</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eos_token_id<span class="opacity-60"> = 2</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetConfig.vocab_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetConfig.vocab_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 30527) &#x2014; Vocabulary size of the MPNet model. Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed when calling <a href="/docs/transformers/pr_16143/en/model_doc/mpnet#transformers.MPNetModel">MPNetModel</a> or <a href="/docs/transformers/pr_16143/en/model_doc/mpnet#transformers.TFMPNetModel">TFMPNetModel</a>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetConfig.hidden_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetConfig.hidden_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_size</strong> (<code>int</code>, <em>optional</em>, defaults to 768) &#x2014; Dimensionality of the encoder layers and the pooler layer.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetConfig.num_hidden_layers" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 
with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetConfig.num_hidden_layers"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_hidden_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of hidden layers in the Transformer encoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetConfig.num_attention_heads" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetConfig.num_attention_heads"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetConfig.intermediate_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetConfig.intermediate_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 
56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>intermediate_size</strong> (<code>int</code>, <em>optional</em>, defaults to 3072) &#x2014; Dimensionality of the &#x201C;intermediate&#x201D; (often named feed-forward) layer in the Transformer encoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetConfig.hidden_act" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetConfig.hidden_act"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_act</strong> (<code>str</code> or <code>Callable</code>, <em>optional</em>, defaults to <code>&quot;gelu&quot;</code>) &#x2014; The non-linear activation function (function or string) in the encoder and pooler. If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;silu&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetConfig.hidden_dropout_prob" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetConfig.hidden_dropout_prob"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetConfig.attention_probs_dropout_prob" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute 
with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetConfig.attention_probs_dropout_prob"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_probs_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout ratio for the attention probabilities.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetConfig.max_position_embeddings" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetConfig.max_position_embeddings"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_position_embeddings</strong> (<code>int</code>, <em>optional</em>, defaults to 512) &#x2014; The maximum sequence length that this model might ever be used with. 
Typically set this to something large just in case (e.g., 512 or 1024 or 2048).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetConfig.initializer_range" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetConfig.initializer_range"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>initializer_range</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetConfig.layer_norm_eps" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetConfig.layer_norm_eps"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>layer_norm_eps</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-12) &#x2014; The epsilon used by the layer normalization layers.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetConfig.relative_attention_num_buckets" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetConfig.relative_attention_num_buckets"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 
1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>relative_attention_num_buckets</strong> (<code>int</code>, <em>optional</em>, defaults to 32) &#x2014; The number of buckets to use for each attention layer.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>This is the configuration class to store the configuration of an <a href="/docs/transformers/pr_16143/en/model_doc/mpnet#transformers.MPNetModel">MPNetModel</a> or a <a href="/docs/transformers/pr_16143/en/model_doc/mpnet#transformers.TFMPNetModel">TFMPNetModel</a>. It is used to instantiate an MPNet model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the MPNet <a href="https://huggingface.co/mpnet-base" rel="nofollow">mpnet-base</a> architecture.</p> <p>Configuration objects inherit from <a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a> and can be used to control the model outputs. Read the documentation from <a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a> for more information.</p> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MPNetModel, MPNetConfig <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing an MPNet mpnet-base style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = MPNetConfig() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a model from the mpnet-base style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = MPNetModel(configuration) <span class="hljs-meta">&gt;&gt;&gt; </span><span 
class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = model.config<!-- HTML_TAG_END --></pre></div></div> <h2 class="relative group"><a id="transformers.MPNetTokenizer" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetTokenizer"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>MPNetTokenizer </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.MPNetTokenizer"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">MPNetTokenizer</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.MPNetTokenizer" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.MPNetTokenizer"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 
!no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/mpnet/tokenization_mpnet.py#L66" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">vocab_file<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">do_lower_case<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">do_basic_tokenize<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">never_split<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">bos_token<span class="opacity-60"> = &#39;&lt;s&gt;&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eos_token<span class="opacity-60"> = &#39;&lt;/s&gt;&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">sep_token<span class="opacity-60"> = &#39;&lt;/s&gt;&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cls_token<span class="opacity-60"> = &#39;&lt;s&gt;&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">unk_token<span class="opacity-60"> = &#39;[UNK]&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_token<span class="opacity-60"> = &#39;&lt;pad&gt;&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">mask_token<span class="opacity-60"> = &#39;&lt;mask&gt;&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tokenize_chinese_chars<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">strip_accents<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a 
id="transformers.MPNetTokenizer.vocab_file" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetTokenizer.vocab_file"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>vocab_file</strong> (<code>str</code>) &#x2014; Path to the vocabulary file.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetTokenizer.do_lower_case" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetTokenizer.do_lower_case"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>do_lower_case</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to lowercase the input when tokenizing.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetTokenizer.do_basic_tokenize" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetTokenizer.do_basic_tokenize"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 
56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>do_basic_tokenize</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to do basic tokenization before WordPiece.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetTokenizer.never_split" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetTokenizer.never_split"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>never_split</strong> (<code>Iterable</code>, <em>optional</em>) &#x2014; Collection of tokens which will never be split during tokenization. Only has an effect when <code>do_basic_tokenize=True</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetTokenizer.bos_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetTokenizer.bos_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>bos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;s&gt;&quot;</code>) &#x2014; The beginning of sequence token that was used during pre-training. Can be used as a sequence classifier token.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. 
The token used is the <code>cls_token</code>.</p> </div><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetTokenizer.eos_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetTokenizer.eos_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>eos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;/s&gt;&quot;</code>) &#x2014; The end of sequence token.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the <code>sep_token</code>.</p> </div><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetTokenizer.sep_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetTokenizer.sep_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>sep_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;/s&gt;&quot;</code>) &#x2014; The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. 
It is also used as the last token of a sequence built with special tokens.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetTokenizer.cls_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetTokenizer.cls_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cls_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;s&gt;&quot;</code>) &#x2014; The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetTokenizer.unk_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetTokenizer.unk_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[UNK]&quot;</code>) &#x2014; The unknown token. 
A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetTokenizer.pad_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetTokenizer.pad_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pad_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;pad&gt;&quot;</code>) &#x2014; The token used for padding, for example when batching sequences of different lengths.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetTokenizer.mask_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetTokenizer.mask_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>mask_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;mask&gt;&quot;</code>) &#x2014; The token used for masking values. This is the token used when training this model with masked language modeling. 
This is the token which the model will try to predict.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetTokenizer.tokenize_chinese_chars" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetTokenizer.tokenize_chinese_chars"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tokenize_chinese_chars</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to tokenize Chinese characters.</p> <p>This should likely be deactivated for Japanese (see this <a href="https://github.com/huggingface/transformers/issues/328" rel="nofollow">issue</a>). strip_accents &#x2014; (<code>bool</code>, <em>optional</em>): Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for <code>lowercase</code> (as in the original BERT).<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>This tokenizer inherits from <a href="/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a> which contains most of the methods. 
Users should refer to the superclass for more information regarding methods.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.MPNetTokenizer.build_inputs_with_special_tokens"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>build_inputs_with_special_tokens</span></h4><!-- HTML_TAG_END --> <a id="transformers.MPNetTokenizer.build_inputs_with_special_tokens" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.MPNetTokenizer.build_inputs_with_special_tokens"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/mpnet/tokenization_mpnet.py#L231" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_0<span class="opacity-60">: typing.List[int]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_1<span class="opacity-60">: typing.Optional[typing.List[int]] = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> 
<span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>List[int]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetTokenizer.build_inputs_with_special_tokens.token_ids_0" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetTokenizer.build_inputs_with_special_tokens.token_ids_0"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs to which the special tokens will be added<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetTokenizer.build_inputs_with_special_tokens.token_ids_1" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetTokenizer.build_inputs_with_special_tokens.token_ids_1"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.MPNetTokenizer.build_inputs_with_special_tokens.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>List[int]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- 
HTML_TAG_START --> <p>list of <a href="../glossary#input-ids">input IDs</a> with the appropriate special tokens.</p> <!-- HTML_TAG_END --></p></div></div> <p>Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A MPNet sequence has the following format:</p> <ul><li>single sequence: <code>&lt;s&gt; X &lt;/s&gt;</code></li> <li>pair of sequences: <code>&lt;s&gt; A &lt;/s&gt;&lt;/s&gt; B &lt;/s&gt;</code></li></ul></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.MPNetTokenizer.get_special_tokens_mask"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>get_special_tokens_mask</span></h4><!-- HTML_TAG_END --> <a id="transformers.MPNetTokenizer.get_special_tokens_mask" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.MPNetTokenizer.get_special_tokens_mask"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/mpnet/tokenization_mpnet.py#L256" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white 
dark:hover:bg-white dark:hover:text-black">token_ids_0<span class="opacity-60">: typing.List[int]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_1<span class="opacity-60">: typing.Optional[typing.List[int]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">already_has_special_tokens<span class="opacity-60">: bool = False</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>List[int]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetTokenizer.get_special_tokens_mask.token_ids_0" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetTokenizer.get_special_tokens_mask.token_ids_0"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of ids.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetTokenizer.get_special_tokens_mask.token_ids_1" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetTokenizer.get_special_tokens_mask.token_ids_1"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_1</strong> 
(<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetTokenizer.get_special_tokens_mask.already_has_special_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetTokenizer.get_special_tokens_mask.already_has_special_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>already_has_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Set to True if the token list is already formatted with special tokens for the model<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.MPNetTokenizer.get_special_tokens_mask.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>List[int]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.</p> <!-- HTML_TAG_END --></p></div></div> <p>Retrieves sequence ids from a token list that has no special tokens added. 
This method is called when adding special tokens using the tokenizer <code>prepare_for_model</code> methods.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.MPNetTokenizer.create_token_type_ids_from_sequences"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>create_token_type_ids_from_sequences</span></h4><!-- HTML_TAG_END --> <a id="transformers.MPNetTokenizer.create_token_type_ids_from_sequences" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.MPNetTokenizer.create_token_type_ids_from_sequences"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/mpnet/tokenization_mpnet.py#L283" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_0<span class="opacity-60">: typing.List[int]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_1<span class="opacity-60">: typing.Optional[typing.List[int]] = None</span></span> 
</span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>List[int]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetTokenizer.create_token_type_ids_from_sequences.token_ids_0" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetTokenizer.create_token_type_ids_from_sequences.token_ids_0"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of ids.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetTokenizer.create_token_type_ids_from_sequences.token_ids_1" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetTokenizer.create_token_type_ids_from_sequences.token_ids_1"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.MPNetTokenizer.create_token_type_ids_from_sequences.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>List[int]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> 
<p class="text-base"><!-- HTML_TAG_START --> <p>List of zeros.</p> <!-- HTML_TAG_END --></p></div></div> <p>Creates a mask from the two sequences passed to be used in a sequence-pair classification task. MPNet does not make use of token type ids, therefore a list of zeros is returned.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.MPNetTokenizer.save_vocabulary"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>save_vocabulary</span></h4><!-- HTML_TAG_END --> <a id="transformers.MPNetTokenizer.save_vocabulary" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.MPNetTokenizer.save_vocabulary"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/mpnet/tokenization_mpnet.py#L306" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">save_directory<span class="opacity-60">: str</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">filename_prefix<span class="opacity-60">: typing.Optional[str] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div></div></div> <h2 class="relative group"><a id="transformers.MPNetTokenizerFast" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetTokenizerFast"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>MPNetTokenizerFast </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.MPNetTokenizerFast"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">MPNetTokenizerFast</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.MPNetTokenizerFast" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.MPNetTokenizerFast"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/mpnet/tokenization_mpnet_fast.py#L51" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">vocab_file<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tokenizer_file<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">do_lower_case<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">bos_token<span class="opacity-60"> = &#39;&lt;s&gt;&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eos_token<span class="opacity-60"> = &#39;&lt;/s&gt;&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">sep_token<span class="opacity-60"> = &#39;&lt;/s&gt;&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cls_token<span class="opacity-60"> = &#39;&lt;s&gt;&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">unk_token<span class="opacity-60"> = &#39;[UNK]&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_token<span class="opacity-60"> = &#39;&lt;pad&gt;&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">mask_token<span class="opacity-60"> = &#39;&lt;mask&gt;&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tokenize_chinese_chars<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">strip_accents<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetTokenizerFast.vocab_file" class="header-link block pr-0.5 text-lg no-hover:hidden 
with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetTokenizerFast.vocab_file"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>vocab_file</strong> (<code>str</code>) &#x2014; File containing the vocabulary.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetTokenizerFast.do_lower_case" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetTokenizerFast.do_lower_case"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>do_lower_case</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to lowercase the input when tokenizing.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetTokenizerFast.bos_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetTokenizerFast.bos_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 
0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>bos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;s&gt;&quot;</code>) &#x2014; The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the <code>cls_token</code>.</p> </div><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetTokenizerFast.eos_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetTokenizerFast.eos_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>eos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;/s&gt;&quot;</code>) &#x2014; The end of sequence token.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>When building a sequence using special tokens, this is not the token that is used for the end of sequence. 
The token used is the <code>sep_token</code>.</p> </div><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetTokenizerFast.sep_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetTokenizerFast.sep_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>sep_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;/s&gt;&quot;</code>) &#x2014; The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetTokenizerFast.cls_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetTokenizerFast.cls_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cls_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;s&gt;&quot;</code>) &#x2014; The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). 
It is the first token of the sequence when built with special tokens.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetTokenizerFast.unk_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetTokenizerFast.unk_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[UNK]&quot;</code>) &#x2014; The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetTokenizerFast.pad_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetTokenizerFast.pad_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pad_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;pad&gt;&quot;</code>) &#x2014; The token used for padding, for example when batching sequences of different lengths.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetTokenizerFast.mask_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetTokenizerFast.mask_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 
88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>mask_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;mask&gt;&quot;</code>) &#x2014; The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetTokenizerFast.tokenize_chinese_chars" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetTokenizerFast.tokenize_chinese_chars"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tokenize_chinese_chars</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see <a href="https://github.com/huggingface/transformers/issues/328" rel="nofollow">this issue</a>). <strong>strip_accents</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for <code>lowercase</code> (as in the original BERT).<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Construct a “fast” MPNet tokenizer (backed by HuggingFace’s <em>tokenizers</em> library). Based on WordPiece.</p> <p>This tokenizer inherits from <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast">PreTrainedTokenizerFast</a> which contains most of the main methods. 
Users should refer to this superclass for more information regarding those methods.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.MPNetTokenizerFast.create_token_type_ids_from_sequences"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>create_token_type_ids_from_sequences</span></h4><!-- HTML_TAG_END --> <a id="transformers.MPNetTokenizerFast.create_token_type_ids_from_sequences" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.MPNetTokenizerFast.create_token_type_ids_from_sequences"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/mpnet/tokenization_mpnet_fast.py#L190" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_0<span class="opacity-60">: typing.List[int]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_1<span class="opacity-60">: typing.Optional[typing.List[int]] = None</span></span> </span> 
<span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>List[int]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetTokenizerFast.create_token_type_ids_from_sequences.token_ids_0" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetTokenizerFast.create_token_type_ids_from_sequences.token_ids_0"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of ids.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetTokenizerFast.create_token_type_ids_from_sequences.token_ids_1" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetTokenizerFast.create_token_type_ids_from_sequences.token_ids_1"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.MPNetTokenizerFast.create_token_type_ids_from_sequences.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>List[int]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 
dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>List of zeros.</p> <!-- HTML_TAG_END --></p></div></div> <p>Creates a mask from the two sequences passed to be used in a sequence-pair classification task. MPNet does not make use of token type ids, therefore a list of zeros is returned</p></div></div> <h2 class="relative group"><a id="transformers.MPNetModel" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetModel"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>MPNetModel </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.MPNetModel"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">MPNetModel</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.MPNetModel" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.MPNetModel"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 
0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/mpnet/modeling_mpnet.py#L482" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">add_pooling_layer<span class="opacity-60"> = True</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetModel.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetModel.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/mpnet#transformers.MPNetConfig">MPNetConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>The bare MPNet Model transformer outputting raw hidden-states without any specific head on top.</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads, etc.).</p> <p>This model is also a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and behavior.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.MPNetModel.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.MPNetModel.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.MPNetModel.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/mpnet/modeling_mpnet.py#L511" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span 
class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPooling" >transformers.modeling_outputs.BaseModelOutputWithPooling</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetModel.forward.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetModel.forward.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/mpnet#transformers.MPNetTokenizer">MPNetTokenizer</a>. 
See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetModel.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetModel.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetModel.forward.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetModel.forward.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetModel.forward.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetModel.forward.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetModel.forward.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetModel.forward.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetModel.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetModel.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetModel.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetModel.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetModel.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetModel.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.MPNetModel.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPooling" >transformers.modeling_outputs.BaseModelOutputWithPooling</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPooling" >transformers.modeling_outputs.BaseModelOutputWithPooling</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/mpnet#transformers.MPNetConfig" >MPNetConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) — Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>pooler_output</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, hidden_size)</code>) — Last layer hidden-state of the first token of the sequence (classification token) after further processing through the layers used for the auxiliary pretraining task. E.g. for BERT-family of models, this returns the classification token after processing through a linear layer and a tanh activation function. 
The linear layer weights are trained from the next sentence prediction (classification) objective during pretraining.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/mpnet#transformers.MPNetModel">MPNetModel</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MPNetTokenizer, MPNetModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MPNetTokenizer.from_pretrained(<span class="hljs-string">&quot;microsoft/mpnet-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = 
MPNetModel.from_pretrained(<span class="hljs-string">&quot;microsoft/mpnet-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.MPNetForMaskedLM" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetForMaskedLM"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>MPNetForMaskedLM </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.MPNetForMaskedLM"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">MPNetForMaskedLM</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.MPNetForMaskedLM" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.MPNetForMaskedLM"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 
0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/mpnet/modeling_mpnet.py#L575" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.MPNetForMaskedLM.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.MPNetForMaskedLM.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.MPNetForMaskedLM.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" 
href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/mpnet/modeling_mpnet.py#L594" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.MaskedLMOutput" >transformers.modeling_outputs.MaskedLMOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetForMaskedLM.forward.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetForMaskedLM.forward.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 
79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/mpnet#transformers.MPNetTokenizer">MPNetTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.__call__()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetForMaskedLM.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetForMaskedLM.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetForMaskedLM.forward.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetForMaskedLM.forward.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetForMaskedLM.forward.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetForMaskedLM.forward.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetForMaskedLM.forward.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetForMaskedLM.forward.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetForMaskedLM.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetForMaskedLM.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetForMaskedLM.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetForMaskedLM.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetForMaskedLM.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetForMaskedLM.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetForMaskedLM.forward.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetForMaskedLM.forward.labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code><!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.MPNetForMaskedLM.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.MaskedLMOutput" >transformers.modeling_outputs.MaskedLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.MaskedLMOutput" >transformers.modeling_outputs.MaskedLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/mpnet#transformers.MPNetConfig" >MPNetConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) — Masked language modeling (MLM) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) — Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, 
used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/mpnet#transformers.MPNetForMaskedLM">MPNetForMaskedLM</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MPNetTokenizer, MPNetForMaskedLM <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MPNetTokenizer.from_pretrained(<span class="hljs-string">&quot;microsoft/mpnet-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = MPNetForMaskedLM.from_pretrained(<span class="hljs-string">&quot;microsoft/mpnet-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;The capital of France is [MASK].&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = tokenizer(<span class="hljs-string">&quot;The capital of France is Paris.&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>)[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.MPNetForSequenceClassification" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 
with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetForSequenceClassification"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>MPNetForSequenceClassification </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.MPNetForSequenceClassification"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">MPNetForSequenceClassification</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.MPNetForSequenceClassification" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.MPNetForSequenceClassification"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/mpnet/modeling_mpnet.py#L684" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 
hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetForSequenceClassification.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetForSequenceClassification.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/mpnet#transformers.MPNetConfig">MPNetConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>MPNet Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)</p> <p>This model is also a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> subclass. 
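</p> <p>A minimal usage sketch follows, assuming the same loading pattern as the MPNetModel and MPNetForMaskedLM examples above; the <code>num_labels</code> value and the label tensor are illustrative assumptions, and the classification head on top of the pretrained <code>microsoft/mpnet-base</code> checkpoint starts out randomly initialized, so the loss and logits come from an untrained head.</p> <div class="code-block relative"><pre>&gt;&gt;&gt; # Minimal sketch: sequence classification with MPNet (num_labels and labels are assumed example values)
&gt;&gt;&gt; from transformers import MPNetTokenizer, MPNetForSequenceClassification
&gt;&gt;&gt; import torch

&gt;&gt;&gt; tokenizer = MPNetTokenizer.from_pretrained("microsoft/mpnet-base")
&gt;&gt;&gt; # num_labels=2 is an assumed example value; the classification head is newly initialized
&gt;&gt;&gt; model = MPNetForSequenceClassification.from_pretrained("microsoft/mpnet-base", num_labels=2)

&gt;&gt;&gt; inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
&gt;&gt;&gt; labels = torch.tensor([1])  # hypothetical target class for this single sentence
&gt;&gt;&gt; outputs = model(**inputs, labels=labels)
&gt;&gt;&gt; loss, logits = outputs.loss, outputs.logits</pre></div> <p>When <code>labels</code> is supplied, the forward pass returns a loss together with the logits, as documented for the <code>forward()</code> parameters below.</p> <p>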
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.MPNetForSequenceClassification.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.MPNetForSequenceClassification.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.MPNetForSequenceClassification.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/mpnet/modeling_mpnet.py#L697" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white 
dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetForSequenceClassification.forward.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetForSequenceClassification.forward.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/mpnet#transformers.MPNetTokenizer">MPNetTokenizer</a>. 
See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.__call__()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetForSequenceClassification.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetForSequenceClassification.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetForSequenceClassification.forward.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetForSequenceClassification.forward.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetForSequenceClassification.forward.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetForSequenceClassification.forward.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetForSequenceClassification.forward.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetForSequenceClassification.forward.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetForSequenceClassification.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetForSequenceClassification.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetForSequenceClassification.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetForSequenceClassification.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetForSequenceClassification.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetForSequenceClassification.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetForSequenceClassification.forward.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetForSequenceClassification.forward.labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.MPNetForSequenceClassification.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/mpnet#transformers.MPNetConfig" >MPNetConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) — Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) — Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape 
<code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/mpnet#transformers.MPNetForSequenceClassification">MPNetForSequenceClassification</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example of single-label classification:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MPNetTokenizer, MPNetForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span>torch.manual_seed(<span class="hljs-number">0</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MPNetTokenizer.from_pretrained(<span class="hljs-string">&quot;microsoft/mpnet-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = MPNetForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;microsoft/mpnet-base&quot;</span>, num_labels=<span class="hljs-number">2</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([<span class="hljs-number">1</span>]).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits 
= outputs.logits <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">list</span>(logits.shape) <!-- HTML_TAG_END --></pre></div> <p>Example of multi-label classification:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MPNetTokenizer, MPNetForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span>torch.manual_seed(<span class="hljs-number">0</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MPNetTokenizer.from_pretrained(<span class="hljs-string">&quot;microsoft/mpnet-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = MPNetForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;microsoft/mpnet-base&quot;</span>, problem_type=<span class="hljs-string">&quot;multi_label_classification&quot;</span>, num_labels=<span class="hljs-number">2</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([[<span class="hljs-number">1</span>, <span class="hljs-number">1</span>]], dtype=torch.<span class="hljs-built_in">float</span>) <span class="hljs-comment"># need dtype=float for BCEWithLogitsLoss</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">list</span>(logits.shape) <!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.MPNetForMultipleChoice" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetForMultipleChoice"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1
1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>MPNetForMultipleChoice </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.MPNetForMultipleChoice"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">MPNetForMultipleChoice</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.MPNetForMultipleChoice" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.MPNetForMultipleChoice"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/mpnet/modeling_mpnet.py#L779" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 
text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetForMultipleChoice.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetForMultipleChoice.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/mpnet#transformers.MPNetConfig">MPNetConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>MPNet Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads, etc.)</p> <p>This model is also a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> subclass. 
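</p> <p>The forward parameters documented below expect inputs of shape <code>(batch_size, num_choices, sequence_length)</code>. As a rough, hypothetical sketch of what a multiple-choice head of this kind does (illustrative only, not the library&#x2019;s actual implementation), each <em>(prompt, choice)</em> pair is encoded and pooled, projected to a single score, and the scores are compared across choices:</p> <div class="code-block relative"> <pre>import torch
import torch.nn as nn

# Illustrative sketch: random tensors stand in for pooled encoder outputs.
batch_size, num_choices, hidden_size = 2, 4, 768
pooled = torch.randn(batch_size * num_choices, hidden_size)  # one pooled vector per (prompt, choice) pair

classifier = nn.Linear(hidden_size, 1)            # a single score per choice
logits = classifier(pooled).view(batch_size, num_choices)

labels = torch.tensor([1, 3])                     # index of the correct choice for each example
loss = nn.CrossEntropyLoss()(logits, labels)      # softmax taken over the num_choices dimension
print(logits.shape, loss.item())</pre></div> <p>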
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.MPNetForMultipleChoice.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.MPNetForMultipleChoice.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.MPNetForMultipleChoice.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/mpnet/modeling_mpnet.py#L792" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">position_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.MultipleChoiceModelOutput" >transformers.modeling_outputs.MultipleChoiceModelOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetForMultipleChoice.forward.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetForMultipleChoice.forward.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/mpnet#transformers.MPNetTokenizer">MPNetTokenizer</a>. 
See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.__call__()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetForMultipleChoice.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetForMultipleChoice.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetForMultipleChoice.forward.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetForMultipleChoice.forward.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetForMultipleChoice.forward.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetForMultipleChoice.forward.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetForMultipleChoice.forward.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetForMultipleChoice.forward.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetForMultipleChoice.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetForMultipleChoice.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetForMultipleChoice.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetForMultipleChoice.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetForMultipleChoice.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetForMultipleChoice.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetForMultipleChoice.forward.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetForMultipleChoice.forward.labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the multiple choice classification loss. Indices should be in <code>[0, ..., num_choices-1]</code> where <code>num_choices</code> is the size of the second dimension of the input tensors. 
(See <code>input_ids</code> above)<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.MPNetForMultipleChoice.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.MultipleChoiceModelOutput" >transformers.modeling_outputs.MultipleChoiceModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.MultipleChoiceModelOutput" >transformers.modeling_outputs.MultipleChoiceModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/mpnet#transformers.MPNetConfig" >MPNetConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <em>(1,)</em>, <em>optional</em>, returned when <code>labels</code> is provided) — Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices)</code>) — <em>num_choices</em> is the second dimension of the input tensors. (see <em>input_ids</em> above).</p> <p>Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/mpnet#transformers.MPNetForMultipleChoice">MPNetForMultipleChoice</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer 
focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MPNetTokenizer, MPNetForMultipleChoice <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MPNetTokenizer.from_pretrained(<span class="hljs-string">&quot;microsoft/mpnet-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = MPNetForMultipleChoice.from_pretrained(<span class="hljs-string">&quot;microsoft/mpnet-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice0 = <span class="hljs-string">&quot;It is eaten with a fork and a knife.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice1 = <span class="hljs-string">&quot;It is eaten while held in the hand.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor(<span class="hljs-number">0</span>).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># choice0 is correct (according to Wikipedia ;)), batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors=<span class="hljs-string">&quot;pt&quot;</span>, padding=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**{k: v.unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-keyword">for</span> k, v <span class="hljs-keyword">in</span> encoding.items()}, labels=labels) <span class="hljs-comment"># batch size is 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># the linear classifier still needs to be trained</span> <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.MPNetForTokenClassification" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetForTokenClassification"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" 
height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>MPNetForTokenClassification </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.MPNetForTokenClassification"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">MPNetForTokenClassification</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.MPNetForTokenClassification" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.MPNetForTokenClassification"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/mpnet/modeling_mpnet.py#L870" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span 
class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetForTokenClassification.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetForTokenClassification.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/mpnet#transformers.MPNetConfig">MPNetConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>MPNet Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads, etc.)</p> <p>This model is also a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> subclass. 
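</p> <p>As a minimal usage sketch (assuming an arbitrary <code>num_labels=5</code>; the token-classification head on top of the <code>microsoft/mpnet-base</code> checkpoint is randomly initialized here, so the predicted tags are only meaningful after fine-tuning):</p> <div class="code-block relative"> <pre>import torch
from transformers import MPNetTokenizer, MPNetForTokenClassification

tokenizer = MPNetTokenizer.from_pretrained("microsoft/mpnet-base")
model = MPNetForTokenClassification.from_pretrained("microsoft/mpnet-base", num_labels=5)

inputs = tokenizer("HuggingFace is based in New York City", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits          # shape: (batch_size, sequence_length, num_labels)

predicted_label_ids = logits.argmax(dim=-1)  # one label id per token, including special tokens
print(predicted_label_ids)</pre></div> <p>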
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.MPNetForTokenClassification.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.MPNetForTokenClassification.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.MPNetForTokenClassification.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/mpnet/modeling_mpnet.py#L885" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">position_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetForTokenClassification.forward.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetForTokenClassification.forward.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/mpnet#transformers.MPNetTokenizer">MPNetTokenizer</a>. 
See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetForTokenClassification.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetForTokenClassification.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetForTokenClassification.forward.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetForTokenClassification.forward.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetForTokenClassification.forward.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetForTokenClassification.forward.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetForTokenClassification.forward.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetForTokenClassification.forward.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetForTokenClassification.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetForTokenClassification.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetForTokenClassification.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetForTokenClassification.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetForTokenClassification.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetForTokenClassification.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetForTokenClassification.forward.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetForTokenClassification.forward.labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the token classification loss. 
Indices should be in <code>[0, ..., config.num_labels - 1]</code>.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.MPNetForTokenClassification.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/mpnet#transformers.MPNetConfig" >MPNetConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) — Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.num_labels)</code>) — Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/mpnet#transformers.MPNetForTokenClassification">MPNetForTokenClassification</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out 
opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MPNetTokenizer, MPNetForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MPNetTokenizer.from_pretrained(<span class="hljs-string">&quot;microsoft/mpnet-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = MPNetForTokenClassification.from_pretrained(<span class="hljs-string">&quot;microsoft/mpnet-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([<span class="hljs-number">1</span>] * inputs[<span class="hljs-string">&quot;input_ids&quot;</span>].size(<span class="hljs-number">1</span>)).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.MPNetForQuestionAnswering" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetForQuestionAnswering"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>MPNetForQuestionAnswering </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r 
rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.MPNetForQuestionAnswering"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">MPNetForQuestionAnswering</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.MPNetForQuestionAnswering" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.MPNetForQuestionAnswering"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/mpnet/modeling_mpnet.py#L970" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetForQuestionAnswering.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetForQuestionAnswering.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" 
width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/mpnet#transformers.MPNetConfig">MPNetConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>MPNet Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute <code>span start logits</code> and <code>span end logits</code>).</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)</p> <p>This model is also a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.MPNetForQuestionAnswering.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.MPNetForQuestionAnswering.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.MPNetForQuestionAnswering.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/mpnet/modeling_mpnet.py#L984" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">position_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">start_positions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">end_positions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.QuestionAnsweringModelOutput" >transformers.modeling_outputs.QuestionAnsweringModelOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetForQuestionAnswering.forward.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetForQuestionAnswering.forward.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a 
href="/docs/transformers/pr_16143/en/model_doc/mpnet#transformers.MPNetTokenizer">MPNetTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetForQuestionAnswering.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetForQuestionAnswering.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetForQuestionAnswering.forward.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetForQuestionAnswering.forward.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetForQuestionAnswering.forward.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetForQuestionAnswering.forward.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetForQuestionAnswering.forward.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetForQuestionAnswering.forward.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetForQuestionAnswering.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetForQuestionAnswering.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetForQuestionAnswering.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetForQuestionAnswering.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetForQuestionAnswering.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetForQuestionAnswering.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetForQuestionAnswering.forward.start_positions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetForQuestionAnswering.forward.start_positions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" 
aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>start_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). Position outside of the sequence are not taken into account for computing the loss.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MPNetForQuestionAnswering.forward.end_positions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MPNetForQuestionAnswering.forward.end_positions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>end_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). 
Position outside of the sequence are not taken into account for computing the loss.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.MPNetForQuestionAnswering.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.QuestionAnsweringModelOutput" >transformers.modeling_outputs.QuestionAnsweringModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.QuestionAnsweringModelOutput" >transformers.modeling_outputs.QuestionAnsweringModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/mpnet#transformers.MPNetConfig" >MPNetConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) — Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.</p> </li> <li> <p><strong>start_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) — Span-start scores (before SoftMax).</p> </li> <li> <p><strong>end_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) — Span-end scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/mpnet#transformers.MPNetForQuestionAnswering">MPNetForQuestionAnswering</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently 
ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MPNetTokenizer, MPNetForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>torch.manual_seed(<span class="hljs-number">0</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MPNetTokenizer.from_pretrained(<span class="hljs-string">&quot;microsoft/mpnet-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = MPNetForQuestionAnswering.from_pretrained(<span class="hljs-string">&quot;microsoft/mpnet-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>question, text = <span class="hljs-string">&quot;Who was Jim Henson?&quot;</span>, <span class="hljs-string">&quot;Jim Henson was a nice puppet&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(question, text, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>start_positions = torch.tensor([<span class="hljs-number">1</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>end_positions = torch.tensor([<span class="hljs-number">3</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, start_positions=start_positions, end_positions=end_positions) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">round</span>(loss.item(), <span class="hljs-number">2</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>start_scores = outputs.start_logits <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">list</span>(start_scores.shape) <span class="hljs-meta">&gt;&gt;&gt; </span>end_scores = outputs.end_logits <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">list</span>(end_scores.shape) <!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.TFMPNetModel" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMPNetModel"><span><svg class="" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TFMPNetModel </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFMPNetModel"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TFMPNetModel</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TFMPNetModel" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFMPNetModel"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/mpnet/modeling_tf_mpnet.py#L678" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span 
class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMPNetModel.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMPNetModel.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/mpnet#transformers.MPNetConfig">MPNetConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>The bare MPNet Model transformer outputting raw hidden-states without any specific head on top.</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads, etc.)</p> <p>This model is also a <a href="https://www.tensorflow.org/api_docs/python/tf/keras/Model" rel="nofollow">tf.keras.Model</a> subclass.
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and behavior.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>TF 2.0 models accept two formats as inputs:</p> <ul><li>having all inputs as keyword arguments (like PyTorch models), or</li> <li>having all inputs as a list, tuple or dict in the first positional argument.</li></ul> <p>This second option is useful when using the <code>tf.keras.Model.fit</code> method, which currently requires having all the tensors in the first argument of the model call function: <code>model(inputs)</code>.</p> <p>If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument:</p> <ul><li>a single Tensor with <code>input_ids</code> only and nothing else: <code>model(input_ids)</code></li> <li>a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: <code>model([input_ids, attention_mask])</code></li> <li>a dictionary with one or several input Tensors associated with the input names given in the docstring: <code>model({&quot;input_ids&quot;: input_ids, &quot;attention_mask&quot;: attention_mask})</code></li></ul></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFMPNetModel.call"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>call</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFMPNetModel.call" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFMPNetModel.call"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0
0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/mpnet/modeling_tf_mpnet.py#L683" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">training<span class="opacity-60"> = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFBaseModelOutput" >transformers.modeling_tf_outputs.TFBaseModelOutput</a> or <code>tuple(tf.Tensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMPNetModel.call.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 
with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMPNetModel.call.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/mpnet#transformers.MPNetTokenizer">MPNetTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMPNetModel.call.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMPNetModel.call.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMPNetModel.call.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMPNetModel.call.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMPNetModel.call.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMPNetModel.call.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMPNetModel.call.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMPNetModel.call.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMPNetModel.call.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMPNetModel.call.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMPNetModel.call.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMPNetModel.call.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMPNetModel.call.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMPNetModel.call.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. 
This argument can be used in eager mode, in graph mode the value will always be set to True.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMPNetModel.call.training" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMPNetModel.call.training"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.TFMPNetModel.call.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFBaseModelOutput" >transformers.modeling_tf_outputs.TFBaseModelOutput</a> or <code>tuple(tf.Tensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFBaseModelOutput" >transformers.modeling_tf_outputs.TFBaseModelOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/mpnet#transformers.MPNetConfig" >MPNetConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) — Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of 
<code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/mpnet#transformers.TFMPNetModel">TFMPNetModel</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MPNetTokenizer, TFMPNetModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MPNetTokenizer.from_pretrained(<span class="hljs-string">&quot;microsoft/mpnet-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFMPNetModel.from_pretrained(<span class="hljs-string">&quot;microsoft/mpnet-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.TFMPNetForMaskedLM" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMPNetForMaskedLM"><span><svg class="" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TFMPNetForMaskedLM </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFMPNetForMaskedLM"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TFMPNetForMaskedLM</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TFMPNetForMaskedLM" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFMPNetForMaskedLM"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/mpnet/modeling_tf_mpnet.py#L795" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMPNetForMaskedLM.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMPNetForMaskedLM.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/mpnet#transformers.MPNetConfig">MPNetConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>MPNet Model with a <code>language modeling</code> head on top.</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads, etc.)</p> <p>This model is also a <a href="https://www.tensorflow.org/api_docs/python/tf/keras/Model" rel="nofollow">tf.keras.Model</a> subclass.
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and behavior.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>TF 2.0 models accept two formats as inputs:</p> <ul><li>having all inputs as keyword arguments (like PyTorch models), or</li> <li>having all inputs as a list, tuple or dict in the first positional argument.</li></ul> <p>This second option is useful when using the <code>tf.keras.Model.fit</code> method, which currently requires having all the tensors in the first argument of the model call function: <code>model(inputs)</code>.</p> <p>If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument:</p> <ul><li>a single Tensor with <code>input_ids</code> only and nothing else: <code>model(input_ids)</code></li> <li>a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: <code>model([input_ids, attention_mask])</code></li> <li>a dictionary with one or several input Tensors associated with the input names given in the docstring: <code>model({&quot;input_ids&quot;: input_ids, &quot;attention_mask&quot;: attention_mask})</code></li></ul></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFMPNetForMaskedLM.call"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>call</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFMPNetForMaskedLM.call" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFMPNetForMaskedLM.call"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0
1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/mpnet/modeling_tf_mpnet.py#L812" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">training<span class="opacity-60"> = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFMaskedLMOutput" >transformers.modeling_tf_outputs.TFMaskedLMOutput</a> or <code>tuple(tf.Tensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li 
class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMPNetForMaskedLM.call.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMPNetForMaskedLM.call.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/mpnet#transformers.MPNetTokenizer">MPNetTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMPNetForMaskedLM.call.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMPNetForMaskedLM.call.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMPNetForMaskedLM.call.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMPNetForMaskedLM.call.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMPNetForMaskedLM.call.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMPNetForMaskedLM.call.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMPNetForMaskedLM.call.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMPNetForMaskedLM.call.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMPNetForMaskedLM.call.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMPNetForMaskedLM.call.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMPNetForMaskedLM.call.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMPNetForMaskedLM.call.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMPNetForMaskedLM.call.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMPNetForMaskedLM.call.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. 
This argument can be used in eager mode, in graph mode the value will always be set to True.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMPNetForMaskedLM.call.training" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMPNetForMaskedLM.call.training"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMPNetForMaskedLM.call.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMPNetForMaskedLM.call.labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. 
Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see the <code>input_ids</code> docstring). Tokens with indices set to <code>-100</code> are ignored (masked); the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.TFMPNetForMaskedLM.call.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFMaskedLMOutput" >transformers.modeling_tf_outputs.TFMaskedLMOutput</a> or <code>tuple(tf.Tensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFMaskedLMOutput" >transformers.modeling_tf_outputs.TFMaskedLMOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/mpnet#transformers.MPNetConfig" >MPNetConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(n,)</code>, <em>optional</em>, where n is the number of non-masked labels, returned when <code>labels</code> is provided) — Masked language modeling (MLM) loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) — Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attention weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/mpnet#transformers.TFMPNetForMaskedLM">TFMPNetForMaskedLM</a> forward method overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for the forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this, since the former takes care of running the pre- and post-processing steps while the latter silently ignores them.</p></div>
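<p>Building on the <code>labels</code> description above, the following is a minimal sketch of one way to construct labels that use the <code>-100</code> convention so that the loss is computed only for the masked position; it assumes the masked sentence and the target sentence tokenize to the same number of tokens. The standard usage example follows below.</p> <div class="code-block relative"><pre>&gt;&gt;&gt; import tensorflow as tf
&gt;&gt;&gt; from transformers import MPNetTokenizer, TFMPNetForMaskedLM

&gt;&gt;&gt; tokenizer = MPNetTokenizer.from_pretrained("microsoft/mpnet-base")
&gt;&gt;&gt; model = TFMPNetForMaskedLM.from_pretrained("microsoft/mpnet-base")

&gt;&gt;&gt; inputs = tokenizer("The capital of France is [MASK].", return_tensors="tf")
&gt;&gt;&gt; targets = tokenizer("The capital of France is Paris.", return_tensors="tf")["input_ids"]

&gt;&gt;&gt; # keep a label only at the [MASK] position; every other position is set to -100
&gt;&gt;&gt; # and is therefore ignored by the loss (assumes both sentences have the same length)
&gt;&gt;&gt; labels = tf.where(inputs["input_ids"] == tokenizer.mask_token_id, targets, -100)

&gt;&gt;&gt; inputs["labels"] = labels
&gt;&gt;&gt; outputs = model(inputs)
&gt;&gt;&gt; loss = outputs.loss  # loss computed over the single masked token only

&gt;&gt;&gt; # inspect the model's prediction for the masked position
&gt;&gt;&gt; masked_index = int(tf.where(inputs["input_ids"][0] == tokenizer.mask_token_id)[0][0])
&gt;&gt;&gt; predicted_token = tokenizer.decode([int(tf.argmax(outputs.logits[0, masked_index]))])</pre></div>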
<p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MPNetTokenizer, TFMPNetForMaskedLM <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MPNetTokenizer.from_pretrained(<span class="hljs-string">&quot;microsoft/mpnet-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFMPNetForMaskedLM.from_pretrained(<span class="hljs-string">&quot;microsoft/mpnet-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;The capital of France is [MASK].&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs[<span class="hljs-string">&quot;labels&quot;</span>] = tokenizer(<span class="hljs-string">&quot;The capital of France is Paris.&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>)[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.TFMPNetForSequenceClassification" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMPNetForSequenceClassification"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 
28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TFMPNetForSequenceClassification </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFMPNetForSequenceClassification"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TFMPNetForSequenceClassification</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TFMPNetForSequenceClassification" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFMPNetForSequenceClassification"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/mpnet/modeling_tf_mpnet.py#L922" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li 
class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMPNetForSequenceClassification.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMPNetForSequenceClassification.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/mpnet#transformers.MPNetConfig">MPNetConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>MPNet Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)</p> <p>This model is also a <a href="https://www.tensorflow.org/api_docs/python/tf/keras/Model" rel="nofollow">tf.keras.Model</a> subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and behavior.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>TF 2.0 models accept two formats as inputs:</p> <ul><li>having all inputs as keyword arguments (like PyTorch models), or</li> <li>having all inputs as a list, tuple or dict in the first positional argument.</li></ul> <p>This second option is useful when using the <code>tf.keras.Model.fit</code> method, which currently requires having all the tensors in the first argument of the model call function: <code>model(inputs)</code>.</p> <p>If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument:</p> <ul><li>a single Tensor with <code>input_ids</code> only and nothing else: <code>model(input_ids)</code></li> <li>a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: <code>model([input_ids, attention_mask])</code></li> <li>a dictionary with one or several input Tensors associated with the input names given in the docstring: <code>model({&quot;input_ids&quot;: input_ids, &quot;attention_mask&quot;: attention_mask})</code></li></ul></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFMPNetForSequenceClassification.call"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>call</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFMPNetForSequenceClassification.call" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFMPNetForSequenceClassification.call"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287
84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/mpnet/modeling_tf_mpnet.py#L933" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">training<span class="opacity-60"> = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFSequenceClassifierOutput" >transformers.modeling_tf_outputs.TFSequenceClassifierOutput</a> or <code>tuple(tf.Tensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 
dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMPNetForSequenceClassification.call.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMPNetForSequenceClassification.call.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/mpnet#transformers.MPNetTokenizer">MPNetTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMPNetForSequenceClassification.call.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMPNetForSequenceClassification.call.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMPNetForSequenceClassification.call.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMPNetForSequenceClassification.call.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMPNetForSequenceClassification.call.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMPNetForSequenceClassification.call.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMPNetForSequenceClassification.call.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMPNetForSequenceClassification.call.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMPNetForSequenceClassification.call.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMPNetForSequenceClassification.call.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMPNetForSequenceClassification.call.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMPNetForSequenceClassification.call.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMPNetForSequenceClassification.call.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMPNetForSequenceClassification.call.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. 
This argument can be used in eager mode, in graph mode the value will always be set to True.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMPNetForSequenceClassification.call.training" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMPNetForSequenceClassification.call.training"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMPNetForSequenceClassification.call.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMPNetForSequenceClassification.call.labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>tf.Tensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. 
If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss); if <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.TFMPNetForSequenceClassification.call.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFSequenceClassifierOutput" >transformers.modeling_tf_outputs.TFSequenceClassifierOutput</a> or <code>tuple(tf.Tensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFSequenceClassifierOutput" >transformers.modeling_tf_outputs.TFSequenceClassifierOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/mpnet#transformers.MPNetConfig" >MPNetConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, )</code>, <em>optional</em>, returned when <code>labels</code> is provided) — Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, config.num_labels)</code>) — Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attention weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/mpnet#transformers.TFMPNetForSequenceClassification">TFMPNetForSequenceClassification</a> forward method overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for the forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this, since the former takes care of running the pre- and post-processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div
class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MPNetTokenizer, TFMPNetForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MPNetTokenizer.from_pretrained(<span class="hljs-string">&quot;microsoft/mpnet-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFMPNetForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;microsoft/mpnet-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs[<span class="hljs-string">&quot;labels&quot;</span>] = tf.reshape(tf.constant(<span class="hljs-number">1</span>), (-<span class="hljs-number">1</span>, <span class="hljs-number">1</span>)) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.TFMPNetForMultipleChoice" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMPNetForMultipleChoice"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></span></a> <span>TFMPNetForMultipleChoice </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFMPNetForMultipleChoice"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TFMPNetForMultipleChoice</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TFMPNetForMultipleChoice" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFMPNetForMultipleChoice"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/mpnet/modeling_tf_mpnet.py#L1019" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a 
id="transformers.TFMPNetForMultipleChoice.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMPNetForMultipleChoice.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/mpnet#transformers.MPNetConfig">MPNetConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>MPNet Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)</p> <p>This model is also a <a href="https://www.tensorflow.org/api_docs/python/tf/keras/Model" rel="nofollow">tf.keras.Model</a> subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and behavior.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>TF 2.0 models accept two formats as inputs:</p> <ul><li>having all inputs as keyword arguments (like PyTorch models), or</li> <li>having all inputs as a list, tuple or dict in the first positional argument.</li></ul> <p>This second option is useful when using the <code>tf.keras.Model.fit</code> method, which currently requires having all the tensors in the first argument of the model call function: <code>model(inputs)</code>.</p> <p>If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument:</p> <ul><li>a single Tensor with <code>input_ids</code> only and nothing else: <code>model(input_ids)</code></li> <li>a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: <code>model([input_ids, attention_mask])</code></li> <li>a dictionary with one or several input Tensors associated with the input names given in the docstring: <code>model({&quot;input_ids&quot;: input_ids, &quot;attention_mask&quot;: attention_mask})</code></li></ul></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFMPNetForMultipleChoice.call"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>call</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFMPNetForMultipleChoice.call" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFMPNetForMultipleChoice.call"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0
1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/mpnet/modeling_tf_mpnet.py#L1039" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">training<span class="opacity-60"> = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput" >transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput</a> or <code>tuple(tf.Tensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul 
class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMPNetForMultipleChoice.call.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMPNetForMultipleChoice.call.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/mpnet#transformers.MPNetTokenizer">MPNetTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMPNetForMultipleChoice.call.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMPNetForMultipleChoice.call.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMPNetForMultipleChoice.call.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMPNetForMultipleChoice.call.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMPNetForMultipleChoice.call.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMPNetForMultipleChoice.call.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMPNetForMultipleChoice.call.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMPNetForMultipleChoice.call.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, num_choices, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMPNetForMultipleChoice.call.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMPNetForMultipleChoice.call.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMPNetForMultipleChoice.call.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMPNetForMultipleChoice.call.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMPNetForMultipleChoice.call.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMPNetForMultipleChoice.call.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. 
This argument can be used in eager mode, in graph mode the value will always be set to True.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMPNetForMultipleChoice.call.training" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMPNetForMultipleChoice.call.training"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMPNetForMultipleChoice.call.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMPNetForMultipleChoice.call.labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>tf.Tensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the multiple choice classification loss. Indices should be in <code>[0, ..., num_choices]</code> where <code>num_choices</code> is the size of the second dimension of the input tensors. 
(See <code>input_ids</code> above)<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.TFMPNetForMultipleChoice.call.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput" >transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput</a> or <code>tuple(tf.Tensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput" >transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/mpnet#transformers.MPNetConfig" >MPNetConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <em>(batch_size, )</em>, <em>optional</em>, returned when <code>labels</code> is provided) — Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, num_choices)</code>) — <em>num_choices</em> is the second dimension of the input tensors. (see <em>input_ids</em> above).</p> <p>Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attention weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/mpnet#transformers.TFMPNetForMultipleChoice">TFMPNetForMultipleChoice</a> forward method overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for the forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre- and post-processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition 
duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MPNetTokenizer, TFMPNetForMultipleChoice <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MPNetTokenizer.from_pretrained(<span class="hljs-string">&quot;microsoft/mpnet-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFMPNetForMultipleChoice.from_pretrained(<span class="hljs-string">&quot;microsoft/mpnet-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice0 = <span class="hljs-string">&quot;It is eaten with a fork and a knife.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice1 = <span class="hljs-string">&quot;It is eaten while held in the hand.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors=<span class="hljs-string">&quot;tf&quot;</span>, padding=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = {k: tf.expand_dims(v, <span class="hljs-number">0</span>) <span class="hljs-keyword">for</span> k, v <span class="hljs-keyword">in</span> encoding.items()} <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-comment"># batch size is 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># the linear classifier still needs to be trained</span> <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.TFMPNetForTokenClassification" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMPNetForTokenClassification"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 
40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TFMPNetForTokenClassification </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFMPNetForTokenClassification"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TFMPNetForTokenClassification</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TFMPNetForTokenClassification" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFMPNetForTokenClassification"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/mpnet/modeling_tf_mpnet.py#L1156" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> 
<span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMPNetForTokenClassification.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMPNetForTokenClassification.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/mpnet#transformers.MPNetConfig">MPNetConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>MPNet Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads, etc.)</p> <p>This model is also a <a href="https://www.tensorflow.org/api_docs/python/tf/keras/Model" rel="nofollow">tf.keras.Model</a> subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and behavior.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>TF 2.0 models accept two formats as inputs:</p> <ul><li>having all inputs as keyword arguments (like PyTorch models), or</li> <li>having all inputs as a list, tuple or dict in the first positional argument.</li></ul> <p>This second option is useful when using the <code>tf.keras.Model.fit</code> method which currently requires having all the tensors in the first argument of the model call function: <code>model(inputs)</code>.</p> <p>If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument:</p> <ul><li>a single Tensor with <code>input_ids</code> only and nothing else: <code>model(input_ids)</code></li> <li>a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: <code>model([input_ids, attention_mask])</code></li> <li>a dictionary with one or several input Tensors associated with the input names given in the docstring: <code>model({&quot;input_ids&quot;: input_ids, &quot;attention_mask&quot;: attention_mask})</code></li></ul></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFMPNetForTokenClassification.call"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>call</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFMPNetForTokenClassification.call" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFMPNetForTokenClassification.call"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 
28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/mpnet/modeling_tf_mpnet.py#L1170" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">training<span class="opacity-60"> = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFTokenClassifierOutput" >transformers.modeling_tf_outputs.TFTokenClassifierOutput</a> or <code>tuple(tf.Tensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 
ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMPNetForTokenClassification.call.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMPNetForTokenClassification.call.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/mpnet#transformers.MPNetTokenizer">MPNetTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMPNetForTokenClassification.call.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMPNetForTokenClassification.call.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMPNetForTokenClassification.call.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMPNetForTokenClassification.call.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMPNetForTokenClassification.call.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMPNetForTokenClassification.call.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMPNetForTokenClassification.call.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMPNetForTokenClassification.call.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMPNetForTokenClassification.call.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMPNetForTokenClassification.call.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMPNetForTokenClassification.call.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMPNetForTokenClassification.call.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMPNetForTokenClassification.call.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMPNetForTokenClassification.call.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. 
This argument can be used in eager mode, in graph mode the value will always be set to True.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMPNetForTokenClassification.call.training" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMPNetForTokenClassification.call.training"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMPNetForTokenClassification.call.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMPNetForTokenClassification.call.labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the token classification loss. 
Indices should be in <code>[0, ..., config.num_labels - 1]</code>.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.TFMPNetForTokenClassification.call.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFTokenClassifierOutput" >transformers.modeling_tf_outputs.TFTokenClassifierOutput</a> or <code>tuple(tf.Tensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFTokenClassifierOutput" >transformers.modeling_tf_outputs.TFTokenClassifierOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/mpnet#transformers.MPNetConfig" >MPNetConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(n,)</code>, <em>optional</em>, where n is the number of unmasked labels, returned when <code>labels</code> is provided) — Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.num_labels)</code>) — Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attention weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/mpnet#transformers.TFMPNetForTokenClassification">TFMPNetForTokenClassification</a> forward method overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for the forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre- and post-processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 
ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MPNetTokenizer, TFMPNetForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MPNetTokenizer.from_pretrained(<span class="hljs-string">&quot;microsoft/mpnet-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFMPNetForTokenClassification.from_pretrained(<span class="hljs-string">&quot;microsoft/mpnet-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = inputs[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>inputs[<span class="hljs-string">&quot;labels&quot;</span>] = tf.reshape( <span class="hljs-meta">... 
</span> tf.constant([<span class="hljs-number">1</span>] * tf.size(input_ids).numpy()), (-<span class="hljs-number">1</span>, tf.size(input_ids)) <span class="hljs-meta">&gt;&gt;&gt; </span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.TFMPNetForQuestionAnswering" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMPNetForQuestionAnswering"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TFMPNetForQuestionAnswering </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFMPNetForQuestionAnswering"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TFMPNetForQuestionAnswering</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TFMPNetForQuestionAnswering" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFMPNetForQuestionAnswering"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 
1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/mpnet/modeling_tf_mpnet.py#L1255" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMPNetForQuestionAnswering.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMPNetForQuestionAnswering.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/mpnet#transformers.MPNetConfig">MPNetConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>MPNet Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute <code>span start logits</code> and <code>span end logits</code>).</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>. 
Check the superclass documentation for the generic methods the library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads, etc.)</p> <p>This model is also a <a href="https://www.tensorflow.org/api_docs/python/tf/keras/Model" rel="nofollow">tf.keras.Model</a> subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and behavior.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>TF 2.0 models accept two formats as inputs:</p> <ul><li>having all inputs as keyword arguments (like PyTorch models), or</li> <li>having all inputs as a list, tuple or dict in the first positional argument.</li></ul> <p>This second option is useful when using the <code>tf.keras.Model.fit</code> method, which currently requires having all the tensors in the first argument of the model call function: <code>model(inputs)</code>.</p> <p>If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument:</p> <ul><li>a single Tensor with <code>input_ids</code> only and nothing else: <code>model(input_ids)</code></li> <li>a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: <code>model([input_ids, attention_mask])</code></li> <li>a dictionary with one or several input Tensors associated with the input names given in the docstring: <code>model({&quot;input_ids&quot;: input_ids, &quot;attention_mask&quot;: attention_mask})</code></li></ul> <p>A minimal sketch of these three calling conventions is shown at the end of this page, after the usage example.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFMPNetForQuestionAnswering.call"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>call</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFMPNetForQuestionAnswering.call" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFMPNetForQuestionAnswering.call"><svg class=""
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/mpnet/modeling_tf_mpnet.py#L1268" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">start_positions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">end_positions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">training<span class="opacity-60"> = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span 
class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput" >transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput</a> or <code>tuple(tf.Tensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMPNetForQuestionAnswering.call.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMPNetForQuestionAnswering.call.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/mpnet#transformers.MPNetTokenizer">MPNetTokenizer</a>. 
See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMPNetForQuestionAnswering.call.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMPNetForQuestionAnswering.call.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMPNetForQuestionAnswering.call.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMPNetForQuestionAnswering.call.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMPNetForQuestionAnswering.call.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMPNetForQuestionAnswering.call.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMPNetForQuestionAnswering.call.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMPNetForQuestionAnswering.call.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMPNetForQuestionAnswering.call.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMPNetForQuestionAnswering.call.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMPNetForQuestionAnswering.call.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMPNetForQuestionAnswering.call.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMPNetForQuestionAnswering.call.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMPNetForQuestionAnswering.call.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. 
This argument can be used in eager mode, in graph mode the value will always be set to True.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMPNetForQuestionAnswering.call.training" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMPNetForQuestionAnswering.call.training"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMPNetForQuestionAnswering.call.start_positions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMPNetForQuestionAnswering.call.start_positions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>start_positions</strong> (<code>tf.Tensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). 
Position outside of the sequence are not taken into account for computing the loss.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMPNetForQuestionAnswering.call.end_positions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMPNetForQuestionAnswering.call.end_positions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>end_positions</strong> (<code>tf.Tensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). Position outside of the sequence are not taken into account for computing the loss.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.TFMPNetForQuestionAnswering.call.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput" >transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput</a> or <code>tuple(tf.Tensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput" >transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/mpnet#transformers.MPNetConfig" >MPNetConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, )</code>, <em>optional</em>, returned when <code>start_positions</code> and <code>end_positions</code> are provided) — Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.</p> </li> <li> <p><strong>start_logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) — Span-start scores (before SoftMax).</p> </li> <li> <p><strong>end_logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) — Span-end scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> 
(<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/mpnet#transformers.TFMPNetForQuestionAnswering">TFMPNetForQuestionAnswering</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MPNetTokenizer, TFMPNetForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MPNetTokenizer.from_pretrained(<span class="hljs-string">&quot;microsoft/mpnet-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFMPNetForQuestionAnswering.from_pretrained(<span class="hljs-string">&quot;microsoft/mpnet-base&quot;</span>) <span 
class="hljs-meta">&gt;&gt;&gt; </span>question, text = <span class="hljs-string">&quot;Who was Jim Henson?&quot;</span>, <span class="hljs-string">&quot;Jim Henson was a nice puppet&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>input_dict = tokenizer(question, text, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_dict) <span class="hljs-meta">&gt;&gt;&gt; </span>start_logits = outputs.start_logits <span class="hljs-meta">&gt;&gt;&gt; </span>end_logits = outputs.end_logits <span class="hljs-meta">&gt;&gt;&gt; </span>all_tokens = tokenizer.convert_ids_to_tokens(input_dict[<span class="hljs-string">&quot;input_ids&quot;</span>].numpy()[<span class="hljs-number">0</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>answer = <span class="hljs-string">&quot; &quot;</span>.join(all_tokens[tf.math.argmax(start_logits, <span class="hljs-number">1</span>)[<span class="hljs-number">0</span>] : tf.math.argmax(end_logits, <span class="hljs-number">1</span>)[<span class="hljs-number">0</span>] + <span class="hljs-number">1</span>])<!-- HTML_TAG_END --></pre></div></div></div> <script type="module" data-hydrate="2isptf"> import { start } from "/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"; start({ target: document.querySelector('[data-hydrate="2isptf"]').parentNode, paths: {"base":"/docs/transformers/pr_16143/en","assets":"/docs/transformers/pr_16143/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"), import("/docs/transformers/pr_16143/en/_app/pages/model_doc/mpnet.mdx-a2ac5620.js") ], params: {} } }); </script>
210
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en
hf_public_repos/doc-build-dev/transformers/pr_16143/en/model_doc/roberta.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;roberta&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;overview&quot;,&quot;title&quot;:&quot;Overview&quot;},{&quot;local&quot;:&quot;transformers.RobertaConfig&quot;,&quot;title&quot;:&quot;RobertaConfig&quot;},{&quot;local&quot;:&quot;transformers.RobertaTokenizer&quot;,&quot;title&quot;:&quot;RobertaTokenizer&quot;},{&quot;local&quot;:&quot;transformers.RobertaTokenizerFast&quot;,&quot;title&quot;:&quot;RobertaTokenizerFast&quot;},{&quot;local&quot;:&quot;transformers.RobertaModel&quot;,&quot;title&quot;:&quot;RobertaModel&quot;},{&quot;local&quot;:&quot;transformers.RobertaForCausalLM&quot;,&quot;title&quot;:&quot;RobertaForCausalLM&quot;},{&quot;local&quot;:&quot;transformers.RobertaForMaskedLM&quot;,&quot;title&quot;:&quot;RobertaForMaskedLM&quot;},{&quot;local&quot;:&quot;transformers.RobertaForSequenceClassification&quot;,&quot;title&quot;:&quot;RobertaForSequenceClassification&quot;},{&quot;local&quot;:&quot;transformers.RobertaForMultipleChoice&quot;,&quot;title&quot;:&quot;RobertaForMultipleChoice&quot;},{&quot;local&quot;:&quot;transformers.RobertaForTokenClassification&quot;,&quot;title&quot;:&quot;RobertaForTokenClassification&quot;},{&quot;local&quot;:&quot;transformers.RobertaForQuestionAnswering&quot;,&quot;title&quot;:&quot;RobertaForQuestionAnswering&quot;},{&quot;local&quot;:&quot;transformers.TFRobertaModel&quot;,&quot;title&quot;:&quot;TFRobertaModel&quot;},{&quot;local&quot;:&quot;transformers.TFRobertaForCausalLM&quot;,&quot;title&quot;:&quot;TFRobertaForCausalLM&quot;},{&quot;local&quot;:&quot;transformers.TFRobertaForMaskedLM&quot;,&quot;title&quot;:&quot;TFRobertaForMaskedLM&quot;},{&quot;local&quot;:&quot;transformers.TFRobertaForSequenceClassification&quot;,&quot;title&quot;:&quot;TFRobertaForSequenceClassification&quot;},{&quot;local&quot;:&quot;transformers.TFRobertaForMultipleChoice&quot;,&quot;title&quot;:&quot;TFRobertaForMultipleChoice&quot;},{&quot;local&quot;:&quot;transformers.TFRobertaForTokenClassification&quot;,&quot;title&quot;:&quot;TFRobertaForTokenClassification&quot;},{&quot;local&quot;:&quot;transformers.TFRobertaForQuestionAnswering&quot;,&quot;title&quot;:&quot;TFRobertaForQuestionAnswering&quot;},{&quot;local&quot;:&quot;transformers.FlaxRobertaModel&quot;,&quot;title&quot;:&quot;FlaxRobertaModel&quot;},{&quot;local&quot;:&quot;transformers.FlaxRobertaForMaskedLM&quot;,&quot;title&quot;:&quot;FlaxRobertaForMaskedLM&quot;},{&quot;local&quot;:&quot;transformers.FlaxRobertaForSequenceClassification&quot;,&quot;title&quot;:&quot;FlaxRobertaForSequenceClassification&quot;},{&quot;local&quot;:&quot;transformers.FlaxRobertaForMultipleChoice&quot;,&quot;title&quot;:&quot;FlaxRobertaForMultipleChoice&quot;},{&quot;local&quot;:&quot;transformers.FlaxRobertaForTokenClassification&quot;,&quot;title&quot;:&quot;FlaxRobertaForTokenClassification&quot;},{&quot;local&quot;:&quot;transformers.FlaxRobertaForQuestionAnswering&quot;,&quot;title&quot;:&quot;FlaxRobertaForQuestionAnswering&quot;}],&quot;title&quot;:&quot;RoBERTa&quot;}" data-svelte="svelte-1phssyn"> <link rel="stylesheet" href="/docs/transformers/pr_16143/en/_app/assets/pages/__layout.svelte-a5c8879b.css"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/vendor-4833417e.js"> <link rel="modulepreload" 
href="/docs/transformers/pr_16143/en/_app/chunks/paths-4b3c6e7e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/model_doc/roberta.mdx-002140a3.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Tip-fffd6df1.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Docstring-4f315ed9.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/IconCopyLink-4b81c553.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CodeBlock-6a3d1b46.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CopyButton-dacfbfaf.js"> <h1 class="relative group"><a id="roberta" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#roberta"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>RoBERTa </span></h1> <h2 class="relative group"><a id="overview" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#overview"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Overview </span></h2> <p>The RoBERTa model was proposed in <a href="https://arxiv.org/abs/1907.11692" rel="nofollow">RoBERTa: A Robustly Optimized BERT Pretraining Approach</a> by Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, Veselin Stoyanov. It is based on Google’s BERT model released in 2018.</p> <p>It builds on BERT and modifies key hyperparameters, removing the next-sentence pretraining objective and training with much larger mini-batches and learning rates.</p> <p>The abstract from the paper is the following:</p> <p><em>Language model pretraining has led to significant performance gains but careful comparison between different approaches is challenging. 
Training is computationally expensive, often done on private datasets of different sizes, and, as we will show, hyperparameter choices have significant impact on the final results. We present a replication study of BERT pretraining (Devlin et al., 2019) that carefully measures the impact of many key hyperparameters and training data size. We find that BERT was significantly undertrained, and can match or exceed the performance of every model published after it. Our best model achieves state-of-the-art results on GLUE, RACE and SQuAD. These results highlight the importance of previously overlooked design choices, and raise questions about the source of recently reported improvements. We release our models and code.</em></p> <p>Tips:</p> <ul><li>This implementation is the same as <a href="/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertModel">BertModel</a> with a tiny embeddings tweak as well as a setup for Roberta pretrained models.</li> <li>RoBERTa has the same architecture as BERT, but uses a byte-level BPE as a tokenizer (same as GPT-2) and uses a different pretraining scheme.</li> <li>RoBERTa doesn’t have <code>token_type_ids</code>, you don’t need to indicate which token belongs to which segment. Just separate your segments with the separation token <code>tokenizer.sep_token</code> (or <code>&lt;/s&gt;</code>)</li> <li><a href="camembert">CamemBERT</a> is a wrapper around RoBERTa. Refer to this page for usage examples.</li></ul> <p>This model was contributed by <a href="https://huggingface.co/julien-c" rel="nofollow">julien-c</a>. The original code can be found <a href="https://github.com/pytorch/fairseq/tree/master/examples/roberta" rel="nofollow">here</a>.</p> <h2 class="relative group"><a id="transformers.RobertaConfig" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaConfig"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>RobertaConfig </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.RobertaConfig"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 
1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">RobertaConfig</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.RobertaConfig" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.RobertaConfig"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/configuration_roberta.py#L37" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_token_id<span class="opacity-60"> = 1</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">bos_token_id<span class="opacity-60"> = 0</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eos_token_id<span class="opacity-60"> = 2</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>This is the configuration class to store the configuration of a <a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaModel">RobertaModel</a> or a <a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.TFRobertaModel">TFRobertaModel</a>. It is used to instantiate a RoBERTa model according to the specified arguments, defining the model architecture.</p> <p>Configuration objects inherit from <a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a> and can be used to control the model outputs. 
Read the documentation from <a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a> for more information.</p> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaConfig">RobertaConfig</a> class directly inherits <a href="/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertConfig">BertConfig</a>. It reuses the same defaults. Please check the parent class for more information.</p> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaConfig, RobertaModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a RoBERTa configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = RobertaConfig() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a model from the configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = RobertaModel(configuration) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = model.config<!-- HTML_TAG_END --></pre></div></div> <h2 class="relative group"><a id="transformers.RobertaTokenizer" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaTokenizer"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> 
<span>RobertaTokenizer </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.RobertaTokenizer"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">RobertaTokenizer</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.RobertaTokenizer" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.RobertaTokenizer"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/tokenization_roberta.py#L103" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">vocab_file<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">merges_file<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">errors<span class="opacity-60"> = &#39;replace&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">bos_token<span class="opacity-60"> = &#39;&lt;s&gt;&#39;</span></span> </span><span class="comma 
cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eos_token<span class="opacity-60"> = &#39;&lt;/s&gt;&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">sep_token<span class="opacity-60"> = &#39;&lt;/s&gt;&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cls_token<span class="opacity-60"> = &#39;&lt;s&gt;&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">unk_token<span class="opacity-60"> = &#39;&lt;unk&gt;&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_token<span class="opacity-60"> = &#39;&lt;pad&gt;&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">mask_token<span class="opacity-60"> = &#39;&lt;mask&gt;&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">add_prefix_space<span class="opacity-60"> = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaTokenizer.vocab_file" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaTokenizer.vocab_file"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>vocab_file</strong> (<code>str</code>) &#x2014; Path to the vocabulary file.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaTokenizer.merges_file" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaTokenizer.merges_file"><span><svg class="text-smd" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>merges_file</strong> (<code>str</code>) &#x2014; Path to the merges file.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaTokenizer.errors" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaTokenizer.errors"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>errors</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;replace&quot;</code>) &#x2014; Paradigm to follow when decoding bytes to UTF-8. 
See <a href="https://docs.python.org/3/library/stdtypes.html#bytes.decode" rel="nofollow">bytes.decode</a> for more information.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaTokenizer.bos_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaTokenizer.bos_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>bos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;s&gt;&quot;</code>) &#x2014; The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. 
The token used is the <code>cls_token</code>.</p> </div><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaTokenizer.eos_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaTokenizer.eos_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>eos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;/s&gt;&quot;</code>) &#x2014; The end of sequence token.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the <code>sep_token</code>.</p> </div><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaTokenizer.sep_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaTokenizer.sep_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>sep_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;/s&gt;&quot;</code>) &#x2014; The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. 
It is also used as the last token of a sequence built with special tokens.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaTokenizer.cls_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaTokenizer.cls_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cls_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;s&gt;&quot;</code>) &#x2014; The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaTokenizer.unk_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaTokenizer.unk_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;unk&gt;&quot;</code>) &#x2014; The unknown token. 
A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaTokenizer.pad_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaTokenizer.pad_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pad_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;pad&gt;&quot;</code>) &#x2014; The token used for padding, for example when batching sequences of different lengths.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaTokenizer.mask_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaTokenizer.mask_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>mask_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;mask&gt;&quot;</code>) &#x2014; The token used for masking values. This is the token used when training this model with masked language modeling. 
This is the token which the model will try to predict.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaTokenizer.add_prefix_space" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaTokenizer.add_prefix_space"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>add_prefix_space</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to add an initial space to the input. This allows treating the leading word just like any other word. (The RoBERTa tokenizer detects the beginning of words by the preceding space.)<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Constructs a RoBERTa tokenizer, derived from the GPT-2 tokenizer, using byte-level Byte-Pair-Encoding.</p> <p>This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece), so a word will be encoded differently depending on whether it is at the beginning of the sentence (without space) or not:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python"><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer</span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python">tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&quot;roberta-base&quot;</span>)</span> <span 
class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python">tokenizer(<span class="hljs-string">&quot;Hello world&quot;</span>)[<span class="hljs-string">&#x27;input_ids&#x27;</span>]</span> [0, 31414, 232, 2] <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python">tokenizer(<span class="hljs-string">&quot; Hello world&quot;</span>)[<span class="hljs-string">&#x27;input_ids&#x27;</span>]</span> [0, 20920, 232, 2]<!-- HTML_TAG_END --></pre></div> <p>You can get around that behavior by passing <code>add_prefix_space=True</code> when instantiating this tokenizer or when you call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>When used with <code>is_split_into_words=True</code>, this tokenizer will add a space before each word (even the first one).</p></div> <p>This tokenizer inherits from <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a> which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.RobertaTokenizer.build_inputs_with_special_tokens"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>build_inputs_with_special_tokens</span></h4><!-- HTML_TAG_END --> <a id="transformers.RobertaTokenizer.build_inputs_with_special_tokens" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.RobertaTokenizer.build_inputs_with_special_tokens"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 
84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/tokenization_roberta.py#L340" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_0<span class="opacity-60">: typing.List[int]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_1<span class="opacity-60">: typing.Optional[typing.List[int]] = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>List[int]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaTokenizer.build_inputs_with_special_tokens.token_ids_0" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaTokenizer.build_inputs_with_special_tokens.token_ids_0"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs to which the special tokens will be added.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaTokenizer.build_inputs_with_special_tokens.token_ids_1" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaTokenizer.build_inputs_with_special_tokens.token_ids_1"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.RobertaTokenizer.build_inputs_with_special_tokens.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>List[int]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>List of <a href="../glossary#input-ids">input IDs</a> with the appropriate special tokens.</p> <!-- HTML_TAG_END --></p></div></div> <p>Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and adding special tokens. A RoBERTa sequence has the following format:</p> <ul><li>single sequence: <code>&lt;s&gt; X &lt;/s&gt;</code></li> <li>pair of sequences: <code>&lt;s&gt; A &lt;/s&gt;&lt;/s&gt; B &lt;/s&gt;</code></li></ul></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.RobertaTokenizer.get_special_tokens_mask"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>get_special_tokens_mask</span></h4><!-- HTML_TAG_END --> <a id="transformers.RobertaTokenizer.get_special_tokens_mask" class="header-link invisible with-hover:group-hover:visible pr-2" 
href="#transformers.RobertaTokenizer.get_special_tokens_mask"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/tokenization_roberta.py#L365" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_0<span class="opacity-60">: typing.List[int]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_1<span class="opacity-60">: typing.Optional[typing.List[int]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">already_has_special_tokens<span class="opacity-60">: bool = False</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>List[int]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaTokenizer.get_special_tokens_mask.token_ids_0" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaTokenizer.get_special_tokens_mask.token_ids_0"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_0</strong> 
(<code>List[int]</code>) &#x2014; List of IDs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaTokenizer.get_special_tokens_mask.token_ids_1" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaTokenizer.get_special_tokens_mask.token_ids_1"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaTokenizer.get_special_tokens_mask.already_has_special_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaTokenizer.get_special_tokens_mask.already_has_special_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>already_has_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the token list is already formatted with special tokens for the model.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.RobertaTokenizer.get_special_tokens_mask.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>List[int]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.</p> <!-- HTML_TAG_END --></p></div></div> <p>Retrieve sequence ids from a token list that has no special 
tokens added. This method is called when adding special tokens using the tokenizer <code>prepare_for_model</code> method.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.RobertaTokenizer.create_token_type_ids_from_sequences"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>create_token_type_ids_from_sequences</span></h4><!-- HTML_TAG_END --> <a id="transformers.RobertaTokenizer.create_token_type_ids_from_sequences" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.RobertaTokenizer.create_token_type_ids_from_sequences"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/tokenization_roberta.py#L392" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_0<span class="opacity-60">: typing.List[int]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_1<span class="opacity-60">: typing.Optional[typing.List[int]] = 
None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>List[int]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaTokenizer.create_token_type_ids_from_sequences.token_ids_0" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaTokenizer.create_token_type_ids_from_sequences.token_ids_0"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaTokenizer.create_token_type_ids_from_sequences.token_ids_1" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaTokenizer.create_token_type_ids_from_sequences.token_ids_1"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.RobertaTokenizer.create_token_type_ids_from_sequences.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>List[int]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 
dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>List of zeros.</p> <!-- HTML_TAG_END --></p></div></div> <p>Create a mask from the two sequences passed to be used in a sequence-pair classification task. RoBERTa does not make use of token type ids, therefore a list of zeros is returned.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.RobertaTokenizer.save_vocabulary"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>save_vocabulary</span></h4><!-- HTML_TAG_END --> <a id="transformers.RobertaTokenizer.save_vocabulary" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.RobertaTokenizer.save_vocabulary"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/tokenization_roberta.py#L311" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">save_directory<span class="opacity-60">: str</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white 
dark:hover:bg-white dark:hover:text-black">filename_prefix<span class="opacity-60">: typing.Optional[str] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div></div></div> <h2 class="relative group"><a id="transformers.RobertaTokenizerFast" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaTokenizerFast"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>RobertaTokenizerFast </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.RobertaTokenizerFast"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">RobertaTokenizerFast</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.RobertaTokenizerFast" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.RobertaTokenizerFast"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 
56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/tokenization_roberta_fast.py#L68" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">vocab_file<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">merges_file<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tokenizer_file<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">errors<span class="opacity-60"> = &#39;replace&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">bos_token<span class="opacity-60"> = &#39;&lt;s&gt;&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eos_token<span class="opacity-60"> = &#39;&lt;/s&gt;&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">sep_token<span class="opacity-60"> = &#39;&lt;/s&gt;&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cls_token<span class="opacity-60"> = &#39;&lt;s&gt;&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">unk_token<span class="opacity-60"> = &#39;&lt;unk&gt;&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_token<span class="opacity-60"> = &#39;&lt;pad&gt;&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">mask_token<span class="opacity-60"> = &#39;&lt;mask&gt;&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">add_prefix_space<span class="opacity-60"> = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">trim_offsets<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 
ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaTokenizerFast.vocab_file" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaTokenizerFast.vocab_file"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>vocab_file</strong> (<code>str</code>) &#x2014; Path to the vocabulary file.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaTokenizerFast.merges_file" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaTokenizerFast.merges_file"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>merges_file</strong> (<code>str</code>) &#x2014; Path to the merges file.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaTokenizerFast.errors" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaTokenizerFast.errors"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 
0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>errors</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;replace&quot;</code>) &#x2014; Paradigm to follow when decoding bytes to UTF-8. See <a href="https://docs.python.org/3/library/stdtypes.html#bytes.decode" rel="nofollow">bytes.decode</a> for more information.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaTokenizerFast.bos_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaTokenizerFast.bos_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>bos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;s&gt;&quot;</code>) &#x2014; The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>When building a sequence using special tokens, this is not the token that is used for the beginning of sequence.
The token used is the <code>cls_token</code>.</p> </div><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaTokenizerFast.eos_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaTokenizerFast.eos_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>eos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;/s&gt;&quot;</code>) &#x2014; The end of sequence token.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the <code>sep_token</code>.</p> </div><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaTokenizerFast.sep_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaTokenizerFast.sep_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>sep_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;/s&gt;&quot;</code>) &#x2014; The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. 
It is also used as the last token of a sequence built with special tokens.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaTokenizerFast.cls_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaTokenizerFast.cls_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cls_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;s&gt;&quot;</code>) &#x2014; The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaTokenizerFast.unk_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaTokenizerFast.unk_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;unk&gt;&quot;</code>) &#x2014; The unknown token. 
A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaTokenizerFast.pad_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaTokenizerFast.pad_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pad_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;pad&gt;&quot;</code>) &#x2014; The token used for padding, for example when batching sequences of different lengths.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaTokenizerFast.mask_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaTokenizerFast.mask_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>mask_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;mask&gt;&quot;</code>) &#x2014; The token used for masking values. This is the token used when training this model with masked language modeling. 
This is the token which the model will try to predict.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaTokenizerFast.add_prefix_space" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaTokenizerFast.add_prefix_space"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>add_prefix_space</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to add an initial space to the input. This allows the leading word to be treated just like any other word. (The RoBERTa tokenizer detects the beginning of words by the preceding space.)<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaTokenizerFast.trim_offsets" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaTokenizerFast.trim_offsets"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>trim_offsets</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether the post-processing step should trim offsets to avoid including whitespaces.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Construct a “fast” RoBERTa tokenizer (backed by HuggingFace’s <em>tokenizers</em> library), derived from the GPT-2 tokenizer, using byte-level Byte-Pair-Encoding.</p> <p>This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece), so a word will be encoded differently depending on whether it is at the beginning of the sentence (without a space) or not:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm
focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python"><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizerFast</span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python">tokenizer = RobertaTokenizerFast.from_pretrained(<span class="hljs-string">&quot;roberta-base&quot;</span>)</span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python">tokenizer(<span class="hljs-string">&quot;Hello world&quot;</span>)[<span class="hljs-string">&#x27;input_ids&#x27;</span>]</span> [0, 31414, 232, 2] <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python">tokenizer(<span class="hljs-string">&quot; Hello world&quot;</span>)[<span class="hljs-string">&#x27;input_ids&#x27;</span>]</span> [0, 20920, 232, 2]<!-- HTML_TAG_END --></pre></div> <p>You can get around that behavior by passing <code>add_prefix_space=True</code> when instantiating this tokenizer or when you call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>When used with <code>is_split_into_words=True</code>, this tokenizer needs to be instantiated with <code>add_prefix_space=True</code>.</p></div> <p>This tokenizer inherits from <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast">PreTrainedTokenizerFast</a> which contains most of the main methods.
Users should refer to this superclass for more information regarding those methods.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.RobertaTokenizerFast.build_inputs_with_special_tokens"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>build_inputs_with_special_tokens</span></h4><!-- HTML_TAG_END --> <a id="transformers.RobertaTokenizerFast.build_inputs_with_special_tokens" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.RobertaTokenizerFast.build_inputs_with_special_tokens"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/tokenization_roberta_fast.py#L270" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_0<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_1<span class="opacity-60"> = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> 
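<p>A minimal usage sketch, assuming the <code>roberta-base</code> checkpoint (where <code>&lt;s&gt;</code> has token id 0 and <code>&lt;/s&gt;</code> has token id 2): this method builds a single sequence as <code>&lt;s&gt; A &lt;/s&gt;</code> and a pair of sequences as <code>&lt;s&gt; A &lt;/s&gt;&lt;/s&gt; B &lt;/s&gt;</code>.</p> <div class="code-block relative"><pre><!-- HTML_TAG_START --># Illustrative sketch, assuming the roberta-base checkpoint is available.
from transformers import RobertaTokenizerFast

tokenizer = RobertaTokenizerFast.from_pretrained(&quot;roberta-base&quot;)

# Token ids without special tokens, e.g. for &quot;Hello&quot; and &quot; world&quot;.
ids_a = tokenizer(&quot;Hello&quot;, add_special_tokens=False)[&quot;input_ids&quot;]
ids_b = tokenizer(&quot; world&quot;, add_special_tokens=False)[&quot;input_ids&quot;]

# Single sequence: &lt;s&gt; A &lt;/s&gt;, i.e. [0] + ids_a + [2]
print(tokenizer.build_inputs_with_special_tokens(ids_a))

# Pair of sequences: &lt;s&gt; A &lt;/s&gt;&lt;/s&gt; B &lt;/s&gt;, i.e. [0] + ids_a + [2, 2] + ids_b + [2]
print(tokenizer.build_inputs_with_special_tokens(ids_a, ids_b))<!-- HTML_TAG_END --></pre></div>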
</div></div></div></div> <h2 class="relative group"><a id="transformers.RobertaModel" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaModel"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>RobertaModel </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.RobertaModel"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">RobertaModel</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.RobertaModel" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.RobertaModel"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/modeling_roberta.py#L696" 
target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">add_pooling_layer<span class="opacity-60"> = True</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaModel.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaModel.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaConfig">RobertaConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>The bare RoBERTa Model transformer outputting raw hidden-states without any specific head on top.</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads, etc.)</p> <p>This model is also a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> subclass.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and behavior.</p> <p>The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of cross-attention is added between the self-attention layers, following the architecture described in <a href="https://arxiv.org/abs/1706.03762" rel="nofollow"><em>Attention is all you need</em></a> by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.</p> <p>To behave as a decoder the model needs to be initialized with the <code>is_decoder</code> argument of the configuration set to <code>True</code>. To be used in a Seq2Seq model, the model needs to be initialized with both the <code>is_decoder</code> argument and <code>add_cross_attention</code> set to <code>True</code>; an <code>encoder_hidden_states</code> is then expected as an input to the forward pass.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.RobertaModel.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.RobertaModel.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.RobertaModel.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a
class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/modeling_roberta.py#L741" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_hidden_states<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_attention_mask<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60">: typing.Optional[typing.List[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_cache<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a 
href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions" >transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaModel.forward.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaModel.forward.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaTokenizer">RobertaTokenizer</a>. 
See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaModel.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaModel.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaModel.forward.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaModel.forward.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaModel.forward.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaModel.forward.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaModel.forward.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaModel.forward.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaModel.forward.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaModel.forward.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaModel.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaModel.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaModel.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaModel.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaModel.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaModel.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaModel.forward.encoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaModel.forward.encoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_hidden_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaModel.forward.encoder_attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaModel.forward.encoder_attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaModel.forward.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaModel.forward.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code> of length <code>config.n_layers</code> with each tuple having 4 tensors of shape <code>(batch_size, num_heads, sequence_length - 1, embed_size_per_head)</code>) &#x2014; Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaModel.forward.use_cache" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaModel.forward.use_cache"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).<!-- HTML_TAG_END --> 
</span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.RobertaModel.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions" >transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions" >transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaConfig" >RobertaConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) — Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>pooler_output</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, hidden_size)</code>) — Last layer hidden-state of the first token of the sequence (classification token) after further processing through the layers used for the auxiliary pretraining task. E.g. for BERT-family of models, this returns the classification token after processing through a linear layer and a tanh activation function. 
The linear layer weights are trained from the next sentence prediction (classification) objective during pretraining.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attention weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> and <code>config.add_cross_attention=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attention weights of the decoder’s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) — Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code> and optionally, if <code>config.is_encoder_decoder=True</code>, 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally, if <code>config.is_encoder_decoder=True</code>, in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaModel">RobertaModel</a> forward method overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for the forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre- and post-processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 
text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer, RobertaModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&quot;roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = RobertaModel.from_pretrained(<span class="hljs-string">&quot;roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.RobertaForCausalLM" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaForCausalLM"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>RobertaForCausalLM </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.RobertaForCausalLM"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" 
aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">RobertaForCausalLM</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.RobertaForCausalLM" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.RobertaForCausalLM"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/modeling_roberta.py#L882" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaForCausalLM.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaForCausalLM.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 
11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaConfig">RobertaConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>RoBERTa Model with a <code>language modeling</code> head on top for CLM fine-tuning.</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)</p> <p>This model is also a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.RobertaForCausalLM.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.RobertaForCausalLM.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.RobertaForCausalLM.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 
0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/modeling_roberta.py#L908" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: typing.Optional[torch.LongTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60">: typing.Optional[torch.LongTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60">: typing.Optional[torch.LongTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_hidden_states<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_attention_mask<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60">: typing.Optional[torch.LongTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60">: typing.Tuple[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_cache<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = 
None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.CausalLMOutputWithCrossAttentions" >transformers.modeling_outputs.CausalLMOutputWithCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaForCausalLM.forward.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaForCausalLM.forward.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaTokenizer">RobertaTokenizer</a>. 
See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaForCausalLM.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaForCausalLM.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaForCausalLM.forward.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaForCausalLM.forward.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaForCausalLM.forward.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaForCausalLM.forward.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaForCausalLM.forward.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaForCausalLM.forward.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaForCausalLM.forward.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaForCausalLM.forward.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaForCausalLM.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaForCausalLM.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaForCausalLM.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaForCausalLM.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaForCausalLM.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaForCausalLM.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaForCausalLM.forward.encoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaForCausalLM.forward.encoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" 
width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_hidden_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaForCausalLM.forward.encoder_attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaForCausalLM.forward.encoder_attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaForCausalLM.forward.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaForCausalLM.forward.labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaForCausalLM.forward.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaForCausalLM.forward.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code> of length <code>config.n_layers</code> with each tuple having 4 tensors of shape <code>(batch_size, num_heads, sequence_length - 1, embed_size_per_head)</code>) &#x2014; Contains precomputed key and value hidden states of the attention blocks. 
Can be used to speed up decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaForCausalLM.forward.use_cache" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaForCausalLM.forward.use_cache"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.RobertaForCausalLM.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.CausalLMOutputWithCrossAttentions" >transformers.modeling_outputs.CausalLMOutputWithCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.CausalLMOutputWithCrossAttentions" >transformers.modeling_outputs.CausalLMOutputWithCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaConfig" >RobertaConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) — Language modeling loss (for next-token prediction).</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) — Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, 
<em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attention weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Cross-attention weights after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) — Tuple of <code>torch.FloatTensor</code> tuples of length <code>config.n_layers</code>, with each tuple containing the cached key and value states of the self-attention and the cross-attention layers if the model is used in an encoder-decoder setting.
Only relevant if <code>config.is_decoder = True</code>.</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaForCausalLM">RobertaForCausalLM</a> forward method overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for the forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre- and post-processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer, RobertaForCausalLM, RobertaConfig <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&quot;roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>config = RobertaConfig.from_pretrained(<span class="hljs-string">&quot;roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>config.is_decoder = <span class="hljs-literal">True</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = RobertaForCausalLM.from_pretrained(<span class="hljs-string">&quot;roberta-base&quot;</span>, config=config) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>prediction_logits = outputs.logits<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.RobertaForMaskedLM" class="header-link block pr-1.5 text-lg no-hover:hidden 
with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaForMaskedLM"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>RobertaForMaskedLM </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.RobertaForMaskedLM"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">RobertaForMaskedLM</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.RobertaForMaskedLM" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.RobertaForMaskedLM"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/modeling_roberta.py#L1034" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> 
<span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaForMaskedLM.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaForMaskedLM.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaConfig">RobertaConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>RoBERTa Model with a <code>language modeling</code> head on top.</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)</p> <p>This model is also a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.RobertaForMaskedLM.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.RobertaForMaskedLM.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.RobertaForMaskedLM.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/modeling_roberta.py#L1063" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: typing.Optional[torch.LongTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded 
hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60">: typing.Optional[torch.LongTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60">: typing.Optional[torch.LongTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_hidden_states<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_attention_mask<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60">: typing.Optional[torch.LongTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.MaskedLMOutput" >transformers.modeling_outputs.MaskedLMOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaForMaskedLM.forward.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaForMaskedLM.forward.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 
1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaTokenizer">RobertaTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaForMaskedLM.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaForMaskedLM.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaForMaskedLM.forward.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaForMaskedLM.forward.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaForMaskedLM.forward.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaForMaskedLM.forward.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaForMaskedLM.forward.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaForMaskedLM.forward.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaForMaskedLM.forward.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaForMaskedLM.forward.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaForMaskedLM.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaForMaskedLM.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaForMaskedLM.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaForMaskedLM.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaForMaskedLM.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaForMaskedLM.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaForMaskedLM.forward.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaForMaskedLM.forward.labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. 
Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see the <code>input_ids</code> docstring). Tokens with indices set to <code>-100</code> are ignored (masked); the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaForMaskedLM.forward.kwargs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaForMaskedLM.forward.kwargs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>kwargs</strong> (<code>Dict[str, any]</code>, optional, defaults to <em>{}</em>) &#x2014; Used to hide legacy arguments that have been deprecated.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.RobertaForMaskedLM.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.MaskedLMOutput" >transformers.modeling_outputs.MaskedLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.MaskedLMOutput" >transformers.modeling_outputs.MaskedLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaConfig" >RobertaConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) — Masked language modeling (MLM) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) — Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, 
hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attention weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaForMaskedLM">RobertaForMaskedLM</a> forward method overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for the forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this function, since the former takes care of running the pre- and post-processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer, RobertaForMaskedLM <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&quot;roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = RobertaForMaskedLM.from_pretrained(<span class="hljs-string">&quot;roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;The capital of France is &lt;mask&gt;.&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = tokenizer(<span class="hljs-string">&quot;The capital of France is Paris.&quot;</span>, return_tensors=<span 
class="hljs-string">&quot;pt&quot;</span>)[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.RobertaForSequenceClassification" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaForSequenceClassification"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>RobertaForSequenceClassification </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.RobertaForSequenceClassification"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">RobertaForSequenceClassification</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.RobertaForSequenceClassification" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.RobertaForSequenceClassification"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 
79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/modeling_roberta.py#L1163" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaForSequenceClassification.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaForSequenceClassification.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaConfig">RobertaConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>RoBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output), e.g. for GLUE tasks.</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads, etc.)</p> <p>This model is also a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.RobertaForSequenceClassification.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.RobertaForSequenceClassification.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.RobertaForSequenceClassification.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/modeling_roberta.py#L1177" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: typing.Optional[torch.LongTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma 
cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60">: typing.Optional[torch.LongTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60">: typing.Optional[torch.LongTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60">: typing.Optional[torch.LongTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaForSequenceClassification.forward.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaForSequenceClassification.forward.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 
0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaTokenizer">RobertaTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaForSequenceClassification.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaForSequenceClassification.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaForSequenceClassification.forward.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaForSequenceClassification.forward.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaForSequenceClassification.forward.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaForSequenceClassification.forward.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaForSequenceClassification.forward.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaForSequenceClassification.forward.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaForSequenceClassification.forward.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaForSequenceClassification.forward.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaForSequenceClassification.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaForSequenceClassification.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaForSequenceClassification.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaForSequenceClassification.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaForSequenceClassification.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaForSequenceClassification.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaForSequenceClassification.forward.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaForSequenceClassification.forward.labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. 
If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.RobertaForSequenceClassification.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaConfig" >RobertaConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) — Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) — Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaForSequenceClassification">RobertaForSequenceClassification</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example of 
single-label classification:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer, RobertaForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span>torch.manual_seed(<span class="hljs-number">0</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&quot;roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = RobertaForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;roberta-base&quot;</span>, num_labels=<span class="hljs-number">2</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([<span class="hljs-number">1</span>]).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">list</span>(logits.shape) <!-- HTML_TAG_END --></pre></div> <p>Example of multi-label classification:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none 
transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer, RobertaForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span>torch.manual_seed(<span class="hljs-number">0</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&quot;roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = RobertaForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;roberta-base&quot;</span>, problem_type=<span class="hljs-string">&quot;multi_label_classification&quot;</span>, num_labels=<span class="hljs-number">2</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([[<span class="hljs-number">1</span>, <span class="hljs-number">1</span>]], dtype=torch.<span class="hljs-built_in">float</span>) <span class="hljs-comment"># need dtype=float for BCEWithLogitsLoss</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">list</span>(logits.shape) <!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.RobertaForMultipleChoice" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaForMultipleChoice"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>RobertaForMultipleChoice </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.RobertaForMultipleChoice"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block 
-mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">RobertaForMultipleChoice</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.RobertaForMultipleChoice" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.RobertaForMultipleChoice"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/modeling_roberta.py#L1261" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaForMultipleChoice.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaForMultipleChoice.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 
79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaConfig">RobertaConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Roberta Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)</p> <p>This model is also a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.RobertaForMultipleChoice.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.RobertaForMultipleChoice.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.RobertaForMultipleChoice.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/modeling_roberta.py#L1274" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: typing.Optional[torch.LongTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60">: typing.Optional[torch.LongTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60">: typing.Optional[torch.LongTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60">: typing.Optional[torch.LongTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a 
href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.MultipleChoiceModelOutput" >transformers.modeling_outputs.MultipleChoiceModelOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaForMultipleChoice.forward.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaForMultipleChoice.forward.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaTokenizer">RobertaTokenizer</a>. 
See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaForMultipleChoice.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaForMultipleChoice.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaForMultipleChoice.forward.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaForMultipleChoice.forward.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaForMultipleChoice.forward.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaForMultipleChoice.forward.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaForMultipleChoice.forward.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaForMultipleChoice.forward.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaForMultipleChoice.forward.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaForMultipleChoice.forward.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaForMultipleChoice.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaForMultipleChoice.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaForMultipleChoice.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaForMultipleChoice.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaForMultipleChoice.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaForMultipleChoice.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaForMultipleChoice.forward.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaForMultipleChoice.forward.labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the multiple choice classification loss. Indices should be in <code>[0, ..., num_choices-1]</code> where <code>num_choices</code> is the size of the second dimension of the input tensors. 
(See <code>input_ids</code> above)<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.RobertaForMultipleChoice.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.MultipleChoiceModelOutput" >transformers.modeling_outputs.MultipleChoiceModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.MultipleChoiceModelOutput" >transformers.modeling_outputs.MultipleChoiceModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaConfig" >RobertaConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <em>(1,)</em>, <em>optional</em>, returned when <code>labels</code> is provided) — Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices)</code>) — <em>num_choices</em> is the second dimension of the input tensors. (see <em>input_ids</em> above).</p> <p>Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaForMultipleChoice">RobertaForMultipleChoice</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 
cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer, RobertaForMultipleChoice <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&quot;roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = RobertaForMultipleChoice.from_pretrained(<span class="hljs-string">&quot;roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice0 = <span class="hljs-string">&quot;It is eaten with a fork and a knife.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice1 = <span class="hljs-string">&quot;It is eaten while held in the hand.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor(<span class="hljs-number">0</span>).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># choice0 is correct (according to Wikipedia ;)), batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors=<span class="hljs-string">&quot;pt&quot;</span>, padding=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**{k: v.unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-keyword">for</span> k, v <span class="hljs-keyword">in</span> encoding.items()}, labels=labels) <span class="hljs-comment"># batch size is 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># the linear classifier still needs to be trained</span> <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.RobertaForTokenClassification" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaForTokenClassification"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" 
width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>RobertaForTokenClassification </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.RobertaForTokenClassification"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">RobertaForTokenClassification</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.RobertaForTokenClassification" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.RobertaForTokenClassification"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/modeling_roberta.py#L1354" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">config<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaForTokenClassification.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaForTokenClassification.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaConfig">RobertaConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Roberta Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)</p> <p>This model is also a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.RobertaForTokenClassification.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.RobertaForTokenClassification.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.RobertaForTokenClassification.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/modeling_roberta.py#L1372" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: typing.Optional[torch.LongTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma 
cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60">: typing.Optional[torch.LongTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60">: typing.Optional[torch.LongTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60">: typing.Optional[torch.LongTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaForTokenClassification.forward.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaForTokenClassification.forward.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaTokenizer">RobertaTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaForTokenClassification.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaForTokenClassification.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaForTokenClassification.forward.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaForTokenClassification.forward.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaForTokenClassification.forward.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaForTokenClassification.forward.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaForTokenClassification.forward.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaForTokenClassification.forward.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaForTokenClassification.forward.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaForTokenClassification.forward.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaForTokenClassification.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaForTokenClassification.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaForTokenClassification.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaForTokenClassification.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaForTokenClassification.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaForTokenClassification.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaForTokenClassification.forward.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaForTokenClassification.forward.labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the token classification loss. 
Indices should be in <code>[0, ..., config.num_labels - 1]</code>.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.RobertaForTokenClassification.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaConfig" >RobertaConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) — Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.num_labels)</code>) — Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaForTokenClassification">RobertaForTokenClassification</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition 
duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer, RobertaForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&quot;roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = RobertaForTokenClassification.from_pretrained(<span class="hljs-string">&quot;roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([<span class="hljs-number">1</span>] * inputs[<span class="hljs-string">&quot;input_ids&quot;</span>].size(<span class="hljs-number">1</span>)).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.RobertaForQuestionAnswering" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaForQuestionAnswering"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>RobertaForQuestionAnswering </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 
bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.RobertaForQuestionAnswering"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">RobertaForQuestionAnswering</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.RobertaForQuestionAnswering" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.RobertaForQuestionAnswering"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/modeling_roberta.py#L1461" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaForQuestionAnswering.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaForQuestionAnswering.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" 
aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaConfig">RobertaConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Roberta Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute <code>span start logits</code> and <code>span end logits</code>).</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)</p> <p>This model is also a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and behavior.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.RobertaForQuestionAnswering.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.RobertaForQuestionAnswering.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.RobertaForQuestionAnswering.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/modeling_roberta.py#L1475" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: typing.Optional[torch.LongTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma 
cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60">: typing.Optional[torch.LongTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60">: typing.Optional[torch.LongTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">start_positions<span class="opacity-60">: typing.Optional[torch.LongTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">end_positions<span class="opacity-60">: typing.Optional[torch.LongTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.QuestionAnsweringModelOutput" >transformers.modeling_outputs.QuestionAnsweringModelOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaForQuestionAnswering.forward.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaForQuestionAnswering.forward.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 
79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaTokenizer">RobertaTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaForQuestionAnswering.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaForQuestionAnswering.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaForQuestionAnswering.forward.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaForQuestionAnswering.forward.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaForQuestionAnswering.forward.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaForQuestionAnswering.forward.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaForQuestionAnswering.forward.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaForQuestionAnswering.forward.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaForQuestionAnswering.forward.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaForQuestionAnswering.forward.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaForQuestionAnswering.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaForQuestionAnswering.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaForQuestionAnswering.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaForQuestionAnswering.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaForQuestionAnswering.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaForQuestionAnswering.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaForQuestionAnswering.forward.start_positions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaForQuestionAnswering.forward.start_positions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>start_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). 
Positions outside of the sequence are not taken into account for computing the loss.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RobertaForQuestionAnswering.forward.end_positions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RobertaForQuestionAnswering.forward.end_positions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>end_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). Positions outside of the sequence are not taken into account for computing the loss.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.RobertaForQuestionAnswering.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.QuestionAnsweringModelOutput" >transformers.modeling_outputs.QuestionAnsweringModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.QuestionAnsweringModelOutput" >transformers.modeling_outputs.QuestionAnsweringModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaConfig" >RobertaConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) — Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.</p> </li> <li> <p><strong>start_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) — Span-start scores (before SoftMax).</p> </li> <li> <p><strong>end_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) — Span-end scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> 
(<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaForQuestionAnswering">RobertaForQuestionAnswering</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer, RobertaForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>torch.manual_seed(<span class="hljs-number">0</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&quot;roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = RobertaForQuestionAnswering.from_pretrained(<span 
class="hljs-string">&quot;roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>question, text = <span class="hljs-string">&quot;Who was Jim Henson?&quot;</span>, <span class="hljs-string">&quot;Jim Henson was a nice puppet&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(question, text, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>start_positions = torch.tensor([<span class="hljs-number">1</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>end_positions = torch.tensor([<span class="hljs-number">3</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, start_positions=start_positions, end_positions=end_positions) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">round</span>(loss.item(), <span class="hljs-number">2</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>start_scores = outputs.start_logits <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">list</span>(start_scores.shape) <span class="hljs-meta">&gt;&gt;&gt; </span>end_scores = outputs.end_logits <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">list</span>(end_scores.shape) <!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.TFRobertaModel" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaModel"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TFRobertaModel </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFRobertaModel"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 
3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TFRobertaModel</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TFRobertaModel" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFRobertaModel"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/modeling_tf_roberta.py#L910" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRobertaModel.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaModel.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaConfig">RobertaConfig</a>) &#x2014; Model configuration class with all the parameters of the model. 
Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>The bare RoBERTa Model transformer outputting raw hidden-states without any specific head on top.</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads, etc.).</p> <p>This model is also a <a href="https://www.tensorflow.org/api_docs/python/tf/keras/Model" rel="nofollow">tf.keras.Model</a> subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and behavior.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>TF 2.0 models accept two formats as inputs:</p> <ul><li>having all inputs as keyword arguments (like PyTorch models), or</li> <li>having all inputs as a list, tuple or dict in the first positional argument.</li></ul> <p>This second option is useful when using the <code>tf.keras.Model.fit</code> method, which currently requires having all the tensors in the first argument of the model call function: <code>model(inputs)</code>.</p> <p>If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument:</p> <ul><li>a single Tensor with <code>input_ids</code> only and nothing else: <code>model(input_ids)</code></li> <li>a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: <code>model([input_ids, attention_mask])</code> or <code>model([input_ids, attention_mask, token_type_ids])</code></li> <li>a dictionary with one or several input Tensors associated with the input names given in the docstring: <code>model({&quot;input_ids&quot;: input_ids, &quot;token_type_ids&quot;: token_type_ids})</code></li></ul></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFRobertaModel.call"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 
22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>call</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFRobertaModel.call" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFRobertaModel.call"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/modeling_tf_roberta.py#L915" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded 
hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_hidden_states<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_attention_mask<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60">: typing.Union[typing.Tuple[typing.Tuple[typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor]]], NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_cache<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">training<span class="opacity-60">: typing.Optional[bool] = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFBaseModelOutputWithPoolingAndCrossAttentions" >transformers.modeling_tf_outputs.TFBaseModelOutputWithPoolingAndCrossAttentions</a> or <code>tuple(tf.Tensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRobertaModel.call.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaModel.call.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" 
role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaTokenizer">RobertaTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRobertaModel.call.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaModel.call.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRobertaModel.call.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaModel.call.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRobertaModel.call.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaModel.call.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRobertaModel.call.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaModel.call.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRobertaModel.call.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaModel.call.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRobertaModel.call.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaModel.call.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRobertaModel.call.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaModel.call.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRobertaModel.call.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaModel.call.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRobertaModel.call.training" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaModel.call.training"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRobertaModel.call.encoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaModel.call.encoder_hidden_states"><span><svg class="text-smd" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_hidden_states</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRobertaModel.call.encoder_attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaModel.call.encoder_attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_attention_mask</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRobertaModel.call.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaModel.call.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>Tuple[Tuple[tf.Tensor]]</code> of length <code>config.n_layers</code>) &#x2014; contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRobertaModel.call.use_cache" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaModel.call.use_cache"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>). 
Set to <code>False</code> during training, <code>True</code> during generation.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.TFRobertaModel.call.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFBaseModelOutputWithPoolingAndCrossAttentions" >transformers.modeling_tf_outputs.TFBaseModelOutputWithPoolingAndCrossAttentions</a> or <code>tuple(tf.Tensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFBaseModelOutputWithPoolingAndCrossAttentions" >transformers.modeling_tf_outputs.TFBaseModelOutputWithPoolingAndCrossAttentions</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaConfig" >RobertaConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) — Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>pooler_output</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, hidden_size)</code>) — Last layer hidden-state of the first token of the sequence (classification token) further processed by a Linear layer and a Tanh activation function. The Linear layer weights are trained from the next sentence prediction (classification) objective during pretraining.</p> <p>This output is usually <em>not</em> a good summary of the semantic content of the input; you’re often better off averaging or pooling the sequence of hidden-states for the whole input sequence.</p> </li> <li> <p><strong>past_key_values</strong> (<code>List[tf.Tensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) — List of <code>tf.Tensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the 
weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder’s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.TFRobertaModel">TFRobertaModel</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer, TFRobertaModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&quot;roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFRobertaModel.from_pretrained(<span class="hljs-string">&quot;roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a 
id="transformers.TFRobertaForCausalLM" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaForCausalLM"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TFRobertaForCausalLM </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFRobertaForCausalLM"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TFRobertaForCausalLM</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TFRobertaForCausalLM" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFRobertaForCausalLM"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" 
href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/modeling_tf_roberta.py#L1138" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFRobertaForCausalLM.call"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>call</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFRobertaForCausalLM.call" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFRobertaForCausalLM.call"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/modeling_tf_roberta.py#L1171" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 
hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_hidden_states<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_attention_mask<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60">: typing.Union[typing.Tuple[typing.Tuple[typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor]]], NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_cache<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span 
class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">training<span class="opacity-60">: typing.Optional[bool] = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFCausalLMOutputWithCrossAttentions" >transformers.modeling_tf_outputs.TFCausalLMOutputWithCrossAttentions</a> or <code>tuple(tf.Tensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRobertaForCausalLM.call.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaForCausalLM.call.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaTokenizer">RobertaTokenizer</a>. 
See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRobertaForCausalLM.call.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaForCausalLM.call.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRobertaForCausalLM.call.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaForCausalLM.call.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRobertaForCausalLM.call.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaForCausalLM.call.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRobertaForCausalLM.call.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaForCausalLM.call.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRobertaForCausalLM.call.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaForCausalLM.call.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRobertaForCausalLM.call.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaForCausalLM.call.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRobertaForCausalLM.call.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaForCausalLM.call.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRobertaForCausalLM.call.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaForCausalLM.call.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. 
This argument can be used in eager mode, in graph mode the value will always be set to True.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRobertaForCausalLM.call.training" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaForCausalLM.call.training"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRobertaForCausalLM.call.encoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaForCausalLM.call.encoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_hidden_states</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder. 
Used in the cross-attention if the model is configured as a decoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRobertaForCausalLM.call.encoder_attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaForCausalLM.call.encoder_attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_attention_mask</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRobertaForCausalLM.call.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaForCausalLM.call.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>Tuple[Tuple[tf.Tensor]]</code> of length <code>config.n_layers</code>) &#x2014; contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. 
If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRobertaForCausalLM.call.use_cache" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaForCausalLM.call.use_cache"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>). Set to <code>False</code> during training, <code>True</code> during generation<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRobertaForCausalLM.call.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaForCausalLM.call.labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>tf.Tensor</code> or <code>np.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the cross entropy classification loss. 
Indices should be in <code>[0, ..., config.vocab_size - 1]</code>.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.TFRobertaForCausalLM.call.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFCausalLMOutputWithCrossAttentions" >transformers.modeling_tf_outputs.TFCausalLMOutputWithCrossAttentions</a> or <code>tuple(tf.Tensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFCausalLMOutputWithCrossAttentions" >transformers.modeling_tf_outputs.TFCausalLMOutputWithCrossAttentions</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaConfig" >RobertaConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(n,)</code>, <em>optional</em>, where n is the number of non-masked labels, returned when <code>labels</code> is provided) — Language modeling loss (for next-token prediction).</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) — Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder’s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>past_key_values</strong> (<code>List[tf.Tensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) — List of <code>tf.Tensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, 
embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.TFRobertaForCausalLM">TFRobertaForCausalLM</a> forward method overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for the forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this, since the former takes care of running the pre- and post-processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer, TFRobertaForCausalLM <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&quot;roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFRobertaForCausalLM.from_pretrained(<span class="hljs-string">&quot;roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits<!-- HTML_TAG_END --></pre></div>
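 <p>The following is a minimal, hedged sketch rather than part of the official example: it assumes the <code>tokenizer</code>, <code>model</code> and <code>inputs</code> created above, reuses the input ids as <code>labels</code> to obtain the language modeling loss, and greedily picks the most likely next token from the last position of the logits.</p> <div class="code-block relative"><pre><!-- HTML_TAG_START -->&gt;&gt;&gt; # Hedged sketch (not from the original docs): compute the LM loss and a greedy next token
&gt;&gt;&gt; outputs = model(inputs, labels=inputs[&quot;input_ids&quot;])
&gt;&gt;&gt; loss = outputs.loss
&gt;&gt;&gt; next_token_id = int(tf.argmax(outputs.logits[0, -1]))
&gt;&gt;&gt; # With a checkpoint configured as a decoder (config.is_decoder=True), passing use_cache=True
&gt;&gt;&gt; # also returns past_key_values that can be fed back to speed up sequential decoding.<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.TFRobertaForMaskedLM" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaForMaskedLM"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" 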
aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TFRobertaForMaskedLM </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFRobertaForMaskedLM"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TFRobertaForMaskedLM</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TFRobertaForMaskedLM" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFRobertaForMaskedLM"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/modeling_tf_roberta.py#L1055" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRobertaForMaskedLM.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaForMaskedLM.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaConfig">RobertaConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>RoBERTa Model with a <code>language modeling</code> head on top.</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads etc.)</p> <p>This model is also a <a href="https://www.tensorflow.org/api_docs/python/tf/keras/Model" rel="nofollow">tf.keras.Model</a> subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and behavior.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>TF 2.0 models accept two formats as inputs:</p> <ul><li>having all inputs as keyword arguments (like PyTorch models), or</li> <li>having all inputs as a list, tuple or dict in the first positional argument.</li></ul> <p>This second option is useful when using the <code>tf.keras.Model.fit</code> method, which currently requires having all the tensors in the first argument of the model call function: <code>model(inputs)</code>.</p> <p>If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument:</p> <ul><li>a single Tensor with <code>input_ids</code> only and nothing else: <code>model(input_ids)</code></li> <li>a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: <code>model([input_ids, attention_mask])</code> or <code>model([input_ids, attention_mask, token_type_ids])</code></li> <li>a dictionary with one or several input Tensors associated with the input names given in the docstring: <code>model({&quot;input_ids&quot;: input_ids, &quot;token_type_ids&quot;: token_type_ids})</code></li></ul></div>
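 <p>As a hedged illustration of the three input formats listed above (assuming a <code>tokenizer</code> and <code>model</code> built as in the other examples on this page; the names <code>enc</code>, <code>out1</code>, <code>out2</code> and <code>out3</code> are chosen only for this sketch):</p> <div class="code-block relative"><pre><!-- HTML_TAG_START -->&gt;&gt;&gt; enc = tokenizer(&quot;Hello, my dog is cute&quot;, return_tensors=&quot;tf&quot;)
&gt;&gt;&gt; out1 = model(enc[&quot;input_ids&quot;])  # a single tensor: input_ids only
&gt;&gt;&gt; out2 = model([enc[&quot;input_ids&quot;], enc[&quot;attention_mask&quot;]])  # a list, in docstring order
&gt;&gt;&gt; out3 = model({&quot;input_ids&quot;: enc[&quot;input_ids&quot;], &quot;attention_mask&quot;: enc[&quot;attention_mask&quot;]})  # a dict keyed by input names<!-- HTML_TAG_END --></pre></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFRobertaForMaskedLM.call"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>call</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFRobertaForMaskedLM.call" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFRobertaForMaskedLM.call"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 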
0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/modeling_tf_roberta.py#L1072" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span 
class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">training<span class="opacity-60">: typing.Optional[bool] = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFMaskedLMOutput" >transformers.modeling_tf_outputs.TFMaskedLMOutput</a> or <code>tuple(tf.Tensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRobertaForMaskedLM.call.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaForMaskedLM.call.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaTokenizer">RobertaTokenizer</a>. 
See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRobertaForMaskedLM.call.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaForMaskedLM.call.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRobertaForMaskedLM.call.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaForMaskedLM.call.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRobertaForMaskedLM.call.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaForMaskedLM.call.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRobertaForMaskedLM.call.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaForMaskedLM.call.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRobertaForMaskedLM.call.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaForMaskedLM.call.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRobertaForMaskedLM.call.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaForMaskedLM.call.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRobertaForMaskedLM.call.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaForMaskedLM.call.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRobertaForMaskedLM.call.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaForMaskedLM.call.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. 
This argument can be used in eager mode, in graph mode the value will always be set to True.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRobertaForMaskedLM.call.training" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaForMaskedLM.call.training"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRobertaForMaskedLM.call.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaForMaskedLM.call.labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. 
Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code><!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.TFRobertaForMaskedLM.call.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFMaskedLMOutput" >transformers.modeling_tf_outputs.TFMaskedLMOutput</a> or <code>tuple(tf.Tensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFMaskedLMOutput" >transformers.modeling_tf_outputs.TFMaskedLMOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaConfig" >RobertaConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(n,)</code>, <em>optional</em>, where n is the number of non-masked labels, returned when <code>labels</code> is provided) — Masked language modeling (MLM) loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) — Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.TFRobertaForMaskedLM">TFRobertaForMaskedLM</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> 
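<p>The example below returns <code>loss</code> and <code>logits</code>. As a minimal illustrative sketch that is not part of the generated API reference, and assuming the <code>roberta-base</code> checkpoint with its <code>&lt;mask&gt;</code> token plus the <code>tokenizer</code>, <code>inputs</code> and <code>logits</code> names from that example, the prediction at the masked position could be decoded roughly as follows:</p> <div class="code-block relative"><pre>&gt;&gt;&gt; # hedged sketch: reuses `tokenizer`, `inputs` and `logits` from the example below
&gt;&gt;&gt; import tensorflow as tf

&gt;&gt;&gt; # locate the &lt;mask&gt; token in the encoded input (assumes it occurs exactly once)
&gt;&gt;&gt; mask_index = int(tf.where(inputs["input_ids"] == tokenizer.mask_token_id)[0, 1])

&gt;&gt;&gt; # pick the highest-scoring vocabulary id at that position and decode it
&gt;&gt;&gt; predicted_id = int(tf.argmax(logits[0, mask_index]))
&gt;&gt;&gt; print(tokenizer.decode([predicted_id]))
</pre></div>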
<p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer, TFRobertaForMaskedLM <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&quot;roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFRobertaForMaskedLM.from_pretrained(<span class="hljs-string">&quot;roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;The capital of France is &lt;mask&gt;.&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs[<span class="hljs-string">&quot;labels&quot;</span>] = tokenizer(<span class="hljs-string">&quot;The capital of France is Paris.&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>)[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.TFRobertaForSequenceClassification" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaForSequenceClassification"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 
0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TFRobertaForSequenceClassification </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFRobertaForSequenceClassification"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TFRobertaForSequenceClassification</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TFRobertaForSequenceClassification" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFRobertaForSequenceClassification"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/modeling_tf_roberta.py#L1319" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li 
class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRobertaForSequenceClassification.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaForSequenceClassification.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaConfig">RobertaConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>RoBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads etc.)</p> <p>This model is also a <a href="https://www.tensorflow.org/api_docs/python/tf/keras/Model" rel="nofollow">tf.keras.Model</a> subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and behavior.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>TF 2.0 models accept two formats as inputs:</p> <ul><li>having all inputs as keyword arguments (like PyTorch models), or</li> <li>having all inputs as a list, tuple or dict in the first positional argument.</li></ul> <p>This second option is useful when using the <code>tf.keras.Model.fit</code> method which currently requires having all the tensors in the first argument of the model call function: <code>model(inputs)</code>.</p> <p>If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument:</p> <ul><li>a single Tensor with <code>input_ids</code> only and nothing else: <code>model(input_ids)</code></li> <li>a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: <code>model([input_ids, attention_mask])</code> or <code>model([input_ids, attention_mask, token_type_ids])</code></li> <li>a dictionary with one or several input Tensors associated with the input names given in the docstring: <code>model({&quot;input_ids&quot;: input_ids, &quot;token_type_ids&quot;: token_type_ids})</code></li></ul></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFRobertaForSequenceClassification.call"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>call</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFRobertaForSequenceClassification.call" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFRobertaForSequenceClassification.call"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 
1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/modeling_tf_roberta.py#L1330" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60">: 
typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">training<span class="opacity-60">: typing.Optional[bool] = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFSequenceClassifierOutput" >transformers.modeling_tf_outputs.TFSequenceClassifierOutput</a> or <code>tuple(tf.Tensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRobertaForSequenceClassification.call.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaForSequenceClassification.call.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaTokenizer">RobertaTokenizer</a>. 
See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRobertaForSequenceClassification.call.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaForSequenceClassification.call.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRobertaForSequenceClassification.call.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaForSequenceClassification.call.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRobertaForSequenceClassification.call.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaForSequenceClassification.call.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRobertaForSequenceClassification.call.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaForSequenceClassification.call.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRobertaForSequenceClassification.call.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaForSequenceClassification.call.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRobertaForSequenceClassification.call.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaForSequenceClassification.call.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRobertaForSequenceClassification.call.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaForSequenceClassification.call.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRobertaForSequenceClassification.call.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaForSequenceClassification.call.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRobertaForSequenceClassification.call.training" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaForSequenceClassification.call.training"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRobertaForSequenceClassification.call.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.TFRobertaForSequenceClassification.call.labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>tf.Tensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.TFRobertaForSequenceClassification.call.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFSequenceClassifierOutput" >transformers.modeling_tf_outputs.TFSequenceClassifierOutput</a> or <code>tuple(tf.Tensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFSequenceClassifierOutput" >transformers.modeling_tf_outputs.TFSequenceClassifierOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaConfig" >RobertaConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, )</code>, <em>optional</em>, returned when <code>labels</code> is provided) — Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, config.num_labels)</code>) — Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when 
<code>config.output_attentions=True</code>) — Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.TFRobertaForSequenceClassification">TFRobertaForSequenceClassification</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer, TFRobertaForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&quot;roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFRobertaForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs[<span class="hljs-string">&quot;labels&quot;</span>] = tf.reshape(tf.constant(<span class="hljs-number">1</span>), (-<span class="hljs-number">1</span>, <span class="hljs-number">1</span>)) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = 
outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.TFRobertaForMultipleChoice" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaForMultipleChoice"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TFRobertaForMultipleChoice </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFRobertaForMultipleChoice"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TFRobertaForMultipleChoice</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TFRobertaForMultipleChoice" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFRobertaForMultipleChoice"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> 
<a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/modeling_tf_roberta.py#L1402" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRobertaForMultipleChoice.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaForMultipleChoice.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaConfig">RobertaConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Roberta Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads etc.)</p> <p>This model is also a <a href="https://www.tensorflow.org/api_docs/python/tf/keras/Model" rel="nofollow">tf.keras.Model</a> subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and behavior.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>TF 2.0 models accept two formats as inputs:</p> <ul><li>having all inputs as keyword arguments (like PyTorch models), or</li> <li>having all inputs as a list, tuple or dict in the first positional argument.</li></ul> <p>This second option is useful when using the <code>tf.keras.Model.fit</code> method, which currently requires having all the tensors in the first argument of the model call function: <code>model(inputs)</code>.</p> <p>If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument:</p> <ul><li>a single Tensor with <code>input_ids</code> only and nothing else: <code>model(input_ids)</code></li> <li>a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: <code>model([input_ids, attention_mask])</code> or <code>model([input_ids, attention_mask, token_type_ids])</code></li> <li>a dictionary with one or several input Tensors associated with the input names given in the docstring: <code>model({&quot;input_ids&quot;: input_ids, &quot;token_type_ids&quot;: token_type_ids})</code></li></ul></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFRobertaForMultipleChoice.call"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>call</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFRobertaForMultipleChoice.call" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFRobertaForMultipleChoice.call"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 
1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/modeling_tf_roberta.py#L1426" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60">: 
typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">training<span class="opacity-60">: typing.Optional[bool] = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput" >transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput</a> or <code>tuple(tf.Tensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRobertaForMultipleChoice.call.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaForMultipleChoice.call.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaTokenizer">RobertaTokenizer</a>. 
See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRobertaForMultipleChoice.call.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaForMultipleChoice.call.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRobertaForMultipleChoice.call.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaForMultipleChoice.call.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRobertaForMultipleChoice.call.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaForMultipleChoice.call.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRobertaForMultipleChoice.call.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaForMultipleChoice.call.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRobertaForMultipleChoice.call.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaForMultipleChoice.call.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, num_choices, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRobertaForMultipleChoice.call.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaForMultipleChoice.call.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRobertaForMultipleChoice.call.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaForMultipleChoice.call.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRobertaForMultipleChoice.call.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaForMultipleChoice.call.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRobertaForMultipleChoice.call.training" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaForMultipleChoice.call.training"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRobertaForMultipleChoice.call.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.TFRobertaForMultipleChoice.call.labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>tf.Tensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the multiple choice classification loss. Indices should be in <code>[0, ..., num_choices]</code> where <code>num_choices</code> is the size of the second dimension of the input tensors. (See <code>input_ids</code> above)<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.TFRobertaForMultipleChoice.call.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput" >transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput</a> or <code>tuple(tf.Tensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput" >transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaConfig" >RobertaConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <em>(batch_size, )</em>, <em>optional</em>, returned when <code>labels</code> is provided) — Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, num_choices)</code>) — <em>num_choices</em> is the second dimension of the input tensors. 
(see <em>input_ids</em> above).</p> <p>Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attention weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.TFRobertaForMultipleChoice">TFRobertaForMultipleChoice</a> forward method overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for the forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this, since the former takes care of running the pre- and post-processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer, TFRobertaForMultipleChoice <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&quot;roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = 
TFRobertaForMultipleChoice.from_pretrained(<span class="hljs-string">&quot;roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice0 = <span class="hljs-string">&quot;It is eaten with a fork and a knife.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice1 = <span class="hljs-string">&quot;It is eaten while held in the hand.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors=<span class="hljs-string">&quot;tf&quot;</span>, padding=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = {k: tf.expand_dims(v, <span class="hljs-number">0</span>) <span class="hljs-keyword">for</span> k, v <span class="hljs-keyword">in</span> encoding.items()} <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-comment"># batch size is 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># the linear classifier still needs to be trained</span> <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.TFRobertaForTokenClassification" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaForTokenClassification"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TFRobertaForTokenClassification </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFRobertaForTokenClassification"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" 
d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TFRobertaForTokenClassification</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TFRobertaForTokenClassification" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFRobertaForTokenClassification"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/modeling_tf_roberta.py#L1524" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRobertaForTokenClassification.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaForTokenClassification.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a 
href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaConfig">RobertaConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>RoBERTa Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)</p> <p>This model is also a <a href="https://www.tensorflow.org/api_docs/python/tf/keras/Model" rel="nofollow">tf.keras.Model</a> subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>TF 2.0 models accepts two formats as inputs:</p> <ul><li>having all inputs as keyword arguments (like PyTorch models), or</li> <li>having all inputs as a list, tuple or dict in the first positional arguments.</li></ul> <p>This second option is useful when using <code>tf.keras.Model.fit</code> method which currently requires having all the tensors in the first argument of the model call function: <code>model(inputs)</code>.</p> <p>If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :</p> <ul><li>a single Tensor with <code>input_ids</code> only and nothing else: <code>model(inputs_ids)</code></li> <li>a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: <code>model([input_ids, attention_mask])</code> or <code>model([input_ids, attention_mask, token_type_ids])</code></li> <li>a dictionary with one or several input Tensors associated to the input names given in the docstring: <code>model({&quot;input_ids&quot;: input_ids, &quot;token_type_ids&quot;: token_type_ids})</code></li></ul></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFRobertaForTokenClassification.call"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" 
clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>call</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFRobertaForTokenClassification.call" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFRobertaForTokenClassification.call"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/modeling_tf_roberta.py#L1542" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma 
cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">training<span class="opacity-60">: typing.Optional[bool] = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFTokenClassifierOutput" >transformers.modeling_tf_outputs.TFTokenClassifierOutput</a> or <code>tuple(tf.Tensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRobertaForTokenClassification.call.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaForTokenClassification.call.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 
0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaTokenizer">RobertaTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRobertaForTokenClassification.call.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaForTokenClassification.call.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRobertaForTokenClassification.call.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaForTokenClassification.call.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRobertaForTokenClassification.call.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaForTokenClassification.call.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRobertaForTokenClassification.call.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaForTokenClassification.call.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRobertaForTokenClassification.call.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaForTokenClassification.call.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRobertaForTokenClassification.call.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaForTokenClassification.call.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRobertaForTokenClassification.call.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaForTokenClassification.call.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRobertaForTokenClassification.call.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaForTokenClassification.call.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRobertaForTokenClassification.call.training" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaForTokenClassification.call.training"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRobertaForTokenClassification.call.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.TFRobertaForTokenClassification.call.labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the token classification loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.TFRobertaForTokenClassification.call.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFTokenClassifierOutput" >transformers.modeling_tf_outputs.TFTokenClassifierOutput</a> or <code>tuple(tf.Tensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFTokenClassifierOutput" >transformers.modeling_tf_outputs.TFTokenClassifierOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaConfig" >RobertaConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(n,)</code>, <em>optional</em>, where n is the number of unmasked labels, returned when <code>labels</code> is provided) — Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.num_labels)</code>) — Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the 
weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.TFRobertaForTokenClassification">TFRobertaForTokenClassification</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer, TFRobertaForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&quot;roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFRobertaForTokenClassification.from_pretrained(<span class="hljs-string">&quot;roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = inputs[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>inputs[<span class="hljs-string">&quot;labels&quot;</span>] = tf.reshape( <span class="hljs-meta">... 
</span> tf.constant([<span class="hljs-number">1</span>] * tf.size(input_ids).numpy()), (-<span class="hljs-number">1</span>, tf.size(input_ids)) <span class="hljs-meta">&gt;&gt;&gt; </span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.TFRobertaForQuestionAnswering" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaForQuestionAnswering"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TFRobertaForQuestionAnswering </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFRobertaForQuestionAnswering"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TFRobertaForQuestionAnswering</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TFRobertaForQuestionAnswering" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFRobertaForQuestionAnswering"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 
0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/modeling_tf_roberta.py#L1614" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRobertaForQuestionAnswering.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaForQuestionAnswering.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaConfig">RobertaConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>RoBERTa Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute <code>span start logits</code> and <code>span end logits</code>).</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>. 
Check the superclass documentation for the generic methods the library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads, etc.).</p> <p>This model is also a <a href="https://www.tensorflow.org/api_docs/python/tf/keras/Model" rel="nofollow">tf.keras.Model</a> subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and behavior.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>TF 2.0 models accept two formats as inputs:</p> <ul><li>having all inputs as keyword arguments (like PyTorch models), or</li> <li>having all inputs as a list, tuple or dict in the first positional argument.</li></ul> <p>This second option is useful when using the <code>tf.keras.Model.fit</code> method, which currently requires having all the tensors in the first argument of the model call function: <code>model(inputs)</code>.</p> <p>If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument:</p> <ul><li>a single Tensor with <code>input_ids</code> only and nothing else: <code>model(input_ids)</code></li> <li>a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: <code>model([input_ids, attention_mask])</code> or <code>model([input_ids, attention_mask, token_type_ids])</code></li> <li>a dictionary with one or several input Tensors associated with the input names given in the docstring: <code>model({&quot;input_ids&quot;: input_ids, &quot;token_type_ids&quot;: token_type_ids})</code></li></ul></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFRobertaForQuestionAnswering.call"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>call</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFRobertaForQuestionAnswering.call" class="header-link invisible with-hover:group-hover:visible pr-2"
href="#transformers.TFRobertaForQuestionAnswering.call"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/modeling_tf_roberta.py#L1627" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black 
hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">start_positions<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">end_positions<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">training<span class="opacity-60">: typing.Optional[bool] = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput" >transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput</a> or <code>tuple(tf.Tensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRobertaForQuestionAnswering.call.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaForQuestionAnswering.call.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaTokenizer">RobertaTokenizer</a>. 
See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRobertaForQuestionAnswering.call.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaForQuestionAnswering.call.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRobertaForQuestionAnswering.call.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaForQuestionAnswering.call.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRobertaForQuestionAnswering.call.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaForQuestionAnswering.call.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRobertaForQuestionAnswering.call.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaForQuestionAnswering.call.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRobertaForQuestionAnswering.call.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaForQuestionAnswering.call.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRobertaForQuestionAnswering.call.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaForQuestionAnswering.call.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRobertaForQuestionAnswering.call.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaForQuestionAnswering.call.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRobertaForQuestionAnswering.call.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaForQuestionAnswering.call.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRobertaForQuestionAnswering.call.training" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaForQuestionAnswering.call.training"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRobertaForQuestionAnswering.call.start_positions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.TFRobertaForQuestionAnswering.call.start_positions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>start_positions</strong> (<code>tf.Tensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). Position outside of the sequence are not taken into account for computing the loss.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRobertaForQuestionAnswering.call.end_positions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRobertaForQuestionAnswering.call.end_positions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>end_positions</strong> (<code>tf.Tensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). 
Position outside of the sequence are not taken into account for computing the loss.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.TFRobertaForQuestionAnswering.call.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput" >transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput</a> or <code>tuple(tf.Tensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput" >transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaConfig" >RobertaConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, )</code>, <em>optional</em>, returned when <code>start_positions</code> and <code>end_positions</code> are provided) — Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.</p> </li> <li> <p><strong>start_logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) — Span-start scores (before SoftMax).</p> </li> <li> <p><strong>end_logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) — Span-end scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.TFRobertaForQuestionAnswering">TFRobertaForQuestionAnswering</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the 
latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer, TFRobertaForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&quot;roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFRobertaForQuestionAnswering.from_pretrained(<span class="hljs-string">&quot;roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>question, text = <span class="hljs-string">&quot;Who was Jim Henson?&quot;</span>, <span class="hljs-string">&quot;Jim Henson was a nice puppet&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>input_dict = tokenizer(question, text, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_dict) <span class="hljs-meta">&gt;&gt;&gt; </span>start_logits = outputs.start_logits <span class="hljs-meta">&gt;&gt;&gt; </span>end_logits = outputs.end_logits <span class="hljs-meta">&gt;&gt;&gt; </span>all_tokens = tokenizer.convert_ids_to_tokens(input_dict[<span class="hljs-string">&quot;input_ids&quot;</span>].numpy()[<span class="hljs-number">0</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>answer = <span class="hljs-string">&quot; &quot;</span>.join(all_tokens[tf.math.argmax(start_logits, <span class="hljs-number">1</span>)[<span class="hljs-number">0</span>] : tf.math.argmax(end_logits, <span class="hljs-number">1</span>)[<span class="hljs-number">0</span>] + <span class="hljs-number">1</span>])<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.FlaxRobertaModel" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRobertaModel"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path 
d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>FlaxRobertaModel </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxRobertaModel"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">FlaxRobertaModel</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.FlaxRobertaModel" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxRobertaModel"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/modeling_flax_roberta.py#L727" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60">: RobertaConfig</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black 
hover:text-white dark:hover:bg-white dark:hover:text-black">input_shape<span class="opacity-60">: typing.Tuple = (1, 1)</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">seed<span class="opacity-60">: int = 0</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dtype<span class="opacity-60">: dtype = &lt;class &#39;jax._src.numpy.lax_numpy.float32&#39;&gt;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxRobertaModel.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRobertaModel.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaConfig">RobertaConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>The bare RoBERTa Model transformer outputting raw hidden-states without any specific head on top.</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel">FlaxPreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its models (such as downloading, saving and converting weights from PyTorch models).</p> <p>This model is also a Flax Linen <a href="https://flax.readthedocs.io/en/latest/flax.linen.html#module" rel="nofollow">flax.linen.Module</a> subclass. 
Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior.</p> <p>Finally, this model supports inherent JAX features such as:</p> <ul><li><a href="https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit" rel="nofollow">Just-In-Time (JIT) compilation</a></li> <li><a href="https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation" rel="nofollow">Automatic Differentiation</a></li> <li><a href="https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap" rel="nofollow">Vectorization</a></li> <li><a href="https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap" rel="nofollow">Parallelization</a></li></ul> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxRobertaPreTrainedModel.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.FlaxRobertaPreTrainedModel.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxRobertaPreTrainedModel.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/modeling_flax_roberta.py#L609" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> 
<span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">params<span class="opacity-60">: dict = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dropout_rng<span class="opacity-60">: PRNGKey = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">train<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPooling" >transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPooling</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxRobertaPreTrainedModel.__call__.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRobertaPreTrainedModel.__call__.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxRobertaPreTrainedModel.__call__.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRobertaPreTrainedModel.__call__.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxRobertaPreTrainedModel.__call__.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRobertaPreTrainedModel.__call__.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxRobertaPreTrainedModel.__call__.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRobertaPreTrainedModel.__call__.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxRobertaPreTrainedModel.__call__.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRobertaPreTrainedModel.__call__.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <code>optional) -- Mask to nullify selected heads of the attention modules. Mask values selected in </code>[0, 1]`:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxRobertaPreTrainedModel.__call__.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRobertaPreTrainedModel.__call__.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.FlaxRobertaPreTrainedModel.__call__.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPooling" 
>transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPooling</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPooling" >transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPooling</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaConfig" >RobertaConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) — Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>pooler_output</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, hidden_size)</code>) — Last layer hidden-state of the first token of the sequence (classification token) further processed by a Linear layer and a Tanh activation function. The Linear layer weights are trained from the next sentence prediction (classification) objective during pretraining.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <code>FlaxRobertaPreTrainedModel</code>forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 
32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer, FlaxRobertaModel <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&quot;roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxRobertaModel.from_pretrained(<span class="hljs-string">&quot;roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;jax&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.FlaxRobertaForMaskedLM" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRobertaForMaskedLM"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>FlaxRobertaForMaskedLM </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxRobertaForMaskedLM"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path 
class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">FlaxRobertaForMaskedLM</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.FlaxRobertaForMaskedLM" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxRobertaForMaskedLM"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/modeling_flax_roberta.py#L789" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60">: RobertaConfig</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_shape<span class="opacity-60">: typing.Tuple = (1, 1)</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">seed<span class="opacity-60">: int = 0</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dtype<span class="opacity-60">: dtype = &lt;class &#39;jax._src.numpy.lax_numpy.float32&#39;&gt;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxRobertaForMaskedLM.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRobertaForMaskedLM.config"><span><svg class="text-smd" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaConfig">RobertaConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>RoBERTa Model with a <code>language modeling</code> head on top.</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel">FlaxPreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models)</p> <p>This model is also a Flax Linen <a href="https://flax.readthedocs.io/en/latest/flax.linen.html#module" rel="nofollow">flax.linen.Module</a> subclass. 
Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior.</p> <p>Finally, this model supports inherent JAX features such as:</p> <ul><li><a href="https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit" rel="nofollow">Just-In-Time (JIT) compilation</a></li> <li><a href="https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation" rel="nofollow">Automatic Differentiation</a></li> <li><a href="https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap" rel="nofollow">Vectorization</a></li> <li><a href="https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap" rel="nofollow">Parallelization</a></li></ul> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxRobertaPreTrainedModel.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.FlaxRobertaPreTrainedModel.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxRobertaPreTrainedModel.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/modeling_flax_roberta.py#L609" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> 
<span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">params<span class="opacity-60">: dict = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dropout_rng<span class="opacity-60">: PRNGKey = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">train<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPooling" >transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPooling</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxRobertaPreTrainedModel.__call__.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRobertaPreTrainedModel.__call__.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxRobertaPreTrainedModel.__call__.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRobertaPreTrainedModel.__call__.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxRobertaPreTrainedModel.__call__.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRobertaPreTrainedModel.__call__.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxRobertaPreTrainedModel.__call__.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRobertaPreTrainedModel.__call__.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxRobertaPreTrainedModel.__call__.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRobertaPreTrainedModel.__call__.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <code>optional) -- Mask to nullify selected heads of the attention modules. Mask values selected in </code>[0, 1]`:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxRobertaPreTrainedModel.__call__.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRobertaPreTrainedModel.__call__.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.FlaxRobertaPreTrainedModel.__call__.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPooling" 
>transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPooling</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPooling" >transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPooling</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaConfig" >RobertaConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) — Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>pooler_output</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, hidden_size)</code>) — Last layer hidden-state of the first token of the sequence (classification token) further processed by a Linear layer and a Tanh activation function. The Linear layer weights are trained from the next sentence prediction (classification) objective during pretraining.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <code>FlaxRobertaPreTrainedModel</code>forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 
32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer, FlaxRobertaForMaskedLM <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&quot;roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxRobertaForMaskedLM.from_pretrained(<span class="hljs-string">&quot;roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;The capital of France is [MASK].&quot;</span>, return_tensors=<span class="hljs-string">&quot;jax&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.FlaxRobertaForSequenceClassification" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRobertaForSequenceClassification"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>FlaxRobertaForSequenceClassification </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxRobertaForSequenceClassification"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 
3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">FlaxRobertaForSequenceClassification</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.FlaxRobertaForSequenceClassification" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxRobertaForSequenceClassification"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/modeling_flax_roberta.py#L856" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60">: RobertaConfig</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_shape<span class="opacity-60">: typing.Tuple = (1, 1)</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">seed<span class="opacity-60">: int = 0</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dtype<span class="opacity-60">: dtype = &lt;class &#39;jax._src.numpy.lax_numpy.float32&#39;&gt;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxRobertaForSequenceClassification.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full" href="#transformers.FlaxRobertaForSequenceClassification.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaConfig">RobertaConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Roberta Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel">FlaxPreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models)</p> <p>This model is also a Flax Linen <a href="https://flax.readthedocs.io/en/latest/flax.linen.html#module" rel="nofollow">flax.linen.Module</a> subclass. 
Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior.</p> <p>Finally, this model supports inherent JAX features such as:</p> <ul><li><a href="https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit" rel="nofollow">Just-In-Time (JIT) compilation</a></li> <li><a href="https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation" rel="nofollow">Automatic Differentiation</a></li> <li><a href="https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap" rel="nofollow">Vectorization</a></li> <li><a href="https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap" rel="nofollow">Parallelization</a></li></ul> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxRobertaPreTrainedModel.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.FlaxRobertaPreTrainedModel.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxRobertaPreTrainedModel.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/modeling_flax_roberta.py#L609" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> 
<span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">params<span class="opacity-60">: dict = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dropout_rng<span class="opacity-60">: PRNGKey = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">train<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSequenceClassifierOutput" >transformers.modeling_flax_outputs.FlaxSequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxRobertaPreTrainedModel.__call__.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRobertaPreTrainedModel.__call__.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxRobertaPreTrainedModel.__call__.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRobertaPreTrainedModel.__call__.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxRobertaPreTrainedModel.__call__.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRobertaPreTrainedModel.__call__.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxRobertaPreTrainedModel.__call__.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRobertaPreTrainedModel.__call__.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxRobertaPreTrainedModel.__call__.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRobertaPreTrainedModel.__call__.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxRobertaPreTrainedModel.__call__.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRobertaPreTrainedModel.__call__.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.FlaxRobertaPreTrainedModel.__call__.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSequenceClassifierOutput" 
>transformers.modeling_flax_outputs.FlaxSequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSequenceClassifierOutput" >transformers.modeling_flax_outputs.FlaxSequenceClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaConfig" >RobertaConfig</a>) and inputs.</p> <ul> <li> <p><strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, config.num_labels)</code>) — Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attention weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <code>FlaxRobertaPreTrainedModel</code> forward method overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for the forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full 
transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer, FlaxRobertaForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&quot;roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxRobertaForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;jax&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.FlaxRobertaForMultipleChoice" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRobertaForMultipleChoice"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>FlaxRobertaForMultipleChoice </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxRobertaForMultipleChoice"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span 
class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">FlaxRobertaForMultipleChoice</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.FlaxRobertaForMultipleChoice" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxRobertaForMultipleChoice"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/modeling_flax_roberta.py#L933" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60">: RobertaConfig</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_shape<span class="opacity-60">: typing.Tuple = (1, 1)</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">seed<span class="opacity-60">: int = 0</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dtype<span class="opacity-60">: dtype = &lt;class &#39;jax._src.numpy.lax_numpy.float32&#39;&gt;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxRobertaForMultipleChoice.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRobertaForMultipleChoice.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 
0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaConfig">RobertaConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Roberta Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel">FlaxPreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its models (such as downloading, saving and converting weights from PyTorch models).</p> <p>This model is also a Flax Linen <a href="https://flax.readthedocs.io/en/latest/flax.linen.html#module" rel="nofollow">flax.linen.Module</a> subclass. Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior.</p> <p>Finally, this model supports inherent JAX features such as:</p> <ul><li><a href="https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit" rel="nofollow">Just-In-Time (JIT) compilation</a></li> <li><a href="https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation" rel="nofollow">Automatic Differentiation</a></li> <li><a href="https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap" rel="nofollow">Vectorization</a></li> <li><a href="https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap" rel="nofollow">Parallelization</a></li></ul> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxRobertaPreTrainedModel.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 
21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.FlaxRobertaPreTrainedModel.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxRobertaPreTrainedModel.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/modeling_flax_roberta.py#L609" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">params<span class="opacity-60">: dict = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dropout_rng<span class="opacity-60">: PRNGKey = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">train<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-default"><span 
class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxMultipleChoiceModelOutput" >transformers.modeling_flax_outputs.FlaxMultipleChoiceModelOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxRobertaPreTrainedModel.__call__.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRobertaPreTrainedModel.__call__.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, num_choices, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. 
See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxRobertaPreTrainedModel.__call__.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRobertaPreTrainedModel.__call__.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxRobertaPreTrainedModel.__call__.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRobertaPreTrainedModel.__call__.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxRobertaPreTrainedModel.__call__.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRobertaPreTrainedModel.__call__.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxRobertaPreTrainedModel.__call__.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRobertaPreTrainedModel.__call__.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxRobertaPreTrainedModel.__call__.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRobertaPreTrainedModel.__call__.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.FlaxRobertaPreTrainedModel.__call__.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxMultipleChoiceModelOutput" 
>transformers.modeling_flax_outputs.FlaxMultipleChoiceModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxMultipleChoiceModelOutput" >transformers.modeling_flax_outputs.FlaxMultipleChoiceModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaConfig" >RobertaConfig</a>) and inputs.</p> <ul> <li> <p><strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, num_choices)</code>) — <em>num_choices</em> is the second dimension of the input tensors (see <em>input_ids</em> above).</p> <p>Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attention weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <code>FlaxRobertaPreTrainedModel</code> forward method overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for the forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 
leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer, FlaxRobertaForMultipleChoice <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&quot;roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxRobertaForMultipleChoice.from_pretrained(<span class="hljs-string">&quot;roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice0 = <span class="hljs-string">&quot;It is eaten with a fork and a knife.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice1 = <span class="hljs-string">&quot;It is eaten while held in the hand.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors=<span class="hljs-string">&quot;jax&quot;</span>, padding=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**{k: v[<span class="hljs-literal">None</span>, :] <span class="hljs-keyword">for</span> k, v <span class="hljs-keyword">in</span> encoding.items()}) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.FlaxRobertaForTokenClassification" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRobertaForTokenClassification"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>FlaxRobertaForTokenClassification </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxRobertaForTokenClassification"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" 
focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">FlaxRobertaForTokenClassification</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.FlaxRobertaForTokenClassification" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxRobertaForTokenClassification"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/modeling_flax_roberta.py#L1010" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60">: RobertaConfig</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_shape<span class="opacity-60">: typing.Tuple = (1, 1)</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">seed<span class="opacity-60">: int = 0</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dtype<span class="opacity-60">: dtype = &lt;class &#39;jax._src.numpy.lax_numpy.float32&#39;&gt;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 
items-start"><a id="transformers.FlaxRobertaForTokenClassification.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRobertaForTokenClassification.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaConfig">RobertaConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Roberta Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel">FlaxPreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models)</p> <p>This model is also a Flax Linen <a href="https://flax.readthedocs.io/en/latest/flax.linen.html#module" rel="nofollow">flax.linen.Module</a> subclass. 
Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior.</p> <p>Finally, this model supports inherent JAX features such as:</p> <ul><li><a href="https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit" rel="nofollow">Just-In-Time (JIT) compilation</a></li> <li><a href="https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation" rel="nofollow">Automatic Differentiation</a></li> <li><a href="https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap" rel="nofollow">Vectorization</a></li> <li><a href="https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap" rel="nofollow">Parallelization</a></li></ul> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxRobertaPreTrainedModel.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.FlaxRobertaPreTrainedModel.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxRobertaPreTrainedModel.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/modeling_flax_roberta.py#L609" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> 
<span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">params<span class="opacity-60">: dict = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dropout_rng<span class="opacity-60">: PRNGKey = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">train<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxTokenClassifierOutput" >transformers.modeling_flax_outputs.FlaxTokenClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxRobertaPreTrainedModel.__call__.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRobertaPreTrainedModel.__call__.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxRobertaPreTrainedModel.__call__.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRobertaPreTrainedModel.__call__.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxRobertaPreTrainedModel.__call__.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRobertaPreTrainedModel.__call__.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxRobertaPreTrainedModel.__call__.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRobertaPreTrainedModel.__call__.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxRobertaPreTrainedModel.__call__.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRobertaPreTrainedModel.__call__.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxRobertaPreTrainedModel.__call__.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRobertaPreTrainedModel.__call__.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.FlaxRobertaPreTrainedModel.__call__.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxTokenClassifierOutput" 
>transformers.modeling_flax_outputs.FlaxTokenClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxTokenClassifierOutput" >transformers.modeling_flax_outputs.FlaxTokenClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaConfig" >RobertaConfig</a>) and inputs.</p> <ul> <li> <p><strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, config.num_labels)</code>) — Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attention weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <code>FlaxRobertaPreTrainedModel</code> forward method overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for the forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 
translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer, FlaxRobertaForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&quot;roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxRobertaForTokenClassification.from_pretrained(<span class="hljs-string">&quot;roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;jax&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.FlaxRobertaForQuestionAnswering" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRobertaForQuestionAnswering"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>FlaxRobertaForQuestionAnswering </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxRobertaForQuestionAnswering"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span 
class="font-medium">transformers.</span><span class="font-semibold">FlaxRobertaForQuestionAnswering</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.FlaxRobertaForQuestionAnswering" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxRobertaForQuestionAnswering"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/modeling_flax_roberta.py#L1082" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60">: RobertaConfig</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_shape<span class="opacity-60">: typing.Tuple = (1, 1)</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">seed<span class="opacity-60">: int = 0</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dtype<span class="opacity-60">: dtype = &lt;class &#39;jax._src.numpy.lax_numpy.float32&#39;&gt;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxRobertaForQuestionAnswering.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRobertaForQuestionAnswering.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 
1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaConfig">RobertaConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Roberta Model with a span classification head on top for extractive question-answering tasks like SQuAD (linear layers on top of the hidden-states output to compute <code>span start logits</code> and <code>span end logits</code>).</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel">FlaxPreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its models (such as downloading, saving and converting weights from PyTorch models).</p> <p>This model is also a Flax Linen <a href="https://flax.readthedocs.io/en/latest/flax.linen.html#module" rel="nofollow">flax.linen.Module</a> subclass. Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior.</p> <p>Finally, this model supports inherent JAX features such as:</p> <ul><li><a href="https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit" rel="nofollow">Just-In-Time (JIT) compilation</a></li> <li><a href="https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation" rel="nofollow">Automatic Differentiation</a></li> <li><a href="https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap" rel="nofollow">Vectorization</a></li> <li><a href="https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap" rel="nofollow">Parallelization</a></li></ul> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxRobertaPreTrainedModel.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 
22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.FlaxRobertaPreTrainedModel.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxRobertaPreTrainedModel.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/modeling_flax_roberta.py#L609" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">params<span class="opacity-60">: dict = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dropout_rng<span class="opacity-60">: PRNGKey = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">train<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] 
= None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxQuestionAnsweringModelOutput" >transformers.modeling_flax_outputs.FlaxQuestionAnsweringModelOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxRobertaPreTrainedModel.__call__.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRobertaPreTrainedModel.__call__.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. 
See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxRobertaPreTrainedModel.__call__.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRobertaPreTrainedModel.__call__.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxRobertaPreTrainedModel.__call__.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRobertaPreTrainedModel.__call__.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxRobertaPreTrainedModel.__call__.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRobertaPreTrainedModel.__call__.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxRobertaPreTrainedModel.__call__.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRobertaPreTrainedModel.__call__.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxRobertaPreTrainedModel.__call__.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRobertaPreTrainedModel.__call__.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.FlaxRobertaPreTrainedModel.__call__.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxQuestionAnsweringModelOutput" >transformers.modeling_flax_outputs.FlaxQuestionAnsweringModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxQuestionAnsweringModelOutput" >transformers.modeling_flax_outputs.FlaxQuestionAnsweringModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaConfig" >RobertaConfig</a>) and inputs.</p> <ul> <li> <p><strong>start_logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) — Span-start scores (before SoftMax).</p> </li> <li> <p><strong>end_logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) — Span-end scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, 
hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attention weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <code>FlaxRobertaPreTrainedModel</code> forward method overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for the forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer, FlaxRobertaForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&quot;roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxRobertaForQuestionAnswering.from_pretrained(<span class="hljs-string">&quot;roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>question, text = <span class="hljs-string">&quot;Who was Jim Henson?&quot;</span>, <span class="hljs-string">&quot;Jim Henson was a nice puppet&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(question, text, return_tensors=<span class="hljs-string">&quot;jax&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>start_scores = outputs.start_logits <span class="hljs-meta">&gt;&gt;&gt; </span>end_scores = 
outputs.end_logits<!-- HTML_TAG_END --></pre></div></div></div> <script type="module" data-hydrate="1fm9nci"> import { start } from "/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"; start({ target: document.querySelector('[data-hydrate="1fm9nci"]').parentNode, paths: {"base":"/docs/transformers/pr_16143/en","assets":"/docs/transformers/pr_16143/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"), import("/docs/transformers/pr_16143/en/_app/pages/model_doc/roberta.mdx-002140a3.js") ], params: {} } }); </script>
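<p>As a follow-up to the question-answering example above, the sketch below shows one possible way to turn <code>start_scores</code> and <code>end_scores</code> into an answer string. It is a minimal illustration that reuses the <code>tokenizer</code>, <code>inputs</code> and outputs from that example; note that the plain <code>roberta-base</code> checkpoint has a randomly initialized question-answering head, so the decoded span is only meaningful with a checkpoint fine-tuned on a QA dataset such as SQuAD.</p> <pre>
&gt;&gt;&gt; import jax.numpy as jnp

&gt;&gt;&gt; # pick the most likely start and end positions from the span logits (batch index 0)
&gt;&gt;&gt; start_index = int(jnp.argmax(start_scores, axis=-1)[0])
&gt;&gt;&gt; end_index = int(jnp.argmax(end_scores, axis=-1)[0])

&gt;&gt;&gt; # slice the corresponding input ids and decode them back to text
&gt;&gt;&gt; answer_ids = inputs["input_ids"][0, start_index : end_index + 1]
&gt;&gt;&gt; answer = tokenizer.decode(answer_ids.tolist(), skip_special_tokens=True)
</pre>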
211
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en
hf_public_repos/doc-build-dev/transformers/pr_16143/en/model_doc/maskformer.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;maskformer&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;overview&quot;,&quot;title&quot;:&quot;Overview&quot;},{&quot;local&quot;:&quot;transformers.models.maskformer.modeling_maskformer.MaskFormerModelOutput&quot;,&quot;title&quot;:&quot;MaskFormer specific outputs&quot;},{&quot;local&quot;:&quot;transformers.MaskFormerConfig&quot;,&quot;title&quot;:&quot;MaskFormerConfig&quot;},{&quot;local&quot;:&quot;transformers.MaskFormerFeatureExtractor&quot;,&quot;title&quot;:&quot;MaskFormerFeatureExtractor&quot;},{&quot;local&quot;:&quot;transformers.MaskFormerModel&quot;,&quot;title&quot;:&quot;MaskFormerModel&quot;},{&quot;local&quot;:&quot;transformers.MaskFormerForInstanceSegmentation&quot;,&quot;title&quot;:&quot;MaskFormerForInstanceSegmentation&quot;}],&quot;title&quot;:&quot;MaskFormer&quot;}" data-svelte="svelte-1phssyn"> <link rel="stylesheet" href="/docs/transformers/pr_16143/en/_app/assets/pages/__layout.svelte-a5c8879b.css"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/vendor-4833417e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/paths-4b3c6e7e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/model_doc/maskformer.mdx-07585051.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Tip-fffd6df1.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Docstring-4f315ed9.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/IconCopyLink-4b81c553.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CodeBlock-6a3d1b46.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CopyButton-dacfbfaf.js"> <h1 class="relative group"><a id="maskformer" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#maskformer"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>MaskFormer </span></h1> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>This is a recently introduced model so the API hasn’t been tested extensively. There may be some bugs or slight breaking changes to fix it in the future. 
If you see something strange, file a <a href="https://github.com/huggingface/transformers/issues/new?assignees=&labels=&template=bug-report.md&title" rel="nofollow">Github Issue</a>.</p></div> <h2 class="relative group"><a id="overview" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#overview"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Overview </span></h2> <p>The MaskFormer model was proposed in <a href="https://arxiv.org/abs/2107.06278" rel="nofollow">Per-Pixel Classification is Not All You Need for Semantic Segmentation</a> by Bowen Cheng, Alexander G. Schwing, Alexander Kirillov. MaskFormer addresses semantic segmentation with a mask classification paradigm instead of performing classic pixel-level classification.</p> <p>The abstract from the paper is the following:</p> <p><em>Modern approaches typically formulate semantic segmentation as a per-pixel classification task, while instance-level segmentation is handled with an alternative mask classification. Our key insight: mask classification is sufficiently general to solve both semantic- and instance-level segmentation tasks in a unified manner using the exact same model, loss, and training procedure. Following this observation, we propose MaskFormer, a simple mask classification model which predicts a set of binary masks, each associated with a single global class label prediction. Overall, the proposed mask classification-based method simplifies the landscape of effective approaches to semantic and panoptic segmentation tasks and shows excellent empirical results. In particular, we observe that MaskFormer outperforms per-pixel classification baselines when the number of classes is large. Our mask classification-based method outperforms both current state-of-the-art semantic (55.6 mIoU on ADE20K) and panoptic segmentation (52.7 PQ on COCO) models.</em></p> <p>Tips:</p> <ul><li>MaskFormer’s Transformer decoder is identical to the decoder of <a href="detr">DETR</a>. During training, the authors of DETR did find it helpful to use auxiliary losses in the decoder, especially to help the model output the correct number of objects of each class. 
If you set the parameter <code>use_auxiliary_loss</code> of <a href="/docs/transformers/pr_16143/en/model_doc/maskformer#transformers.MaskFormerConfig">MaskFormerConfig</a> to <code>True</code>, then prediction feedforward neural networks and Hungarian losses are added after each decoder layer (with the FFNs sharing parameters).</li> <li>If you want to train the model in a distributed environment across multiple nodes, then one should update the <code>get_num_masks</code> function inside the <code>MaskFormerLoss</code> class of <code>modeling_maskformer.py</code>. When training on multiple nodes, this should be set to the average number of target masks across all nodes, as can be seen in the original implementation <a href="https://github.com/facebookresearch/MaskFormer/blob/da3e60d85fdeedcb31476b5edd7d328826ce56cc/mask_former/modeling/criterion.py#L169" rel="nofollow">here</a>.</li> <li>One can use <a href="/docs/transformers/pr_16143/en/model_doc/maskformer#transformers.MaskFormerFeatureExtractor">MaskFormerFeatureExtractor</a> to prepare images and optional targets for the model.</li> <li>To get the final segmentation, depending on the task, you can call <a href="/docs/transformers/pr_16143/en/model_doc/maskformer#transformers.MaskFormerFeatureExtractor.post_process_semantic_segmentation">post_process_semantic_segmentation()</a> or <a href="/docs/transformers/pr_16143/en/model_doc/maskformer#transformers.MaskFormerFeatureExtractor.post_process_panoptic_segmentation">post_process_panoptic_segmentation()</a>. Both tasks can be solved with the output of <a href="/docs/transformers/pr_16143/en/model_doc/maskformer#transformers.MaskFormerForInstanceSegmentation">MaskFormerForInstanceSegmentation</a>; panoptic segmentation additionally requires an <code>is_thing_map</code> that tells which instances must be merged together (see the usage sketch below).</li></ul> <p>The figure below illustrates the architecture of MaskFormer. Taken from the <a href="https://arxiv.org/abs/2107.06278" rel="nofollow">original paper</a>.</p> <img width="600" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/maskformer_architecture.png"> <p>This model was contributed by <a href="https://huggingface.co/francesco" rel="nofollow">francesco</a>.
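</p> <p>As a quick illustration of the points above, the snippet below sketches a typical inference flow: images are prepared with the feature extractor, passed through <code>MaskFormerForInstanceSegmentation</code>, and the raw outputs are post-processed into a segmentation map. The checkpoint name and the post-processing call are shown as an example of the workflow rather than a definitive reference; exact return types may vary between library versions.</p> <pre><code class="language-python">import requests
import torch
from PIL import Image

from transformers import MaskFormerFeatureExtractor, MaskFormerForInstanceSegmentation

# an example image from the COCO validation set
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

# any MaskFormer checkpoint from the Hub can be used here
feature_extractor = MaskFormerFeatureExtractor.from_pretrained("facebook/maskformer-swin-base-ade")
model = MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-base-ade")

# prepare the image for the model
inputs = feature_extractor(images=image, return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)

# class_queries_logits has shape (batch_size, num_queries, num_labels + 1)
# masks_queries_logits has shape (batch_size, num_queries, height, width)
class_queries_logits = outputs.class_queries_logits
masks_queries_logits = outputs.masks_queries_logits

# turn the raw outputs into a per-pixel semantic segmentation map
semantic_segmentation = feature_extractor.post_process_semantic_segmentation(outputs)
</code></pre> <p>For panoptic segmentation, <code>post_process_panoptic_segmentation()</code> can be called on the same outputs, together with an <code>is_thing_map</code> describing which labels correspond to instances that must be merged.</p> <p>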
The original code can be found <a href="https://github.com/facebookresearch/MaskFormer" rel="nofollow">here</a>.</p> <h2 class="relative group"><a id="transformers.models.maskformer.modeling_maskformer.MaskFormerModelOutput" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.maskformer.modeling_maskformer.MaskFormerModelOutput"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>MaskFormer specific outputs </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.models.maskformer.modeling_maskformer.MaskFormerModelOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.models.maskformer.modeling_maskformer.</span><span class="font-semibold">MaskFormerModelOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.models.maskformer.modeling_maskformer.MaskFormerModelOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.models.maskformer.modeling_maskformer.MaskFormerModelOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 
28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/maskformer/modeling_maskformer.py#L214" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_last_hidden_state<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pixel_decoder_last_hidden_state<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">transformer_decoder_last_hidden_state<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pixel_decoder_hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">transformer_decoder_hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.maskformer.modeling_maskformer.MaskFormerModelOutput.encoder_last_hidden_state" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.maskformer.modeling_maskformer.MaskFormerModelOutput.encoder_last_hidden_state"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid 
meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_channels, height, width)</code>) &#x2014; Last hidden states (final feature map) of the last stage of the encoder model (backbone).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.maskformer.modeling_maskformer.MaskFormerModelOutput.pixel_decoder_last_hidden_state" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.maskformer.modeling_maskformer.MaskFormerModelOutput.pixel_decoder_last_hidden_state"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pixel_decoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_channels, height, width)</code>) &#x2014; Last hidden states (final feature map) of the last stage of the pixel decoder model (FPN).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.maskformer.modeling_maskformer.MaskFormerModelOutput.transformer_decoder_last_hidden_state" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.maskformer.modeling_maskformer.MaskFormerModelOutput.transformer_decoder_last_hidden_state"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 
11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>transformer_decoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) &#x2014; Last hidden states (final feature map) of the last stage of the transformer decoder model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.maskformer.modeling_maskformer.MaskFormerModelOutput.encoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.maskformer.modeling_maskformer.MaskFormerModelOutput.encoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each stage) of shape <code>(batch_size, num_channels, height, width)</code>. 
Hidden-states (also called feature maps) of the encoder model at the output of each stage.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.maskformer.modeling_maskformer.MaskFormerModelOutput.pixel_decoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.maskformer.modeling_maskformer.MaskFormerModelOutput.pixel_decoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pixel_decoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each stage) of shape <code>(batch_size, num_channels, height, width)</code>. 
Hidden-states (also called feature maps) of the pixel decoder model at the output of each stage.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.maskformer.modeling_maskformer.MaskFormerModelOutput.transformer_decoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.maskformer.modeling_maskformer.MaskFormerModelOutput.transformer_decoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>transformer_decoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each stage) of shape <code>(batch_size, sequence_length, hidden_size)</code>. 
Hidden-states (also called feature maps) of the transformer decoder at the output of each stage.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.maskformer.modeling_maskformer.MaskFormerModelOutput.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.maskformer.modeling_maskformer.MaskFormerModelOutput.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> <code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> containing <code>encoder_hidden_states</code>, <code>pixel_decoder_hidden_states</code> and <code>decoder_hidden_states</code><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.maskformer.modeling_maskformer.MaskFormerModelOutput.attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.maskformer.modeling_maskformer.MaskFormerModelOutput.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. 
Attentions weights from Detr&#x2019;s decoder after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Class for outputs of <a href="/docs/transformers/pr_16143/en/model_doc/maskformer#transformers.MaskFormerModel">MaskFormerModel</a>. This class returns all the needed hidden states to compute the logits.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.models.maskformer.modeling_maskformer.MaskFormerForInstanceSegmentationOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.models.maskformer.modeling_maskformer.</span><span class="font-semibold">MaskFormerForInstanceSegmentationOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.models.maskformer.modeling_maskformer.MaskFormerForInstanceSegmentationOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.models.maskformer.modeling_maskformer.MaskFormerForInstanceSegmentationOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/maskformer/modeling_maskformer.py#L257" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">loss<span class="opacity-60">: typing.Optional[torch.FloatTensor] = 
None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">class_queries_logits<span class="opacity-60">: FloatTensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">masks_queries_logits<span class="opacity-60">: FloatTensor = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">auxiliary_logits<span class="opacity-60">: FloatTensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_last_hidden_state<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pixel_decoder_last_hidden_state<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">transformer_decoder_last_hidden_state<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pixel_decoder_hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">transformer_decoder_hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.maskformer.modeling_maskformer.MaskFormerForInstanceSegmentationOutput.loss" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.maskformer.modeling_maskformer.MaskFormerForInstanceSegmentationOutput.loss"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" 
role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>loss</strong> (<code>torch.Tensor</code>, <em>optional</em>) &#x2014; The computed loss, returned when labels are present.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.maskformer.modeling_maskformer.MaskFormerForInstanceSegmentationOutput.class_queries_logits" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.maskformer.modeling_maskformer.MaskFormerForInstanceSegmentationOutput.class_queries_logits"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>class_queries_logits</strong> (<code>torch.FloatTensor</code>) &#x2014; A tensor of shape <code>(batch_size, num_queries, height, width)</code> representing the proposed masks for each query.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.maskformer.modeling_maskformer.MaskFormerForInstanceSegmentationOutput.masks_queries_logits" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.maskformer.modeling_maskformer.MaskFormerForInstanceSegmentationOutput.masks_queries_logits"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 
0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>masks_queries_logits</strong> (<code>torch.FloatTensor</code>) &#x2014; A tensor of shape <code>(batch_size, num_queries, num_classes + 1)</code> representing the proposed classes for each query. Note the <code>+ 1</code> is needed because we incorporate the null class.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.maskformer.modeling_maskformer.MaskFormerForInstanceSegmentationOutput.encoder_last_hidden_state" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.maskformer.modeling_maskformer.MaskFormerForInstanceSegmentationOutput.encoder_last_hidden_state"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_channels, height, width)</code>) &#x2014; Last hidden states (final feature map) of the last stage of the encoder model (backbone).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.maskformer.modeling_maskformer.MaskFormerForInstanceSegmentationOutput.pixel_decoder_last_hidden_state" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.maskformer.modeling_maskformer.MaskFormerForInstanceSegmentationOutput.pixel_decoder_last_hidden_state"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pixel_decoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_channels, height, width)</code>) &#x2014; Last hidden states (final 
feature map) of the last stage of the pixel decoder model (FPN).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.maskformer.modeling_maskformer.MaskFormerForInstanceSegmentationOutput.transformer_decoder_last_hidden_state" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.maskformer.modeling_maskformer.MaskFormerForInstanceSegmentationOutput.transformer_decoder_last_hidden_state"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>transformer_decoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) &#x2014; Last hidden states (final feature map) of the last stage of the transformer decoder model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.maskformer.modeling_maskformer.MaskFormerForInstanceSegmentationOutput.encoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.maskformer.modeling_maskformer.MaskFormerForInstanceSegmentationOutput.encoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each stage) of shape <code>(batch_size, num_channels, height, width)</code>. 
Hidden-states (also called feature maps) of the encoder model at the output of each stage.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.maskformer.modeling_maskformer.MaskFormerForInstanceSegmentationOutput.pixel_decoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.maskformer.modeling_maskformer.MaskFormerForInstanceSegmentationOutput.pixel_decoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pixel_decoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each stage) of shape <code>(batch_size, num_channels, height, width)</code>. 
Hidden-states (also called feature maps) of the pixel decoder model at the output of each stage.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.maskformer.modeling_maskformer.MaskFormerForInstanceSegmentationOutput.transformer_decoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.maskformer.modeling_maskformer.MaskFormerForInstanceSegmentationOutput.transformer_decoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>transformer_decoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each stage) of shape <code>(batch_size, sequence_length, hidden_size)</code>. 
Hidden-states of the transformer decoder at the output of each stage.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.maskformer.modeling_maskformer.MaskFormerForInstanceSegmentationOutput.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.maskformer.modeling_maskformer.MaskFormerForInstanceSegmentationOutput.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> <code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> containing <code>encoder_hidden_states</code>, <code>pixel_decoder_hidden_states</code> and <code>decoder_hidden_states</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.maskformer.modeling_maskformer.MaskFormerForInstanceSegmentationOutput.attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.maskformer.modeling_maskformer.MaskFormerForInstanceSegmentationOutput.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. 
Attentions weights from Detr&#x2019;s decoder after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Class for outputs of <a href="/docs/transformers/pr_16143/en/model_doc/maskformer#transformers.MaskFormerForInstanceSegmentation">MaskFormerForInstanceSegmentation</a>.</p> <p>This output can be directly passed to <a href="/docs/transformers/pr_16143/en/model_doc/maskformer#transformers.MaskFormerFeatureExtractor.post_process_segmentation">post_process_segmentation()</a> or <a href="/docs/transformers/pr_16143/en/model_doc/maskformer#transformers.MaskFormerFeatureExtractor.post_process_panoptic_segmentation">post_process_panoptic_segmentation()</a> depending on the task. Please, see [`~MaskFormerFeatureExtractor] for details regarding usage.</p></div> <h2 class="relative group"><a id="transformers.MaskFormerConfig" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MaskFormerConfig"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>MaskFormerConfig </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.MaskFormerConfig"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">MaskFormerConfig</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.MaskFormerConfig" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.MaskFormerConfig"><svg class="" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/maskformer/configuration_maskformer.py#L34" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">fpn_feature_size<span class="opacity-60">: int = 256</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">mask_feature_size<span class="opacity-60">: int = 256</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">no_object_weight<span class="opacity-60">: float = 0.1</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_auxiliary_loss<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">backbone_config<span class="opacity-60">: typing.Optional[typing.Dict] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_config<span class="opacity-60">: typing.Optional[typing.Dict] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">init_std<span class="opacity-60">: float = 0.02</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">init_xavier_std<span class="opacity-60">: float = 1.0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dice_weight<span class="opacity-60">: float = 1.0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cross_entropy_weight<span class="opacity-60">: float = 1.0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">mask_weight<span class="opacity-60">: float = 20.0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">output_auxiliary_logits<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MaskFormerConfig.mask_feature_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MaskFormerConfig.mask_feature_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>mask_feature_size</strong> (<code>int</code>, <em>optional</em>, defaults to 256) &#x2014; The masks&#x2019; features size, this value will also be used to specify the Feature Pyramid Network features&#x2019; size.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MaskFormerConfig.no_object_weight" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MaskFormerConfig.no_object_weight"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>no_object_weight</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; Weight to apply to the null (no object) class.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MaskFormerConfig.use_auxiliary_loss(bool," class="header-link block pr-0.5 text-lg 
no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MaskFormerConfig.use_auxiliary_loss(bool,"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_auxiliary_loss</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; If <code>True</code>, <code>MaskFormerForInstanceSegmentationOutput</code> will contain the auxiliary losses computed using the logits from each decoder stage.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MaskFormerConfig.backbone_config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MaskFormerConfig.backbone_config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>backbone_config</strong> (<code>Dict</code>, <em>optional</em>) &#x2014; The configuration passed to the backbone. If unset, the configuration corresponding to <code>swin-base-patch4-window12-384</code> will be used.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MaskFormerConfig.decoder_config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MaskFormerConfig.decoder_config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_config</strong> (<code>Dict</code>, <em>optional</em>) &#x2014; The configuration passed to the transformer decoder model. If unset, the base config for <code>detr-resnet-50</code> will be used.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MaskFormerConfig.init_std" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MaskFormerConfig.init_std"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>init_std</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MaskFormerConfig.init_xavier_std" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MaskFormerConfig.init_xavier_std"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>init_xavier_std</strong> (<code>float</code>, <em>optional</em>, defaults to 1) &#x2014; The scaling factor used for the Xavier initialization gain in the HM Attention map module.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MaskFormerConfig.dice_weight"
class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MaskFormerConfig.dice_weight"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>dice_weight</strong> (<code>float</code>, <em>optional</em>, defaults to 1.0) &#x2014; The weight for the dice loss.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MaskFormerConfig.cross_entropy_weight" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MaskFormerConfig.cross_entropy_weight"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cross_entropy_weight</strong> (<code>float</code>, <em>optional</em>, defaults to 1.0) &#x2014; The weight for the cross entropy loss.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MaskFormerConfig.mask_weight" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MaskFormerConfig.mask_weight"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 
56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>mask_weight</strong> (<code>float</code>, <em>optional</em>, defaults to 20.0) &#x2014; The weight for the mask loss.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MaskFormerConfig.output_auxiliary_logits" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MaskFormerConfig.output_auxiliary_logits"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_auxiliary_logits</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Should the model output its <code>auxiliary_logits</code> or not.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>This is the configuration class to store the configuration of a <a href="/docs/transformers/pr_16143/en/model_doc/maskformer#transformers.MaskFormerModel">MaskFormerModel</a>. It is used to instantiate a MaskFormer model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the “facebook/maskformer-swin-base-ade” architecture trained on <a href="https://huggingface.co/datasets/scene_parse_150" rel="nofollow">ADE20k-150</a>.</p> <p>Configuration objects inherit from <a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a> and can be used to control the model outputs. 
Read the documentation from <a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a> for more information.</p> <p>Currently, MaskFormer only supports the <a href="swin">Swin Transformer</a> as backbone.</p> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MaskFormerConfig, MaskFormerModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a MaskFormer facebook/maskformer-swin-base-ade configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = MaskFormerConfig() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a model from the facebook/maskformer-swin-base-ade style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = MaskFormerModel(configuration) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = model.config<!-- HTML_TAG_END --></pre></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.MaskFormerConfig.from_backbone_and_decoder_configs"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 
11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>from_backbone_and_decoder_configs</span></h4><!-- HTML_TAG_END --> <a id="transformers.MaskFormerConfig.from_backbone_and_decoder_configs" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.MaskFormerConfig.from_backbone_and_decoder_configs"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/maskformer/configuration_maskformer.py#L168" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">backbone_config<span class="opacity-60">: PretrainedConfig</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_config<span class="opacity-60">: PretrainedConfig</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/model_doc/maskformer#transformers.MaskFormerConfig" >MaskFormerConfig</a></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MaskFormerConfig.from_backbone_and_decoder_configs.backbone_config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MaskFormerConfig.from_backbone_and_decoder_configs.backbone_config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>backbone_config</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a>) &#x2014; The backbone configuration.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MaskFormerConfig.from_backbone_and_decoder_configs.decoder_config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MaskFormerConfig.from_backbone_and_decoder_configs.decoder_config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_config</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a>) &#x2014; The transformer decoder configuration to use.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.MaskFormerConfig.from_backbone_and_decoder_configs.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/model_doc/maskformer#transformers.MaskFormerConfig" >MaskFormerConfig</a></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>An instance of a configuration object</p> <!-- HTML_TAG_END --></p></div></div> <p>Instantiate a <a href="/docs/transformers/pr_16143/en/model_doc/maskformer#transformers.MaskFormerConfig">MaskFormerConfig</a> (or a derived class) from a pre-trained backbone model configuration and DETR model configuration.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.MaskFormerConfig.to_dict"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 
dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>to_dict</span></h4><!-- HTML_TAG_END --> <a id="transformers.MaskFormerConfig.to_dict" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.MaskFormerConfig.to_dict"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/maskformer/configuration_maskformer.py#L190" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>Dict[str, any]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.MaskFormerConfig.to_dict.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>Dict[str, any]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>Dictionary of all the attributes that make up this configuration instance,</p> <!-- HTML_TAG_END --></p></div></div> <p>Serializes this instance to a Python dictionary. 
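</p> <p>As an illustrative sketch, serializing a freshly created configuration looks like this (the attribute accessed below is simply one of the defaults documented above):</p> <div class="code-block relative"><pre>&gt;&gt;&gt; from transformers import MaskFormerConfig

&gt;&gt;&gt; config = MaskFormerConfig()
&gt;&gt;&gt; config_dict = config.to_dict()

&gt;&gt;&gt; # the dictionary mirrors the configuration attributes
&gt;&gt;&gt; config_dict["mask_feature_size"]
256</pre></div> <p>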
Override the default <a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig.to_dict">to_dict()</a>.</p></div></div> <h2 class="relative group"><a id="transformers.MaskFormerFeatureExtractor" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MaskFormerFeatureExtractor"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>MaskFormerFeatureExtractor </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.MaskFormerFeatureExtractor"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">MaskFormerFeatureExtractor</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.MaskFormerFeatureExtractor" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.MaskFormerFeatureExtractor"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/maskformer/feature_extraction_maskformer.py#L39" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">do_resize<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">size<span class="opacity-60"> = 800</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_size<span class="opacity-60"> = 1333</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">size_divisibility<span class="opacity-60"> = 32</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">do_normalize<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">image_mean<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">image_std<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">ignore_index<span class="opacity-60"> = 255</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MaskFormerFeatureExtractor.do_resize" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MaskFormerFeatureExtractor.do_resize"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 
0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>do_resize</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether to resize the input to a certain <code>size</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MaskFormerFeatureExtractor.size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MaskFormerFeatureExtractor.size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>size</strong> (<code>int</code>, <em>optional</em>, defaults to 800) &#x2014; Resize the input to the given size. Only has an effect if <code>do_resize</code> is set to <code>True</code>. If size is a sequence like <code>(width, height)</code>, output size will be matched to this. If size is an int, smaller edge of the image will be matched to this number. i.e, if <code>height &gt; width</code>, then image will be rescaled to <code>(size * height / width, size)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MaskFormerFeatureExtractor.max_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MaskFormerFeatureExtractor.max_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_size</strong> (<code>int</code>, <em>optional</em>, defaults to 1333) &#x2014; The largest size an image dimension can have (otherwise it&#x2019;s capped). 
Only has an effect if <code>do_resize</code> is set to <code>True</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MaskFormerFeatureExtractor.size_divisibility" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MaskFormerFeatureExtractor.size_divisibility"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>size_divisibility</strong> (<code>int</code>, <em>optional</em>, defaults to 32) &#x2014; Some backbones need images divisible by a certain number. If not passed, it defaults to the value used in Swin Transformer.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MaskFormerFeatureExtractor.do_normalize" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MaskFormerFeatureExtractor.do_normalize"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>do_normalize</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to normalize the input with mean and standard deviation.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MaskFormerFeatureExtractor.image_mean" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MaskFormerFeatureExtractor.image_mean"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 
256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>image_mean</strong> (<code>int</code>, <em>optional</em>, defaults to <code>[0.485, 0.456, 0.406]</code>) &#x2014; The sequence of means for each channel, to be used when normalizing images. Defaults to the ImageNet mean.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MaskFormerFeatureExtractor.image_std" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MaskFormerFeatureExtractor.image_std"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>image_std</strong> (<code>int</code>, <em>optional</em>, defaults to <code>[0.229, 0.224, 0.225]</code>) &#x2014; The sequence of standard deviations for each channel, to be used when normalizing images. 
Defaults to the ImageNet std.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MaskFormerFeatureExtractor.ignore_index" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MaskFormerFeatureExtractor.ignore_index"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>ignore_index</strong> (<code>int</code>, <em>optional</em>, defaults to 255) &#x2014; Value of the index (label) to ignore.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Constructs a MaskFormer feature extractor. The feature extractor can be used to prepare image(s) and optional targets for the model.</p> <p>This feature extractor inherits from <a href="/docs/transformers/pr_16143/en/main_classes/feature_extractor#transformers.FeatureExtractionMixin">FeatureExtractionMixin</a>, which contains most of the main methods.
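</p> <p>As an illustration, a minimal sketch of instantiating the feature extractor and overriding a couple of the preprocessing parameters listed above (the values are arbitrary examples, not recommended settings):</p> <div class="code-block relative"><pre>&gt;&gt;&gt; from transformers import MaskFormerFeatureExtractor

&gt;&gt;&gt; # default preprocessing settings
&gt;&gt;&gt; feature_extractor = MaskFormerFeatureExtractor()

&gt;&gt;&gt; # resize so that the smaller edge is 512 pixels, capping the larger edge at 1024 pixels
&gt;&gt;&gt; feature_extractor = MaskFormerFeatureExtractor(size=512, max_size=1024)</pre></div> <p>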
Users should refer to this superclass for more information regarding those methods.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.MaskFormerFeatureExtractor.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.MaskFormerFeatureExtractor.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.MaskFormerFeatureExtractor.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/maskformer/feature_extraction_maskformer.py#L161" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">images<span class="opacity-60">: typing.Union[PIL.Image.Image, numpy.ndarray, ForwardRef(&#39;torch.Tensor&#39;), typing.List[PIL.Image.Image], typing.List[numpy.ndarray], typing.List[ForwardRef(&#39;torch.Tensor&#39;)]]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">annotations<span 
class="opacity-60">: typing.Union[typing.List[typing.Dict], typing.List[typing.List[typing.Dict]]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_and_return_pixel_mask<span class="opacity-60">: typing.Optional[bool] = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_tensors<span class="opacity-60">: typing.Union[str, transformers.file_utils.TensorType, NoneType] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/feature_extractor#transformers.BatchFeature" >BatchFeature</a></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MaskFormerFeatureExtractor.__call__.images" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MaskFormerFeatureExtractor.__call__.images"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>images</strong> (<code>PIL.Image.Image</code>, <code>np.ndarray</code>, <code>torch.Tensor</code>, <code>List[PIL.Image.Image]</code>, <code>List[np.ndarray]</code>, <code>List[torch.Tensor]</code>) &#x2014; The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch tensor. 
In case of a NumPy array/PyTorch tensor, each image should be of shape (C, H, W), where C is a number of channels, H and W are image height and width.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MaskFormerFeatureExtractor.__call__.annotations" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MaskFormerFeatureExtractor.__call__.annotations"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>annotations</strong> (<code>Dict</code>, <code>List[Dict]</code>, <em>optional</em>) &#x2014; The corresponding annotations as dictionary of numpy arrays with the following keys:</p> <ul> <li><strong>masks</strong> (<code>np.ndarray</code>) The target mask of shape <code>(num_classes, height, width)</code>.</li> <li><strong>labels</strong> (<code>np.ndarray</code>) The target labels of shape <code>(num_classes)</code>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MaskFormerFeatureExtractor.__call__.pad_and_return_pixel_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MaskFormerFeatureExtractor.__call__.pad_and_return_pixel_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pad_and_return_pixel_mask</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to pad images up to the largest image in a batch and create a pixel mask.</p> <p>If left to the default, will return a pixel mask that is:</p> <ul> <li>1 for pixels that are real (i.e. <strong>not masked</strong>),</li> <li>0 for pixels that are padding (i.e. 
<strong>masked</strong>).</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MaskFormerFeatureExtractor.__call__.return_tensors" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MaskFormerFeatureExtractor.__call__.return_tensors"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_tensors</strong> (<code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>) &#x2014; If set, will return tensors instead of NumPy arrays. If set to <code>&apos;pt&apos;</code>, return PyTorch <code>torch.Tensor</code> objects.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.MaskFormerFeatureExtractor.__call__.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/feature_extractor#transformers.BatchFeature" >BatchFeature</a></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/feature_extractor#transformers.BatchFeature" >BatchFeature</a> with the following fields:</p> <ul> <li><strong>pixel_values</strong> — Pixel values to be fed to a model.</li> <li><strong>pixel_mask</strong> — Pixel mask to be fed to a model (when <code>pad_and_return_pixel_mask=True</code> or if <em>“pixel_mask”</em> is in <code>self.model_input_names</code>).</li> <li><strong>mask_labels</strong> — Optional mask labels of shape <code>(batch_size, num_classes, height, width)</code> to be fed to a model (when <code>annotations</code> are provided).</li> <li><strong>class_labels</strong> — Optional class labels of shape <code>(batch_size, num_classes)</code> to be fed to a model (when <code>annotations</code> are provided).</li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>Main method to prepare one or several image(s) and optional annotations for the model.
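A minimal usage sketch is shown below (the checkpoint name and COCO image URL are illustrative choices, not requirements):</p> <pre>&gt;&gt;&gt; from transformers import MaskFormerFeatureExtractor
&gt;&gt;&gt; from PIL import Image
&gt;&gt;&gt; import requests

&gt;&gt;&gt; # illustrative image and checkpoint; any MaskFormer checkpoint works the same way
&gt;&gt;&gt; url = "http://images.cocodataset.org/val2017/000000039769.jpg"
&gt;&gt;&gt; image = Image.open(requests.get(url, stream=True).raw)
&gt;&gt;&gt; feature_extractor = MaskFormerFeatureExtractor.from_pretrained("facebook/maskformer-swin-base-ade")

&gt;&gt;&gt; # returns pixel_values (and pixel_mask, since pad_and_return_pixel_mask defaults to True)
&gt;&gt;&gt; inputs = feature_extractor(image, return_tensors="pt")

&gt;&gt;&gt; # passing annotations (dicts of numpy "masks"/"labels" arrays) additionally returns mask_labels and class_labels</pre> <p>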
Images are by default padded up to the largest image in a batch, and a pixel mask is created that indicates which pixels are real/which are padding.</p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"><p>NumPy arrays and PyTorch tensors are converted to PIL images when resizing, so the most efficient is to pass PIL images.</p></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.MaskFormerFeatureExtractor.encode_inputs"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>encode_inputs</span></h4><!-- HTML_TAG_END --> <a id="transformers.MaskFormerFeatureExtractor.encode_inputs" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.MaskFormerFeatureExtractor.encode_inputs"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/maskformer/feature_extraction_maskformer.py#L291" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span 
class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pixel_values_list<span class="opacity-60">: typing.List[ForwardRef(&#39;torch.Tensor&#39;)]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">annotations<span class="opacity-60">: typing.Optional[typing.List[typing.Dict]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_and_return_pixel_mask<span class="opacity-60">: typing.Optional[bool] = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_tensors<span class="opacity-60">: typing.Union[str, transformers.file_utils.TensorType, NoneType] = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/feature_extractor#transformers.BatchFeature" >BatchFeature</a></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MaskFormerFeatureExtractor.encode_inputs.pixel_values_list" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MaskFormerFeatureExtractor.encode_inputs.pixel_values_list"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pixel_values_list</strong> (<code>List[torch.Tensor]</code>) &#x2014; List of images (pixel values) to be padded. 
Each image should be a tensor of shape <code>(channels, height, width)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MaskFormerFeatureExtractor.encode_inputs.annotations" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MaskFormerFeatureExtractor.encode_inputs.annotations"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>annotations</strong> (<code>Dict</code>, <code>List[Dict]</code>, <em>optional</em>) &#x2014; The corresponding annotations as dictionary of numpy arrays with the following keys:</p> <ul> <li><strong>masks</strong> (<code>np.ndarray</code>) The target mask of shape <code>(num_classes, height, width)</code>.</li> <li><strong>labels</strong> (<code>np.ndarray</code>) The target labels of shape <code>(num_classes)</code>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MaskFormerFeatureExtractor.encode_inputs.pad_and_return_pixel_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MaskFormerFeatureExtractor.encode_inputs.pad_and_return_pixel_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pad_and_return_pixel_mask</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to pad images up to the largest image in a batch and create a pixel mask.</p> <p>If left to the default, will return a pixel mask that is:</p> <ul> <li>1 for pixels that are real (i.e. <strong>not masked</strong>),</li> <li>0 for pixels that are padding (i.e. 
<strong>masked</strong>).</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MaskFormerFeatureExtractor.encode_inputs.return_tensors" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MaskFormerFeatureExtractor.encode_inputs.return_tensors"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_tensors</strong> (<code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>) &#x2014; If set, will return tensors instead of NumPy arrays. If set to <code>&apos;pt&apos;</code>, return PyTorch <code>torch.Tensor</code> objects.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.MaskFormerFeatureExtractor.encode_inputs.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/feature_extractor#transformers.BatchFeature" >BatchFeature</a></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/feature_extractor#transformers.BatchFeature" >BatchFeature</a> with the following fields:</p> <ul> <li><strong>pixel_values</strong> — Pixel values to be fed to a model.</li> <li><strong>pixel_mask</strong> — Pixel mask to be fed to a model (when <code>pad_and_return_pixel_mask=True</code> or if <em>“pixel_mask”</em> is in <code>self.model_input_names</code>).</li> <li><strong>mask_labels</strong> — Optional mask labels of shape <code>(batch_size, num_classes, height, width)</code> to be fed to a model (when <code>annotations</code> are provided).</li> <li><strong>class_labels</strong> — Optional class labels of shape <code>(batch_size, num_classes)</code> to be fed to a model (when <code>annotations</code> are provided).</li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>Pad images up to the largest image in a batch and create a corresponding <code>pixel_mask</code>.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.MaskFormerFeatureExtractor.post_process_segmentation"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0
0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>post_process_segmentation</span></h4><!-- HTML_TAG_END --> <a id="transformers.MaskFormerFeatureExtractor.post_process_segmentation" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.MaskFormerFeatureExtractor.post_process_segmentation"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/maskformer/feature_extraction_maskformer.py#L375" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">outputs<span class="opacity-60">: MaskFormerForInstanceSegmentationOutput</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">target_size<span class="opacity-60">: typing.Tuple[int, int] = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>torch.Tensor</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a 
id="transformers.MaskFormerFeatureExtractor.post_process_segmentation.outputs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MaskFormerFeatureExtractor.post_process_segmentation.outputs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>outputs</strong> (<code>MaskFormerForInstanceSegmentationOutput</code>) &#x2014; The outputs from <a href="/docs/transformers/pr_16143/en/model_doc/maskformer#transformers.MaskFormerForInstanceSegmentation">MaskFormerForInstanceSegmentation</a>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MaskFormerFeatureExtractor.post_process_segmentation.target_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MaskFormerFeatureExtractor.post_process_segmentation.target_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>target_size</strong> (<code>Tuple[int, int]</code>, <em>optional</em>) &#x2014; If set, the <code>masks_queries_logits</code> will be resized to <code>target_size</code>.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.MaskFormerFeatureExtractor.post_process_segmentation.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>torch.Tensor</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A tensor of shape (<code>batch_size, num_labels, height, width</code>).</p> <!-- HTML_TAG_END --></p></div></div> <p>Converts the output of <code>MaskFormerForInstanceSegmentationOutput</code> into image segmentation predictions. 
Only supports PyTorch.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.MaskFormerFeatureExtractor.post_process_semantic_segmentation"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>post_process_semantic_segmentation</span></h4><!-- HTML_TAG_END --> <a id="transformers.MaskFormerFeatureExtractor.post_process_semantic_segmentation" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.MaskFormerFeatureExtractor.post_process_semantic_segmentation"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/maskformer/feature_extraction_maskformer.py#L446" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">outputs<span class="opacity-60">: MaskFormerForInstanceSegmentationOutput</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">target_size<span class="opacity-60">: typing.Tuple[int, int] = None</span></span> </span> <span>)</span> <span 
class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>torch.Tensor</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MaskFormerFeatureExtractor.post_process_semantic_segmentation.outputs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MaskFormerFeatureExtractor.post_process_semantic_segmentation.outputs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>outputs</strong> (<code>MaskFormerForInstanceSegmentationOutput</code>) &#x2014; The outputs from <a href="/docs/transformers/pr_16143/en/model_doc/maskformer#transformers.MaskFormerForInstanceSegmentation">MaskFormerForInstanceSegmentation</a>.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.MaskFormerFeatureExtractor.post_process_semantic_segmentation.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>torch.Tensor</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A tensor of shape <code>batch_size, height, width</code>.</p> <!-- HTML_TAG_END --></p></div></div> <p>Converts the output of <code>MaskFormerForInstanceSegmentationOutput</code> into semantic segmentation predictions. 
Only supports PyTorch.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.MaskFormerFeatureExtractor.post_process_panoptic_segmentation"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>post_process_panoptic_segmentation</span></h4><!-- HTML_TAG_END --> <a id="transformers.MaskFormerFeatureExtractor.post_process_panoptic_segmentation" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.MaskFormerFeatureExtractor.post_process_panoptic_segmentation"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/maskformer/feature_extraction_maskformer.py#L464" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">outputs<span class="opacity-60">: MaskFormerForInstanceSegmentationOutput</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">object_mask_threshold<span class="opacity-60">: float = 0.8</span></span> </span><span class="comma 
cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">overlap_mask_area_threshold<span class="opacity-60">: float = 0.8</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">label_ids_to_fuse<span class="opacity-60">: typing.Optional[typing.Set[int]] = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>List[Dict]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MaskFormerFeatureExtractor.post_process_panoptic_segmentation.outputs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MaskFormerFeatureExtractor.post_process_panoptic_segmentation.outputs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>outputs</strong> (<code>MaskFormerForInstanceSegmentationOutput</code>) &#x2014; The outputs from <a href="/docs/transformers/pr_16143/en/model_doc/maskformer#transformers.MaskFormerForInstanceSegmentation">MaskFormerForInstanceSegmentation</a>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MaskFormerFeatureExtractor.post_process_panoptic_segmentation.object_mask_threshold" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MaskFormerFeatureExtractor.post_process_panoptic_segmentation.object_mask_threshold"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 
11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>object_mask_threshold</strong> (<code>float</code>, <em>optional</em>, defaults to 0.8) &#x2014; The object mask threshold.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MaskFormerFeatureExtractor.post_process_panoptic_segmentation.overlap_mask_area_threshold" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MaskFormerFeatureExtractor.post_process_panoptic_segmentation.overlap_mask_area_threshold"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>overlap_mask_area_threshold</strong> (<code>float</code>, <em>optional</em>, defaults to 0.8) &#x2014; The overlap mask area threshold to use.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MaskFormerFeatureExtractor.post_process_panoptic_segmentation.label_ids_to_fuse" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MaskFormerFeatureExtractor.post_process_panoptic_segmentation.label_ids_to_fuse"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>label_ids_to_fuse</strong> (<code>Set[int]</code>, <em>optional</em>) &#x2014; The labels in this set will have all their instances fused together.
For instance we could say there can only be one sky in an image, but several persons, so the label ID for sky would be in that set, but not the one for person.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.MaskFormerFeatureExtractor.post_process_panoptic_segmentation.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>List[Dict]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A list of dictionaries, one per image, each dictionary containing two keys:</p> <ul> <li><strong>segmentation</strong> — a tensor of shape <code>(height, width)</code> where each pixel represents a <code>segment_id</code>.</li> <li><strong>segments</strong> — a dictionary with the following keys<ul> <li><strong>id</strong> — an integer representing the <code>segment_id</code>.</li> <li><strong>label_id</strong> — an integer representing the segment’s label.</li> <li><strong>was_fused</strong> — a boolean, <code>True</code> if <code>label_id</code> was in <code>label_ids_to_fuse</code>, <code>False</code> otherwise.</li> </ul></li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>Converts the output of <code>MaskFormerForInstanceSegmentationOutput</code> into image panoptic segmentation predictions. Only supports PyTorch.</p></div></div> <h2 class="relative group"><a id="transformers.MaskFormerModel" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MaskFormerModel"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>MaskFormerModel </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.MaskFormerModel"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" 
opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">MaskFormerModel</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.MaskFormerModel" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.MaskFormerModel"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/maskformer/modeling_maskformer.py#L2264" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60">: MaskFormerConfig</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MaskFormerModel.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MaskFormerModel.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/maskformer#transformers.MaskFormerConfig">MaskFormerConfig</a>) &#x2014; Model configuration class with all the parameters of the model. 
Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>The bare MaskFormer Model outputting raw hidden-states without any specific head on top. This model is a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.MaskFormerModel.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.MaskFormerModel.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.MaskFormerModel.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/maskformer/modeling_maskformer.py#L2274" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs 
md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pixel_values<span class="opacity-60">: Tensor</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pixel_mask<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/model_doc/maskformer#transformers.models.maskformer.modeling_maskformer.MaskFormerModelOutput" >transformers.models.maskformer.modeling_maskformer.MaskFormerModelOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MaskFormerModel.forward.pixel_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MaskFormerModel.forward.pixel_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pixel_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_channels, height, width)</code>) &#x2014; Pixel values. Pixel values can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/auto#transformers.AutoFeatureExtractor">AutoFeatureExtractor</a>. 
See <code>AutoFeatureExtractor.__call__()</code>for details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MaskFormerModel.forward.pixel_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MaskFormerModel.forward.pixel_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pixel_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, height, width)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding pixel values. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for pixels that are real (i.e. <strong>not masked</strong>),</li> <li>0 for pixels that are padding (i.e. <strong>masked</strong>).</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MaskFormerModel.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MaskFormerModel.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MaskFormerModel.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MaskFormerModel.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of Detr&#x2019;s decoder attention layers.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MaskFormerModel.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MaskFormerModel.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <code>~MaskFormerModelOutput</code> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.MaskFormerModel.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/model_doc/maskformer#transformers.models.maskformer.modeling_maskformer.MaskFormerModelOutput" >transformers.models.maskformer.modeling_maskformer.MaskFormerModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a 
href="/docs/transformers/pr_16143/en/model_doc/maskformer#transformers.models.maskformer.modeling_maskformer.MaskFormerModelOutput" >transformers.models.maskformer.modeling_maskformer.MaskFormerModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/maskformer#transformers.MaskFormerConfig" >MaskFormerConfig</a>) and inputs.</p> <ul> <li><strong>encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_channels, height, width)</code>) — Last hidden states (final feature map) of the last stage of the encoder model (backbone).</li> <li><strong>pixel_decoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_channels, height, width)</code>) — Last hidden states (final feature map) of the last stage of the pixel decoder model (FPN).</li> <li><strong>transformer_decoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) — Last hidden states (final feature map) of the last stage of the transformer decoder model.</li> <li><strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each stage) of shape <code>(batch_size, num_channels, height, width)</code>. Hidden-states (also called feature maps) of the encoder model at the output of each stage.</li> <li><strong>pixel_decoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each stage) of shape <code>(batch_size, num_channels, height, width)</code>. Hidden-states (also called feature maps) of the pixel decoder model at the output of each stage.</li> <li><strong>transformer_decoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each stage) of shape <code>(batch_size, sequence_length, hidden_size)</code>. Hidden-states (also called feature maps) of the transformer decoder at the output of each stage.</li> <li><strong>hidden_states</strong> <code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> containing <code>encoder_hidden_states</code>, <code>pixel_decoder_hidden_states</code> and <code>decoder_hidden_states</code></li> <li><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. 
Attentions weights from Detr’s decoder after the attention softmax, used to compute the weighted average in the self-attention heads.</li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/maskformer#transformers.MaskFormerModel">MaskFormerModel</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MaskFormerFeatureExtractor, MaskFormerModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span>dataset = load_dataset(<span class="hljs-string">&quot;huggingface/cats-image&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>image = dataset[<span class="hljs-string">&quot;test&quot;</span>][<span class="hljs-string">&quot;image&quot;</span>][<span class="hljs-number">0</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = MaskFormerFeatureExtractor.from_pretrained(<span class="hljs-string">&quot;facebook/maskformer-swin-base-ade&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = MaskFormerModel.from_pretrained(<span class="hljs-string">&quot;facebook/maskformer-swin-base-ade&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = feature_extractor(image, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">with</span> torch.no_grad(): <span class="hljs-meta">... 
</span> outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">list</span>(last_hidden_states.shape) <!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.MaskFormerForInstanceSegmentation" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MaskFormerForInstanceSegmentation"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>MaskFormerForInstanceSegmentation </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.MaskFormerForInstanceSegmentation"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">MaskFormerForInstanceSegmentation</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.MaskFormerForInstanceSegmentation" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.MaskFormerForInstanceSegmentation"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 
0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/maskformer/modeling_maskformer.py#L2344" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60">: MaskFormerConfig</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.MaskFormerForInstanceSegmentation.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.MaskFormerForInstanceSegmentation.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.MaskFormerForInstanceSegmentation.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" 
href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/maskformer/modeling_maskformer.py#L2423" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pixel_values<span class="opacity-60">: Tensor</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">mask_labels<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">class_labels<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pixel_mask<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_auxiliary_logits<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/model_doc/maskformer#transformers.models.maskformer.modeling_maskformer.MaskFormerForInstanceSegmentationOutput" >transformers.models.maskformer.modeling_maskformer.MaskFormerForInstanceSegmentationOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MaskFormerForInstanceSegmentation.forward.pixel_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MaskFormerForInstanceSegmentation.forward.pixel_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 
8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pixel_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_channels, height, width)</code>) &#x2014; Pixel values. Pixel values can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/auto#transformers.AutoFeatureExtractor">AutoFeatureExtractor</a>. See <code>AutoFeatureExtractor.__call__()</code>for details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MaskFormerForInstanceSegmentation.forward.pixel_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MaskFormerForInstanceSegmentation.forward.pixel_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pixel_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, height, width)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding pixel values. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for pixels that are real (i.e. <strong>not masked</strong>),</li> <li>0 for pixels that are padding (i.e. 
<strong>masked</strong>).</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MaskFormerForInstanceSegmentation.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MaskFormerForInstanceSegmentation.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MaskFormerForInstanceSegmentation.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MaskFormerForInstanceSegmentation.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of Detr&#x2019;s decoder attention layers.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MaskFormerForInstanceSegmentation.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MaskFormerForInstanceSegmentation.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <code>~MaskFormerModelOutput</code> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MaskFormerForInstanceSegmentation.forward.mask_labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MaskFormerForInstanceSegmentation.forward.mask_labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>mask_labels</strong> (<code>torch.FloatTensor</code>, <em>optional</em>) &#x2014; The target mask of shape <code>(num_classes, height, width)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MaskFormerForInstanceSegmentation.forward.class_labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MaskFormerForInstanceSegmentation.forward.class_labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- 
HTML_TAG_START --><strong>class_labels</strong> (<code>torch.LongTensor</code>, <em>optional</em>) &#x2014; The target labels of shape <code>(num_classes)</code>.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.MaskFormerForInstanceSegmentation.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/model_doc/maskformer#transformers.models.maskformer.modeling_maskformer.MaskFormerForInstanceSegmentationOutput" >transformers.models.maskformer.modeling_maskformer.MaskFormerForInstanceSegmentationOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/model_doc/maskformer#transformers.models.maskformer.modeling_maskformer.MaskFormerForInstanceSegmentationOutput" >transformers.models.maskformer.modeling_maskformer.MaskFormerForInstanceSegmentationOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/maskformer#transformers.MaskFormerConfig" >MaskFormerConfig</a>) and inputs.</p> <ul> <li><strong>loss</strong> (<code>torch.Tensor</code>, <em>optional</em>) — The computed loss, returned when labels are present.</li> <li><strong>class_queries_logits</strong> (<code>torch.FloatTensor</code>) — A tensor of shape <code>(batch_size, num_queries, num_classes + 1)</code> representing the proposed classes for each query. Note the <code>+ 1</code> is needed because we incorporate the null class.</li> <li><strong>masks_queries_logits</strong> (<code>torch.FloatTensor</code>) — A tensor of shape <code>(batch_size, num_queries, height, width)</code> representing the proposed masks for each query.</li> <li><strong>encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_channels, height, width)</code>) — Last hidden states (final feature map) of the last stage of the encoder model (backbone).</li> <li><strong>pixel_decoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_channels, height, width)</code>) — Last hidden states (final feature map) of the last stage of the pixel decoder model (FPN).</li> <li><strong>transformer_decoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) — Last hidden states (final feature map) of the last stage of the transformer decoder model.</li> <li><strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each stage) of shape <code>(batch_size, num_channels, height, width)</code>. 
Hidden-states (also called feature maps) of the encoder model at the output of each stage.</li> <li><strong>pixel_decoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each stage) of shape <code>(batch_size, num_channels, height, width)</code>. Hidden-states (also called feature maps) of the pixel decoder model at the output of each stage.</li> <li><strong>transformer_decoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each stage) of shape <code>(batch_size, sequence_length, hidden_size)</code>. Hidden-states of the transformer decoder at the output of each stage.</li> <li><strong>hidden_states</strong> <code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> containing <code>encoder_hidden_states</code>, <code>pixel_decoder_hidden_states</code> and <code>decoder_hidden_states</code>.</li> <li><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. 
Attentions weights from Detr’s decoder after the attention softmax, used to compute the weighted average in the self-attention heads.</li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/maskformer#transformers.MaskFormerForInstanceSegmentation">MaskFormerForInstanceSegmentation</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MaskFormerFeatureExtractor, MaskFormerForInstanceSegmentation <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> requests <span class="hljs-meta">&gt;&gt;&gt; </span>url = <span class="hljs-string">&quot;http://images.cocodataset.org/val2017/000000039769.jpg&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>image = Image.<span class="hljs-built_in">open</span>(requests.get(url, stream=<span class="hljs-literal">True</span>).raw) <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = MaskFormerFeatureExtractor.from_pretrained(<span class="hljs-string">&quot;facebook/maskformer-swin-base-ade&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = feature_extractor(images=image, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = MaskFormerForInstanceSegmentation.from_pretrained(<span class="hljs-string">&quot;facebook/maskformer-swin-base-ade&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># model 
predicts class_queries_logits of shape `(batch_size, num_queries, num_classes + 1)`</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># and masks_queries_logits of shape `(batch_size, num_queries, height, width)`</span> <span class="hljs-meta">&gt;&gt;&gt; </span>class_queries_logits = outputs.class_queries_logits <span class="hljs-meta">&gt;&gt;&gt; </span>masks_queries_logits = outputs.masks_queries_logits <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># you can pass them to feature_extractor for postprocessing</span> <span class="hljs-meta">&gt;&gt;&gt; </span>output = feature_extractor.post_process_segmentation(outputs) <span class="hljs-meta">&gt;&gt;&gt; </span>output = feature_extractor.post_process_semantic_segmentation(outputs) <span class="hljs-meta">&gt;&gt;&gt; </span>output = feature_extractor.post_process_panoptic_segmentation(outputs)<!-- HTML_TAG_END --></pre></div></div></div> <script type="module" data-hydrate="oovtve"> import { start } from "/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"; start({ target: document.querySelector('[data-hydrate="oovtve"]').parentNode, paths: {"base":"/docs/transformers/pr_16143/en","assets":"/docs/transformers/pr_16143/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"), import("/docs/transformers/pr_16143/en/_app/pages/model_doc/maskformer.mdx-07585051.js") ], params: {} } }); </script>
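<p>The <code>class_queries_logits</code> and <code>masks_queries_logits</code> returned above can also be combined by hand into a per-pixel semantic prediction. The snippet below is a minimal sketch of that combination, assuming the <code>outputs</code> object from the example above; the intermediate names (<code>class_probs</code>, <code>mask_probs</code>, <code>semantic_map</code>) are illustrative only, and the feature extractor's post-processing methods shown above remain the recommended path.</p> <pre>
# Minimal sketch, not the library's own post-processing: combine the two
# logits tensors from MaskFormerForInstanceSegmentation into a semantic map.
import torch

class_logits = outputs.class_queries_logits  # (batch_size, num_queries, num_classes + 1)
mask_logits = outputs.masks_queries_logits   # (batch_size, num_queries, height, width)

# Drop the null class (last index) and normalize the class scores per query.
class_probs = class_logits.softmax(dim=-1)[..., :-1]  # (batch_size, num_queries, num_classes)
mask_probs = mask_logits.sigmoid()                    # (batch_size, num_queries, height, width)

# Weight every query's mask by its class scores and sum over the queries.
segmentation = torch.einsum("bqc,bqhw-&gt;bchw", class_probs, mask_probs)

# Per-pixel predicted class id at the model's output resolution.
semantic_map = segmentation.argmax(dim=1)  # (batch_size, height, width)
</pre>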
212
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en
hf_public_repos/doc-build-dev/transformers/pr_16143/en/model_doc/sew.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;sew&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;overview&quot;,&quot;title&quot;:&quot;Overview&quot;},{&quot;local&quot;:&quot;transformers.SEWConfig&quot;,&quot;title&quot;:&quot;SEWConfig&quot;},{&quot;local&quot;:&quot;transformers.SEWModel&quot;,&quot;title&quot;:&quot;SEWModel&quot;},{&quot;local&quot;:&quot;transformers.SEWForCTC&quot;,&quot;title&quot;:&quot;SEWForCTC&quot;},{&quot;local&quot;:&quot;transformers.SEWForSequenceClassification&quot;,&quot;title&quot;:&quot;SEWForSequenceClassification&quot;}],&quot;title&quot;:&quot;SEW&quot;}" data-svelte="svelte-1phssyn"> <link rel="stylesheet" href="/docs/transformers/pr_16143/en/_app/assets/pages/__layout.svelte-a5c8879b.css"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/vendor-4833417e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/paths-4b3c6e7e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/model_doc/sew.mdx-a39f8b59.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Tip-fffd6df1.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Docstring-4f315ed9.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/IconCopyLink-4b81c553.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CodeBlock-6a3d1b46.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CopyButton-dacfbfaf.js"> <h1 class="relative group"><a id="sew" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#sew"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>SEW </span></h1> <h2 class="relative group"><a id="overview" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#overview"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 
79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Overview </span></h2> <p>SEW (Squeezed and Efficient Wav2Vec) was proposed in <a href="https://arxiv.org/abs/2109.06870" rel="nofollow">Performance-Efficiency Trade-offs in Unsupervised Pre-training for Speech Recognition</a> by Felix Wu, Kwangyoun Kim, Jing Pan, Kyu Han, Kilian Q. Weinberger, Yoav Artzi.</p> <p>The abstract from the paper is the following:</p> <p><em>This paper is a study of performance-efficiency trade-offs in pre-trained models for automatic speech recognition (ASR). We focus on wav2vec 2.0, and formalize several architecture designs that influence both the model performance and its efficiency. Putting together all our observations, we introduce SEW (Squeezed and Efficient Wav2vec), a pre-trained model architecture with significant improvements along both performance and efficiency dimensions across a variety of training setups. For example, under the 100h-960h semi-supervised setup on LibriSpeech, SEW achieves a 1.9x inference speedup compared to wav2vec 2.0, with a 13.5% relative reduction in word error rate. With a similar inference time, SEW reduces word error rate by 25-50% across different model sizes.</em></p> <p>Tips:</p> <ul><li>SEW is a speech model that accepts a float array corresponding to the raw waveform of the speech signal.</li> <li>SEWForCTC is fine-tuned using connectionist temporal classification (CTC) so the model output has to be decoded using <a href="/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2CTCTokenizer">Wav2Vec2CTCTokenizer</a>.</li></ul> <p>This model was contributed by <a href="https://huggingface.co/anton-l" rel="nofollow">anton-l</a>.</p> <h2 class="relative group"><a id="transformers.SEWConfig" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SEWConfig"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>SEWConfig </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.SEWConfig"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" 
aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">SEWConfig</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.SEWConfig" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.SEWConfig"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/sew/configuration_sew.py#L32" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">vocab_size<span class="opacity-60"> = 32</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_size<span class="opacity-60"> = 768</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_hidden_layers<span class="opacity-60"> = 12</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_attention_heads<span class="opacity-60"> = 12</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">intermediate_size<span class="opacity-60"> = 3072</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">squeeze_factor<span class="opacity-60"> = 2</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_act<span class="opacity-60"> = &#39;gelu&#39;</span></span> </span><span class="comma cursor-pointer"><span 
class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_dropout<span class="opacity-60"> = 0.1</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">activation_dropout<span class="opacity-60"> = 0.1</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_dropout<span class="opacity-60"> = 0.1</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">feat_proj_dropout<span class="opacity-60"> = 0.0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">final_dropout<span class="opacity-60"> = 0.1</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">layerdrop<span class="opacity-60"> = 0.1</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">initializer_range<span class="opacity-60"> = 0.02</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">layer_norm_eps<span class="opacity-60"> = 1e-05</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">feat_extract_norm<span class="opacity-60"> = &#39;group&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">feat_extract_activation<span class="opacity-60"> = &#39;gelu&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">conv_dim<span class="opacity-60"> = (64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512)</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">conv_stride<span class="opacity-60"> = (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">conv_kernel<span class="opacity-60"> = (10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1)</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">conv_bias<span class="opacity-60"> = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_conv_pos_embeddings<span class="opacity-60"> = 128</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_conv_pos_embedding_groups<span class="opacity-60"> = 16</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">apply_spec_augment<span class="opacity-60"> = True</span></span> </span><span class="comma 
cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">mask_time_prob<span class="opacity-60"> = 0.05</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">mask_time_length<span class="opacity-60"> = 10</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">mask_time_min_masks<span class="opacity-60"> = 2</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">mask_feature_prob<span class="opacity-60"> = 0.0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">mask_feature_length<span class="opacity-60"> = 10</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">mask_feature_min_masks<span class="opacity-60"> = 0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">ctc_loss_reduction<span class="opacity-60"> = &#39;mean&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">ctc_zero_infinity<span class="opacity-60"> = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_weighted_layer_sum<span class="opacity-60"> = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">classifier_proj_size<span class="opacity-60"> = 256</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_token_id<span class="opacity-60"> = 0</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">bos_token_id<span class="opacity-60"> = 1</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eos_token_id<span class="opacity-60"> = 2</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SEWConfig.vocab_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SEWConfig.vocab_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 32) &#x2014; Vocabulary size of the SEW model. Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed when calling <code>SEW</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SEWConfig.hidden_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SEWConfig.hidden_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_size</strong> (<code>int</code>, <em>optional</em>, defaults to 768) &#x2014; Dimensionality of the encoder layers and the pooler layer.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SEWConfig.num_hidden_layers" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SEWConfig.num_hidden_layers"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_hidden_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of hidden 
layers in the Transformer encoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SEWConfig.num_attention_heads" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SEWConfig.num_attention_heads"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SEWConfig.intermediate_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SEWConfig.intermediate_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>intermediate_size</strong> (<code>int</code>, <em>optional</em>, defaults to 3072) &#x2014; Dimensionality of the &#x201C;intermediate&#x201D; (i.e., feed-forward) layer in the Transformer encoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SEWConfig.squeeze_factor" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SEWConfig.squeeze_factor"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 
11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>squeeze_factor</strong> (<code>int</code>, <em>optional</em>, defaults to 2) &#x2014; Sequence length downsampling factor after the encoder and upsampling factor after the transformer.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SEWConfig.hidden_act" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SEWConfig.hidden_act"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_act</strong> (<code>str</code> or <code>function</code>, <em>optional</em>, defaults to <code>&quot;gelu&quot;</code>) &#x2014; The non-linear activation function (function or string) in the encoder and pooler. 
If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;selu&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SEWConfig.hidden_dropout" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SEWConfig.hidden_dropout"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SEWConfig.attention_dropout" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SEWConfig.attention_dropout"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout ratio for the attention probabilities.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SEWConfig.final_dropout" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SEWConfig.final_dropout"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 
67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>final_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability for the final projection layer of <a href="/docs/transformers/pr_16143/en/model_doc/sew#transformers.SEWForCTC">SEWForCTC</a>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SEWConfig.initializer_range" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SEWConfig.initializer_range"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>initializer_range</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SEWConfig.layer_norm_eps" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SEWConfig.layer_norm_eps"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>layer_norm_eps</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-12) &#x2014; The epsilon used by the layer normalization layers.<!-- HTML_TAG_END --> </span></span> 
</li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SEWConfig.feat_extract_norm" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SEWConfig.feat_extract_norm"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>feat_extract_norm</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;group&quot;</code>) &#x2014; The norm to be applied to 1D convolutional layers in feature encoder. One of <code>&quot;group&quot;</code> for group normalization of only the first 1D convolutional layer or <code>&quot;layer&quot;</code> for layer normalization of all 1D convolutional layers.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SEWConfig.feat_proj_dropout" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SEWConfig.feat_proj_dropout"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>feat_proj_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout probability for output of the feature encoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SEWConfig.feat_extract_activation" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SEWConfig.feat_extract_activation"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 
88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>feat_extract_activation</strong> (<code>str</code> or <code>function</code>, <em>optional</em>, defaults to <code>&quot;gelu&quot;</code>) &#x2014; The non-linear activation function (function or string) in the 1D convolutional layers of the feature extractor. If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;selu&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SEWConfig.conv_dim" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SEWConfig.conv_dim"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>conv_dim</strong> (<code>Tuple[int]</code>, <em>optional</em>, defaults to <code>(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512)</code>) &#x2014; A tuple of integers defining the number of input and output channels of each 1D convolutional layer in the feature encoder. 
The length of <em>conv_dim</em> defines the number of 1D convolutional layers.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SEWConfig.conv_stride" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SEWConfig.conv_stride"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>conv_stride</strong> (<code>Tuple[int]</code>, <em>optional</em>, defaults to <code>(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)</code>) &#x2014; A tuple of integers defining the stride of each 1D convolutional layer in the feature encoder. The length of <em>conv_stride</em> defines the number of convolutional layers and has to match the length of <em>conv_dim</em>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SEWConfig.conv_kernel" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SEWConfig.conv_kernel"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>conv_kernel</strong> (<code>Tuple[int]</code>, <em>optional</em>, defaults to <code>(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1)</code>) &#x2014; A tuple of integers defining the kernel size of each 1D convolutional layer in the feature encoder. 
The length of <em>conv_kernel</em> defines the number of convolutional layers and has to match the length of <em>conv_dim</em>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SEWConfig.conv_bias" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SEWConfig.conv_bias"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>conv_bias</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether the 1D convolutional layers have a bias.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SEWConfig.num_conv_pos_embeddings" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SEWConfig.num_conv_pos_embeddings"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_conv_pos_embeddings</strong> (<code>int</code>, <em>optional</em>, defaults to 128) &#x2014; Number of convolutional positional embeddings. 
Defines the kernel size of 1D convolutional positional embeddings layer.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SEWConfig.num_conv_pos_embedding_groups" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SEWConfig.num_conv_pos_embedding_groups"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_conv_pos_embedding_groups</strong> (<code>int</code>, <em>optional</em>, defaults to 16) &#x2014; Number of groups of 1D convolutional positional embeddings layer.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SEWConfig.apply_spec_augment" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SEWConfig.apply_spec_augment"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>apply_spec_augment</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether to apply <em>SpecAugment</em> data augmentation to the outputs of the feature encoder. 
For reference see <a href="https://arxiv.org/abs/1904.08779" rel="nofollow">SpecAugment: A Simple Data Augmentation Method for Automatic Speech Recognition</a>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SEWConfig.mask_time_prob" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SEWConfig.mask_time_prob"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>mask_time_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.05) &#x2014; Percentage (between 0 and 1) of all feature vectors along the time axis which will be masked. The masking procedure generates <code>mask_time_prob*len(time_axis)/mask_time_length</code> independent masks over the axis. If reasoning from the probability of each feature vector to be chosen as the start of the vector span to be masked, <em>mask_time_prob</em> should be <code>prob_vector_start*mask_time_length</code>. Note that overlap may decrease the actual percentage of masked vectors. 
This is only relevant if <code>apply_spec_augment is True</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SEWConfig.mask_time_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SEWConfig.mask_time_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>mask_time_length</strong> (<code>int</code>, <em>optional</em>, defaults to 10) &#x2014; Length of vector span along the time axis.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SEWConfig.mask_time_min_masks" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SEWConfig.mask_time_min_masks"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>mask_time_min_masks</strong> (<code>int</code>, <em>optional</em>, defaults to 2) &#x2014; The minimum number of masks of length <code>mask_time_length</code> generated along the time axis, each time step, irrespective of <code>mask_time_prob</code>. 
Only relevant if <code>mask_time_prob*len(time_axis)/mask_time_length &lt; mask_time_min_masks</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SEWConfig.mask_feature_prob" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SEWConfig.mask_feature_prob"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>mask_feature_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; Percentage (between 0 and 1) of all feature vectors along the feature axis which will be masked. The masking procedure generates <code>mask_feature_prob*len(feature_axis)/mask_feature_length</code> independent masks over the axis. If reasoning from the probability of each feature vector to be chosen as the start of the vector span to be masked, <em>mask_feature_prob</em> should be <code>prob_vector_start*mask_feature_length</code>. Note that overlap may decrease the actual percentage of masked vectors. 
This is only relevant if <code>apply_spec_augment is True</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SEWConfig.mask_feature_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SEWConfig.mask_feature_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>mask_feature_length</strong> (<code>int</code>, <em>optional</em>, defaults to 10) &#x2014; Length of vector span along the feature axis.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SEWConfig.mask_feature_min_masks" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SEWConfig.mask_feature_min_masks"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>mask_feature_min_masks</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; The minimum number of masks of length <code>mask_feature_length</code> generated along the feature axis, each time step, irrespective of <code>mask_feature_prob</code>. 
Only relevant if <code>mask_feature_prob*len(feature_axis)/mask_feature_length &lt; mask_feature_min_masks</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SEWConfig.ctc_loss_reduction" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SEWConfig.ctc_loss_reduction"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>ctc_loss_reduction</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;mean&quot;</code>) &#x2014; Specifies the reduction to apply to the output of <code>torch.nn.CTCLoss</code>. Only relevant when training an instance of <a href="/docs/transformers/pr_16143/en/model_doc/sew#transformers.SEWForCTC">SEWForCTC</a>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SEWConfig.ctc_zero_infinity" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SEWConfig.ctc_zero_infinity"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>ctc_zero_infinity</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to zero infinite losses and the associated gradients of <code>torch.nn.CTCLoss</code>. Infinite losses mainly occur when the inputs are too short to be aligned to the targets. 
Only relevant when training an instance of <a href="/docs/transformers/pr_16143/en/model_doc/sew#transformers.SEWForCTC">SEWForCTC</a>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SEWConfig.use_weighted_layer_sum" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SEWConfig.use_weighted_layer_sum"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_weighted_layer_sum</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to use a weighted average of layer outputs with learned weights. Only relevant when using an instance of <a href="/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2ForSequenceClassification">Wav2Vec2ForSequenceClassification</a>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SEWConfig.classifier_proj_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SEWConfig.classifier_proj_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>classifier_proj_size</strong> (<code>int</code>, <em>optional</em>, defaults to 256) &#x2014; Dimensionality of the projection before token mean-pooling for classification.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>This is the configuration class to store the configuration of a <a href="/docs/transformers/pr_16143/en/model_doc/sew#transformers.SEWModel">SEWModel</a>. It is used to instantiate a SEW model according to the specified arguments, defining the model architecture. 
Instantiating a configuration with the defaults will yield a similar configuration to that of the SEW <a href="https://huggingface.co/asapp/sew-tiny-100k" rel="nofollow">asapp/sew-tiny-100k</a> architecture.</p> <p>Configuration objects inherit from <a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a> and can be used to control the model outputs. Read the documentation from <a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a> for more information.</p> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> SEWModel, SEWConfig <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a SEW asapp/sew-tiny-100k style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = SEWConfig() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a model from the asapp/sew-tiny-100k style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = SEWModel(configuration) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = model.config<!-- HTML_TAG_END --></pre></div></div> <h2 class="relative group"><a id="transformers.SEWModel" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SEWModel"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 
11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>SEWModel </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.SEWModel"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">SEWModel</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.SEWModel" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.SEWModel"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/sew/modeling_sew.py#L840" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60">: SEWConfig</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SEWModel.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.SEWModel.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/sew#transformers.SEWConfig">SEWConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>The bare SEW Model transformer outputting raw hidden-states without any specific head on top. SEW was proposed in <a href="https://arxiv.org/abs/2109.06870" rel="nofollow">Performance-Efficiency Trade-offs in Unsupervised Pre-training for Speech Recognition</a> by Felix Wu, Kwangyoun Kim, Jing Pan, Kyu Han, Kilian Q. Weinberger, Yoav Artzi.</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its models (such as downloading or saving etc.).</p> <p>This model is a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.SEWModel.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.SEWModel.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.SEWModel.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/sew/modeling_sew.py#L907" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_values<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">mask_time_indices<span class="opacity-60"> 
= None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.BaseModelOutput" >transformers.modeling_outputs.BaseModelOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SEWModel.forward.input_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SEWModel.forward.input_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Float values of input raw speech waveform. Values can be obtained by loading a <em>.flac</em> or <em>.wav</em> audio file into an array of type <em>List[float]</em> or a <em>numpy.ndarray</em>, <em>e.g.</em> via the soundfile library (<em>pip install soundfile</em>). To prepare the array into <em>input_values</em>, the <a href="/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor">Wav2Vec2Processor</a> should be used for padding and conversion into a tensor of type <em>torch.FloatTensor</em>. 
See <a href="/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor.__call__">Wav2Vec2Processor.__call__()</a> for details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SEWModel.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SEWModel.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing convolution and attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SEWModel.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SEWModel.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SEWModel.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SEWModel.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SEWModel.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SEWModel.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.SEWModel.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.BaseModelOutput" >transformers.modeling_outputs.BaseModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a 
href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.BaseModelOutput" >transformers.modeling_outputs.BaseModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/sew#transformers.SEWConfig" >SEWConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) — Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/sew#transformers.SEWModel">SEWModel</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; 
"></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Wav2Vec2Processor, SEWModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span>dataset = load_dataset(<span class="hljs-string">&quot;hf-internal-testing/librispeech_asr_demo&quot;</span>, <span class="hljs-string">&quot;clean&quot;</span>, split=<span class="hljs-string">&quot;validation&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>dataset = dataset.sort(<span class="hljs-string">&quot;id&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>sampling_rate = dataset.features[<span class="hljs-string">&quot;audio&quot;</span>].sampling_rate <span class="hljs-meta">&gt;&gt;&gt; </span>processor = Wav2Vec2Processor.from_pretrained(<span class="hljs-string">&quot;asapp/sew-tiny-100k-ft-ls100h&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = SEWModel.from_pretrained(<span class="hljs-string">&quot;asapp/sew-tiny-100k-ft-ls100h&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># audio file is decoded on the fly</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = processor(dataset[<span class="hljs-number">0</span>][<span class="hljs-string">&quot;audio&quot;</span>][<span class="hljs-string">&quot;array&quot;</span>], sampling_rate=sampling_rate, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">with</span> torch.no_grad(): <span class="hljs-meta">... 
</span> outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">list</span>(last_hidden_states.shape) [<span class="hljs-number">1</span>, <span class="hljs-number">292</span>, <span class="hljs-number">512</span>]<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.SEWForCTC" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SEWForCTC"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>SEWForCTC </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.SEWForCTC"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">SEWForCTC</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.SEWForCTC" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.SEWForCTC"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 
11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/sew/modeling_sew.py#L970" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SEWForCTC.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SEWForCTC.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/sew#transformers.SEWConfig">SEWConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>SEW Model with a <code>language modeling</code> head on top for Connectionist Temporal Classification (CTC). SEW was proposed in <a href="https://arxiv.org/abs/2109.06870" rel="nofollow">Performance-Efficiency Trade-offs in Unsupervised Pre-training for Speech Recognition</a> by Felix Wu, Kwangyoun Kim, Jing Pan, Kyu Han, Kilian Q. Weinberger, Yoav Artzi.</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its models (such as downloading or saving etc.).</p> <p>This model is a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and behavior.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.SEWForCTC.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.SEWForCTC.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.SEWForCTC.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/sew/modeling_sew.py#L1011" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_values<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span 
class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60"> = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.CausalLMOutput" >transformers.modeling_outputs.CausalLMOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SEWForCTC.forward.input_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SEWForCTC.forward.input_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Float values of input raw speech waveform. Values can be obtained by loading a <em>.flac</em> or <em>.wav</em> audio file into an array of type <em>List[float]</em> or a <em>numpy.ndarray</em>, <em>e.g.</em> via the soundfile library (<em>pip install soundfile</em>). To prepare the array into <em>input_values</em>, the <a href="/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor">Wav2Vec2Processor</a> should be used for padding and conversion into a tensor of type <em>torch.FloatTensor</em>. 
See <a href="/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor.__call__">Wav2Vec2Processor.__call__()</a> for details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SEWForCTC.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SEWForCTC.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing convolution and attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SEWForCTC.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SEWForCTC.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SEWForCTC.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SEWForCTC.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SEWForCTC.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SEWForCTC.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SEWForCTC.forward.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SEWForCTC.forward.labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 
256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_length)</code>, <em>optional</em>) &#x2014; Labels for connectionist temporal classification. Note that <code>target_length</code> has to be smaller or equal to the sequence length of the output logits. Indices are selected in <code>[-100, 0, ..., config.vocab_size - 1]</code>. All labels set to <code>-100</code> are ignored (masked), the loss is only computed for labels in <code>[0, ..., config.vocab_size - 1]</code>.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.SEWForCTC.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.CausalLMOutput" >transformers.modeling_outputs.CausalLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.CausalLMOutput" >transformers.modeling_outputs.CausalLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/sew#transformers.SEWConfig" >SEWConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) — Language modeling loss (for next-token prediction).</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) — Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, 
used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/sew#transformers.SEWForCTC">SEWForCTC</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Wav2Vec2Processor, SEWForCTC <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>dataset = load_dataset(<span class="hljs-string">&quot;hf-internal-testing/librispeech_asr_demo&quot;</span>, <span class="hljs-string">&quot;clean&quot;</span>, split=<span class="hljs-string">&quot;validation&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>dataset = dataset.sort(<span class="hljs-string">&quot;id&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>sampling_rate = dataset.features[<span class="hljs-string">&quot;audio&quot;</span>].sampling_rate <span class="hljs-meta">&gt;&gt;&gt; </span>processor = Wav2Vec2Processor.from_pretrained(<span class="hljs-string">&quot;asapp/sew-tiny-100k-ft-ls100h&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = SEWForCTC.from_pretrained(<span class="hljs-string">&quot;asapp/sew-tiny-100k-ft-ls100h&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># audio file is decoded on the fly</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = processor(dataset[<span class="hljs-number">0</span>][<span class="hljs-string">&quot;audio&quot;</span>][<span 
class="hljs-string">&quot;array&quot;</span>], sampling_rate=sampling_rate, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">with</span> torch.no_grad(): <span class="hljs-meta">... </span> logits = model(**inputs).logits <span class="hljs-meta">&gt;&gt;&gt; </span>predicted_ids = torch.argmax(logits, dim=-<span class="hljs-number">1</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># transcribe speech</span> <span class="hljs-meta">&gt;&gt;&gt; </span>transcription = processor.batch_decode(predicted_ids) <span class="hljs-meta">&gt;&gt;&gt; </span>transcription[<span class="hljs-number">0</span>] <span class="hljs-string">&#x27;MISTER QUILTER IS THE APPOSTILE OF THE MIDDLE CLASSES AND WE ARE GLAD TO WELCOME HIS GOSPOLLE&#x27;</span><!-- HTML_TAG_END --></pre></div> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">with</span> processor.as_target_processor(): <span class="hljs-meta">... 
</span> inputs[<span class="hljs-string">&quot;labels&quot;</span>] = processor(dataset[<span class="hljs-number">0</span>][<span class="hljs-string">&quot;text&quot;</span>], return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># compute loss</span> <span class="hljs-meta">&gt;&gt;&gt; </span>loss = model(**inputs).loss <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">round</span>(loss.item(), <span class="hljs-number">2</span>) <span class="hljs-number">0.42</span><!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.SEWForSequenceClassification" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SEWForSequenceClassification"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>SEWForSequenceClassification </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.SEWForSequenceClassification"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">SEWForSequenceClassification</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.SEWForSequenceClassification" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.SEWForSequenceClassification"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 
8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/sew/modeling_sew.py#L1101" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SEWForSequenceClassification.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SEWForSequenceClassification.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/sew#transformers.SEWConfig">SEWConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>SEW Model with a sequence classification head on top (a linear layer over the pooled output) for tasks like SUPERB Keyword Spotting.</p> <p>SEW was proposed in <a href="https://arxiv.org/abs/2109.06870" rel="nofollow">Performance-Efficiency Trade-offs in Unsupervised Pre-training for Speech Recognition</a> by Felix Wu, Kwangyoun Kim, Jing Pan, Kyu Han, Kilian Q. 
Weinberger, Yoav Artzi.</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its models (such as downloading or saving etc.).</p> <p>This model is a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and behavior.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.SEWForSequenceClassification.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.SEWForSequenceClassification.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.SEWForSequenceClassification.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/sew/modeling_sew.py#L1146" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded 
hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_values<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60"> = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SEWForSequenceClassification.forward.input_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SEWForSequenceClassification.forward.input_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Float values of input raw speech waveform. Values can be obtained by loading a <em>.flac</em> or <em>.wav</em> audio file into an array of type <em>List[float]</em> or a <em>numpy.ndarray</em>, <em>e.g.</em> via the soundfile library (<em>pip install soundfile</em>). 
To prepare the array into <em>input_values</em>, the <a href="/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor">Wav2Vec2Processor</a> should be used for padding and conversion into a tensor of type <em>torch.FloatTensor</em>. See <a href="/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor.__call__">Wav2Vec2Processor.__call__()</a> for details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SEWForSequenceClassification.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SEWForSequenceClassification.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing convolution and attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SEWForSequenceClassification.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SEWForSequenceClassification.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SEWForSequenceClassification.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SEWForSequenceClassification.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SEWForSequenceClassification.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SEWForSequenceClassification.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SEWForSequenceClassification.forward.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SEWForSequenceClassification.forward.labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" 
aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.SEWForSequenceClassification.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/sew#transformers.SEWConfig" >SEWConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) — Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) — Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, 
sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/sew#transformers.SEWForSequenceClassification">SEWForSequenceClassification</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Wav2Vec2FeatureExtractor, SEWForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>dataset = load_dataset(<span class="hljs-string">&quot;hf-internal-testing/librispeech_asr_demo&quot;</span>, <span class="hljs-string">&quot;clean&quot;</span>, split=<span class="hljs-string">&quot;validation&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>dataset = dataset.sort(<span class="hljs-string">&quot;id&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>sampling_rate = dataset.features[<span class="hljs-string">&quot;audio&quot;</span>].sampling_rate <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(<span class="hljs-string">&quot;anton-l/sew-mid-100k-ft-keyword-spotting&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = SEWForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;anton-l/sew-mid-100k-ft-keyword-spotting&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># audio file 
is decoded on the fly</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = feature_extractor(dataset[<span class="hljs-number">0</span>][<span class="hljs-string">&quot;audio&quot;</span>][<span class="hljs-string">&quot;array&quot;</span>], sampling_rate=sampling_rate, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">with</span> torch.no_grad(): <span class="hljs-meta">... </span> logits = model(**inputs).logits <span class="hljs-meta">&gt;&gt;&gt; </span>predicted_class_ids = torch.argmax(logits, dim=-<span class="hljs-number">1</span>).item() <span class="hljs-meta">&gt;&gt;&gt; </span>predicted_label = model.config.id2label[predicted_class_ids] <span class="hljs-meta">&gt;&gt;&gt; </span>predicted_label <span class="hljs-string">&#x27;_unknown_&#x27;</span><!-- HTML_TAG_END --></pre></div> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># compute loss - target_label is e.g. &quot;down&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>target_label = model.config.id2label[<span class="hljs-number">0</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>inputs[<span class="hljs-string">&quot;labels&quot;</span>] = torch.tensor([model.config.label2id[target_label]]) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = model(**inputs).loss <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">round</span>(loss.item(), <span class="hljs-number">2</span>) <span class="hljs-number">9.52</span><!-- HTML_TAG_END --></pre></div></div></div> <script type="module" data-hydrate="m9fkex"> import { start } from "/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"; start({ target: document.querySelector('[data-hydrate="m9fkex"]').parentNode, paths: {"base":"/docs/transformers/pr_16143/en","assets":"/docs/transformers/pr_16143/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"), import("/docs/transformers/pr_16143/en/_app/pages/model_doc/sew.mdx-a39f8b59.js") ], params: {} } }); </script>
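<p>The snippets above classify a single clip. Because <em>input_values</em> accepts a batch, several waveforms of different lengths can also be padded together and classified in one forward pass. Below is a minimal batched-inference sketch reusing the same <em>anton-l/sew-mid-100k-ft-keyword-spotting</em> checkpoint: with <em>padding=True</em> the feature extractor pads the shorter clip and returns an <em>attention_mask</em> only when the underlying feature extractor is configured to use one, so forwarding <em>**inputs</em> passes exactly what the model expects.</p> <pre>
from transformers import Wav2Vec2FeatureExtractor, SEWForSequenceClassification
from datasets import load_dataset
import torch

dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation")
sampling_rate = dataset.features["audio"].sampling_rate

feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("anton-l/sew-mid-100k-ft-keyword-spotting")
model = SEWForSequenceClassification.from_pretrained("anton-l/sew-mid-100k-ft-keyword-spotting")

# pad two clips of different lengths into a single batch
inputs = feature_extractor(
    [dataset[0]["audio"]["array"], dataset[1]["audio"]["array"]],
    sampling_rate=sampling_rate,
    padding=True,
    return_tensors="pt",
)

with torch.no_grad():
    logits = model(**inputs).logits  # shape: (2, config.num_labels)

# map each row of logits back to its label string
predicted_labels = [model.config.id2label[i] for i in logits.argmax(dim=-1).tolist()]
</pre>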
213
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en
hf_public_repos/doc-build-dev/transformers/pr_16143/en/model_doc/data2vec.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;data2vec&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;overview&quot;,&quot;title&quot;:&quot;Overview&quot;},{&quot;local&quot;:&quot;transformers.Data2VecTextConfig&quot;,&quot;title&quot;:&quot;Data2VecTextConfig&quot;},{&quot;local&quot;:&quot;transformers.Data2VecAudioConfig&quot;,&quot;title&quot;:&quot;Data2VecAudioConfig&quot;},{&quot;local&quot;:&quot;transformers.Data2VecAudioModel&quot;,&quot;title&quot;:&quot;Data2VecAudioModel&quot;},{&quot;local&quot;:&quot;transformers.Data2VecAudioForAudioFrameClassification&quot;,&quot;title&quot;:&quot;Data2VecAudioForAudioFrameClassification&quot;},{&quot;local&quot;:&quot;transformers.Data2VecAudioForCTC&quot;,&quot;title&quot;:&quot;Data2VecAudioForCTC&quot;},{&quot;local&quot;:&quot;transformers.Data2VecAudioForSequenceClassification&quot;,&quot;title&quot;:&quot;Data2VecAudioForSequenceClassification&quot;},{&quot;local&quot;:&quot;transformers.Data2VecAudioForXVector&quot;,&quot;title&quot;:&quot;Data2VecAudioForXVector&quot;},{&quot;local&quot;:&quot;transformers.Data2VecTextModel&quot;,&quot;title&quot;:&quot;Data2VecTextModel&quot;},{&quot;local&quot;:&quot;transformers.Data2VecTextForCausalLM&quot;,&quot;title&quot;:&quot;Data2VecTextForCausalLM&quot;},{&quot;local&quot;:&quot;transformers.Data2VecTextForMaskedLM&quot;,&quot;title&quot;:&quot;Data2VecTextForMaskedLM&quot;},{&quot;local&quot;:&quot;transformers.Data2VecTextForSequenceClassification&quot;,&quot;title&quot;:&quot;Data2VecTextForSequenceClassification&quot;},{&quot;local&quot;:&quot;transformers.Data2VecTextForMultipleChoice&quot;,&quot;title&quot;:&quot;Data2VecTextForMultipleChoice&quot;},{&quot;local&quot;:&quot;transformers.Data2VecTextForTokenClassification&quot;,&quot;title&quot;:&quot;Data2VecTextForTokenClassification&quot;},{&quot;local&quot;:&quot;transformers.Data2VecTextForQuestionAnswering&quot;,&quot;title&quot;:&quot;Data2VecTextForQuestionAnswering&quot;}],&quot;title&quot;:&quot;Data2Vec&quot;}" data-svelte="svelte-1phssyn"> <link rel="stylesheet" href="/docs/transformers/pr_16143/en/_app/assets/pages/__layout.svelte-a5c8879b.css"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/vendor-4833417e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/paths-4b3c6e7e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/model_doc/data2vec.mdx-72db0e89.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Tip-fffd6df1.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Docstring-4f315ed9.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/IconCopyLink-4b81c553.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CodeBlock-6a3d1b46.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CopyButton-dacfbfaf.js"> <h1 class="relative group"><a id="data2vec" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#data2vec"><span><svg class="" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Data2Vec </span></h1> <h2 class="relative group"><a id="overview" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#overview"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Overview </span></h2> <p>The Data2Vec model was proposed in <a href="https://arxiv.org/pdf/2202.03555" rel="nofollow">data2vec: A General Framework for Self-supervised Learning in Speech, Vision and Language</a> by Alexei Baevski, Wei-Ning Hsu, Qiantong Xu, Arun Babu, Jiatao Gu and Michael Auli. Data2Vec proposes a unified framework for self-supervised learning across different data modalities - text, audio and images. Importantly, predicted targets for pre-training are contextualized latent representations of the inputs, rather than modality-specific, context-independent targets.</p> <p>The abstract from the paper is the following:</p> <p><em>While the general idea of self-supervised learning is identical across modalities, the actual algorithms and objectives differ widely because they were developed with a single modality in mind. To get us closer to general self-supervised learning, we present data2vec, a framework that uses the same learning method for either speech, NLP or computer vision. The core idea is to predict latent representations of the full input data based on a masked view of the input in a selfdistillation setup using a standard Transformer architecture. Instead of predicting modality-specific targets such as words, visual tokens or units of human speech which are local in nature, data2vec predicts contextualized latent representations that contain information from the entire input. Experiments on the major benchmarks of speech recognition, image classification, and natural language understanding demonstrate a new state of the art or competitive performance to predominant approaches. 
Models and code are available at <a href="http://www.github.com/pytorch/fairseq/tree/master/examples/data2vec" rel="nofollow">www.github.com/pytorch/fairseq/tree/master/examples/data2vec</a>.</em></p> <p>Tips:</p> <ul><li>Both Data2VecAudio and Data2VecText have been trained using the same self-supervised learning method. In the case of Data2VecText, preprocessing is identical to <a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaModel">RobertaModel</a>, including tokenization.</li></ul> <p>This model was contributed by <a href="https://huggingface.co/edugp" rel="nofollow">edugp</a>. The original code can be found <a href="https://github.com/pytorch/fairseq/tree/main/examples/data2vec" rel="nofollow">here</a>.</p> <h2 class="relative group"><a id="transformers.Data2VecTextConfig" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextConfig"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Data2VecTextConfig </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Data2VecTextConfig"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">Data2VecTextConfig</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.Data2VecTextConfig" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Data2VecTextConfig"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" 
viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/data2vec/configuration_data2vec_text.py#L31" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">vocab_size<span class="opacity-60"> = 30522</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_size<span class="opacity-60"> = 768</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_hidden_layers<span class="opacity-60"> = 12</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_attention_heads<span class="opacity-60"> = 12</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">intermediate_size<span class="opacity-60"> = 3072</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_act<span class="opacity-60"> = &#39;gelu&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_dropout_prob<span class="opacity-60"> = 0.1</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_probs_dropout_prob<span class="opacity-60"> = 0.1</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_position_embeddings<span class="opacity-60"> = 512</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">type_vocab_size<span class="opacity-60"> = 2</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">initializer_range<span class="opacity-60"> = 0.02</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">layer_norm_eps<span class="opacity-60"> = 1e-12</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">pad_token_id<span class="opacity-60"> = 1</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">bos_token_id<span class="opacity-60"> = 0</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eos_token_id<span class="opacity-60"> = 2</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_embedding_type<span class="opacity-60"> = &#39;absolute&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_cache<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">classifier_dropout<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextConfig.vocab_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextConfig.vocab_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 30522) &#x2014; Vocabulary size of the DATA2VEC model. 
Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed when calling <code>Data2VecModel</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextConfig.hidden_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextConfig.hidden_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_size</strong> (<code>int</code>, <em>optional</em>, defaults to 768) &#x2014; Dimensionality of the encoder layers and the pooler layer.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextConfig.num_hidden_layers" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextConfig.num_hidden_layers"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_hidden_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of hidden layers in the Transformer encoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextConfig.num_attention_heads" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextConfig.num_attention_heads"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 
11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextConfig.intermediate_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextConfig.intermediate_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>intermediate_size</strong> (<code>int</code>, <em>optional</em>, defaults to 3072) &#x2014; Dimensionality of the &#x201C;intermediate&#x201D; (often named feed-forward) layer in the Transformer encoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextConfig.hidden_act" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextConfig.hidden_act"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_act</strong> (<code>str</code> or <code>Callable</code>, <em>optional</em>, defaults to <code>&quot;gelu&quot;</code>) &#x2014; The non-linear activation function (function or string) in 
the encoder and pooler. If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;silu&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextConfig.hidden_dropout_prob" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextConfig.hidden_dropout_prob"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextConfig.attention_probs_dropout_prob" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextConfig.attention_probs_dropout_prob"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_probs_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout ratio for the attention probabilities.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextConfig.max_position_embeddings" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextConfig.max_position_embeddings"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" 
role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_position_embeddings</strong> (<code>int</code>, <em>optional</em>, defaults to 512) &#x2014; The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextConfig.type_vocab_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextConfig.type_vocab_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>type_vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 2) &#x2014; The vocabulary size of the <code>token_type_ids</code> passed when calling <code>Data2VecModel</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextConfig.initializer_range" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextConfig.initializer_range"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- 
HTML_TAG_START --><strong>initializer_range</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextConfig.layer_norm_eps" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextConfig.layer_norm_eps"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>layer_norm_eps</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-12) &#x2014; The epsilon used by the layer normalization layers.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextConfig.position_embedding_type" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextConfig.position_embedding_type"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_embedding_type</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;absolute&quot;</code>) &#x2014; Type of position embedding. Choose one of <code>&quot;absolute&quot;</code>, <code>&quot;relative_key&quot;</code>, <code>&quot;relative_key_query&quot;</code>. For positional embeddings use <code>&quot;absolute&quot;</code>. For more information on <code>&quot;relative_key&quot;</code>, please refer to <a href="https://arxiv.org/abs/1803.02155" rel="nofollow">Self-Attention with Relative Position Representations (Shaw et al.)</a>. 
For more information on <code>&quot;relative_key_query&quot;</code>, please refer to <em>Method 4</em> in <a href="https://arxiv.org/abs/2009.13658" rel="nofollow">Improve Transformer Models with Better Relative Position Embeddings (Huang et al.)</a>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextConfig.use_cache" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextConfig.use_cache"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not the model should return the last key/values attentions (not used by all models). Only relevant if <code>config.is_decoder=True</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextConfig.classifier_dropout" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextConfig.classifier_dropout"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>classifier_dropout</strong> (<code>float</code>, <em>optional</em>) &#x2014; The dropout ratio for the classification head.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>This is the configuration class to store the configuration of a <a href="/docs/transformers/pr_16143/en/model_doc/data2vec#transformers.Data2VecTextModel">Data2VecTextModel</a>. It is used to instantiate a Data2VecText model according to the specified arguments, defining the model architecture. 
Instantiating a configuration with the defaults will yield a similar configuration to that of the Data2VecText <a href="https://huggingface.co/facebook/data2vec-text-base" rel="nofollow">facebook/data2vec-text-base</a> architecture.</p> <p>Configuration objects inherit from <a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a> and can be used to control the model outputs. Read the documentation from <a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a> for more information.</p> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Data2VecTextModel, Data2VecTextConfig <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a Data2VecText facebook/data2vec-text-base style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = Data2VecTextConfig() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a model from the facebook/data2vec-text-base style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = Data2VecTextModel(configuration) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = model.config<!-- HTML_TAG_END --></pre></div></div> <h2 class="relative group"><a id="transformers.Data2VecAudioConfig" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecAudioConfig"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 
0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Data2VecAudioConfig </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Data2VecAudioConfig"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">Data2VecAudioConfig</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.Data2VecAudioConfig" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Data2VecAudioConfig"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/data2vec/configuration_data2vec_audio.py#L31" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">vocab_size<span class="opacity-60"> = 32</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_size<span class="opacity-60"> = 768</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_hidden_layers<span class="opacity-60"> = 12</span></span> </span><span 
class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_attention_heads<span class="opacity-60"> = 12</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">intermediate_size<span class="opacity-60"> = 3072</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_act<span class="opacity-60"> = &#39;gelu&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_dropout<span class="opacity-60"> = 0.1</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">activation_dropout<span class="opacity-60"> = 0.1</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_dropout<span class="opacity-60"> = 0.1</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">feat_proj_dropout<span class="opacity-60"> = 0.0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">final_dropout<span class="opacity-60"> = 0.1</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">layerdrop<span class="opacity-60"> = 0.1</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">initializer_range<span class="opacity-60"> = 0.02</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">layer_norm_eps<span class="opacity-60"> = 1e-05</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">feat_extract_activation<span class="opacity-60"> = &#39;gelu&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">conv_dim<span class="opacity-60"> = (512, 512, 512, 512, 512, 512, 512)</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">conv_stride<span class="opacity-60"> = (5, 2, 2, 2, 2, 2, 2)</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">conv_kernel<span class="opacity-60"> = (10, 3, 3, 3, 3, 2, 2)</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">conv_bias<span class="opacity-60"> = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_conv_pos_embedding_groups<span class="opacity-60"> = 16</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black 
hover:text-white dark:hover:bg-white dark:hover:text-black">conv_pos_kernel_size<span class="opacity-60"> = 19</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_conv_pos_embeddings<span class="opacity-60"> = 5</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">mask_time_prob<span class="opacity-60"> = 0.05</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">mask_time_length<span class="opacity-60"> = 10</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">mask_time_min_masks<span class="opacity-60"> = 2</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">mask_feature_prob<span class="opacity-60"> = 0.0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">mask_feature_length<span class="opacity-60"> = 10</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">mask_feature_min_masks<span class="opacity-60"> = 0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">ctc_loss_reduction<span class="opacity-60"> = &#39;sum&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">ctc_zero_infinity<span class="opacity-60"> = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_weighted_layer_sum<span class="opacity-60"> = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">classifier_proj_size<span class="opacity-60"> = 256</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tdnn_dim<span class="opacity-60"> = (512, 512, 512, 512, 1500)</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tdnn_kernel<span class="opacity-60"> = (5, 3, 3, 1, 1)</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tdnn_dilation<span class="opacity-60"> = (1, 2, 3, 1, 1)</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">xvector_output_dim<span class="opacity-60"> = 512</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_token_id<span class="opacity-60"> = 0</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">bos_token_id<span 
class="opacity-60"> = 1</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eos_token_id<span class="opacity-60"> = 2</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">add_adapter<span class="opacity-60"> = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">adapter_kernel_size<span class="opacity-60"> = 3</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">adapter_stride<span class="opacity-60"> = 2</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_adapter_layers<span class="opacity-60"> = 3</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_size<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecAudioConfig.vocab_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecAudioConfig.vocab_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 32) &#x2014; Vocabulary size of the Data2VecAudio model. Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed when calling <a href="/docs/transformers/pr_16143/en/model_doc/data2vec#transformers.Data2VecAudioModel">Data2VecAudioModel</a> or <code>TFData2VecAudioModel</code>. Vocabulary size of the model. 
Defines the different tokens that can be represented by the <em>inputs_ids</em> passed to the forward method of <a href="/docs/transformers/pr_16143/en/model_doc/data2vec#transformers.Data2VecAudioModel">Data2VecAudioModel</a>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecAudioConfig.hidden_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecAudioConfig.hidden_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_size</strong> (<code>int</code>, <em>optional</em>, defaults to 768) &#x2014; Dimensionality of the encoder layers and the pooler layer.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecAudioConfig.num_hidden_layers" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecAudioConfig.num_hidden_layers"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_hidden_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of hidden layers in the Transformer encoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecAudioConfig.num_attention_heads" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecAudioConfig.num_attention_heads"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecAudioConfig.intermediate_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecAudioConfig.intermediate_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>intermediate_size</strong> (<code>int</code>, <em>optional</em>, defaults to 3072) &#x2014; Dimensionality of the &#x201C;intermediate&#x201D; (i.e., feed-forward) layer in the Transformer encoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecAudioConfig.hidden_act" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecAudioConfig.hidden_act"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_act</strong> (<code>str</code> or <code>function</code>, <em>optional</em>, defaults to 
<code>&quot;gelu&quot;</code>) &#x2014; The non-linear activation function (function or string) in the encoder and pooler. If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;selu&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecAudioConfig.hidden_dropout" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecAudioConfig.hidden_dropout"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecAudioConfig.attention_dropout" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecAudioConfig.attention_dropout"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout ratio for the attention probabilities.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecAudioConfig.final_dropout" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecAudioConfig.final_dropout"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>final_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability for the final projection layer of <a href="/docs/transformers/pr_16143/en/model_doc/data2vec#transformers.Data2VecAudioForCTC">Data2VecAudioForCTC</a>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecAudioConfig.initializer_range" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecAudioConfig.initializer_range"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>initializer_range</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecAudioConfig.layer_norm_eps" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecAudioConfig.layer_norm_eps"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 
0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>layer_norm_eps</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-12) &#x2014; The epsilon used by the layer normalization layers.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecAudioConfig.feat_proj_dropout" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecAudioConfig.feat_proj_dropout"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>feat_proj_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout probability for output of the feature encoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecAudioConfig.feat_extract_activation" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecAudioConfig.feat_extract_activation"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>feat_extract_activation</strong> (<code>str, </code>optional<code>, defaults to </code>&#x201C;gelu&#x201D;<code>) -- The non-linear activation function (function or string) in the 1D convolutional layers of the feature extractor. 
If string, </code>&#x201C;gelu&#x201D;<code>, </code>&#x201C;relu&#x201D;<code>, </code>&#x201C;selu&#x201D;<code>and</code>&#x201C;gelu_new&#x201D;` are supported.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecAudioConfig.conv_dim" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecAudioConfig.conv_dim"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>conv_dim</strong> (<code>Tuple[int]</code>, <em>optional</em>, defaults to <code>(512, 512, 512, 512, 512, 512, 512)</code>) &#x2014; A tuple of integers defining the number of input and output channels of each 1D convolutional layer in the feature encoder. The length of <em>conv_dim</em> defines the number of 1D convolutional layers.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecAudioConfig.conv_stride" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecAudioConfig.conv_stride"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>conv_stride</strong> (<code>Tuple[int]</code>, <em>optional</em>, defaults to <code>(5, 2, 2, 2, 2, 2, 2)</code>) &#x2014; A tuple of integers defining the stride of each 1D convolutional layer in the feature encoder. 
The length of <em>conv_stride</em> defines the number of convolutional layers and has to match the length of <em>conv_dim</em>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecAudioConfig.conv_kernel" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecAudioConfig.conv_kernel"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>conv_kernel</strong> (<code>Tuple[int]</code>, <em>optional</em>, defaults to <code>(10, 3, 3, 3, 3, 2, 2)</code>) &#x2014; A tuple of integers defining the kernel size of each 1D convolutional layer in the feature encoder. The length of <em>conv_kernel</em> defines the number of convolutional layers and has to match the length of <em>conv_dim</em>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecAudioConfig.conv_bias" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecAudioConfig.conv_bias"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>conv_bias</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether the 1D convolutional layers have a bias.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecAudioConfig.num_conv_pos_embeddings" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecAudioConfig.num_conv_pos_embeddings"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_conv_pos_embeddings</strong> (<code>int</code>, <em>optional</em>, defaults to 128) &#x2014; Number of convolutional positional embeddings. Defines the kernel size of 1D convolutional positional embeddings layer.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecAudioConfig.num_conv_pos_embedding_groups" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecAudioConfig.num_conv_pos_embedding_groups"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_conv_pos_embedding_groups</strong> (<code>int</code>, <em>optional</em>, defaults to 16) &#x2014; Number of groups of 1D convolutional positional embeddings layer.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecAudioConfig.mask_time_prob" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecAudioConfig.mask_time_prob"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>mask_time_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.05) &#x2014; Percentage (between 0 and 1) of all feature vectors along the time axis which will be masked. The masking procecure generates &#x201D;mask_time_prob<em>len(time_axis)/mask_time_length&#x201D; independent masks over the axis. If reasoning from the propability of each feature vector to be chosen as the start of the vector span to be masked, </em>mask_time_prob<em> should be `prob_vector_start</em>mask_time_length`. Note that overlap may decrease the<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecAudioConfig.mask_time_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecAudioConfig.mask_time_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>mask_time_length</strong> (<code>int</code>, <em>optional</em>, defaults to 10) &#x2014; Length of vector span along the time axis.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecAudioConfig.mask_time_min_masks" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecAudioConfig.mask_time_min_masks"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>mask_time_min_masks</strong> (<code>int</code>, <em>optional</em>, defaults to 2), &#x2014; The minimum number of masks of length <code>mask_feature_length</code> generated along the time axis, each time step, irrespectively of <code>mask_feature_prob</code>. 
Only relevant if &#x201D;mask_time_prob*len(time_axis)/mask_time_length &lt; mask_time_min_masks&#x201D;<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecAudioConfig.mask_feature_prob" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecAudioConfig.mask_feature_prob"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>mask_feature_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; Percentage (between 0 and 1) of all feature vectors along the feature axis which will be masked. The masking procecure generates &#x201D;mask_feature_prob<em>len(feature_axis)/mask_time_length&#x201D; independent masks over the axis. If reasoning from the propability of each feature vector to be chosen as the start of the vector span to be masked, </em>mask_feature_prob<em> should be `prob_vector_start</em>mask_feature_length<code>. Note that overlap may decrease the actual percentage of masked vectors. 
This is only relevant if </code>apply_spec_augment is True`.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecAudioConfig.mask_feature_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecAudioConfig.mask_feature_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>mask_feature_length</strong> (<code>int</code>, <em>optional</em>, defaults to 10) &#x2014; Length of vector span along the feature axis.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecAudioConfig.mask_feature_min_masks" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecAudioConfig.mask_feature_min_masks"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>mask_feature_min_masks</strong> (<code>int</code>, <em>optional</em>, defaults to 0), &#x2014; The minimum number of masks of length <code>mask_feature_length</code> generated along the feature axis, each time step, irrespectively of <code>mask_feature_prob</code>. 
Only relevant if &#x201D;mask_feature_prob*len(feature_axis)/mask_feature_length &lt; mask_feature_min_masks&#x201D;<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecAudioConfig.ctc_loss_reduction" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecAudioConfig.ctc_loss_reduction"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>ctc_loss_reduction</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;sum&quot;</code>) &#x2014; Specifies the reduction to apply to the output of <code>torch.nn.CTCLoss</code>. Only relevant when training an instance of <a href="/docs/transformers/pr_16143/en/model_doc/data2vec#transformers.Data2VecAudioForCTC">Data2VecAudioForCTC</a>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecAudioConfig.ctc_zero_infinity" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecAudioConfig.ctc_zero_infinity"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>ctc_zero_infinity</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to zero infinite losses and the associated gradients of <code>torch.nn.CTCLoss</code>. Infinite losses mainly occur when the inputs are too short to be aligned to the targets. 
Only relevant when training an instance of <a href="/docs/transformers/pr_16143/en/model_doc/data2vec#transformers.Data2VecAudioForCTC">Data2VecAudioForCTC</a>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecAudioConfig.use_weighted_layer_sum" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecAudioConfig.use_weighted_layer_sum"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_weighted_layer_sum</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to use a weighted average of layer outputs with learned weights. Only relevant when using an instance of <a href="/docs/transformers/pr_16143/en/model_doc/data2vec#transformers.Data2VecAudioForSequenceClassification">Data2VecAudioForSequenceClassification</a>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecAudioConfig.classifier_proj_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecAudioConfig.classifier_proj_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>classifier_proj_size</strong> (<code>int</code>, <em>optional</em>, defaults to 256) &#x2014; Dimensionality of the projection before token mean-pooling for classification.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecAudioConfig.tdnn_dim" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.Data2VecAudioConfig.tdnn_dim"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tdnn_dim</strong> (<code>Tuple[int]</code>, <em>optional</em>, defaults to <code>(512, 512, 512, 512, 1500)</code>) &#x2014; A tuple of integers defining the number of output channels of each 1D convolutional layer in the <em>TDNN</em> module of the <em>XVector</em> model. The length of <em>tdnn_dim</em> defines the number of <em>TDNN</em> layers.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecAudioConfig.tdnn_kernel" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecAudioConfig.tdnn_kernel"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tdnn_kernel</strong> (<code>Tuple[int]</code>, <em>optional</em>, defaults to <code>(5, 3, 3, 1, 1)</code>) &#x2014; A tuple of integers defining the kernel size of each 1D convolutional layer in the <em>TDNN</em> module of the <em>XVector</em> model. 
The length of <em>tdnn_kernel</em> has to match the length of <em>tdnn_dim</em>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecAudioConfig.tdnn_dilation" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecAudioConfig.tdnn_dilation"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tdnn_dilation</strong> (<code>Tuple[int]</code>, <em>optional</em>, defaults to <code>(1, 2, 3, 1, 1)</code>) &#x2014; A tuple of integers defining the dilation factor of each 1D convolutional layer in <em>TDNN</em> module of the <em>XVector</em> model. The length of <em>tdnn_dilation</em> has to match the length of <em>tdnn_dim</em>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecAudioConfig.xvector_output_dim" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecAudioConfig.xvector_output_dim"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>xvector_output_dim</strong> (<code>int</code>, <em>optional</em>, defaults to 512) &#x2014; Dimensionality of the <em>XVector</em> embedding vectors.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecAudioConfig.add_adapter" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecAudioConfig.add_adapter"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" 
width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>add_adapter</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether a convolutional network should be stacked on top of the Data2VecAudio Encoder. Can be very useful for warm-starting Data2VecAudio for SpeechEncoderDecoder models.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecAudioConfig.adapter_kernel_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecAudioConfig.adapter_kernel_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>adapter_kernel_size</strong> (<code>int</code>, <em>optional</em>, defaults to 3) &#x2014; Kernel size of the convolutional layers in the adapter network. 
Only relevant if <code>add_adapter is True</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecAudioConfig.adapter_stride" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecAudioConfig.adapter_stride"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>adapter_stride</strong> (<code>int</code>, <em>optional</em>, defaults to 2) &#x2014; Stride of the convolutional layers in the adapter network. Only relevant if <code>add_adapter is True</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecAudioConfig.num_adapter_layers" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecAudioConfig.num_adapter_layers"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_adapter_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 3) &#x2014; Number of convolutional layers that should be used in the adapter network. 
Only relevant if <code>add_adapter is True</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecAudioConfig.output_hidden_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecAudioConfig.output_hidden_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_size</strong> (<code>int</code>, <em>optional</em>) &#x2014; Dimensionality of the encoder output layer. If not defined, this defaults to <em>hidden_size</em>. Only relevant if <code>add_adapter is True</code>.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>This is the configuration class to store the configuration of a <a href="/docs/transformers/pr_16143/en/model_doc/data2vec#transformers.Data2VecAudioModel">Data2VecAudioModel</a>. It is used to instantiate a Data2VecAudio model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Data2VecAudio <a href="https://huggingface.co/facebook/data2vec-audio-base-960h" rel="nofollow">facebook/data2vec-audio-base-960h</a> architecture.</p> <p>Configuration objects inherit from <a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a> and can be used to control the model outputs. 
Read the documentation from <a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a> for more information.</p> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Data2VecAudioModel, Data2VecAudioConfig <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a Data2VecAudio facebook/data2vec-audio-base-960h style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = Data2VecAudioConfig() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a model from the facebook/data2vec-audio-base-960h style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = Data2VecAudioModel(configuration) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = model.config<!-- HTML_TAG_END --></pre></div></div> <h2 class="relative group"><a id="transformers.Data2VecAudioModel" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecAudioModel"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Data2VecAudioModel </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" 
id="transformers.Data2VecAudioModel"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">Data2VecAudioModel</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.Data2VecAudioModel" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Data2VecAudioModel"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/data2vec/modeling_data2vec_audio.py#L898" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60">: Data2VecAudioConfig</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecAudioModel.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecAudioModel.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid 
meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/data2vec#transformers.Data2VecAudioConfig">Data2VecAudioConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>The bare Data2VecAudio Model transformer outputting raw hidden-states without any specific head on top. Data2VecAudio was proposed in <a href="https://arxiv.org/pdf/2202.03555" rel="nofollow">data2vec: A General Framework for Self-supervised Learning in Speech, Vision and Language</a> by Alexei Baevski, Wei-Ning Hsu, Qiantong Xu, Arun Babu, Jiatao Gu and Michael Auli.</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.).</p> <p>This model is a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Data2VecAudioModel.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.Data2VecAudioModel.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Data2VecAudioModel.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/data2vec/modeling_data2vec_audio.py#L969" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_values<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">mask_time_indices<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>transformers.models.data2vec.modeling_data2vec_audio.Data2VecAudioBaseModelOutput</code>or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecAudioModel.forward.input_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecAudioModel.forward.input_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Float values of input raw speech waveform. Values can be obtained by loading a <em>.flac</em> or <em>.wav</em> audio file into an array of type <em>List[float]</em> or a <em>numpy.ndarray</em>, <em>e.g.</em> via the soundfile library (<em>pip install soundfile</em>). To prepare the array into <em>input_values</em>, the <a href="/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor">Wav2Vec2Processor</a> should be used for padding and conversion into a tensor of type <em>torch.FloatTensor</em>. 
See <a href="/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor.__call__">Wav2Vec2Processor.__call__()</a> for details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecAudioModel.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecAudioModel.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing convolution and attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a></p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"> <p><code>attention_mask</code> should only be passed if the corresponding processor has <code>config.return_attention_mask == True</code>. For all models whose processor has <code>config.return_attention_mask == False</code>, such as <a href="https://huggingface.co/facebook/data2vec-audio-base-960h" rel="nofollow">data2vec-audio-base</a>, <code>attention_mask</code> should <strong>not</strong> be passed to avoid degraded performance when doing batched inference. For such models <code>input_values</code> should simply be padded with 0 and passed without <code>attention_mask</code>. 
Be aware that these models also yield slightly different results depending on whether <code>input_values</code> is padded or not.</p> </div><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecAudioModel.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecAudioModel.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecAudioModel.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecAudioModel.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecAudioModel.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecAudioModel.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.Data2VecAudioModel.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>transformers.models.data2vec.modeling_data2vec_audio.Data2VecAudioBaseModelOutput</code>or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <code>transformers.models.data2vec.modeling_data2vec_audio.Data2VecAudioBaseModelOutput</code>or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/data2vec#transformers.Data2VecAudioConfig" >Data2VecAudioConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) — Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>extract_features</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, conv_dim[-1])</code>) — Sequence of extracted feature vectors of the last convolutional layer of the model.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> 
(<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/data2vec#transformers.Data2VecAudioModel">Data2VecAudioModel</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Wav2Vec2Processor, Data2VecAudioModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span>dataset = load_dataset(<span class="hljs-string">&quot;hf-internal-testing/librispeech_asr_demo&quot;</span>, <span class="hljs-string">&quot;clean&quot;</span>, split=<span class="hljs-string">&quot;validation&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>dataset = dataset.sort(<span class="hljs-string">&quot;id&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>sampling_rate = dataset.features[<span class="hljs-string">&quot;audio&quot;</span>].sampling_rate <span class="hljs-meta">&gt;&gt;&gt; </span>processor = Wav2Vec2Processor.from_pretrained(<span class="hljs-string">&quot;facebook/data2vec-audio-base-960h&quot;</span>) <span 
class="hljs-meta">&gt;&gt;&gt; </span>model = Data2VecAudioModel.from_pretrained(<span class="hljs-string">&quot;facebook/data2vec-audio-base-960h&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># audio file is decoded on the fly</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = processor(dataset[<span class="hljs-number">0</span>][<span class="hljs-string">&quot;audio&quot;</span>][<span class="hljs-string">&quot;array&quot;</span>], sampling_rate=sampling_rate, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">with</span> torch.no_grad(): <span class="hljs-meta">... </span> outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">list</span>(last_hidden_states.shape) [<span class="hljs-number">1</span>, <span class="hljs-number">292</span>, <span class="hljs-number">768</span>]<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.Data2VecAudioForAudioFrameClassification" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecAudioForAudioFrameClassification"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Data2VecAudioForAudioFrameClassification </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Data2VecAudioForAudioFrameClassification"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span 
class="font-medium">transformers.</span><span class="font-semibold">Data2VecAudioForAudioFrameClassification</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.Data2VecAudioForAudioFrameClassification" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Data2VecAudioForAudioFrameClassification"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/data2vec/modeling_data2vec_audio.py#L1291" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecAudioForAudioFrameClassification.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecAudioForAudioFrameClassification.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/data2vec#transformers.Data2VecAudioConfig">Data2VecAudioConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Data2VecAudio Model with a frame classification head on top for tasks like Speaker Diarization.</p> <p>Data2VecAudio was proposed in <a href="https://arxiv.org/pdf/2202.03555" rel="nofollow">data2vec: A General Framework for Self-supervised Learning in Speech, Vision and Language</a> by Alexei Baevski, Wei-Ning Hsu, Qiantong Xu, Arun Babu, Jiatao Gu and Michael Auli.</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.).</p> <p>This model is a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Data2VecAudioForAudioFrameClassification.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.Data2VecAudioForAudioFrameClassification.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Data2VecAudioForAudioFrameClassification.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 
11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/data2vec/modeling_data2vec_audio.py#L1334" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_values<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecAudioForAudioFrameClassification.forward.input_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecAudioForAudioFrameClassification.forward.input_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Float 
values of input raw speech waveform. Values can be obtained by loading a <em>.flac</em> or <em>.wav</em> audio file into an array of type <em>List[float]</em> or a <em>numpy.ndarray</em>, <em>e.g.</em> via the soundfile library (<em>pip install soundfile</em>). To prepare the array into <em>input_values</em>, the <a href="/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor">Wav2Vec2Processor</a> should be used for padding and conversion into a tensor of type <em>torch.FloatTensor</em>. See <a href="/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor.__call__">Wav2Vec2Processor.__call__()</a> for details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecAudioForAudioFrameClassification.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecAudioForAudioFrameClassification.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing convolution and attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a></p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"> <p><code>attention_mask</code> should only be passed if the corresponding processor has <code>config.return_attention_mask == True</code>. For all models whose processor has <code>config.return_attention_mask == False</code>, such as <a href="https://huggingface.co/facebook/data2vec-audio-base-960h" rel="nofollow">data2vec-audio-base</a>, <code>attention_mask</code> should <strong>not</strong> be passed to avoid degraded performance when doing batched inference. For such models <code>input_values</code> should simply be padded with 0 and passed without <code>attention_mask</code>. 
Be aware that these models also yield slightly different results depending on whether <code>input_values</code> is padded or not.</p> </div><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecAudioForAudioFrameClassification.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecAudioForAudioFrameClassification.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecAudioForAudioFrameClassification.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecAudioForAudioFrameClassification.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecAudioForAudioFrameClassification.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecAudioForAudioFrameClassification.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecAudioForAudioFrameClassification.forward.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecAudioForAudioFrameClassification.forward.labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. 
If <code>config.num_labels == 1</code>, a regression loss is computed (Mean-Square loss); if <code>config.num_labels &gt; 1</code>, a classification loss is computed (Cross-Entropy).<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.Data2VecAudioForAudioFrameClassification.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/data2vec#transformers.Data2VecAudioConfig" >Data2VecAudioConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) — Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.num_labels)</code>) — Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attention weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/data2vec#transformers.Data2VecAudioForAudioFrameClassification">Data2VecAudioForAudioFrameClassification</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for the forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this, since the former takes care of running the pre- and post-processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div 
class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Wav2Vec2FeatureExtractor, Data2VecAudioForAudioFrameClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>dataset = load_dataset(<span class="hljs-string">&quot;hf-internal-testing/librispeech_asr_demo&quot;</span>, <span class="hljs-string">&quot;clean&quot;</span>, split=<span class="hljs-string">&quot;validation&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>dataset = dataset.sort(<span class="hljs-string">&quot;id&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>sampling_rate = dataset.features[<span class="hljs-string">&quot;audio&quot;</span>].sampling_rate <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(<span class="hljs-string">&quot;hf-internal-testing/tiny-random-data2vec-audio-frame&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = Data2VecAudioForAudioFrameClassification.from_pretrained(<span class="hljs-string">&quot;hf-internal-testing/tiny-random-data2vec-audio-frame&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># audio file is decoded on the fly</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = feature_extractor(dataset[<span class="hljs-number">0</span>][<span class="hljs-string">&quot;audio&quot;</span>][<span class="hljs-string">&quot;array&quot;</span>], return_tensors=<span class="hljs-string">&quot;pt&quot;</span>, sampling_rate=sampling_rate) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">with</span> torch.no_grad(): <span class="hljs-meta">... 
</span> logits = model(**inputs).logits <span class="hljs-meta">&gt;&gt;&gt; </span>probabilities = torch.sigmoid(logits[<span class="hljs-number">0</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># labels is a one-hot array of shape (num_frames, num_speakers)</span> <span class="hljs-meta">&gt;&gt;&gt; </span>labels = (probabilities &gt; <span class="hljs-number">0.5</span>).long() <span class="hljs-meta">&gt;&gt;&gt; </span>labels[<span class="hljs-number">0</span>].tolist() [<span class="hljs-number">1</span>, <span class="hljs-number">1</span>]<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.Data2VecAudioForCTC" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecAudioForCTC"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Data2VecAudioForCTC </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Data2VecAudioForCTC"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">Data2VecAudioForCTC</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.Data2VecAudioForCTC" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Data2VecAudioForCTC"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 
1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/data2vec/modeling_data2vec_audio.py#L1036" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecAudioForCTC.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecAudioForCTC.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/data2vec#transformers.Data2VecAudioConfig">Data2VecAudioConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Data2VecAudio Model with a <code>language modeling</code> head on top for Connectionist Temporal Classification (CTC). 
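</p> <p>For batched training, the target transcriptions are typically tokenized with the processor and padded to a common length, with padding positions set to <code>-100</code> so that they are ignored by the CTC loss (see the <code>labels</code> argument of <code>forward()</code> below). The snippet below is a minimal sketch rather than part of the upstream docstring: it assumes a <code>Wav2Vec2Processor</code> loaded as in the example further down this page, and the transcriptions are hypothetical.</p> <div class="code-block relative"><pre>&gt;&gt;&gt; # hypothetical target transcriptions for a batch of two audio clips
&gt;&gt;&gt; transcriptions = ["HELLO WORLD", "A SOMEWHAT LONGER SECOND TRANSCRIPTION"]

&gt;&gt;&gt; # tokenize the targets and pad them to the same length
&gt;&gt;&gt; with processor.as_target_processor():
...     labels = processor(transcriptions, return_tensors="pt", padding=True).input_ids

&gt;&gt;&gt; # replace padding token ids with -100 so the CTC loss ignores them
&gt;&gt;&gt; labels[labels == processor.tokenizer.pad_token_id] = -100</pre></div> <p>The resulting <code>labels</code> tensor can then be passed to the model together with the padded <code>input_values</code> to compute the CTC loss.</p> <p>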
Data2VecAudio was proposed in <a href="https://arxiv.org/pdf/2202.03555" rel="nofollow">data2vec: A General Framework for Self-supervised Learning in Speech, Vision and Language</a> by Alexei Baevski, Wei-Ning Hsu, Qiantong Xu, Arun Babu, Jiatao Gu and Michael Auli.</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its models (such as downloading or saving, etc.).</p> <p>This model is a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and behavior.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Data2VecAudioForCTC.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.Data2VecAudioForCTC.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Data2VecAudioForCTC.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/data2vec/modeling_data2vec_audio.py#L1077" 
target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_values<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60"> = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.CausalLMOutput" >transformers.modeling_outputs.CausalLMOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecAudioForCTC.forward.input_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecAudioForCTC.forward.input_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Float values of input raw speech waveform. 
Values can be obtained by loading a <em>.flac</em> or <em>.wav</em> audio file into an array of type <em>List[float]</em> or a <em>numpy.ndarray</em>, <em>e.g.</em> via the soundfile library (<em>pip install soundfile</em>). To prepare the array into <em>input_values</em>, the <a href="/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor">Wav2Vec2Processor</a> should be used for padding and conversion into a tensor of type <em>torch.FloatTensor</em>. See <a href="/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor.__call__">Wav2Vec2Processor.<strong>call</strong>()</a> for details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecAudioForCTC.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecAudioForCTC.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing convolution and attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a></p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"> <p><code>attention_mask</code> should only be passed if the corresponding processor has <code>config.return_attention_mask == True</code>. For all models whose processor has <code>config.return_attention_mask == False</code>, such as <a href="https://huggingface.co/facebook/data2vec-audio-base-960h" rel="nofollow">data2vec-audio-base</a>, <code>attention_mask</code> should <strong>not</strong> be passed to avoid degraded performance when doing batched inference. For such models <code>input_values</code> should simply be padded with 0 and passed without <code>attention_mask</code>. 
Be aware that these models also yield slightly different results depending on whether <code>input_values</code> is padded or not.</p> </div><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecAudioForCTC.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecAudioForCTC.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecAudioForCTC.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecAudioForCTC.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecAudioForCTC.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecAudioForCTC.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecAudioForCTC.forward.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecAudioForCTC.forward.labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_length)</code>, <em>optional</em>) &#x2014; Labels for connectionist temporal classification. Note that <code>target_length</code> has to be smaller or equal to the sequence length of the output logits. Indices are selected in <code>[-100, 0, ..., config.vocab_size - 1]</code>. 
All labels set to <code>-100</code> are ignored (masked); the loss is only computed for labels in <code>[0, ..., config.vocab_size - 1]</code>.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.Data2VecAudioForCTC.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.CausalLMOutput" >transformers.modeling_outputs.CausalLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.CausalLMOutput" >transformers.modeling_outputs.CausalLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/data2vec#transformers.Data2VecAudioConfig" >Data2VecAudioConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) — Language modeling loss (for next-token prediction).</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) — Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attention weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/data2vec#transformers.Data2VecAudioForCTC">Data2VecAudioForCTC</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for the forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this, since the former takes care of running the pre- and post-processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button 
class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Wav2Vec2Processor, Data2VecAudioForCTC <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>dataset = load_dataset(<span class="hljs-string">&quot;hf-internal-testing/librispeech_asr_demo&quot;</span>, <span class="hljs-string">&quot;clean&quot;</span>, split=<span class="hljs-string">&quot;validation&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>dataset = dataset.sort(<span class="hljs-string">&quot;id&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>sampling_rate = dataset.features[<span class="hljs-string">&quot;audio&quot;</span>].sampling_rate <span class="hljs-meta">&gt;&gt;&gt; </span>processor = Wav2Vec2Processor.from_pretrained(<span class="hljs-string">&quot;facebook/data2vec-audio-base-960h&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = Data2VecAudioForCTC.from_pretrained(<span class="hljs-string">&quot;facebook/data2vec-audio-base-960h&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># audio file is decoded on the fly</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = processor(dataset[<span class="hljs-number">0</span>][<span class="hljs-string">&quot;audio&quot;</span>][<span class="hljs-string">&quot;array&quot;</span>], sampling_rate=sampling_rate, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">with</span> torch.no_grad(): <span class="hljs-meta">... 
</span> logits = model(**inputs).logits <span class="hljs-meta">&gt;&gt;&gt; </span>predicted_ids = torch.argmax(logits, dim=-<span class="hljs-number">1</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># transcribe speech</span> <span class="hljs-meta">&gt;&gt;&gt; </span>transcription = processor.batch_decode(predicted_ids) <span class="hljs-meta">&gt;&gt;&gt; </span>transcription[<span class="hljs-number">0</span>] <span class="hljs-string">&#x27;MISTER QUILTER IS THE APOSTLE OF THE MIDDLE CLASSES AND WE ARE GLAD TO WELCOME HIS GOSPEL&#x27;</span><!-- HTML_TAG_END --></pre></div> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">with</span> processor.as_target_processor(): <span class="hljs-meta">... 
</span> inputs[<span class="hljs-string">&quot;labels&quot;</span>] = processor(dataset[<span class="hljs-number">0</span>][<span class="hljs-string">&quot;text&quot;</span>], return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># compute loss</span> <span class="hljs-meta">&gt;&gt;&gt; </span>loss = model(**inputs).loss <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">round</span>(loss.item(), <span class="hljs-number">2</span>) <span class="hljs-number">66.95</span><!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.Data2VecAudioForSequenceClassification" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecAudioForSequenceClassification"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Data2VecAudioForSequenceClassification </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Data2VecAudioForSequenceClassification"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">Data2VecAudioForSequenceClassification</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.Data2VecAudioForSequenceClassification" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Data2VecAudioForSequenceClassification"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/data2vec/modeling_data2vec_audio.py#L1167" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecAudioForSequenceClassification.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecAudioForSequenceClassification.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/data2vec#transformers.Data2VecAudioConfig">Data2VecAudioConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Data2VecAudio Model with a sequence classification head on top (a linear layer over the pooled output) for tasks like SUPERB Keyword Spotting.</p> <p>Data2VecAudio was proposed in <a href="https://arxiv.org/pdf/2202.03555" rel="nofollow">data2vec: A General Framework for Self-supervised Learning in Speech, Vision and Language</a> by Alexei Baevski, Wei-Ning Hsu, Qiantong Xu, Arun Babu, Jiatao Gu and Michael Auli.</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its models (such as downloading or saving, etc.).</p> <p>This model is a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and behavior.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Data2VecAudioForSequenceClassification.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.Data2VecAudioForSequenceClassification.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Data2VecAudioForSequenceClassification.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 
0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/data2vec/modeling_data2vec_audio.py#L1212" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_values<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60"> = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecAudioForSequenceClassification.forward.input_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecAudioForSequenceClassification.forward.input_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 
11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Float values of input raw speech waveform. Values can be obtained by loading a <em>.flac</em> or <em>.wav</em> audio file into an array of type <em>List[float]</em> or a <em>numpy.ndarray</em>, <em>e.g.</em> via the soundfile library (<em>pip install soundfile</em>). To prepare the array into <em>input_values</em>, the <a href="/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor">Wav2Vec2Processor</a> should be used for padding and conversion into a tensor of type <em>torch.FloatTensor</em>. See <a href="/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor.__call__">Wav2Vec2Processor.<strong>call</strong>()</a> for details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecAudioForSequenceClassification.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecAudioForSequenceClassification.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing convolution and attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a></p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"> <p><code>attention_mask</code> should only be passed if the corresponding processor has <code>config.return_attention_mask == True</code>. For all models whose processor has <code>config.return_attention_mask == False</code>, such as <a href="https://huggingface.co/facebook/data2vec-audio-base-960h" rel="nofollow">data2vec-audio-base</a>, <code>attention_mask</code> should <strong>not</strong> be passed to avoid degraded performance when doing batched inference. For such models <code>input_values</code> should simply be padded with 0 and passed without <code>attention_mask</code>. 
Be aware that these models also yield slightly different results depending on whether <code>input_values</code> is padded or not.</p> </div><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecAudioForSequenceClassification.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecAudioForSequenceClassification.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecAudioForSequenceClassification.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecAudioForSequenceClassification.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecAudioForSequenceClassification.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecAudioForSequenceClassification.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecAudioForSequenceClassification.forward.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecAudioForSequenceClassification.forward.labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. 
If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss). If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.Data2VecAudioForSequenceClassification.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/data2vec#transformers.Data2VecAudioConfig" >Data2VecAudioConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) — Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) — Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attention weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/data2vec#transformers.Data2VecAudioForSequenceClassification">Data2VecAudioForSequenceClassification</a> forward method overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for the forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre- and post-processing steps while the latter silently ignores 
them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Wav2Vec2FeatureExtractor, Data2VecAudioForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>dataset = load_dataset(<span class="hljs-string">&quot;hf-internal-testing/librispeech_asr_demo&quot;</span>, <span class="hljs-string">&quot;clean&quot;</span>, split=<span class="hljs-string">&quot;validation&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>dataset = dataset.sort(<span class="hljs-string">&quot;id&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>sampling_rate = dataset.features[<span class="hljs-string">&quot;audio&quot;</span>].sampling_rate <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(<span class="hljs-string">&quot;hf-internal-testing/tiny-random-data2vec-seq-class&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = Data2VecAudioForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;hf-internal-testing/tiny-random-data2vec-seq-class&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># audio file is decoded on the fly</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = feature_extractor(dataset[<span class="hljs-number">0</span>][<span class="hljs-string">&quot;audio&quot;</span>][<span class="hljs-string">&quot;array&quot;</span>], sampling_rate=sampling_rate, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">with</span> torch.no_grad(): <span class="hljs-meta">... 
</span> logits = model(**inputs).logits <span class="hljs-meta">&gt;&gt;&gt; </span>predicted_class_ids = torch.argmax(logits, dim=-<span class="hljs-number">1</span>).item() <span class="hljs-meta">&gt;&gt;&gt; </span>predicted_label = model.config.id2label[predicted_class_ids] <span class="hljs-meta">&gt;&gt;&gt; </span>predicted_label <span class="hljs-string">&#x27;LABEL_1&#x27;</span><!-- HTML_TAG_END --></pre></div> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># compute loss - target_label is e.g. &quot;down&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>target_label = model.config.id2label[<span class="hljs-number">0</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>inputs[<span class="hljs-string">&quot;labels&quot;</span>] = torch.tensor([model.config.label2id[target_label]]) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = model(**inputs).loss <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">round</span>(loss.item(), <span class="hljs-number">2</span>) <span class="hljs-number">0.69</span><!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.Data2VecAudioForXVector" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecAudioForXVector"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Data2VecAudioForXVector </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 
pt-3 px-2.5" id="transformers.Data2VecAudioForXVector"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">Data2VecAudioForXVector</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.Data2VecAudioForXVector" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Data2VecAudioForXVector"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/data2vec/modeling_data2vec_audio.py#L1449" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecAudioForXVector.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecAudioForXVector.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/data2vec#transformers.Data2VecAudioConfig">Data2VecAudioConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Data2VecAudio Model with an XVector feature extraction head on top for tasks like Speaker Verification.</p> <p>Data2VecAudio was proposed in <a href="https://arxiv.org/pdf/2202.03555" rel="nofollow">data2vec: A General Framework for Self-supervised Learning in Speech, Vision and Language</a> by Alexei Baevski, Wei-Ning Hsu, Qiantong Xu, Arun Babu, Jiatao Gu and Michael Auli.</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.).</p> <p>This model is a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Data2VecAudioForXVector.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.Data2VecAudioForXVector.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Data2VecAudioForXVector.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/data2vec/modeling_data2vec_audio.py#L1511" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_values<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60"> = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>transformers.models.data2vec.modeling_data2vec_audio.XVectorOutput</code>or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecAudioForXVector.forward.input_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecAudioForXVector.forward.input_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Float values of input raw speech waveform. Values can be obtained by loading a <em>.flac</em> or <em>.wav</em> audio file into an array of type <em>List[float]</em> or a <em>numpy.ndarray</em>, <em>e.g.</em> via the soundfile library (<em>pip install soundfile</em>). To prepare the array into <em>input_values</em>, the <a href="/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor">Wav2Vec2Processor</a> should be used for padding and conversion into a tensor of type <em>torch.FloatTensor</em>. 
See <a href="/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor.__call__">Wav2Vec2Processor.<strong>call</strong>()</a> for details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecAudioForXVector.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecAudioForXVector.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing convolution and attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a></p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"> <p><code>attention_mask</code> should only be passed if the corresponding processor has <code>config.return_attention_mask == True</code>. For all models whose processor has <code>config.return_attention_mask == False</code>, such as <a href="https://huggingface.co/facebook/data2vec-audio-base-960h" rel="nofollow">data2vec-audio-base</a>, <code>attention_mask</code> should <strong>not</strong> be passed to avoid degraded performance when doing batched inference. For such models <code>input_values</code> should simply be padded with 0 and passed without <code>attention_mask</code>. 
Be aware that these models also yield slightly different results depending on whether <code>input_values</code> is padded or not.</p> </div><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecAudioForXVector.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecAudioForXVector.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecAudioForXVector.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecAudioForXVector.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecAudioForXVector.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecAudioForXVector.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecAudioForXVector.forward.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecAudioForXVector.forward.labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. 
If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss). If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.Data2VecAudioForXVector.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>transformers.models.data2vec.modeling_data2vec_audio.XVectorOutput</code> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <code>transformers.models.data2vec.modeling_data2vec_audio.XVectorOutput</code> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/data2vec#transformers.Data2VecAudioConfig" >Data2VecAudioConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) — Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.xvector_output_dim)</code>) — Classification hidden states before AMSoftmax.</p> </li> <li> <p><strong>embeddings</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.xvector_output_dim)</code>) — Utterance embeddings used for vector similarity-based retrieval.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attention weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/data2vec#transformers.Data2VecAudioForXVector">Data2VecAudioForXVector</a> forward method overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for the forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre- and post-processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 
right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Wav2Vec2FeatureExtractor, Data2VecAudioForXVector <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>dataset = load_dataset(<span class="hljs-string">&quot;hf-internal-testing/librispeech_asr_demo&quot;</span>, <span class="hljs-string">&quot;clean&quot;</span>, split=<span class="hljs-string">&quot;validation&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>dataset = dataset.sort(<span class="hljs-string">&quot;id&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>sampling_rate = dataset.features[<span class="hljs-string">&quot;audio&quot;</span>].sampling_rate <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(<span class="hljs-string">&quot;hf-internal-testing/tiny-random-data2vec-xvector&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = Data2VecAudioForXVector.from_pretrained(<span class="hljs-string">&quot;hf-internal-testing/tiny-random-data2vec-xvector&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># audio file is decoded on the fly</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = feature_extractor( <span class="hljs-meta">... </span> [d[<span class="hljs-string">&quot;array&quot;</span>] <span class="hljs-keyword">for</span> d <span class="hljs-keyword">in</span> dataset[:<span class="hljs-number">2</span>][<span class="hljs-string">&quot;audio&quot;</span>]], sampling_rate=sampling_rate, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>, padding=<span class="hljs-literal">True</span> <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">with</span> torch.no_grad(): <span class="hljs-meta">... 
</span> embeddings = model(**inputs).embeddings <span class="hljs-meta">&gt;&gt;&gt; </span>embeddings = torch.nn.functional.normalize(embeddings, dim=-<span class="hljs-number">1</span>).cpu() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># the resulting embeddings can be used for cosine similarity-based retrieval</span> <span class="hljs-meta">&gt;&gt;&gt; </span>cosine_sim = torch.nn.CosineSimilarity(dim=-<span class="hljs-number">1</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>similarity = cosine_sim(embeddings[<span class="hljs-number">0</span>], embeddings[<span class="hljs-number">1</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>threshold = <span class="hljs-number">0.7</span> <span class="hljs-comment"># the optimal threshold is dataset-dependent</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">if</span> similarity &lt; threshold: <span class="hljs-meta">... </span> <span class="hljs-built_in">print</span>(<span class="hljs-string">&quot;Speakers are not the same!&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">round</span>(similarity.item(), <span class="hljs-number">2</span>) <span class="hljs-number">1.0</span><!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.Data2VecTextModel" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextModel"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Data2VecTextModel </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Data2VecTextModel"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" 
fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">Data2VecTextModel</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.Data2VecTextModel" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Data2VecTextModel"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/data2vec/modeling_data2vec_text.py#L700" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">add_pooling_layer<span class="opacity-60"> = True</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextModel.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextModel.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/data2vec#transformers.Data2VecTextConfig">Data2VecTextConfig</a>) &#x2014; Model configuration class with all the parameters of the model. 
Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>The bare Data2VecText Model transformer outputting raw hidden-states without any specific head on top. Data2VecText was proposed in <a href="https://arxiv.org/pdf/2202.03555" rel="nofollow">data2vec: A General Framework for Self-supervised Learning in Speech, Vision and Language</a> by Alexei Baevski, Wei-Ning Hsu, Qiantong Xu, Arun Babu, Jiatao Gu and Michael Auli.</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads, etc.).</p> <p>This model is also a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and behavior.</p> <p>The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of cross-attention is added between the self-attention layers, following the architecture described in <a href="https://arxiv.org/abs/1706.03762" rel="nofollow"><em>Attention is all you need</em></a> by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.</p> <p>To behave as a decoder the model needs to be initialized with the <code>is_decoder</code> argument of the configuration set to <code>True</code>. To be used in a Seq2Seq model, the model needs to be initialized with both the <code>is_decoder</code> argument and <code>add_cross_attention</code> set to <code>True</code>; <code>encoder_hidden_states</code> is then expected as an input to the forward pass.</p>
<div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Data2VecTextModel.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.Data2VecTextModel.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Data2VecTextModel.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/data2vec/modeling_data2vec_text.py#L744" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded 
hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_hidden_states<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_attention_mask<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60">: typing.Optional[typing.List[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_cache<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions" >transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextModel.forward.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextModel.forward.input_ids"><span><svg class="text-smd" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaTokenizer">RobertaTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextModel.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextModel.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextModel.forward.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextModel.forward.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextModel.forward.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextModel.forward.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextModel.forward.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextModel.forward.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextModel.forward.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextModel.forward.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextModel.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextModel.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextModel.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextModel.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextModel.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextModel.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextModel.forward.encoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextModel.forward.encoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_hidden_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder. 
Used in the cross-attention if the model is configured as a decoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextModel.forward.encoder_attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextModel.forward.encoder_attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextModel.forward.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextModel.forward.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code> of length <code>config.n_layers</code> with each tuple having 4 tensors of shape <code>(batch_size, num_heads, sequence_length - 1, embed_size_per_head)</code>) &#x2014; Contains precomputed key and value hidden states of the attention blocks. 
Can be used to speed up decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextModel.forward.use_cache" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextModel.forward.use_cache"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.Data2VecTextModel.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions" >transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions" >transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/data2vec#transformers.Data2VecTextConfig" >Data2VecTextConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) — Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>pooler_output</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, hidden_size)</code>) — Last layer hidden-state of the first token of the sequence (classification token) after further processing through the layers used for the auxiliary pretraining 
task. E.g. for BERT-family of models, this returns the classification token after processing through a linear layer and a tanh activation function. The linear layer weights are trained from the next sentence prediction (classification) objective during pretraining.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> and <code>config.add_cross_attention=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder’s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) — Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and optionally if <code>config.is_encoder_decoder=True</code> 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if <code>config.is_encoder_decoder=True</code> in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/data2vec#transformers.Data2VecTextModel">Data2VecTextModel</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button 
class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer, Data2VecTextModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/data2vec-text-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = Data2VecTextModel.from_pretrained(<span class="hljs-string">&quot;facebook/data2vec-text-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.Data2VecTextForCausalLM" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextForCausalLM"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Data2VecTextForCausalLM </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Data2VecTextForCausalLM"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white 
dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">Data2VecTextForCausalLM</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.Data2VecTextForCausalLM" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Data2VecTextForCausalLM"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/data2vec/modeling_data2vec_text.py#L885" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextForCausalLM.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextForCausalLM.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 
28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/data2vec#transformers.Data2VecTextConfig">Data2VecTextConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Data2VecText Model with a <code>language modeling</code> head on top for CLM fine-tuning. Data2VecText was proposed in <a href="https://arxiv.org/pdf/2202.03555" rel="nofollow">data2vec: A General Framework for Self-supervised Learning in Speech, Vision and Language</a> by Alexei Baevski, Wei-Ning Hsu, Qiantong Xu, Arun Babu, Jiatao Gu and Michael Auli.</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)</p> <p>This model is also a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Data2VecTextForCausalLM.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.Data2VecTextForCausalLM.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Data2VecTextForCausalLM.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/data2vec/modeling_data2vec_text.py#L911" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">token_type_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_cache<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.CausalLMOutputWithCrossAttentions" >transformers.modeling_outputs.CausalLMOutputWithCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextForCausalLM.forward.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextForCausalLM.forward.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 
8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaTokenizer">RobertaTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.__call__()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextForCausalLM.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextForCausalLM.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextForCausalLM.forward.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextForCausalLM.forward.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextForCausalLM.forward.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextForCausalLM.forward.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextForCausalLM.forward.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextForCausalLM.forward.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextForCausalLM.forward.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextForCausalLM.forward.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextForCausalLM.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextForCausalLM.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextForCausalLM.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextForCausalLM.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextForCausalLM.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextForCausalLM.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextForCausalLM.forward.encoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextForCausalLM.forward.encoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_hidden_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder. 
Used in the cross-attention if the model is configured as a decoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextForCausalLM.forward.encoder_attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextForCausalLM.forward.encoder_attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextForCausalLM.forward.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextForCausalLM.forward.labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the left-to-right language modeling loss (next word prediction). 
Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring). Tokens with indices set to <code>-100</code> are ignored (masked); the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextForCausalLM.forward.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextForCausalLM.forward.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code> of length <code>config.n_layers</code> with each tuple having 4 tensors of shape <code>(batch_size, num_heads, sequence_length - 1, embed_size_per_head)</code>) &#x2014; Contains precomputed key and value hidden states of the attention blocks. 
Can be used to speed up decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextForCausalLM.forward.use_cache" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextForCausalLM.forward.use_cache"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.Data2VecTextForCausalLM.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.CausalLMOutputWithCrossAttentions" >transformers.modeling_outputs.CausalLMOutputWithCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.CausalLMOutputWithCrossAttentions" >transformers.modeling_outputs.CausalLMOutputWithCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/data2vec#transformers.Data2VecTextConfig" >Data2VecTextConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) — Language modeling loss (for next-token prediction).</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) — Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> 
(<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Cross attentions weights after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) — Tuple of <code>torch.FloatTensor</code> tuples of length <code>config.n_layers</code>, with each tuple containing the cached key, value states of the self-attention and the cross-attention layers if model is used in encoder-decoder setting. 
Only relevant if <code>config.is_decoder = True</code>.</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/data2vec#transformers.Data2VecTextForCausalLM">Data2VecTextForCausalLM</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer, Data2VecTextForCausalLM, Data2VecTextConfig
<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch
<span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/data2vec-text-base&quot;</span>)
<span class="hljs-meta">&gt;&gt;&gt; </span>config = Data2VecTextConfig.from_pretrained(<span class="hljs-string">&quot;facebook/data2vec-text-base&quot;</span>)
<span class="hljs-meta">&gt;&gt;&gt; </span>config.is_decoder = <span class="hljs-literal">True</span>
<span class="hljs-meta">&gt;&gt;&gt; </span>model = Data2VecTextForCausalLM.from_pretrained(<span class="hljs-string">&quot;facebook/data2vec-text-base&quot;</span>, config=config)
<span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>)
<span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs)
<span class="hljs-meta">&gt;&gt;&gt; </span>prediction_logits = outputs.logits<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a 
id="transformers.Data2VecTextForMaskedLM" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextForMaskedLM"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Data2VecTextForMaskedLM </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Data2VecTextForMaskedLM"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">Data2VecTextForMaskedLM</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.Data2VecTextForMaskedLM" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Data2VecTextForMaskedLM"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" 
href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/data2vec/modeling_data2vec_text.py#L1037" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextForMaskedLM.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextForMaskedLM.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/data2vec#transformers.Data2VecTextConfig">Data2VecTextConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>data2vec Model with a <code>language modeling</code> head on top. Data2VecText was proposed in <a href="https://arxiv.org/pdf/2202.03555" rel="nofollow">data2vec: A General Framework for Self-supervised Learning in Speech, Vision and Language</a> by Alexei Baevski, Wei-Ning Hsu, Qiantong Xu, Arun Babu, Jiatao Gu and Michael Auli.</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)</p> <p>This model is also a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Data2VecTextForMaskedLM.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.Data2VecTextForMaskedLM.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Data2VecTextForMaskedLM.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/data2vec/modeling_data2vec_text.py#L1066" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">token_type_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.MaskedLMOutput" >transformers.modeling_outputs.MaskedLMOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextForMaskedLM.forward.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextForMaskedLM.forward.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 
11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaTokenizer">RobertaTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.__call__()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextForMaskedLM.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextForMaskedLM.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextForMaskedLM.forward.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextForMaskedLM.forward.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextForMaskedLM.forward.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextForMaskedLM.forward.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextForMaskedLM.forward.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextForMaskedLM.forward.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextForMaskedLM.forward.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextForMaskedLM.forward.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextForMaskedLM.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextForMaskedLM.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextForMaskedLM.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextForMaskedLM.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextForMaskedLM.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextForMaskedLM.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextForMaskedLM.forward.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextForMaskedLM.forward.labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. 
Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring). Tokens with indices set to <code>-100</code> are ignored (masked); the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextForMaskedLM.forward.kwargs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextForMaskedLM.forward.kwargs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>kwargs</strong> (<code>Dict[str, any]</code>, optional, defaults to <em>{}</em>) &#x2014; Used to hide legacy arguments that have been deprecated.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.Data2VecTextForMaskedLM.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.MaskedLMOutput" >transformers.modeling_outputs.MaskedLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.MaskedLMOutput" >transformers.modeling_outputs.MaskedLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/data2vec#transformers.Data2VecTextConfig" >Data2VecTextConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) — Masked language modeling (MLM) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) — Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape 
<code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/data2vec#transformers.Data2VecTextForMaskedLM">Data2VecTextForMaskedLM</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer, Data2VecTextForMaskedLM <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/data2vec-text-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = Data2VecTextForMaskedLM.from_pretrained(<span class="hljs-string">&quot;facebook/data2vec-text-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;The capital of France is &lt;mask&gt;.&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = tokenizer(<span class="hljs-string">&quot;The capital of France is 
Paris.&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>)[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.Data2VecTextForSequenceClassification" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextForSequenceClassification"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Data2VecTextForSequenceClassification </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Data2VecTextForSequenceClassification"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">Data2VecTextForSequenceClassification</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.Data2VecTextForSequenceClassification" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Data2VecTextForSequenceClassification"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 
1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/data2vec/modeling_data2vec_text.py#L1167" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextForSequenceClassification.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextForSequenceClassification.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/data2vec#transformers.Data2VecTextConfig">Data2VecTextConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Data2VecText Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.</p> <p>Data2VecText was proposed in <a href="https://arxiv.org/pdf/2202.03555" rel="nofollow">data2vec: A General Framework for Self-supervised Learning in Speech, Vision and Language</a> by Alexei Baevski, Wei-Ning Hsu, Qiantong Xu, Arun Babu, Jiatao Gu and Michael Auli.</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a>. 
Check the superclass documentation for the generic methods the library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads etc.)</p> <p>This model is also a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and behavior.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Data2VecTextForSequenceClassification.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.Data2VecTextForSequenceClassification.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Data2VecTextForSequenceClassification.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/data2vec/modeling_data2vec_text.py#L1181" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white
dark:hover:text-black">input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextForSequenceClassification.forward.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextForSequenceClassification.forward.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 
28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaTokenizer">RobertaTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextForSequenceClassification.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextForSequenceClassification.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextForSequenceClassification.forward.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextForSequenceClassification.forward.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextForSequenceClassification.forward.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextForSequenceClassification.forward.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextForSequenceClassification.forward.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextForSequenceClassification.forward.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextForSequenceClassification.forward.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextForSequenceClassification.forward.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextForSequenceClassification.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextForSequenceClassification.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextForSequenceClassification.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextForSequenceClassification.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextForSequenceClassification.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextForSequenceClassification.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextForSequenceClassification.forward.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextForSequenceClassification.forward.labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. 
If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), if <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.Data2VecTextForSequenceClassification.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/data2vec#transformers.Data2VecTextConfig" >Data2VecTextConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) — Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) — Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/data2vec#transformers.Data2VecTextForSequenceClassification">Data2VecTextForSequenceClassification</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores
them.</p></div> <p>Example of single-label classification:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer, Data2VecTextForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span>torch.manual_seed(<span class="hljs-number">0</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/data2vec-text-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = Data2VecTextForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;facebook/data2vec-text-base&quot;</span>, num_labels=<span class="hljs-number">2</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([<span class="hljs-number">1</span>]).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">list</span>(logits.shape) <!-- HTML_TAG_END --></pre></div> <p>Example of multi-label classification:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" 
height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer, Data2VecTextForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span>torch.manual_seed(<span class="hljs-number">0</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/data2vec-text-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = Data2VecTextForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;facebook/data2vec-text-base&quot;</span>, problem_type=<span class="hljs-string">&quot;multi_label_classification&quot;</span>, num_labels=<span class="hljs-number">2</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([[<span class="hljs-number">1</span>, <span class="hljs-number">1</span>]], dtype=torch.<span class="hljs-built_in">float</span>) <span class="hljs-comment"># need dtype=float for BCEWithLogitsLoss</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">list</span>(logits.shape) <!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.Data2VecTextForMultipleChoice" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextForMultipleChoice"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Data2VecTextForMultipleChoice </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Data2VecTextForMultipleChoice"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white 
dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">Data2VecTextForMultipleChoice</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.Data2VecTextForMultipleChoice" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Data2VecTextForMultipleChoice"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/data2vec/modeling_data2vec_text.py#L1265" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextForMultipleChoice.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextForMultipleChoice.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 
11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/data2vec#transformers.Data2VecTextConfig">Data2VecTextConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Data2VecText Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.</p> <p>Data2VecText was proposed in <a href="https://arxiv.org/pdf/2202.03555" rel="nofollow">data2vec: A General Framework for Self-supervised Learning in Speech, Vision and Language</a> by Alexei Baevski, Wei-Ning Hsu, Qiantong Xu, Arun Babu, Jiatao Gu and Michael Auli.</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads etc.)</p> <p>This model is also a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and behavior.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Data2VecTextForMultipleChoice.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.Data2VecTextForMultipleChoice.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Data2VecTextForMultipleChoice.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/data2vec/modeling_data2vec_text.py#L1278" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white
dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.MultipleChoiceModelOutput" >transformers.modeling_outputs.MultipleChoiceModelOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextForMultipleChoice.forward.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextForMultipleChoice.forward.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a 
href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaTokenizer">RobertaTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextForMultipleChoice.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextForMultipleChoice.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextForMultipleChoice.forward.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextForMultipleChoice.forward.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextForMultipleChoice.forward.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextForMultipleChoice.forward.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextForMultipleChoice.forward.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextForMultipleChoice.forward.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextForMultipleChoice.forward.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextForMultipleChoice.forward.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextForMultipleChoice.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextForMultipleChoice.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextForMultipleChoice.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextForMultipleChoice.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextForMultipleChoice.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextForMultipleChoice.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextForMultipleChoice.forward.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextForMultipleChoice.forward.labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the multiple choice classification loss. Indices should be in <code>[0, ..., num_choices-1]</code> where <code>num_choices</code> is the size of the second dimension of the input tensors. 
(See <code>input_ids</code> above)<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.Data2VecTextForMultipleChoice.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.MultipleChoiceModelOutput" >transformers.modeling_outputs.MultipleChoiceModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.MultipleChoiceModelOutput" >transformers.modeling_outputs.MultipleChoiceModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/data2vec#transformers.Data2VecTextConfig" >Data2VecTextConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <em>(1,)</em>, <em>optional</em>, returned when <code>labels</code> is provided) — Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices)</code>) — <em>num_choices</em> is the second dimension of the input tensors. (see <em>input_ids</em> above).</p> <p>Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/data2vec#transformers.Data2VecTextForMultipleChoice">Data2VecTextForMultipleChoice</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm 
focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer, Data2VecTextForMultipleChoice <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/data2vec-text-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = Data2VecTextForMultipleChoice.from_pretrained(<span class="hljs-string">&quot;facebook/data2vec-text-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice0 = <span class="hljs-string">&quot;It is eaten with a fork and a knife.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice1 = <span class="hljs-string">&quot;It is eaten while held in the hand.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor(<span class="hljs-number">0</span>).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># choice0 is correct (according to Wikipedia ;)), batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors=<span class="hljs-string">&quot;pt&quot;</span>, padding=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**{k: v.unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-keyword">for</span> k, v <span class="hljs-keyword">in</span> encoding.items()}, labels=labels) <span class="hljs-comment"># batch size is 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># the linear classifier still needs to be trained</span> <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.Data2VecTextForTokenClassification" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextForTokenClassification"><span><svg class="" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Data2VecTextForTokenClassification </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Data2VecTextForTokenClassification"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">Data2VecTextForTokenClassification</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.Data2VecTextForTokenClassification" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Data2VecTextForTokenClassification"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/data2vec/modeling_data2vec_text.py#L1360" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma 
cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextForTokenClassification.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextForTokenClassification.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/data2vec#transformers.Data2VecTextConfig">Data2VecTextConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Data2VecText Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.</p> <p>Data2VecText was proposed in <a href="https://arxiv.org/pdf/2202.03555" rel="nofollow">data2vec: A General Framework for Self-supervised Learning in Speech, Vision and Language</a> by Alexei Baevski, Wei-Ning Hsu, Qiantong Xu, Arun Babu, Jiatao Gu and Michael Auli.</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)</p> <p>This model is also a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Data2VecTextForTokenClassification.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.Data2VecTextForTokenClassification.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Data2VecTextForTokenClassification.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/data2vec/modeling_data2vec_text.py#L1378" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black 
hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextForTokenClassification.forward.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextForTokenClassification.forward.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a 
href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaTokenizer">RobertaTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextForTokenClassification.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextForTokenClassification.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextForTokenClassification.forward.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextForTokenClassification.forward.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextForTokenClassification.forward.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextForTokenClassification.forward.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextForTokenClassification.forward.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextForTokenClassification.forward.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextForTokenClassification.forward.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextForTokenClassification.forward.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextForTokenClassification.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextForTokenClassification.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextForTokenClassification.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextForTokenClassification.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextForTokenClassification.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextForTokenClassification.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextForTokenClassification.forward.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextForTokenClassification.forward.labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the token classification loss. 
Indices should be in <code>[0, ..., config.num_labels - 1]</code>.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.Data2VecTextForTokenClassification.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/data2vec#transformers.Data2VecTextConfig" >Data2VecTextConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) — Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.num_labels)</code>) — Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/data2vec#transformers.Data2VecTextForTokenClassification">Data2VecTextForTokenClassification</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer 
focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer, Data2VecTextForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/data2vec-text-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = Data2VecTextForTokenClassification.from_pretrained(<span class="hljs-string">&quot;facebook/data2vec-text-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([<span class="hljs-number">1</span>] * inputs[<span class="hljs-string">&quot;input_ids&quot;</span>].size(<span class="hljs-number">1</span>)).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.Data2VecTextForQuestionAnswering" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextForQuestionAnswering"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Data2VecTextForQuestionAnswering </span></h2> <div 
class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Data2VecTextForQuestionAnswering"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">Data2VecTextForQuestionAnswering</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.Data2VecTextForQuestionAnswering" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Data2VecTextForQuestionAnswering"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/data2vec/modeling_data2vec_text.py#L1468" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextForQuestionAnswering.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.Data2VecTextForQuestionAnswering.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/data2vec#transformers.Data2VecTextConfig">Data2VecTextConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Data2VecText Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute <code>span start logits</code> and <code>span end logits</code>).</p> <p>Data2VecText was proposed in <a href="https://arxiv.org/pdf/2202.03555" rel="nofollow">data2vec: A General Framework for Self-supervised Learning in Speech, Vision and Language</a> by Alexei Baevski, Wei-Ning Hsu, Qiantong Xu, Arun Babu, Jiatao Gu and Michael Auli.</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)</p> <p>This model is also a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Data2VecTextForQuestionAnswering.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.Data2VecTextForQuestionAnswering.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Data2VecTextForQuestionAnswering.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/data2vec/modeling_data2vec_text.py#L1482" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black 
hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">start_positions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">end_positions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.QuestionAnsweringModelOutput" >transformers.modeling_outputs.QuestionAnsweringModelOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextForQuestionAnswering.forward.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextForQuestionAnswering.forward.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START 
--><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaTokenizer">RobertaTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextForQuestionAnswering.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextForQuestionAnswering.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextForQuestionAnswering.forward.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextForQuestionAnswering.forward.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextForQuestionAnswering.forward.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextForQuestionAnswering.forward.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextForQuestionAnswering.forward.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextForQuestionAnswering.forward.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextForQuestionAnswering.forward.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextForQuestionAnswering.forward.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextForQuestionAnswering.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextForQuestionAnswering.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextForQuestionAnswering.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextForQuestionAnswering.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextForQuestionAnswering.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextForQuestionAnswering.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextForQuestionAnswering.forward.start_positions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextForQuestionAnswering.forward.start_positions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>start_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). 
Positions outside of the sequence are not taken into account for computing the loss.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Data2VecTextForQuestionAnswering.forward.end_positions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Data2VecTextForQuestionAnswering.forward.end_positions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>end_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). Positions outside of the sequence are not taken into account for computing the loss.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.Data2VecTextForQuestionAnswering.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.QuestionAnsweringModelOutput" >transformers.modeling_outputs.QuestionAnsweringModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.QuestionAnsweringModelOutput" >transformers.modeling_outputs.QuestionAnsweringModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/data2vec#transformers.Data2VecTextConfig" >Data2VecTextConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) — Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.</p> </li> <li> <p><strong>start_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) — Span-start scores (before SoftMax).</p> </li> <li> <p><strong>end_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) — Span-end scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong>
(<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/data2vec#transformers.Data2VecTextForQuestionAnswering">Data2VecTextForQuestionAnswering</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer, Data2VecTextForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>torch.manual_seed(<span class="hljs-number">0</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/data2vec-text-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = 
Data2VecTextForQuestionAnswering.from_pretrained(<span class="hljs-string">&quot;facebook/data2vec-text-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>question, text = <span class="hljs-string">&quot;Who was Jim Henson?&quot;</span>, <span class="hljs-string">&quot;Jim Henson was a nice puppet&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(question, text, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>start_positions = torch.tensor([<span class="hljs-number">1</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>end_positions = torch.tensor([<span class="hljs-number">3</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, start_positions=start_positions, end_positions=end_positions) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">round</span>(loss.item(), <span class="hljs-number">2</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>start_scores = outputs.start_logits <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">list</span>(start_scores.shape) <span class="hljs-meta">&gt;&gt;&gt; </span>end_scores = outputs.end_logits <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">list</span>(end_scores.shape) <!-- HTML_TAG_END --></pre></div></div></div> <script type="module" data-hydrate="1oysb53"> import { start } from "/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"; start({ target: document.querySelector('[data-hydrate="1oysb53"]').parentNode, paths: {"base":"/docs/transformers/pr_16143/en","assets":"/docs/transformers/pr_16143/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"), import("/docs/transformers/pr_16143/en/_app/pages/model_doc/data2vec.mdx-72db0e89.js") ], params: {} } }); </script>
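<p>One way to turn the returned <code>start_logits</code> and <code>end_logits</code> into an answer string is to take the argmax start and end token positions and decode the corresponding input tokens. The sketch below reuses the checkpoint and tokenizer from the example above; note that <code>facebook/data2vec-text-base</code> does not ship a fine-tuned question-answering head, so the decoded span is illustrative only.</p> <div class="code-block relative"><pre><code>from transformers import RobertaTokenizer, Data2VecTextForQuestionAnswering
import torch

tokenizer = RobertaTokenizer.from_pretrained("facebook/data2vec-text-base")
model = Data2VecTextForQuestionAnswering.from_pretrained("facebook/data2vec-text-base")

question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet"
inputs = tokenizer(question, text, return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)

# Most likely start/end token indices according to the QA head
# (randomly initialized for this base checkpoint, hence illustrative only).
start_index = int(outputs.start_logits.argmax())
end_index = int(outputs.end_logits.argmax())

# Decode the predicted span back into a string.
answer_ids = inputs["input_ids"][0, start_index : end_index + 1]
answer = tokenizer.decode(answer_ids, skip_special_tokens=True)
print(answer)
</code></pre></div>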
214
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en
hf_public_repos/doc-build-dev/transformers/pr_16143/en/model_doc/lxmert.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;lxmert&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;overview&quot;,&quot;title&quot;:&quot;Overview&quot;},{&quot;local&quot;:&quot;transformers.LxmertConfig&quot;,&quot;title&quot;:&quot;LxmertConfig&quot;},{&quot;local&quot;:&quot;transformers.LxmertTokenizer&quot;,&quot;title&quot;:&quot;LxmertTokenizer&quot;},{&quot;local&quot;:&quot;transformers.LxmertTokenizerFast&quot;,&quot;title&quot;:&quot;LxmertTokenizerFast&quot;},{&quot;local&quot;:&quot;transformers.models.lxmert.modeling_lxmert.LxmertModelOutput&quot;,&quot;title&quot;:&quot;Lxmert specific outputs&quot;},{&quot;local&quot;:&quot;transformers.LxmertModel&quot;,&quot;title&quot;:&quot;LxmertModel&quot;},{&quot;local&quot;:&quot;transformers.LxmertForPreTraining&quot;,&quot;title&quot;:&quot;LxmertForPreTraining&quot;},{&quot;local&quot;:&quot;transformers.LxmertForQuestionAnswering&quot;,&quot;title&quot;:&quot;LxmertForQuestionAnswering&quot;},{&quot;local&quot;:&quot;transformers.TFLxmertModel&quot;,&quot;title&quot;:&quot;TFLxmertModel&quot;},{&quot;local&quot;:&quot;transformers.TFLxmertForPreTraining&quot;,&quot;title&quot;:&quot;TFLxmertForPreTraining&quot;}],&quot;title&quot;:&quot;LXMERT&quot;}" data-svelte="svelte-1phssyn"> <link rel="stylesheet" href="/docs/transformers/pr_16143/en/_app/assets/pages/__layout.svelte-a5c8879b.css"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/vendor-4833417e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/paths-4b3c6e7e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/model_doc/lxmert.mdx-e3257043.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Tip-fffd6df1.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Docstring-4f315ed9.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/IconCopyLink-4b81c553.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CodeBlock-6a3d1b46.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CopyButton-dacfbfaf.js"> <h1 class="relative group"><a id="lxmert" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#lxmert"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>LXMERT </span></h1> <h2 class="relative group"><a id="overview" class="header-link block 
pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#overview"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Overview </span></h2> <p>The LXMERT model was proposed in <a href="https://arxiv.org/abs/1908.07490" rel="nofollow">LXMERT: Learning Cross-Modality Encoder Representations from Transformers</a> by Hao Tan &amp; Mohit Bansal. It is a series of bidirectional transformer encoders (one for the vision modality, one for the language modality, and then one to fuse both modalities) pretrained using a combination of masked language modeling, visual-language text alignment, ROI-feature regression, masked visual-attribute modeling, masked visual-object modeling, and visual-question answering objectives. The pretraining consists of multiple multi-modal datasets: MSCOCO, Visual-Genome + Visual-Genome Question Answering, VQA 2.0, and GQA.</p> <p>The abstract from the paper is the following:</p> <p><em>Vision-and-language reasoning requires an understanding of visual concepts, language semantics, and, most importantly, the alignment and relationships between these two modalities. We thus propose the LXMERT (Learning Cross-Modality Encoder Representations from Transformers) framework to learn these vision-and-language connections. In LXMERT, we build a large-scale Transformer model that consists of three encoders: an object relationship encoder, a language encoder, and a cross-modality encoder. Next, to endow our model with the capability of connecting vision and language semantics, we pre-train the model with large amounts of image-and-sentence pairs, via five diverse representative pretraining tasks: masked language modeling, masked object prediction (feature regression and label classification), cross-modality matching, and image question answering. These tasks help in learning both intra-modality and cross-modality relationships. After fine-tuning from our pretrained parameters, our model achieves the state-of-the-art results on two visual question answering datasets (i.e., VQA and GQA). We also show the generalizability of our pretrained cross-modality model by adapting it to a challenging visual-reasoning task, NLVR, and improve the previous best result by 22% absolute (54% to 76%). 
Lastly, we demonstrate detailed ablation studies to prove that both our novel model components and pretraining strategies significantly contribute to our strong results; and also present several attention visualizations for the different encoders</em></p> <p>Tips:</p> <ul><li>Bounding boxes do not have to be used in the visual feature embeddings; any kind of visual-spatial features will work.</li> <li>Both the language hidden states and the visual hidden states that LXMERT outputs are passed through the cross-modality layer, so they contain information from both modalities. To access a modality that only attends to itself, select the vision/language hidden states from the first input in the tuple.</li> <li>The bidirectional cross-modality encoder attention only returns attention values when the language modality is used as the input and the vision modality is used as the context vector. Further, while the cross-modality encoder contains self-attention for each respective modality and cross-attention, only the cross-attention is returned and both self-attention outputs are disregarded.</li></ul> <p>This model was contributed by <a href="https://huggingface.co/eltoto1219" rel="nofollow">eltoto1219</a>. The original code can be found <a href="https://github.com/airsplay/lxmert" rel="nofollow">here</a>.</p> <h2 class="relative group"><a id="transformers.LxmertConfig" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LxmertConfig"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>LxmertConfig </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.LxmertConfig"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 
3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">LxmertConfig</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.LxmertConfig" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.LxmertConfig"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/lxmert/configuration_lxmert.py#L29" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">vocab_size<span class="opacity-60"> = 30522</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_size<span class="opacity-60"> = 768</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_attention_heads<span class="opacity-60"> = 12</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_qa_labels<span class="opacity-60"> = 9500</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_object_labels<span class="opacity-60"> = 1600</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_attr_labels<span class="opacity-60"> = 400</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">intermediate_size<span class="opacity-60"> = 3072</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_act<span class="opacity-60"> = &#39;gelu&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_dropout_prob<span class="opacity-60"> = 0.1</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_probs_dropout_prob<span class="opacity-60"> = 
0.1</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_position_embeddings<span class="opacity-60"> = 512</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">type_vocab_size<span class="opacity-60"> = 2</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">initializer_range<span class="opacity-60"> = 0.02</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">layer_norm_eps<span class="opacity-60"> = 1e-12</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">l_layers<span class="opacity-60"> = 9</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">x_layers<span class="opacity-60"> = 5</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">r_layers<span class="opacity-60"> = 5</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">visual_feat_dim<span class="opacity-60"> = 2048</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">visual_pos_dim<span class="opacity-60"> = 4</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">visual_loss_normalizer<span class="opacity-60"> = 6.67</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">task_matched<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">task_mask_lm<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">task_obj_predict<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">task_qa<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">visual_obj_loss<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">visual_attr_loss<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">visual_feat_loss<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span 
class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LxmertConfig.vocab_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LxmertConfig.vocab_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 30522) &#x2014; Vocabulary size of the LXMERT model. Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed when calling <a href="/docs/transformers/pr_16143/en/model_doc/lxmert#transformers.LxmertModel">LxmertModel</a> or <a href="/docs/transformers/pr_16143/en/model_doc/lxmert#transformers.TFLxmertModel">TFLxmertModel</a>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LxmertConfig.hidden_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LxmertConfig.hidden_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_size</strong> (<code>int</code>, <em>optional</em>, defaults to 768) &#x2014; Dimensionality of the encoder layers and the pooler layer.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LxmertConfig.r_layers" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 
with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LxmertConfig.r_layers"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>r_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 5) &#x2014; Number of hidden layers in the Transformer visual encoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LxmertConfig.l_layers" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LxmertConfig.l_layers"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>l_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 9) &#x2014; Number of hidden layers in the Transformer language encoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LxmertConfig.x_layers" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LxmertConfig.x_layers"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> 
<span><!-- HTML_TAG_START --><strong>x_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 5) &#x2014; Number of hidden layers in the Transformer cross-modality encoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LxmertConfig.num_attention_heads" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LxmertConfig.num_attention_heads"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LxmertConfig.intermediate_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LxmertConfig.intermediate_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>intermediate_size</strong> (<code>int</code>, <em>optional</em>, defaults to 3072) &#x2014; Dimensionality of the &#x201C;intermediate&#x201D; (often named feed-forward) layer in the Transformer encoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LxmertConfig.hidden_act" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LxmertConfig.hidden_act"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_act</strong> (<code>str</code> or <code>Callable</code>, <em>optional</em>, defaults to <code>&quot;gelu&quot;</code>) &#x2014; The non-linear activation function (function or string) in the encoder and pooler. If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;silu&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LxmertConfig.hidden_dropout_prob" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LxmertConfig.hidden_dropout_prob"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LxmertConfig.attention_probs_dropout_prob" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LxmertConfig.attention_probs_dropout_prob"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 
11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_probs_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout ratio for the attention probabilities.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LxmertConfig.max_position_embeddings" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LxmertConfig.max_position_embeddings"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_position_embeddings</strong> (<code>int</code>, <em>optional</em>, defaults to 512) &#x2014; The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LxmertConfig.type_vocab_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LxmertConfig.type_vocab_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>type_vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 2) &#x2014; The vocabulary size of the <em>token_type_ids</em> passed into <a href="/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertModel">BertModel</a>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LxmertConfig.initializer_range" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full" href="#transformers.LxmertConfig.initializer_range"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>initializer_range</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LxmertConfig.layer_norm_eps" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LxmertConfig.layer_norm_eps"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>layer_norm_eps</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-12) &#x2014; The epsilon used by the layer normalization layers.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LxmertConfig.visual_feat_dim" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LxmertConfig.visual_feat_dim"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>visual_feat_dim</strong> (<code>int</code>, <em>optional</em>, defaults to 2048) &#x2014; This represents the last dimension of the pooled-object features used as input for the model, representing the size of each object feature itself.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LxmertConfig.visual_pos_dim" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LxmertConfig.visual_pos_dim"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>visual_pos_dim</strong> (<code>int</code>, <em>optional</em>, defaults to 4) &#x2014; This represents the number of spacial features that are mixed into the visual features. The default is set to 4 because most commonly this will represent the location of a bounding box. 
i.e., (x, y, width, height)<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LxmertConfig.visual_loss_normalizer" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LxmertConfig.visual_loss_normalizer"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>visual_loss_normalizer</strong> (<code>float</code>, <em>optional</em>, defaults to 1/15) &#x2014; This represents the scaling factor in which each visual loss is multiplied by if during pretraining, one decided to train with multiple vision-based loss objectives.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LxmertConfig.num_qa_labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LxmertConfig.num_qa_labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_qa_labels</strong> (<code>int</code>, <em>optional</em>, defaults to 9500) &#x2014; This represents the total number of different question answering (QA) labels there are. 
If using more than one dataset with QA, the user will need to account for the total number of labels that all of the datasets have in total.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LxmertConfig.num_object_labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LxmertConfig.num_object_labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_object_labels</strong> (<code>int</code>, <em>optional</em>, defaults to 1600) &#x2014; This represents the total number of semantically unique objects that lxmert will be able to classify a pooled-object feature as belonging too.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LxmertConfig.num_attr_labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LxmertConfig.num_attr_labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_attr_labels</strong> (<code>int</code>, <em>optional</em>, defaults to 400) &#x2014; This represents the total number of semantically unique attributes that lxmert will be able to classify a pooled-object feature as possessing.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LxmertConfig.task_matched" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LxmertConfig.task_matched"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" 
width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>task_matched</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; This task is used for sentence-image matching. If the sentence correctly describes the image the label will be 1. If the sentence does not correctly describe the image, the label will be 0.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LxmertConfig.task_mask_lm" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LxmertConfig.task_mask_lm"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>task_mask_lm</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to add masked language modeling (as used in pretraining models such as BERT) to the loss objective.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LxmertConfig.task_obj_predict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LxmertConfig.task_obj_predict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>task_obj_predict</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to add object prediction, attribute prediction and feature regression to the loss objective.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LxmertConfig.task_qa" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LxmertConfig.task_qa"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>task_qa</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to add the question-answering loss to the objective<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LxmertConfig.visual_obj_loss" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LxmertConfig.visual_obj_loss"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>visual_obj_loss</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to calculate the object-prediction loss objective<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LxmertConfig.visual_attr_loss" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LxmertConfig.visual_attr_loss"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" 
role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>visual_attr_loss</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to calculate the attribute-prediction loss objective<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LxmertConfig.visual_feat_loss" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LxmertConfig.visual_feat_loss"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>visual_feat_loss</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to calculate the feature-regression loss objective<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LxmertConfig.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LxmertConfig.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) 
&#x2014; Whether or not the model should return the attentions from the vision, language, and cross-modality layers.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LxmertConfig.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LxmertConfig.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the model should return the hidden states from the vision, language, and cross-modality layers.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>This is the configuration class to store the configuration of a <a href="/docs/transformers/pr_16143/en/model_doc/lxmert#transformers.LxmertModel">LxmertModel</a> or a <a href="/docs/transformers/pr_16143/en/model_doc/lxmert#transformers.TFLxmertModel">TFLxmertModel</a>. It is used to instantiate a LXMERT model according to the specified arguments, defining the model architecture.</p> <p>Configuration objects inherit from <a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a> and can be used to control the model outputs.
Read the documentation from <a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a> for more information.</p></div> <h2 class="relative group"><a id="transformers.LxmertTokenizer" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LxmertTokenizer"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>LxmertTokenizer </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.LxmertTokenizer"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">LxmertTokenizer</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.LxmertTokenizer" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.LxmertTokenizer"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto 
!text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/lxmert/tokenization_lxmert.py#L36" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">vocab_file<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">do_lower_case<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">do_basic_tokenize<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">never_split<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">unk_token<span class="opacity-60"> = &#39;[UNK]&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">sep_token<span class="opacity-60"> = &#39;[SEP]&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_token<span class="opacity-60"> = &#39;[PAD]&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cls_token<span class="opacity-60"> = &#39;[CLS]&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">mask_token<span class="opacity-60"> = &#39;[MASK]&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tokenize_chinese_chars<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">strip_accents<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Construct an LXMERT tokenizer.</p> <p><a href="/docs/transformers/pr_16143/en/model_doc/lxmert#transformers.LxmertTokenizer">LxmertTokenizer</a> is identical to <a href="/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a> and runs end-to-end tokenization: punctuation splitting and wordpiece.</p> <p>Refer to superclass <a href="/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a> for usage examples and documentation concerning parameters.</p></div> <h2 class="relative group"><a id="transformers.LxmertTokenizerFast" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute 
with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LxmertTokenizerFast"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>LxmertTokenizerFast </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.LxmertTokenizerFast"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">LxmertTokenizerFast</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.LxmertTokenizerFast" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.LxmertTokenizerFast"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/lxmert/tokenization_lxmert_fast.py#L40" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> 
<span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">vocab_file<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tokenizer_file<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">do_lower_case<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">unk_token<span class="opacity-60"> = &#39;[UNK]&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">sep_token<span class="opacity-60"> = &#39;[SEP]&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_token<span class="opacity-60"> = &#39;[PAD]&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cls_token<span class="opacity-60"> = &#39;[CLS]&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">mask_token<span class="opacity-60"> = &#39;[MASK]&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tokenize_chinese_chars<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">strip_accents<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Construct a “fast” LXMERT tokenizer (backed by HuggingFace’s <em>tokenizers</em> library).</p> <p><a href="/docs/transformers/pr_16143/en/model_doc/lxmert#transformers.LxmertTokenizerFast">LxmertTokenizerFast</a> is identical to <a href="/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertTokenizerFast">BertTokenizerFast</a> and runs end-to-end tokenization: punctuation splitting and wordpiece.</p> <p>Refer to superclass <a href="/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertTokenizerFast">BertTokenizerFast</a> for usage examples and documentation concerning parameters.</p></div> <h2 class="relative group"><a id="transformers.models.lxmert.modeling_lxmert.LxmertModelOutput" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.lxmert.modeling_lxmert.LxmertModelOutput"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 
0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Lxmert specific outputs </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.models.lxmert.modeling_lxmert.LxmertModelOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.models.lxmert.modeling_lxmert.</span><span class="font-semibold">LxmertModelOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.models.lxmert.modeling_lxmert.LxmertModelOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.models.lxmert.modeling_lxmert.LxmertModelOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/lxmert/modeling_lxmert.py#L61" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">language_output<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">vision_output<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pooled_output<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">language_hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">vision_hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">language_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">vision_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cross_encoder_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.lxmert.modeling_lxmert.LxmertModelOutput.language_output" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.lxmert.modeling_lxmert.LxmertModelOutput.language_output"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>language_output</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) &#x2014; Sequence of hidden-states at the output of the last 
layer of the language encoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.lxmert.modeling_lxmert.LxmertModelOutput.vision_output" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.lxmert.modeling_lxmert.LxmertModelOutput.vision_output"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>vision_output</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) &#x2014; Sequence of hidden-states at the output of the last layer of the visual encoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.lxmert.modeling_lxmert.LxmertModelOutput.pooled_output" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.lxmert.modeling_lxmert.LxmertModelOutput.pooled_output"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pooled_output</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, hidden_size)</code>) &#x2014; Last layer hidden-state of the first token of the sequence (classification, CLS, token) further processed by a Linear layer and a Tanh activation function. 
The Linear<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.lxmert.modeling_lxmert.LxmertModelOutput.language_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.lxmert.modeling_lxmert.LxmertModelOutput.language_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>language_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for input features + one for the output of each cross-modality layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.lxmert.modeling_lxmert.LxmertModelOutput.vision_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.lxmert.modeling_lxmert.LxmertModelOutput.vision_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>vision_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for input features + one for the output of each cross-modality layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a 
id="transformers.models.lxmert.modeling_lxmert.LxmertModelOutput.language_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.lxmert.modeling_lxmert.LxmertModelOutput.language_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>language_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.lxmert.modeling_lxmert.LxmertModelOutput.vision_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.lxmert.modeling_lxmert.LxmertModelOutput.vision_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>vision_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. 
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.lxmert.modeling_lxmert.LxmertModelOutput.cross_encoder_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.lxmert.modeling_lxmert.LxmertModelOutput.cross_encoder_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cross_encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Lxmert’s outputs that contain the last hidden states, pooled outputs, and attention probabilities for the language, visual, and, cross-modality encoders. 
(note: the visual encoder in Lxmert is referred to as the “relationship” encoder)</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.models.lxmert.modeling_lxmert.LxmertForPreTrainingOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.models.lxmert.modeling_lxmert.</span><span class="font-semibold">LxmertForPreTrainingOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.models.lxmert.modeling_lxmert.LxmertForPreTrainingOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.models.lxmert.modeling_lxmert.LxmertForPreTrainingOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/lxmert/modeling_lxmert.py#L147" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">loss<span class="opacity-60">: [&lt;class &#39;torch.FloatTensor&#39;&gt;] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">prediction_logits<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">cross_relationship_score<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">question_answering_score<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">language_hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">vision_hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">language_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">vision_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cross_encoder_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.lxmert.modeling_lxmert.LxmertForPreTrainingOutput.loss" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.lxmert.modeling_lxmert.LxmertForPreTrainingOutput.loss"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>loss</strong> (<em>optional</em>, returned when <code>labels</code> is provided, <code>torch.FloatTensor</code> of shape <code>(1,)</code>) &#x2014; Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification) loss.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a 
id="transformers.models.lxmert.modeling_lxmert.LxmertForPreTrainingOutput.prediction_logits" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.lxmert.modeling_lxmert.LxmertForPreTrainingOutput.prediction_logits"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>prediction_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) &#x2014; Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). cross_relationship_score &#x2014; (<code>torch.FloatTensor</code> of shape <code>(batch_size, 2)</code>): Prediction scores of the textual matching objective (classification) head (scores of True/False continuation before SoftMax). question_answering_score &#x2014; (<code>torch.FloatTensor</code> of shape <code>(batch_size, n_qa_answers)</code>): Prediction scores of question answering objective (classification).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.lxmert.modeling_lxmert.LxmertForPreTrainingOutput.language_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.lxmert.modeling_lxmert.LxmertForPreTrainingOutput.language_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>language_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for input features + one for the output of each cross-modality layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.<!-- 
HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.lxmert.modeling_lxmert.LxmertForPreTrainingOutput.vision_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.lxmert.modeling_lxmert.LxmertForPreTrainingOutput.vision_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>vision_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for input features + one for the output of each cross-modality layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.lxmert.modeling_lxmert.LxmertForPreTrainingOutput.language_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.lxmert.modeling_lxmert.LxmertForPreTrainingOutput.language_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>language_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. 
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.lxmert.modeling_lxmert.LxmertForPreTrainingOutput.vision_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.lxmert.modeling_lxmert.LxmertForPreTrainingOutput.vision_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>vision_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.lxmert.modeling_lxmert.LxmertForPreTrainingOutput.cross_encoder_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.lxmert.modeling_lxmert.LxmertForPreTrainingOutput.cross_encoder_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cross_encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. 
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Output type of <a href="/docs/transformers/pr_16143/en/model_doc/lxmert#transformers.LxmertForPreTraining">LxmertForPreTraining</a>.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.models.lxmert.modeling_lxmert.LxmertForQuestionAnsweringOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.models.lxmert.modeling_lxmert.</span><span class="font-semibold">LxmertForQuestionAnsweringOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.models.lxmert.modeling_lxmert.LxmertForQuestionAnsweringOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.models.lxmert.modeling_lxmert.LxmertForQuestionAnsweringOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/lxmert/modeling_lxmert.py#L107" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">loss<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">question_answering_score<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">language_hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">vision_hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">language_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">vision_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cross_encoder_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.lxmert.modeling_lxmert.LxmertForQuestionAnsweringOutput.loss" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.lxmert.modeling_lxmert.LxmertForQuestionAnsweringOutput.loss"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>loss</strong> (<em>optional</em>, returned when <code>labels</code> is provided, <code>torch.FloatTensor</code> of shape <code>(1,)</code>) &#x2014; Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification) loss.k. 
question_answering_score &#x2014; (<code>torch.FloatTensor</code> of shape <code>(batch_size, n_qa_answers)</code>, <em>optional</em>): Prediction scores of question answering objective (classification).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.lxmert.modeling_lxmert.LxmertForQuestionAnsweringOutput.language_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.lxmert.modeling_lxmert.LxmertForQuestionAnsweringOutput.language_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>language_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for input features + one for the output of each cross-modality layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.lxmert.modeling_lxmert.LxmertForQuestionAnsweringOutput.vision_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.lxmert.modeling_lxmert.LxmertForQuestionAnsweringOutput.vision_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>vision_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for input features + one for the output of each 
cross-modality layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.lxmert.modeling_lxmert.LxmertForQuestionAnsweringOutput.language_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.lxmert.modeling_lxmert.LxmertForQuestionAnsweringOutput.language_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>language_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.lxmert.modeling_lxmert.LxmertForQuestionAnsweringOutput.vision_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.lxmert.modeling_lxmert.LxmertForQuestionAnsweringOutput.vision_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>vision_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. 
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.lxmert.modeling_lxmert.LxmertForQuestionAnsweringOutput.cross_encoder_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.lxmert.modeling_lxmert.LxmertForQuestionAnsweringOutput.cross_encoder_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cross_encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. 
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Output type of <a href="/docs/transformers/pr_16143/en/model_doc/lxmert#transformers.LxmertForQuestionAnswering">LxmertForQuestionAnswering</a>.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.models.lxmert.modeling_tf_lxmert.TFLxmertModelOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.models.lxmert.modeling_tf_lxmert.</span><span class="font-semibold">TFLxmertModelOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.models.lxmert.modeling_tf_lxmert.TFLxmertModelOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.models.lxmert.modeling_tf_lxmert.TFLxmertModelOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/lxmert/modeling_tf_lxmert.py#L50" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">language_output<span class="opacity-60">: typing.Optional[tensorflow.python.framework.ops.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">vision_output<span class="opacity-60">: typing.Optional[tensorflow.python.framework.ops.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pooled_output<span class="opacity-60">: typing.Optional[tensorflow.python.framework.ops.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">language_hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">vision_hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">language_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">vision_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cross_encoder_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.lxmert.modeling_tf_lxmert.TFLxmertModelOutput.language_output" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.lxmert.modeling_tf_lxmert.TFLxmertModelOutput.language_output"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>language_output</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) &#x2014; Sequence of hidden-states at the output of the last layer of the language encoder.<!-- HTML_TAG_END --> </span></span> </li><li 
class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.lxmert.modeling_tf_lxmert.TFLxmertModelOutput.vision_output" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.lxmert.modeling_tf_lxmert.TFLxmertModelOutput.vision_output"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>vision_output</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) &#x2014; Sequence of hidden-states at the output of the last layer of the visual encoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.lxmert.modeling_tf_lxmert.TFLxmertModelOutput.pooled_output" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.lxmert.modeling_tf_lxmert.TFLxmertModelOutput.pooled_output"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pooled_output</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, hidden_size)</code>) &#x2014; Last layer hidden-state of the first token of the sequence (classification, CLS, token) further processed by a Linear layer and a Tanh activation function. 
<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.lxmert.modeling_tf_lxmert.TFLxmertModelOutput.language_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.lxmert.modeling_tf_lxmert.TFLxmertModelOutput.language_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>language_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for input features + one for the output of each cross-modality layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.lxmert.modeling_tf_lxmert.TFLxmertModelOutput.vision_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.lxmert.modeling_tf_lxmert.TFLxmertModelOutput.vision_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>vision_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for input features + one for the output of each cross-modality layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a 
id="transformers.models.lxmert.modeling_tf_lxmert.TFLxmertModelOutput.language_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.lxmert.modeling_tf_lxmert.TFLxmertModelOutput.language_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>language_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.lxmert.modeling_tf_lxmert.TFLxmertModelOutput.vision_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.lxmert.modeling_tf_lxmert.TFLxmertModelOutput.vision_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>vision_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. 
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.lxmert.modeling_tf_lxmert.TFLxmertModelOutput.cross_encoder_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.lxmert.modeling_tf_lxmert.TFLxmertModelOutput.cross_encoder_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cross_encoder_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Lxmert’s outputs that contain the last hidden states, pooled outputs, and attention probabilities for the language, visual, and cross-modality encoders. 
(note: the visual encoder in Lxmert is referred to as the “relationship” encoder)</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.models.lxmert.modeling_tf_lxmert.TFLxmertForPreTrainingOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.models.lxmert.modeling_tf_lxmert.</span><span class="font-semibold">TFLxmertForPreTrainingOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.models.lxmert.modeling_tf_lxmert.TFLxmertForPreTrainingOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.models.lxmert.modeling_tf_lxmert.TFLxmertForPreTrainingOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/lxmert/modeling_tf_lxmert.py#L96" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">loss<span class="opacity-60">: typing.Optional[tensorflow.python.framework.ops.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">prediction_logits<span class="opacity-60">: typing.Optional[tensorflow.python.framework.ops.Tensor] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white 
dark:hover:bg-white dark:hover:text-black">cross_relationship_score<span class="opacity-60">: typing.Optional[tensorflow.python.framework.ops.Tensor] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">question_answering_score<span class="opacity-60">: typing.Optional[tensorflow.python.framework.ops.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">language_hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">vision_hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">language_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">vision_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cross_encoder_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.lxmert.modeling_tf_lxmert.TFLxmertForPreTrainingOutput.loss" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.lxmert.modeling_tf_lxmert.TFLxmertForPreTrainingOutput.loss"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>loss</strong> (<em>optional</em>, returned when <code>labels</code> is provided, <code>tf.Tensor</code> of shape <code>(1,)</code>) &#x2014; Total loss as the sum of the masked language modeling loss and the next sequence 
prediction (classification) loss.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.lxmert.modeling_tf_lxmert.TFLxmertForPreTrainingOutput.prediction_logits" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.lxmert.modeling_tf_lxmert.TFLxmertForPreTrainingOutput.prediction_logits"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>prediction_logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) &#x2014; Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). cross_relationship_score &#x2014; (<code>tf.Tensor</code> of shape <code>(batch_size, 2)</code>): Prediction scores of the textual matching objective (classification) head (scores of True/False continuation before SoftMax). 
question_answering_score &#x2014; (<code>tf.Tensor</code> of shape <code>(batch_size, n_qa_answers)</code>): Prediction scores of question answering objective (classification).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.lxmert.modeling_tf_lxmert.TFLxmertForPreTrainingOutput.language_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.lxmert.modeling_tf_lxmert.TFLxmertForPreTrainingOutput.language_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>language_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for input features + one for the output of each cross-modality layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.lxmert.modeling_tf_lxmert.TFLxmertForPreTrainingOutput.vision_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.lxmert.modeling_tf_lxmert.TFLxmertForPreTrainingOutput.vision_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>vision_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for input features + one for the output of each cross-modality layer) of shape <code>(batch_size, 
sequence_length, hidden_size)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.lxmert.modeling_tf_lxmert.TFLxmertForPreTrainingOutput.language_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.lxmert.modeling_tf_lxmert.TFLxmertForPreTrainingOutput.language_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>language_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.lxmert.modeling_tf_lxmert.TFLxmertForPreTrainingOutput.vision_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.lxmert.modeling_tf_lxmert.TFLxmertForPreTrainingOutput.vision_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>vision_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. 
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.lxmert.modeling_tf_lxmert.TFLxmertForPreTrainingOutput.cross_encoder_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.lxmert.modeling_tf_lxmert.TFLxmertForPreTrainingOutput.cross_encoder_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cross_encoder_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Output type of <a href="/docs/transformers/pr_16143/en/model_doc/lxmert#transformers.LxmertForPreTraining">LxmertForPreTraining</a>.</p></div> <h2 class="relative group"><a id="transformers.LxmertModel" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LxmertModel"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>LxmertModel </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.LxmertModel"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 
text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">LxmertModel</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.LxmertModel" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.LxmertModel"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/lxmert/modeling_lxmert.py#L886" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LxmertModel.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LxmertModel.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 
8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/lxmert#transformers.LxmertConfig">LxmertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>The bare Lxmert Model transformer outputting raw hidden-states without any specific head on top.</p> <p>The LXMERT model was proposed in <a href="https://arxiv.org/abs/1908.07490" rel="nofollow">LXMERT: Learning Cross-Modality Encoder Representations from Transformers</a> by Hao Tan and Mohit Bansal. It’s a vision and language transformer model, pretrained on a variety of multi-modal datasets comprising GQA, VQAv2.0, MSCOCO captions, and Visual Genome, using a combination of masked language modeling, region of interest feature regression, cross entropy loss for question answering attribute prediction, and object tag prediction.</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads, etc.).</p> <p>This model is also a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> subclass.
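</p> <p>Because the visual inputs are not produced by the transformers library, the short sketch below is only illustrative: the random <code>visual_feats</code> and <code>visual_pos</code> tensors are hypothetical stand-ins for real Faster R-CNN detector outputs, assumed here to have 36 regions per image, 2048-dimensional ROI features and 4-dimensional bounding boxes normalized to <code>[0, 1]</code>.</p> <div class="code-block relative"><pre>&gt;&gt;&gt; import torch
&gt;&gt;&gt; from transformers import LxmertTokenizer, LxmertModel

&gt;&gt;&gt; tokenizer = LxmertTokenizer.from_pretrained(&quot;unc-nlp/lxmert-base-uncased&quot;)
&gt;&gt;&gt; model = LxmertModel.from_pretrained(&quot;unc-nlp/lxmert-base-uncased&quot;)

&gt;&gt;&gt; inputs = tokenizer(&quot;Who is eating the apple?&quot;, return_tensors=&quot;pt&quot;)
&gt;&gt;&gt; # Hypothetical detector outputs: 36 regions per image, 2048-dim ROI features
&gt;&gt;&gt; # and 4-dim bounding boxes normalized to [0, 1]
&gt;&gt;&gt; visual_feats = torch.rand(1, 36, 2048)
&gt;&gt;&gt; visual_pos = torch.rand(1, 36, 4)

&gt;&gt;&gt; outputs = model(**inputs, visual_feats=visual_feats, visual_pos=visual_pos)
&gt;&gt;&gt; language_output = outputs.language_output  # text-stream hidden states
&gt;&gt;&gt; vision_output = outputs.vision_output  # visual-stream hidden states
&gt;&gt;&gt; pooled_output = outputs.pooled_output  # pooled [CLS] representation</pre></div> <p>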
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.LxmertModel.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.LxmertModel.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.LxmertModel.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/lxmert/modeling_lxmert.py#L901" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">visual_feats<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">visual_pos<span 
class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">visual_attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/model_doc/lxmert#transformers.models.lxmert.modeling_lxmert.LxmertModelOutput" >transformers.models.lxmert.modeling_lxmert.LxmertModelOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LxmertModel.forward.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LxmertModel.forward.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a 
href="/docs/transformers/pr_16143/en/model_doc/lxmert#transformers.LxmertTokenizer">LxmertTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a> visual_feats &#x2014; (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_visual_features, visual_feat_dim)</code>): This input represents visual features: ROI-pooled object features extracted from bounding boxes by a Faster R-CNN model.</p> <p>These are currently not provided by the transformers library. visual_pos &#x2014; (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_visual_features, visual_pos_dim)</code>): This input represents spatial features corresponding to their relative (via index) visual features. The pre-trained LXMERT model expects these spatial features to be normalized bounding boxes on a scale of 0 to 1.</p> <p>These are currently not provided by the transformers library.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LxmertModel.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LxmertModel.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices.
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LxmertModel.forward.visual_attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LxmertModel.forward.visual_attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>visual_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LxmertModel.forward.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LxmertModel.forward.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LxmertModel.forward.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LxmertModel.forward.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LxmertModel.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LxmertModel.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LxmertModel.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LxmertModel.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LxmertModel.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LxmertModel.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.LxmertModel.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/model_doc/lxmert#transformers.models.lxmert.modeling_lxmert.LxmertModelOutput" >transformers.models.lxmert.modeling_lxmert.LxmertModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p 
class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/model_doc/lxmert#transformers.models.lxmert.modeling_lxmert.LxmertModelOutput" >transformers.models.lxmert.modeling_lxmert.LxmertModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/lxmert#transformers.LxmertConfig" >LxmertConfig</a>) and inputs.</p> <ul> <li><strong>language_output</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) — Sequence of hidden-states at the output of the last layer of the language encoder.</li> <li><strong>vision_output</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) — Sequence of hidden-states at the output of the last layer of the visual encoder.</li> <li><strong>pooled_output</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, hidden_size)</code>) — Last layer hidden-state of the first token of the sequence (classification, CLS, token) further processed by a Linear layer and a Tanh activation function. The Linear</li> <li><strong>language_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for input features + one for the output of each cross-modality layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</li> <li><strong>vision_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for input features + one for the output of each cross-modality layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</li> <li><strong>language_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</li> <li><strong>vision_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</li> <li><strong>cross_encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. 
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/lxmert#transformers.LxmertModel">LxmertModel</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> LxmertTokenizer, LxmertModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = LxmertTokenizer.from_pretrained(<span class="hljs-string">&quot;unc-nlp/lxmert-base-uncased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = LxmertModel.from_pretrained(<span class="hljs-string">&quot;unc-nlp/lxmert-base-uncased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.LxmertForPreTraining" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LxmertForPreTraining"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 
88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>LxmertForPreTraining </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.LxmertForPreTraining"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">LxmertForPreTraining</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.LxmertForPreTraining" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.LxmertForPreTraining"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/lxmert/modeling_lxmert.py#L1025" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex 
items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LxmertForPreTraining.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LxmertForPreTraining.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/lxmert#transformers.LxmertConfig">LxmertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Lxmert Model with a specified pretraining head on top.</p> <p>The LXMERT model was proposed in <a href="https://arxiv.org/abs/1908.07490" rel="nofollow">LXMERT: Learning Cross-Modality Encoder Representations from Transformers</a> by Hao Tan and Mohit Bansal. It’s a vision and language transformer model, pretrained on a variety of multi-modal datasets comprising GQA, VQAv2.0, MSCOCO captions, and Visual Genome, using a combination of masked language modeling, region of interest feature regression, cross entropy loss for question answering attribute prediction, and object tag prediction.</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads, etc.).</p> <p>This model is also a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> subclass.
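</p> <p>A minimal, illustrative sketch of the pretraining forward pass is shown below. The random visual tensors are hypothetical stand-ins for Faster R-CNN detector outputs, and the <code>labels</code> and <code>matched_label</code> values are toy targets used only to show how a pretraining loss is returned.</p> <div class="code-block relative"><pre>&gt;&gt;&gt; import torch
&gt;&gt;&gt; from transformers import LxmertTokenizer, LxmertForPreTraining

&gt;&gt;&gt; tokenizer = LxmertTokenizer.from_pretrained(&quot;unc-nlp/lxmert-base-uncased&quot;)
&gt;&gt;&gt; model = LxmertForPreTraining.from_pretrained(&quot;unc-nlp/lxmert-base-uncased&quot;)

&gt;&gt;&gt; inputs = tokenizer(&quot;A cat is sitting on the [MASK]&quot;, return_tensors=&quot;pt&quot;)
&gt;&gt;&gt; visual_feats = torch.rand(1, 36, 2048)  # placeholder ROI features
&gt;&gt;&gt; visual_pos = torch.rand(1, 36, 4)  # placeholder normalized boxes

&gt;&gt;&gt; outputs = model(
...     **inputs,
...     visual_feats=visual_feats,
...     visual_pos=visual_pos,
...     labels=inputs[&quot;input_ids&quot;],  # toy masked-LM targets
...     matched_label=torch.tensor([1]),  # 1 = the sentence matches the image
... )
&gt;&gt;&gt; loss = outputs.loss
&gt;&gt;&gt; prediction_logits = outputs.prediction_logits
&gt;&gt;&gt; cross_relationship_score = outputs.cross_relationship_score
&gt;&gt;&gt; question_answering_score = outputs.question_answering_score</pre></div> <p>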
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.LxmertForPreTraining.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.LxmertForPreTraining.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.LxmertForPreTraining.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/lxmert/modeling_lxmert.py#L1153" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">visual_feats<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">visual_pos<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">visual_attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">obj_labels<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">matched_label<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">ans<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/model_doc/lxmert#transformers.models.lxmert.modeling_lxmert.LxmertForPreTrainingOutput" >transformers.models.lxmert.modeling_lxmert.LxmertForPreTrainingOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LxmertForPreTraining.forward.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LxmertForPreTraining.forward.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/lxmert#transformers.LxmertTokenizer">LxmertTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a> visual_feats &#x2014; (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_visual_features, visual_feat_dim)</code>): This input represents visual features. They are ROI pooled object features from bounding boxes, extracted using a Faster R-CNN model.</p> <p>These are currently not provided by the transformers library. visual_pos &#x2014; (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_visual_features, visual_pos_dim)</code>): This input represents spatial features corresponding to their relative (via index) visual features.
The pre-trained LXMERT model expects these spatial features to be normalized bounding boxes on a scale of 0 to 1.</p> <p>These are currently not provided by the transformers library.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LxmertForPreTraining.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LxmertForPreTraining.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LxmertForPreTraining.forward.visual_attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LxmertForPreTraining.forward.visual_attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>visual_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices.
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LxmertForPreTraining.forward.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LxmertForPreTraining.forward.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LxmertForPreTraining.forward.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LxmertForPreTraining.forward.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LxmertForPreTraining.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LxmertForPreTraining.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LxmertForPreTraining.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LxmertForPreTraining.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LxmertForPreTraining.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LxmertForPreTraining.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LxmertForPreTraining.forward.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LxmertForPreTraining.forward.labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. 
Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring). Tokens with indices set to <code>-100</code> are ignored (masked); the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>. obj_labels &#x2014; (<code>Dict[Str: Tuple[Torch.FloatTensor, Torch.FloatTensor]]</code>, <em>optional</em>): each key is named after one of the visual losses, and each element of the tuple is of shape <code>(batch_size, num_features)</code> and <code>(batch_size, num_features, visual_feature_dim)</code> for the label id and the label score, respectively.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LxmertForPreTraining.forward.matched_label" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LxmertForPreTraining.forward.matched_label"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>matched_label</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing whether or not the text input matches the image (classification) loss.
Input should be a sequence pair (see <code>input_ids</code> docstring). Indices should be in <code>[0, 1]</code>:</p> <ul> <li>0 indicates that the sentence does not match the image,</li> <li>1 indicates that the sentence does match the image.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LxmertForPreTraining.forward.ans" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LxmertForPreTraining.forward.ans"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>ans</strong> (<code>Torch.Tensor</code> of shape <code>(batch_size)</code>, <em>optional</em>) &#x2014; A one-hot representation of the correct answer.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.LxmertForPreTraining.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/model_doc/lxmert#transformers.models.lxmert.modeling_lxmert.LxmertForPreTrainingOutput" >transformers.models.lxmert.modeling_lxmert.LxmertForPreTrainingOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/model_doc/lxmert#transformers.models.lxmert.modeling_lxmert.LxmertForPreTrainingOutput" >transformers.models.lxmert.modeling_lxmert.LxmertForPreTrainingOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/lxmert#transformers.LxmertConfig" >LxmertConfig</a>) and inputs.</p> <ul> <li><strong>loss</strong> (<em>optional</em>, returned when <code>labels</code> is provided, <code>torch.FloatTensor</code> of shape <code>(1,)</code>) — Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification) loss.</li> <li><strong>prediction_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) — Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</li> <li><strong>cross_relationship_score:</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, 2)</code>) — Prediction scores of the textual matching objective (classification) head (scores of
True/False continuation before SoftMax).</li> <li><strong>question_answering_score:</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, n_qa_answers)</code>) — Prediction scores of question answering objective (classification).</li> <li><strong>language_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for input features + one for the output of each cross-modality layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</li> <li><strong>vision_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for input features + one for the output of each cross-modality layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</li> <li><strong>language_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</li> <li><strong>vision_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</li> <li><strong>cross_encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. 
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/lxmert#transformers.LxmertForPreTraining">LxmertForPreTraining</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div></div></div> <h2 class="relative group"><a id="transformers.LxmertForQuestionAnswering" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LxmertForQuestionAnswering"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>LxmertForQuestionAnswering </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.LxmertForQuestionAnswering"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">LxmertForQuestionAnswering</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.LxmertForQuestionAnswering" class="header-link invisible with-hover:group-hover:visible pr-2" 
href="#transformers.LxmertForQuestionAnswering"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/lxmert/modeling_lxmert.py#L1291" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LxmertForQuestionAnswering.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LxmertForQuestionAnswering.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/lxmert#transformers.LxmertConfig">LxmertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Lxmert Model with a visual-answering head on top for downstream QA tasks.</p> <p>The LXMERT model was proposed in <a href="https://arxiv.org/abs/1908.07490" rel="nofollow">LXMERT: Learning Cross-Modality Encoder Representations from Transformers</a> by Hao Tan and Mohit Bansal. It’s a vision and language transformer model, pretrained on a variety of multi-modal datasets comprising GQA, VQAv2.0, MSCOCO captions, and Visual Genome, using a combination of masked language modeling, region of interest feature regression, cross entropy loss for question answering attribute prediction, and object tag prediction.</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads etc.)</p> <p>This model is also a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.LxmertForQuestionAnswering.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.LxmertForQuestionAnswering.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.LxmertForQuestionAnswering.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314
0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/lxmert/modeling_lxmert.py#L1384" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">visual_feats<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">visual_pos<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">visual_attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/model_doc/lxmert#transformers.models.lxmert.modeling_lxmert.LxmertForQuestionAnsweringOutput" >transformers.models.lxmert.modeling_lxmert.LxmertForQuestionAnsweringOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 
text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LxmertForQuestionAnswering.forward.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LxmertForQuestionAnswering.forward.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/lxmert#transformers.LxmertTokenizer">LxmertTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a> visual_feats &#x2014; (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_visual_features, visual_feat_dim)</code>): This input represents visual features. They ROI pooled object features from bounding boxes using a faster-RCNN model)</p> <p>These are currently not provided by the transformers library. visual_pos &#x2014; (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_visual_features, visual_pos_dim)</code>): This input represents spacial features corresponding to their relative (via index) visual features. 
The pre-trained LXMERT model expects these spacial features to be normalized bounding boxes on a scale of 0 to</p> <ol> <li></li> </ol> <p>These are currently not provided by the transformers library.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LxmertForQuestionAnswering.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LxmertForQuestionAnswering.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LxmertForQuestionAnswering.forward.visual_attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LxmertForQuestionAnswering.forward.visual_attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>visual_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LxmertForQuestionAnswering.forward.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LxmertForQuestionAnswering.forward.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LxmertForQuestionAnswering.forward.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LxmertForQuestionAnswering.forward.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LxmertForQuestionAnswering.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LxmertForQuestionAnswering.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LxmertForQuestionAnswering.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LxmertForQuestionAnswering.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LxmertForQuestionAnswering.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LxmertForQuestionAnswering.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.</p> <p>labels &#x2014; (<code>Torch.Tensor</code> of shape <code>(batch_size)</code>, <em>optional</em>): A one-hot representation of the correct answer.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.LxmertForQuestionAnswering.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/model_doc/lxmert#transformers.models.lxmert.modeling_lxmert.LxmertForQuestionAnsweringOutput" >transformers.models.lxmert.modeling_lxmert.LxmertForQuestionAnsweringOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/model_doc/lxmert#transformers.models.lxmert.modeling_lxmert.LxmertForQuestionAnsweringOutput" >transformers.models.lxmert.modeling_lxmert.LxmertForQuestionAnsweringOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/lxmert#transformers.LxmertConfig" >LxmertConfig</a>) and inputs.</p> <ul> <li><strong>loss</strong> (<em>optional</em>, returned when <code>labels</code> is provided, <code>torch.FloatTensor</code> of shape <code>(1,)</code>) — Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification) loss.</li> <li><strong>question_answering_score:</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, n_qa_answers)</code>, <em>optional</em>) — Prediction scores of question answering objective (classification).</li> <li><strong>language_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when
<code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for input features + one for the output of each cross-modality layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</li> <li><strong>vision_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for input features + one for the output of each cross-modality layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</li> <li><strong>language_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</li> <li><strong>vision_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</li> <li><strong>cross_encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. 
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/lxmert#transformers.LxmertForQuestionAnswering">LxmertForQuestionAnswering</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> LxmertTokenizer, LxmertForQuestionAnswering
<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch
<span class="hljs-meta">&gt;&gt;&gt; </span>torch.manual_seed(<span class="hljs-number">0</span>)
<span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = LxmertTokenizer.from_pretrained(<span class="hljs-string">&quot;unc-nlp/lxmert-base-uncased&quot;</span>)
<span class="hljs-meta">&gt;&gt;&gt; </span>model = LxmertForQuestionAnswering.from_pretrained(<span class="hljs-string">&quot;unc-nlp/lxmert-base-uncased&quot;</span>)

<span class="hljs-meta">&gt;&gt;&gt; </span>question = <span class="hljs-string">&quot;How many cats are there?&quot;</span>
<span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(question, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>)

<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># visual_feats and visual_pos normally come from an external object detector (e.g. a Faster R-CNN);</span>
<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># random tensors with the expected shapes are used here only to illustrate the required inputs</span>
<span class="hljs-meta">&gt;&gt;&gt; </span>visual_feats = torch.rand(<span class="hljs-number">1</span>, <span class="hljs-number">36</span>, model.config.visual_feat_dim)
<span class="hljs-meta">&gt;&gt;&gt; </span>visual_pos = torch.rand(<span class="hljs-number">1</span>, <span class="hljs-number">36</span>, model.config.visual_pos_dim)

<span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, visual_feats=visual_feats, visual_pos=visual_pos)
<span class="hljs-meta">&gt;&gt;&gt; </span>answer_scores = outputs.question_answering_score
<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">list</span>(answer_scores.shape) <!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.TFLxmertModel" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFLxmertModel"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TFLxmertModel </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFLxmertModel"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TFLxmertModel</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TFLxmertModel" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFLxmertModel"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0
0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/lxmert/modeling_tf_lxmert.py#L944" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFLxmertModel.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFLxmertModel.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/lxmert#transformers.LxmertConfig">LxmertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>The bare Lxmert Model transformer outputting raw hidden-states without any specific head on top.</p> <p>The LXMERT model was proposed in <a href="https://arxiv.org/abs/1908.07490" rel="nofollow">LXMERT: Learning Cross-Modality Encoder Representations from Transformers</a> by Hao Tan and Mohit Bansal. 
It’s a vision and language transformer model, pre-trained on a variety of multi-modal datasets comprising GQA, VQAv2.0, MSCOCO captions, and Visual Genome, using a combination of masked language modeling, region of interest feature regression, cross-entropy loss for question answering attribute prediction, and object tag prediction.</p> <p>This model is also a <a href="https://www.tensorflow.org/api_docs/python/tf/keras/Model" rel="nofollow">tf.keras.Model</a> subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and behavior.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>TF 2.0 models accept two formats as inputs:</p> <ul><li>having all inputs as keyword arguments (like PyTorch models), or</li> <li>having all inputs as a list, tuple or dict in the first positional argument.</li></ul> <p>This second option is useful when using the <code>tf.keras.Model.fit</code> method, which currently requires having all the tensors in the first argument of the model call function: <code>model(inputs)</code>.</p> <p>If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument:</p> <ul><li>a single Tensor with <code>input_ids</code> only and nothing else: <code>model(input_ids)</code></li> <li>a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: <code>model([input_ids, attention_mask])</code> or <code>model([input_ids, attention_mask, token_type_ids])</code></li> <li>a dictionary with one or several input Tensors associated with the input names given in the docstring: <code>model({&quot;input_ids&quot;: input_ids, &quot;token_type_ids&quot;: token_type_ids})</code></li></ul></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFLxmertModel.call"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>call</span></h4><!-- HTML_TAG_END --> <a 
id="transformers.TFLxmertModel.call" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFLxmertModel.call"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/lxmert/modeling_tf_lxmert.py#L949" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">visual_feats<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">visual_pos<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">visual_attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">training<span class="opacity-60"> = False</span></span> </span><span class="comma cursor-default"><span class="rounded 
hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/model_doc/lxmert#transformers.models.lxmert.modeling_tf_lxmert.TFLxmertModelOutput" >transformers.models.lxmert.modeling_tf_lxmert.TFLxmertModelOutput</a> or <code>tuple(tf.Tensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFLxmertModel.call.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFLxmertModel.call.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/lxmert#transformers.LxmertTokenizer">LxmertTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a> visual_feats &#x2014; (<code>tf.Tensor</code> of shape <code>(batch_size, num_visual_features, visual_feat_dim)</code>): This input represents visual features. These are ROI pooled object features from bounding boxes using a Faster R-CNN model.</p> <p>These are currently not provided by the transformers library. visual_pos &#x2014; (<code>tf.Tensor</code> of shape <code>(batch_size, num_visual_features, visual_feat_dim)</code>): This input represents spatial features corresponding (by index) to their respective visual features. 
The pre-trained LXMERT model expects these spatial features to be normalized bounding boxes on a scale of 0 to 1.</p> <p>These are currently not provided by the transformers library.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFLxmertModel.call.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFLxmertModel.call.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFLxmertModel.call.visual_attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFLxmertModel.call.visual_attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>visual_attention_mask</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFLxmertModel.call.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFLxmertModel.call.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFLxmertModel.call.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFLxmertModel.call.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFLxmertModel.call.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFLxmertModel.call.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFLxmertModel.call.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFLxmertModel.call.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFLxmertModel.call.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFLxmertModel.call.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFLxmertModel.call.training" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFLxmertModel.call.training"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.TFLxmertModel.call.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/model_doc/lxmert#transformers.models.lxmert.modeling_tf_lxmert.TFLxmertModelOutput" >transformers.models.lxmert.modeling_tf_lxmert.TFLxmertModelOutput</a> or 
<code>tuple(tf.Tensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/model_doc/lxmert#transformers.models.lxmert.modeling_tf_lxmert.TFLxmertModelOutput" >transformers.models.lxmert.modeling_tf_lxmert.TFLxmertModelOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/lxmert#transformers.LxmertConfig" >LxmertConfig</a>) and inputs.</p> <ul> <li><strong>language_output</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) — Sequence of hidden-states at the output of the last layer of the language encoder.</li> <li><strong>vision_output</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) — Sequence of hidden-states at the output of the last layer of the visual encoder.</li> <li><strong>pooled_output</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, hidden_size)</code>) — Last layer hidden-state of the first token of the sequence (classification, CLS, token) further processed by a Linear layer and a Tanh activation function.</li> <li><strong>language_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>tf.Tensor</code> (one for input features + one for the output of each cross-modality layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</li> <li><strong>vision_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>tf.Tensor</code> (one for input features + one for the output of each cross-modality layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</li> <li><strong>language_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</li> <li><strong>vision_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</li> <li><strong>cross_encoder_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. 
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/lxmert#transformers.TFLxmertModel">TFLxmertModel</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> LxmertTokenizer, TFLxmertModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = LxmertTokenizer.from_pretrained(<span class="hljs-string">&quot;unc-nlp/lxmert-base-uncased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFLxmertModel.from_pretrained(<span class="hljs-string">&quot;unc-nlp/lxmert-base-uncased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.TFLxmertForPreTraining" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFLxmertForPreTraining"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TFLxmertForPreTraining </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFLxmertForPreTraining"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TFLxmertForPreTraining</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TFLxmertForPreTraining" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFLxmertForPreTraining"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/lxmert/modeling_tf_lxmert.py#L1201" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> 
</span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFLxmertForPreTraining.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFLxmertForPreTraining.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/lxmert#transformers.LxmertConfig">LxmertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Lxmert Model with a <code>language modeling</code> head on top.</p> <p>The LXMERT model was proposed in <a href="https://arxiv.org/abs/1908.07490" rel="nofollow">LXMERT: Learning Cross-Modality Encoder Representations from Transformers</a> by Hao Tan and Mohit Bansal. It’s a vision and language transformer model, pre-trained on a variety of multi-modal datasets comprising GQA, VQAv2.0, MSCOCO captions, and Visual Genome, using a combination of masked language modeling, region of interest feature regression, cross-entropy loss for question answering attribute prediction, and object tag prediction.</p> <p>This model is also a <a href="https://www.tensorflow.org/api_docs/python/tf/keras/Model" rel="nofollow">tf.keras.Model</a> subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and behavior.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>TF 2.0 models accept two formats as inputs:</p> <ul><li>having all inputs as keyword arguments (like PyTorch models), or</li> <li>having all inputs as a list, tuple or dict in the first positional argument.</li></ul> <p>This second option is useful when using the <code>tf.keras.Model.fit</code> method, which currently requires having all the tensors in the first argument of the model call function: <code>model(inputs)</code>.</p> <p>If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument:</p> <ul><li>a single Tensor with <code>input_ids</code> only and nothing else: <code>model(input_ids)</code></li> <li>a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: <code>model([input_ids, attention_mask])</code> or <code>model([input_ids, attention_mask, token_type_ids])</code></li> <li>a dictionary with one or several input Tensors associated with the input names given in the docstring: <code>model({&quot;input_ids&quot;: input_ids, &quot;token_type_ids&quot;: token_type_ids})</code></li></ul></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFLxmertForPreTraining.call"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>call</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFLxmertForPreTraining.call" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFLxmertForPreTraining.call"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 
8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/lxmert/modeling_tf_lxmert.py#L1301" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">visual_feats<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">visual_pos<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">visual_attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">masked_lm_labels<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">obj_labels<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">matched_label<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">ans<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">training<span class="opacity-60"> = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/model_doc/lxmert#transformers.models.lxmert.modeling_tf_lxmert.TFLxmertForPreTrainingOutput" >transformers.models.lxmert.modeling_tf_lxmert.TFLxmertForPreTrainingOutput</a> or <code>tuple(tf.Tensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFLxmertForPreTraining.call.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFLxmertForPreTraining.call.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/lxmert#transformers.LxmertTokenizer">LxmertTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a> visual_feats &#x2014; (<code>tf.Tensor</code> of shape <code>(batch_size, num_visual_features, visual_feat_dim)</code>): This input represents visual features. These are ROI pooled object features from bounding boxes using a Faster R-CNN model.</p> <p>These are currently not provided by the transformers library. visual_pos &#x2014; (<code>tf.Tensor</code> of shape <code>(batch_size, num_visual_features, visual_feat_dim)</code>): This input represents spatial features corresponding (by index) to their respective visual features. 
The pre-trained LXMERT model expects these spatial features to be normalized bounding boxes on a scale of 0 to 1.</p> <p>These are currently not provided by the transformers library.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFLxmertForPreTraining.call.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFLxmertForPreTraining.call.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFLxmertForPreTraining.call.visual_attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFLxmertForPreTraining.call.visual_attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>visual_attention_mask</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFLxmertForPreTraining.call.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFLxmertForPreTraining.call.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFLxmertForPreTraining.call.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFLxmertForPreTraining.call.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFLxmertForPreTraining.call.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFLxmertForPreTraining.call.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFLxmertForPreTraining.call.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFLxmertForPreTraining.call.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFLxmertForPreTraining.call.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFLxmertForPreTraining.call.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFLxmertForPreTraining.call.training" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFLxmertForPreTraining.call.training"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFLxmertForPreTraining.call.masked_lm_labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.TFLxmertForPreTraining.call.masked_lm_labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>masked_lm_labels</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring). Tokens with indices set to <code>-100</code> are ignored (masked); the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>. obj_labels &#x2014; (<code>Dict[Str: Tuple[tf.Tensor, tf.Tensor]]</code>, <em>optional</em>, defaults to <code>None</code>): each key is named after one of the visual losses, and each element of the tuple is of shape <code>(batch_size, num_features)</code> and <code>(batch_size, num_features, visual_feature_dim)</code> for the label ids and the label scores, respectively<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFLxmertForPreTraining.call.matched_label" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFLxmertForPreTraining.call.matched_label"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>matched_label</strong> (<code>tf.Tensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing whether or not the text input matches the image (classification) loss.
Input should be a sequence pair (see <code>input_ids</code> docstring). Indices should be in <code>[0, 1]</code>:</p> <ul> <li>0 indicates that the sentence does not match the image,</li> <li>1 indicates that the sentence does match the image.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFLxmertForPreTraining.call.ans" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFLxmertForPreTraining.call.ans"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>ans</strong> (<code>tf.Tensor</code> of shape <code>(batch_size)</code>, <em>optional</em>, defaults to <code>None</code>) &#x2014; A one-hot representation of the correct answer.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.TFLxmertForPreTraining.call.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/model_doc/lxmert#transformers.models.lxmert.modeling_tf_lxmert.TFLxmertForPreTrainingOutput" >transformers.models.lxmert.modeling_tf_lxmert.TFLxmertForPreTrainingOutput</a> or <code>tuple(tf.Tensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/model_doc/lxmert#transformers.models.lxmert.modeling_tf_lxmert.TFLxmertForPreTrainingOutput" >transformers.models.lxmert.modeling_tf_lxmert.TFLxmertForPreTrainingOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/lxmert#transformers.LxmertConfig" >LxmertConfig</a>) and inputs.</p> <ul> <li><strong>loss</strong> (<em>optional</em>, returned when <code>labels</code> is provided, <code>tf.Tensor</code> of shape <code>(1,)</code>) — Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification) loss.</li> <li><strong>prediction_logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) — Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</li> <li><strong>cross_relationship_score:</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, 2)</code>) — Prediction scores of the textual matching objective (classification) head
(scores of True/False continuation before SoftMax).</li> <li><strong>question_answering_score:</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, n_qa_answers)</code>) — Prediction scores of question answering objective (classification).</li> <li><strong>language_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>tf.Tensor</code> (one for input features + one for the output of each cross-modality layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</li> <li><strong>vision_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>tf.Tensor</code> (one for input features + one for the output of each cross-modality layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</li> <li><strong>language_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</li> <li><strong>vision_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</li> <li><strong>cross_encoder_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. 
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/lxmert#transformers.TFLxmertForPreTraining">TFLxmertForPreTraining</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div></div></div> <script type="module" data-hydrate="1lq0mm2"> import { start } from "/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"; start({ target: document.querySelector('[data-hydrate="1lq0mm2"]').parentNode, paths: {"base":"/docs/transformers/pr_16143/en","assets":"/docs/transformers/pr_16143/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"), import("/docs/transformers/pr_16143/en/_app/pages/model_doc/lxmert.mdx-e3257043.js") ], params: {} } }); </script>
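<p>To make the parameter list above more concrete, the following is a minimal sketch of a forward pass; it is not part of the original documentation. The checkpoint name, the number of regions (36) and the feature dimension (2048) are assumptions based on the commonly released LXMERT checkpoints, and the visual inputs are random placeholders because ROI-pooled Faster R-CNN features are not provided by the transformers library.</p> <div class="code-block relative"><pre>import tensorflow as tf
from transformers import LxmertTokenizer, TFLxmertForPreTraining

# Assumed checkpoint and feature shapes; adjust to your own detector and config.
tokenizer = LxmertTokenizer.from_pretrained("unc-nlp/lxmert-base-uncased")
model = TFLxmertForPreTraining.from_pretrained("unc-nlp/lxmert-base-uncased")

inputs = tokenizer("A cat sitting on a couch", return_tensors="tf")

# Placeholder visual inputs: in practice these come from an external Faster R-CNN detector.
visual_feats = tf.random.uniform((1, 36, 2048))  # (batch_size, num_visual_features, visual_feat_dim)
visual_pos = tf.random.uniform((1, 36, 4))       # normalized bounding boxes on a 0-to-1 scale

outputs = model(
    input_ids=inputs["input_ids"],
    attention_mask=inputs["attention_mask"],
    visual_feats=visual_feats,
    visual_pos=visual_pos,
)

prediction_logits = outputs.prediction_logits                # (batch_size, sequence_length, vocab_size)
cross_relationship_score = outputs.cross_relationship_score  # (batch_size, 2)</pre></div> <p>During pre-training, the optional label arguments described above (<code>masked_lm_labels</code>, <code>obj_labels</code>, <code>matched_label</code>, <code>ans</code>) can be passed in the same call to obtain the combined loss returned as <code>loss</code>.</p>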
215
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en
hf_public_repos/doc-build-dev/transformers/pr_16143/en/model_doc/convnext.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;convnext&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;overview&quot;,&quot;title&quot;:&quot;Overview&quot;},{&quot;local&quot;:&quot;transformers.models.convnext.modeling_convnext.ConvNextModelOutput&quot;,&quot;title&quot;:&quot;ConvNeXT specific outputs&quot;},{&quot;local&quot;:&quot;transformers.ConvNextConfig&quot;,&quot;title&quot;:&quot;ConvNextConfig&quot;},{&quot;local&quot;:&quot;transformers.ConvNextFeatureExtractor&quot;,&quot;title&quot;:&quot;ConvNextFeatureExtractor&quot;},{&quot;local&quot;:&quot;transformers.ConvNextModel&quot;,&quot;title&quot;:&quot;ConvNextModel&quot;},{&quot;local&quot;:&quot;transformers.ConvNextForImageClassification&quot;,&quot;title&quot;:&quot;ConvNextForImageClassification&quot;},{&quot;local&quot;:&quot;transformers.TFConvNextModel&quot;,&quot;title&quot;:&quot;TFConvNextModel&quot;},{&quot;local&quot;:&quot;transformers.TFConvNextForImageClassification&quot;,&quot;title&quot;:&quot;TFConvNextForImageClassification&quot;}],&quot;title&quot;:&quot;ConvNeXT&quot;}" data-svelte="svelte-1phssyn"> <link rel="stylesheet" href="/docs/transformers/pr_16143/en/_app/assets/pages/__layout.svelte-a5c8879b.css"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/vendor-4833417e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/paths-4b3c6e7e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/model_doc/convnext.mdx-f632ad47.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Tip-fffd6df1.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Docstring-4f315ed9.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/IconCopyLink-4b81c553.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CodeBlock-6a3d1b46.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CopyButton-dacfbfaf.js"> <h1 class="relative group"><a id="convnext" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#convnext"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>ConvNeXT </span></h1> <h2 class="relative group"><a id="overview" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#overview"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Overview </span></h2> <p>The ConvNeXT model was proposed in <a href="https://arxiv.org/abs/2201.03545" rel="nofollow">A ConvNet for the 2020s</a> by Zhuang Liu, Hanzi Mao, Chao-Yuan Wu, Christoph Feichtenhofer, Trevor Darrell, Saining Xie. ConvNeXT is a pure convolutional model (ConvNet), inspired by the design of Vision Transformers, that claims to outperform them.</p> <p>The abstract from the paper is the following:</p> <p><em>The “Roaring 20s” of visual recognition began with the introduction of Vision Transformers (ViTs), which quickly superseded ConvNets as the state-of-the-art image classification model. A vanilla ViT, on the other hand, faces difficulties when applied to general computer vision tasks such as object detection and semantic segmentation. It is the hierarchical Transformers (e.g., Swin Transformers) that reintroduced several ConvNet priors, making Transformers practically viable as a generic vision backbone and demonstrating remarkable performance on a wide variety of vision tasks. However, the effectiveness of such hybrid approaches is still largely credited to the intrinsic superiority of Transformers, rather than the inherent inductive biases of convolutions. In this work, we reexamine the design spaces and test the limits of what a pure ConvNet can achieve. We gradually “modernize” a standard ResNet toward the design of a vision Transformer, and discover several key components that contribute to the performance difference along the way. The outcome of this exploration is a family of pure ConvNet models dubbed ConvNeXt. Constructed entirely from standard ConvNet modules, ConvNeXts compete favorably with Transformers in terms of accuracy and scalability, achieving 87.8% ImageNet top-1 accuracy and outperforming Swin Transformers on COCO detection and ADE20K segmentation, while maintaining the simplicity and efficiency of standard ConvNets.</em></p> <p>Tips:</p> <ul><li>See the code examples below each model regarding usage.</li></ul> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/convnext_architecture.jpg" alt="drawing" width="600"> <small>ConvNeXT architecture. Taken from the <a href="https://arxiv.org/abs/2201.03545">original paper</a>.</small> <p>This model was contributed by <a href="https://huggingface.co/nielsr" rel="nofollow">nielsr</a>. TensorFlow version of the model was contributed by <a href="https://github.com/ariG23498" rel="nofollow">ariG23498</a>, <a href="https://github.com/gante" rel="nofollow">gante</a>, and <a href="https://github.com/sayakpaul" rel="nofollow">sayakpaul</a> (equal contribution). 
The original code can be found <a href="https://github.com/facebookresearch/ConvNeXt" rel="nofollow">here</a>.</p> <h2 class="relative group"><a id="transformers.models.convnext.modeling_convnext.ConvNextModelOutput" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.convnext.modeling_convnext.ConvNextModelOutput"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>ConvNeXT specific outputs </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.models.convnext.modeling_convnext.ConvNextModelOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.models.convnext.modeling_convnext.</span><span class="font-semibold">ConvNextModelOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.models.convnext.modeling_convnext.ConvNextModelOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.models.convnext.modeling_convnext.ConvNextModelOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 
11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/convnext/modeling_convnext.py#L76" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">last_hidden_state<span class="opacity-60">: FloatTensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pooler_output<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.convnext.modeling_convnext.ConvNextModelOutput.last_hidden_state" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.convnext.modeling_convnext.ConvNextModelOutput.last_hidden_state"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_channels, height, width)</code>) &#x2014; Last hidden states (final feature map) of the last stage of the model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.convnext.modeling_convnext.ConvNextModelOutput.pooler_output" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.convnext.modeling_convnext.ConvNextModelOutput.pooler_output"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pooler_output</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.dim[-1])</code>) &#x2014; Global average pooling of the last feature map followed by a layernorm.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.convnext.modeling_convnext.ConvNextModelOutput.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.convnext.modeling_convnext.ConvNextModelOutput.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each stage) of shape <code>(batch_size, num_channels, height, width)</code>. 
Hidden-states (also called feature maps) of the model at the output of each stage.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Class for <a href="/docs/transformers/pr_16143/en/model_doc/convnext#transformers.ConvNextModel">ConvNextModel</a>’s outputs, with potential hidden states (feature maps).</p></div> <h2 class="relative group"><a id="transformers.ConvNextConfig" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConvNextConfig"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>ConvNextConfig </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ConvNextConfig"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">ConvNextConfig</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.ConvNextConfig" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ConvNextConfig"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 
56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/convnext/configuration_convnext.py#L29" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_channels<span class="opacity-60"> = 3</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">patch_size<span class="opacity-60"> = 4</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_stages<span class="opacity-60"> = 4</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_sizes<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">depths<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_act<span class="opacity-60"> = &#39;gelu&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">initializer_range<span class="opacity-60"> = 0.02</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">layer_norm_eps<span class="opacity-60"> = 1e-12</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">is_encoder_decoder<span class="opacity-60"> = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">layer_scale_init_value<span class="opacity-60"> = 1e-06</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">drop_path_rate<span class="opacity-60"> = 0.0</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">image_size<span class="opacity-60"> = 224</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ConvNextConfig.num_channels" class="header-link block pr-0.5 text-lg no-hover:hidden 
with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConvNextConfig.num_channels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_channels</strong> (<code>int</code>, <em>optional</em>, defaults to 3) &#x2014; The number of input channels.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ConvNextConfig.patch_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConvNextConfig.patch_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>patch_size</strong> (<code>int</code>, optional, defaults to 4) &#x2014; Patch size to use in the patch embedding layer.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ConvNextConfig.num_stages" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConvNextConfig.num_stages"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_stages</strong> (<code>int</code>, optional, defaults to 4) &#x2014; The number of stages in the model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ConvNextConfig.hidden_sizes" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConvNextConfig.hidden_sizes"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_sizes</strong> (<code>List[int]</code>, <em>optional</em>, defaults to [96, 192, 384, 768]) &#x2014; Dimensionality (hidden size) at each stage.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ConvNextConfig.depths" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConvNextConfig.depths"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>depths</strong> (<code>List[int]</code>, <em>optional</em>, defaults to [3, 3, 9, 3]) &#x2014; Depth (number of blocks) for each stage.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ConvNextConfig.hidden_act" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConvNextConfig.hidden_act"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 
8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_act</strong> (<code>str</code> or <code>function</code>, <em>optional</em>, defaults to <code>&quot;gelu&quot;</code>) &#x2014; The non-linear activation function (function or string) in each block. If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;selu&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ConvNextConfig.initializer_range" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConvNextConfig.initializer_range"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>initializer_range</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ConvNextConfig.layer_norm_eps" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConvNextConfig.layer_norm_eps"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>layer_norm_eps</strong> (<code>float</code>, 
<em>optional</em>, defaults to 1e-12) &#x2014; The epsilon used by the layer normalization layers.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ConvNextConfig.layer_scale_init_value" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConvNextConfig.layer_scale_init_value"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>layer_scale_init_value</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-6) &#x2014; The initial value for the layer scale.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ConvNextConfig.drop_path_rate" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConvNextConfig.drop_path_rate"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>drop_path_rate</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The drop rate for stochastic depth.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>This is the configuration class to store the configuration of a <a href="/docs/transformers/pr_16143/en/model_doc/convnext#transformers.ConvNextModel">ConvNextModel</a>. It is used to instantiate a ConvNeXT model according to the specified arguments, defining the model architecture.
Instantiating a configuration with the defaults will yield a similar configuration to that of the ConvNeXT <a href="https://huggingface.co/facebook/convnext-tiny-224" rel="nofollow">facebook/convnext-tiny-224</a> architecture.</p> <p>Configuration objects inherit from <a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a> and can be used to control the model outputs. Read the documentation from <a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a> for more information.</p> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ConvNextModel, ConvNextConfig <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a ConvNext convnext-tiny-224 style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = ConvNextConfig() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a model from the convnext-tiny-224 style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = ConvNextModel(configuration) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = model.config<!-- HTML_TAG_END --></pre></div></div> <h2 class="relative group"><a id="transformers.ConvNextFeatureExtractor" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConvNextFeatureExtractor"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 
11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>ConvNextFeatureExtractor </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ConvNextFeatureExtractor"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">ConvNextFeatureExtractor</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.ConvNextFeatureExtractor" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ConvNextFeatureExtractor"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/convnext/feature_extraction_convnext.py#L37" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">do_resize<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">size<span class="opacity-60"> = 224</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">resample<span class="opacity-60"> = 3</span></span> </span><span class="comma 
cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">crop_pct<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">do_normalize<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">image_mean<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">image_std<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ConvNextFeatureExtractor.do_resize" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConvNextFeatureExtractor.do_resize"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>do_resize</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether to resize (and optionally center crop) the input to a certain <code>size</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ConvNextFeatureExtractor.size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConvNextFeatureExtractor.size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 
1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>size</strong> (<code>int</code>, <em>optional</em>, defaults to 224) &#x2014; Resize the input to the given size. If 384 or larger, the image is resized to (<code>size</code>, <code>size</code>). Else, the smaller edge of the image will be matched to int(<code>size</code>/ <code>crop_pct</code>), after which the image is cropped to <code>size</code>. Only has an effect if <code>do_resize</code> is set to <code>True</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ConvNextFeatureExtractor.resample" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConvNextFeatureExtractor.resample"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>resample</strong> (<code>int</code>, <em>optional</em>, defaults to <code>PIL.Image.BICUBIC</code>) &#x2014; An optional resampling filter. This can be one of <code>PIL.Image.NEAREST</code>, <code>PIL.Image.BOX</code>, <code>PIL.Image.BILINEAR</code>, <code>PIL.Image.HAMMING</code>, <code>PIL.Image.BICUBIC</code> or <code>PIL.Image.LANCZOS</code>. 
Only has an effect if <code>do_resize</code> is set to <code>True</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ConvNextFeatureExtractor.crop_pct" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConvNextFeatureExtractor.crop_pct"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>crop_pct</strong> (<code>float</code>, <em>optional</em>) &#x2014; The percentage of the image to crop. If <code>None</code>, then a cropping percentage of 224 / 256 is used. Only has an effect if <code>do_resize</code> is set to <code>True</code> and <code>size</code> &lt; 384.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ConvNextFeatureExtractor.do_normalize" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConvNextFeatureExtractor.do_normalize"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>do_normalize</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to normalize the input with mean and standard deviation.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ConvNextFeatureExtractor.image_mean" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConvNextFeatureExtractor.image_mean"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>image_mean</strong> (<code>List[int]</code>, defaults to <code>[0.485, 0.456, 0.406]</code>) &#x2014; The sequence of means for each channel, to be used when normalizing images.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ConvNextFeatureExtractor.image_std" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConvNextFeatureExtractor.image_std"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>image_std</strong> (<code>List[int]</code>, defaults to <code>[0.229, 0.224, 0.225]</code>) &#x2014; The sequence of standard deviations for each channel, to be used when normalizing images.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Constructs a ConvNeXT feature extractor.</p> <p>This feature extractor inherits from <a href="/docs/transformers/pr_16143/en/main_classes/feature_extractor#transformers.FeatureExtractionMixin">FeatureExtractionMixin</a> which contains most of the main methods. 
Users should refer to this superclass for more information regarding those methods.</p></div> <h2 class="relative group"><a id="transformers.ConvNextModel" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConvNextModel"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>ConvNextModel </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ConvNextModel"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">ConvNextModel</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.ConvNextModel" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ConvNextModel"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" 
href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/convnext/modeling_convnext.py#L368" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ConvNextModel.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConvNextModel.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/convnext#transformers.ConvNextConfig">ConvNextConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>The bare ConvNext model outputting raw features without any specific head on top. This model is a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ConvNextModel.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.ConvNextModel.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ConvNextModel.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/convnext/modeling_convnext.py#L382" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pixel_values<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/model_doc/convnext#transformers.models.convnext.modeling_convnext.ConvNextModelOutput" >transformers.models.convnext.modeling_convnext.ConvNextModelOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ConvNextModel.forward.pixel_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConvNextModel.forward.pixel_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pixel_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_channels, height, width)</code>) &#x2014; Pixel values. Pixel values can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/auto#transformers.AutoFeatureExtractor">AutoFeatureExtractor</a>. 
See <code>AutoFeatureExtractor.__call__()</code>for details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ConvNextModel.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConvNextModel.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ConvNextModel.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConvNextModel.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.ConvNextModel.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/model_doc/convnext#transformers.models.convnext.modeling_convnext.ConvNextModelOutput" >transformers.models.convnext.modeling_convnext.ConvNextModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> 
<p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/model_doc/convnext#transformers.models.convnext.modeling_convnext.ConvNextModelOutput" >transformers.models.convnext.modeling_convnext.ConvNextModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/convnext#transformers.ConvNextConfig" >ConvNextConfig</a>) and inputs.</p> <ul> <li><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_channels, height, width)</code>) — Last hidden states (final feature map) of the last stage of the model.</li> <li><strong>pooler_output</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.dim[-1])</code>) — Global average pooling of the last feature map followed by a layernorm.</li> <li><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each stage) of shape <code>(batch_size, num_channels, height, width)</code>. Hidden-states (also called feature maps) of the model at the output of each stage.</li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/convnext#transformers.ConvNextModel">ConvNextModel</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span 
class="hljs-keyword">import</span> ConvNextFeatureExtractor, ConvNextModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span>dataset = load_dataset(<span class="hljs-string">&quot;huggingface/cats-image&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>image = dataset[<span class="hljs-string">&quot;test&quot;</span>][<span class="hljs-string">&quot;image&quot;</span>][<span class="hljs-number">0</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = ConvNextFeatureExtractor.from_pretrained(<span class="hljs-string">&quot;facebook/convnext-tiny-224&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = ConvNextModel.from_pretrained(<span class="hljs-string">&quot;facebook/convnext-tiny-224&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = feature_extractor(image, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">with</span> torch.no_grad(): <span class="hljs-meta">... </span> outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">list</span>(last_hidden_states.shape) [<span class="hljs-number">1</span>, <span class="hljs-number">768</span>, <span class="hljs-number">7</span>, <span class="hljs-number">7</span>]<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.ConvNextForImageClassification" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConvNextForImageClassification"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>ConvNextForImageClassification </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ConvNextForImageClassification"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 
0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">ConvNextForImageClassification</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.ConvNextForImageClassification" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ConvNextForImageClassification"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/convnext/modeling_convnext.py#L430" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ConvNextForImageClassification.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConvNextForImageClassification.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/convnext#transformers.ConvNextConfig">ConvNextConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>ConvNext Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet.</p> <p>This model is a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ConvNextForImageClassification.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.ConvNextForImageClassification.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ConvNextForImageClassification.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a 
class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/convnext/modeling_convnext.py#L445" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pixel_values<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>transformers.models.convnext.modeling_convnext.ConvNextClassifierOutput</code>or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ConvNextForImageClassification.forward.pixel_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConvNextForImageClassification.forward.pixel_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pixel_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_channels, height, width)</code>) &#x2014; Pixel values. Pixel values can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/auto#transformers.AutoFeatureExtractor">AutoFeatureExtractor</a>. 
See <code>AutoFeatureExtractor.__call__()</code>for details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ConvNextForImageClassification.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConvNextForImageClassification.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ConvNextForImageClassification.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConvNextForImageClassification.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ConvNextForImageClassification.forward.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConvNextForImageClassification.forward.labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the image classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.ConvNextForImageClassification.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>transformers.models.convnext.modeling_convnext.ConvNextClassifierOutput</code>or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <code>transformers.models.convnext.modeling_convnext.ConvNextClassifierOutput</code>or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/convnext#transformers.ConvNextConfig" >ConvNextConfig</a>) and inputs.</p> <ul> <li><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) — Classification (or regression if config.num_labels==1) loss.</li> <li><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) — Classification (or regression if config.num_labels==1) scores (before SoftMax).</li> <li><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each stage) of shape <code>(batch_size, num_channels, height, width)</code>. 
Hidden-states (also called feature maps) of the model at the output of each stage.</li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/convnext#transformers.ConvNextForImageClassification">ConvNextForImageClassification</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ConvNextFeatureExtractor, ConvNextForImageClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span>dataset = load_dataset(<span class="hljs-string">&quot;huggingface/cats-image&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>image = dataset[<span class="hljs-string">&quot;test&quot;</span>][<span class="hljs-string">&quot;image&quot;</span>][<span class="hljs-number">0</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = ConvNextFeatureExtractor.from_pretrained(<span class="hljs-string">&quot;facebook/convnext-tiny-224&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = ConvNextForImageClassification.from_pretrained(<span class="hljs-string">&quot;facebook/convnext-tiny-224&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = feature_extractor(image, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">with</span> torch.no_grad(): <span class="hljs-meta">... 
</span> logits = model(**inputs).logits <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># model predicts one of the 1000 ImageNet classes</span> <span class="hljs-meta">&gt;&gt;&gt; </span>predicted_label = logits.argmax(-<span class="hljs-number">1</span>).item() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(model.config.id2label[predicted_label]) tabby, tabby cat<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.TFConvNextModel" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFConvNextModel"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TFConvNextModel </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFConvNextModel"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TFConvNextModel</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TFConvNextModel" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFConvNextModel"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 
79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/convnext/modeling_tf_convnext.py#L441" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFConvNextModel.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFConvNextModel.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/convnext#transformers.ConvNextConfig">ConvNextConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>The bare ConvNext model outputting raw features without any specific head on top. This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)</p> <p>This model is also a <a href="https://www.tensorflow.org/api_docs/python/tf/keras/Model" rel="nofollow">tf.keras.Model</a> subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>TF 2.0 models accepts two formats as inputs:</p> <ul><li>having all inputs as keyword arguments (like PyTorch models), or</li> <li>having all inputs as a list, tuple or dict in the first positional arguments.</li></ul> <p>This second option is useful when using <code>tf.keras.Model.fit</code> method which currently requires having all the tensors in the first argument of the model call function: <code>model(inputs)</code>.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFConvNextModel.call"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>call</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFConvNextModel.call" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFConvNextModel.call"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 
0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/convnext/modeling_tf_convnext.py#L446" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pixel_values<span class="opacity-60">: typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">training<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFBaseModelOutputWithPooling" >transformers.modeling_tf_outputs.TFBaseModelOutputWithPooling</a> or <code>tuple(tf.Tensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFConvNextModel.call.pixel_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFConvNextModel.call.pixel_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 
11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pixel_values</strong> (<code>np.ndarray</code>, <code>tf.Tensor</code>, <code>List[tf.Tensor]</code> `<code>Dict[str, tf.Tensor]</code> or <code>Dict[str, np.ndarray]</code> and each example must have the shape <code>(batch_size, num_channels, height, width)</code>) &#x2014; Pixel values. Pixel values can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/convnext#transformers.ConvNextFeatureExtractor">ConvNextFeatureExtractor</a>. See <code>ConvNextFeatureExtractor.__call__()</code>for details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFConvNextModel.call.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFConvNextModel.call.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFConvNextModel.call.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFConvNextModel.call.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.TFConvNextModel.call.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFBaseModelOutputWithPooling" >transformers.modeling_tf_outputs.TFBaseModelOutputWithPooling</a> or <code>tuple(tf.Tensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFBaseModelOutputWithPooling" >transformers.modeling_tf_outputs.TFBaseModelOutputWithPooling</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/convnext#transformers.ConvNextConfig" >ConvNextConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) — Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>pooler_output</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, hidden_size)</code>) — Last layer hidden-state of the first token of the sequence (classification token) further processed by a Linear layer and a Tanh activation function. 
The Linear layer weights are trained from the next sentence prediction (classification) objective during pretraining.</p> <p>This output is usually <em>not</em> a good summary of the semantic content of the input, you’re often better with averaging or pooling the sequence of hidden-states for the whole input sequence.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/convnext#transformers.TFConvNextModel">TFConvNextModel</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ConvNextFeatureExtractor, TFConvNextModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> 
Image <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> requests <span class="hljs-meta">&gt;&gt;&gt; </span>url = <span class="hljs-string">&quot;http://images.cocodataset.org/val2017/000000039769.jpg&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>image = Image.<span class="hljs-built_in">open</span>(requests.get(url, stream=<span class="hljs-literal">True</span>).raw) <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = ConvNextFeatureExtractor.from_pretrained(<span class="hljs-string">&quot;facebook/convnext-tiny-224&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFConvNextModel.from_pretrained(<span class="hljs-string">&quot;facebook/convnext-tiny-224&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = feature_extractor(images=image, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.TFConvNextForImageClassification" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFConvNextForImageClassification"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TFConvNextForImageClassification </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFConvNextForImageClassification"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span 
class="font-semibold">TFConvNextForImageClassification</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TFConvNextForImageClassification" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFConvNextForImageClassification"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/convnext/modeling_tf_convnext.py#L521" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFConvNextForImageClassification.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFConvNextForImageClassification.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/convnext#transformers.ConvNextConfig">ConvNextConfig</a>) &#x2014; Model configuration class with all the parameters of the model. 
Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>ConvNext Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet.</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)</p> <p>This model is also a <a href="https://www.tensorflow.org/api_docs/python/tf/keras/Model" rel="nofollow">tf.keras.Model</a> subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>TF 2.0 models accepts two formats as inputs:</p> <ul><li>having all inputs as keyword arguments (like PyTorch models), or</li> <li>having all inputs as a list, tuple or dict in the first positional arguments.</li></ul> <p>This second option is useful when using <code>tf.keras.Model.fit</code> method which currently requires having all the tensors in the first argument of the model call function: <code>model(inputs)</code>.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFConvNextForImageClassification.call"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>call</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFConvNextForImageClassification.call" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFConvNextForImageClassification.call"><svg class="" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/convnext/modeling_tf_convnext.py#L536" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pixel_values<span class="opacity-60">: typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">training<span class="opacity-60">: typing.Optional[bool] = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFSequenceClassifierOutput" >transformers.modeling_tf_outputs.TFSequenceClassifierOutput</a> or <code>tuple(tf.Tensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li 
class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFConvNextForImageClassification.call.pixel_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFConvNextForImageClassification.call.pixel_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pixel_values</strong> (<code>np.ndarray</code>, <code>tf.Tensor</code>, <code>List[tf.Tensor]</code> `<code>Dict[str, tf.Tensor]</code> or <code>Dict[str, np.ndarray]</code> and each example must have the shape <code>(batch_size, num_channels, height, width)</code>) &#x2014; Pixel values. Pixel values can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/convnext#transformers.ConvNextFeatureExtractor">ConvNextFeatureExtractor</a>. See <code>ConvNextFeatureExtractor.__call__()</code>for details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFConvNextForImageClassification.call.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFConvNextForImageClassification.call.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFConvNextForImageClassification.call.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFConvNextForImageClassification.call.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFConvNextForImageClassification.call.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFConvNextForImageClassification.call.labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>tf.Tensor</code> or <code>np.ndarray</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the image classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. 
If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.TFConvNextForImageClassification.call.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFSequenceClassifierOutput" >transformers.modeling_tf_outputs.TFSequenceClassifierOutput</a> or <code>tuple(tf.Tensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFSequenceClassifierOutput" >transformers.modeling_tf_outputs.TFSequenceClassifierOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/convnext#transformers.ConvNextConfig" >ConvNextConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, )</code>, <em>optional</em>, returned when <code>labels</code> is provided) — Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, config.num_labels)</code>) — Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/convnext#transformers.TFConvNextForImageClassification">TFConvNextForImageClassification</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Examples:</p> <div class="code-block relative"><div 
class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ConvNextFeatureExtractor, TFConvNextForImageClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> requests <span class="hljs-meta">&gt;&gt;&gt; </span>url = <span class="hljs-string">&quot;http://images.cocodataset.org/val2017/000000039769.jpg&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>image = Image.<span class="hljs-built_in">open</span>(requests.get(url, stream=<span class="hljs-literal">True</span>).raw) <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = ConvNextFeatureExtractor.from_pretrained(<span class="hljs-string">&quot;facebook/convnext-tiny-224&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFViTForImageClassification.from_pretrained(<span class="hljs-string">&quot;facebook/convnext-tiny-224&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = feature_extractor(images=image, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># model predicts one of the 1000 ImageNet classes</span> <span class="hljs-meta">&gt;&gt;&gt; </span>predicted_class_idx = tf.math.argmax(logits, axis=-<span class="hljs-number">1</span>)[<span class="hljs-number">0</span>] <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(<span class="hljs-string">&quot;Predicted class:&quot;</span>, model.config.id2label[<span class="hljs-built_in">int</span>(predicted_class_idx)])<!-- HTML_TAG_END --></pre></div></div></div> <script type="module" data-hydrate="xf0356"> import { start } from "/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"; start({ target: document.querySelector('[data-hydrate="xf0356"]').parentNode, paths: {"base":"/docs/transformers/pr_16143/en","assets":"/docs/transformers/pr_16143/en"}, session: {}, 
route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"), import("/docs/transformers/pr_16143/en/_app/pages/model_doc/convnext.mdx-f632ad47.js") ], params: {} } }); </script>
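<p>The tip above notes that TF 2.0 models also accept all inputs as a single list, tuple or dict in the first positional argument, which is the layout <code>tf.keras.Model.fit</code> expects. A minimal sketch of that second input format, reusing the same checkpoint and image as the examples above:</p> <pre>&gt;&gt;&gt; from transformers import ConvNextFeatureExtractor, TFConvNextModel
&gt;&gt;&gt; from PIL import Image
&gt;&gt;&gt; import requests

&gt;&gt;&gt; url = "http://images.cocodataset.org/val2017/000000039769.jpg"
&gt;&gt;&gt; image = Image.open(requests.get(url, stream=True).raw)

&gt;&gt;&gt; feature_extractor = ConvNextFeatureExtractor.from_pretrained("facebook/convnext-tiny-224")
&gt;&gt;&gt; model = TFConvNextModel.from_pretrained("facebook/convnext-tiny-224")

&gt;&gt;&gt; inputs = feature_extractor(images=image, return_tensors="tf")

&gt;&gt;&gt; # first format: pass every input as a keyword argument (as in the examples above)
&gt;&gt;&gt; outputs = model(pixel_values=inputs["pixel_values"])

&gt;&gt;&gt; # second format: pass all inputs as one dict in the first positional argument,
&gt;&gt;&gt; # the layout tf.keras.Model.fit currently requires
&gt;&gt;&gt; outputs = model({"pixel_values": inputs["pixel_values"]})
&gt;&gt;&gt; last_hidden_states = outputs.last_hidden_state</pre>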
216
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en
hf_public_repos/doc-build-dev/transformers/pr_16143/en/model_doc/imagegpt.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;imagegpt&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;overview&quot;,&quot;title&quot;:&quot;Overview&quot;},{&quot;local&quot;:&quot;transformers.ImageGPTConfig&quot;,&quot;title&quot;:&quot;ImageGPTConfig&quot;},{&quot;local&quot;:&quot;transformers.ImageGPTFeatureExtractor&quot;,&quot;title&quot;:&quot;ImageGPTFeatureExtractor&quot;},{&quot;local&quot;:&quot;transformers.ImageGPTModel&quot;,&quot;title&quot;:&quot;ImageGPTModel&quot;},{&quot;local&quot;:&quot;transformers.ImageGPTForCausalImageModeling&quot;,&quot;title&quot;:&quot;ImageGPTForCausalImageModeling&quot;},{&quot;local&quot;:&quot;transformers.ImageGPTForImageClassification&quot;,&quot;title&quot;:&quot;ImageGPTForImageClassification&quot;}],&quot;title&quot;:&quot;ImageGPT&quot;}" data-svelte="svelte-1phssyn"> <link rel="stylesheet" href="/docs/transformers/pr_16143/en/_app/assets/pages/__layout.svelte-a5c8879b.css"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/vendor-4833417e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/paths-4b3c6e7e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/model_doc/imagegpt.mdx-5a1cdb73.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Tip-fffd6df1.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Docstring-4f315ed9.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/IconCopyLink-4b81c553.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CodeBlock-6a3d1b46.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CopyButton-dacfbfaf.js"> <h1 class="relative group"><a id="imagegpt" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#imagegpt"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>ImageGPT </span></h1> <h2 class="relative group"><a id="overview" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#overview"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 
11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Overview </span></h2> <p>The ImageGPT model was proposed in <a href="https://openai.com/blog/image-gpt" rel="nofollow">Generative Pretraining from Pixels</a> by Mark Chen, Alec Radford, Rewon Child, Jeffrey Wu, Heewoo Jun, David Luan, Ilya Sutskever. ImageGPT (iGPT) is a GPT-2-like model trained to predict the next pixel value, allowing for both unconditional and conditional image generation.</p> <p>The abstract from the paper is the following:</p> <p><em>Inspired by progress in unsupervised representation learning for natural language, we examine whether similar models can learn useful representations for images. We train a sequence Transformer to auto-regressively predict pixels, without incorporating knowledge of the 2D input structure. Despite training on low-resolution ImageNet without labels, we find that a GPT-2 scale model learns strong image representations as measured by linear probing, fine-tuning, and low-data classification. On CIFAR-10, we achieve 96.3% accuracy with a linear probe, outperforming a supervised Wide ResNet, and 99.0% accuracy with full fine-tuning, matching the top supervised pre-trained models. We are also competitive with self-supervised benchmarks on ImageNet when substituting pixels for a VQVAE encoding, achieving 69.0% top-1 accuracy on a linear probe of our features.</em></p> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/imagegpt_architecture.png" alt="drawing" width="600"> <small>Summary of the approach. Taken from the <a href="https://cdn.openai.com/papers/Generative_Pretraining_from_Pixels_V2.pdf" rel="nofollow">original paper</a>. </small> <p>This model was contributed by <a href="https://huggingface.co/nielsr" rel="nofollow">nielsr</a>, based on <a href="https://github.com/openai/image-gpt/issues/7" rel="nofollow">this issue</a>. The original code can be found <a href="https://github.com/openai/image-gpt" rel="nofollow">here</a>.</p> <p>Tips:</p> <ul><li>Demo notebooks for ImageGPT can be found <a href="https://github.com/NielsRogge/Transformers-Tutorials/tree/master/ImageGPT" rel="nofollow">here</a>.</li> <li>ImageGPT is almost exactly the same as <a href="gpt2">GPT-2</a>, with the exception that a different activation function is used (namely “quick gelu”), and the layer normalization layers don’t mean-center the inputs. ImageGPT also doesn’t have tied input and output embeddings.</li> <li>As the time and memory requirements of the attention mechanism of Transformers scale quadratically in the sequence length, the authors pre-trained ImageGPT on smaller input resolutions, such as 32x32 and 64x64. However, feeding a sequence of 32x32x3=3072 values in the range 0..255 into a Transformer is still prohibitively long. Therefore, the authors applied k-means clustering to the (R,G,B) pixel values with k=512. This way, we only have a 32x32 = 1024-token sequence, but now of integers in the range 0..511. So we shrink the sequence length at the cost of a bigger embedding matrix. 
In other words, the vocabulary size of ImageGPT is 512 + 1 = 513: 512 color clusters plus a special “start of sentence” (SOS) token, used at the beginning of every sequence. One can use <a href="/docs/transformers/pr_16143/en/model_doc/imagegpt#transformers.ImageGPTFeatureExtractor">ImageGPTFeatureExtractor</a> to prepare images for the model.</li> <li>Despite being pre-trained entirely unsupervised (i.e. without the use of any labels), ImageGPT produces fairly performant image features that are useful for downstream tasks, such as image classification. The authors showed that the features in the middle of the network are the most performant, and can be used as-is to train a linear model (such as a scikit-learn logistic regression model, for example). This is also referred to as “linear probing”. Features can easily be obtained by forwarding the image through the model with <code>output_hidden_states=True</code> and then average-pooling the hidden states at whatever layer you like (see the code sketch below the table).</li> <li>Alternatively, one can further fine-tune the entire model on a downstream dataset, similar to BERT. For this, you can use <a href="/docs/transformers/pr_16143/en/model_doc/imagegpt#transformers.ImageGPTForImageClassification">ImageGPTForImageClassification</a>.</li> <li>ImageGPT comes in different sizes: there’s ImageGPT-small, ImageGPT-medium and ImageGPT-large. The authors also trained an XL variant, which they didn’t release. The differences in size are summarized in the following table:</li></ul> <table><thead><tr><th><strong>Model variant</strong></th> <th><strong>Layers</strong></th> <th><strong>Hidden size</strong></th> <th><strong>Params</strong></th></tr></thead> <tbody><tr><td>ImageGPT-small</td> <td>24</td> <td>512</td> <td>76M</td></tr> <tr><td>ImageGPT-medium</td> <td>36</td> <td>1024</td> <td>455M</td></tr> <tr><td>ImageGPT-large</td> <td>48</td> <td>1536</td> <td>1.4B</td></tr> <tr><td>ImageGPT-XL (not released)</td> <td>60</td> <td>3072</td> <td>6.8B</td></tr></tbody></table>
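<p>As a rough illustration of the linear-probing recipe above, here is a minimal sketch (it assumes the <code>openai/imagegpt-small</code> checkpoint name on the Hub and picks the middle hidden layer purely for illustration):</p> <pre>
&gt;&gt;&gt; import torch
&gt;&gt;&gt; import requests
&gt;&gt;&gt; from PIL import Image
&gt;&gt;&gt; from transformers import ImageGPTFeatureExtractor, ImageGPTModel

&gt;&gt;&gt; url = "http://images.cocodataset.org/val2017/000000039769.jpg"
&gt;&gt;&gt; image = Image.open(requests.get(url, stream=True).raw)

&gt;&gt;&gt; # checkpoint name assumed here; use whichever ImageGPT checkpoint you actually have
&gt;&gt;&gt; feature_extractor = ImageGPTFeatureExtractor.from_pretrained("openai/imagegpt-small")
&gt;&gt;&gt; model = ImageGPTModel.from_pretrained("openai/imagegpt-small")

&gt;&gt;&gt; # color-quantize the image into a 32x32 = 1024-long sequence of cluster indices
&gt;&gt;&gt; encoding = feature_extractor(images=image, return_tensors="pt")

&gt;&gt;&gt; with torch.no_grad():
...     outputs = model(**encoding, output_hidden_states=True)

&gt;&gt;&gt; # average-pool the hidden states of a middle layer to get one feature vector per image
&gt;&gt;&gt; middle_layer = len(outputs.hidden_states) // 2
&gt;&gt;&gt; features = outputs.hidden_states[middle_layer].mean(dim=1)
</pre> <p>The pooled features can then be fed to any off-the-shelf linear classifier.</p> <h2 class="relative group"><a id="transformers.ImageGPTConfig" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageGPTConfig"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>ImageGPTConfig 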
</span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ImageGPTConfig"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">ImageGPTConfig</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.ImageGPTConfig" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ImageGPTConfig"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/imagegpt/configuration_imagegpt.py#L30" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">vocab_size<span class="opacity-60"> = 513</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">n_positions<span class="opacity-60"> = 1024</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">n_embd<span class="opacity-60"> = 512</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">n_layer<span class="opacity-60"> = 24</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white 
dark:hover:bg-white dark:hover:text-black">n_head<span class="opacity-60"> = 8</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">n_inner<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">activation_function<span class="opacity-60"> = &#39;quick_gelu&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">resid_pdrop<span class="opacity-60"> = 0.1</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">embd_pdrop<span class="opacity-60"> = 0.1</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attn_pdrop<span class="opacity-60"> = 0.1</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">layer_norm_epsilon<span class="opacity-60"> = 1e-05</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">initializer_range<span class="opacity-60"> = 0.02</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scale_attn_weights<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_cache<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tie_word_embeddings<span class="opacity-60"> = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scale_attn_by_inverse_layer_idx<span class="opacity-60"> = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">reorder_and_upcast_attn<span class="opacity-60"> = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageGPTConfig.vocab_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageGPTConfig.vocab_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path 
d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 512) &#x2014; Vocabulary size of the GPT-2 model. Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed when calling <a href="/docs/transformers/pr_16143/en/model_doc/imagegpt#transformers.ImageGPTModel">ImageGPTModel</a> or <code>TFImageGPTModel</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageGPTConfig.n_positions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageGPTConfig.n_positions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>n_positions</strong> (<code>int</code>, <em>optional</em>, defaults to 32*32) &#x2014; The maximum sequence length that this model might ever be used with. 
Typically set this to something large just in case (e.g., 512 or 1024 or 2048).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageGPTConfig.n_embd" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageGPTConfig.n_embd"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>n_embd</strong> (<code>int</code>, <em>optional</em>, defaults to 512) &#x2014; Dimensionality of the embeddings and hidden states.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageGPTConfig.n_layer" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageGPTConfig.n_layer"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>n_layer</strong> (<code>int</code>, <em>optional</em>, defaults to 24) &#x2014; Number of hidden layers in the Transformer encoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageGPTConfig.n_head" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageGPTConfig.n_head"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 
0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>n_head</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageGPTConfig.n_inner" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageGPTConfig.n_inner"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>n_inner</strong> (<code>int</code>, <em>optional</em>, defaults to None) &#x2014; Dimensionality of the inner feed-forward layers. <code>None</code> will set it to 4 times n_embd<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageGPTConfig.activation_function" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageGPTConfig.activation_function"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>activation_function</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;quick_gelu&quot;</code>) &#x2014; Activation function (can be one of the activation functions defined in src/transformers/activations.py). 
Defaults to &#x201C;quick_gelu&#x201D;.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageGPTConfig.resid_pdrop" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageGPTConfig.resid_pdrop"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>resid_pdrop</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageGPTConfig.embd_pdrop" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageGPTConfig.embd_pdrop"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>embd_pdrop</strong> (<code>int</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout ratio for the embeddings.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageGPTConfig.attn_pdrop" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageGPTConfig.attn_pdrop"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 
1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attn_pdrop</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout ratio for the attention.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageGPTConfig.layer_norm_epsilon" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageGPTConfig.layer_norm_epsilon"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>layer_norm_epsilon</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-5) &#x2014; The epsilon to use in the layer normalization layers.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageGPTConfig.initializer_range" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageGPTConfig.initializer_range"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>initializer_range</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageGPTConfig.scale_attn_weights" class="header-link block pr-0.5 text-lg no-hover:hidden 
with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageGPTConfig.scale_attn_weights"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>scale_attn_weights</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Scale attention weights by dividing by sqrt(hidden_size)..<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageGPTConfig.use_cache" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageGPTConfig.use_cache"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not the model should return the last key/values attentions (not used by all models).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageGPTConfig.scale_attn_by_inverse_layer_idx" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageGPTConfig.scale_attn_by_inverse_layer_idx"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 
28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>scale_attn_by_inverse_layer_idx</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to additionally scale attention weights by <code>1 / layer_idx + 1</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageGPTConfig.reorder_and_upcast_attn" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageGPTConfig.reorder_and_upcast_attn"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>reorder_and_upcast_attn</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to scale keys (K) prior to computing attention (dot-product) and upcast attention dot-product/softmax to float() when training with mixed precision.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>This is the configuration class to store the configuration of a <a href="/docs/transformers/pr_16143/en/model_doc/imagegpt#transformers.ImageGPTModel">ImageGPTModel</a> or a <code>TFImageGPTModel</code>. It is used to instantiate a GPT-2 model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the ImageGPT <a href="https://huggingface.co/imagegpt" rel="nofollow">small</a> architecture.</p> <p>Configuration objects inherit from <a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a> and can be used to control the model outputs. 
Read the documentation from <a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a> for more information.</p> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ImageGPTModel, ImageGPTConfig <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a ImageGPT configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = ImageGPTConfig() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a model from the configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = ImageGPTModel(configuration) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = model.config<!-- HTML_TAG_END --></pre></div></div> <h2 class="relative group"><a id="transformers.ImageGPTFeatureExtractor" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageGPTFeatureExtractor"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>ImageGPTFeatureExtractor </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ImageGPTFeatureExtractor"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg 
bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">ImageGPTFeatureExtractor</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.ImageGPTFeatureExtractor" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ImageGPTFeatureExtractor"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/imagegpt/feature_extraction_imagegpt.py#L46" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">clusters<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">do_resize<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">size<span class="opacity-60"> = 32</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">resample<span class="opacity-60"> = 2</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">do_normalize<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageGPTFeatureExtractor.clusters" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageGPTFeatureExtractor.clusters"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>clusters</strong> (<code>np.ndarray</code>) &#x2014; The color clusters to use, as a <code>np.ndarray</code> of shape <code>(n_clusters, 3)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageGPTFeatureExtractor.do_resize" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageGPTFeatureExtractor.do_resize"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>do_resize</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether to resize the input to a certain <code>size</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageGPTFeatureExtractor.size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageGPTFeatureExtractor.size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" 
aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>size</strong> (<code>int</code> or <code>Tuple(int)</code>, <em>optional</em>, defaults to 32) &#x2014; Resize the input to the given size. If a tuple is provided, it should be (width, height). If only an integer is provided, then the input will be resized to (size, size). Only has an effect if <code>do_resize</code> is set to <code>True</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageGPTFeatureExtractor.resample" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageGPTFeatureExtractor.resample"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>resample</strong> (<code>int</code>, <em>optional</em>, defaults to <code>PIL.Image.BILINEAR</code>) &#x2014; An optional resampling filter. This can be one of <code>PIL.Image.NEAREST</code>, <code>PIL.Image.BOX</code>, <code>PIL.Image.BILINEAR</code>, <code>PIL.Image.HAMMING</code>, <code>PIL.Image.BICUBIC</code> or <code>PIL.Image.LANCZOS</code>. 
Only has an effect if <code>do_resize</code> is set to <code>True</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageGPTFeatureExtractor.do_normalize" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageGPTFeatureExtractor.do_normalize"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>do_normalize</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to normalize the input to the range between -1 and +1.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Constructs an ImageGPT feature extractor. This feature extractor can be used to resize images to a smaller resolution (such as 32x32 or 64x64), normalize them and finally color quantize them to obtain sequences of “pixel values” (color clusters).</p> <p>This feature extractor inherits from <a href="/docs/transformers/pr_16143/en/main_classes/feature_extractor#transformers.FeatureExtractionMixin">FeatureExtractionMixin</a> which contains most of the main methods. 
Users should refer to this superclass for more information regarding those methods.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ImageGPTFeatureExtractor.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.ImageGPTFeatureExtractor.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ImageGPTFeatureExtractor.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/imagegpt/feature_extraction_imagegpt.py#L97" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">images<span class="opacity-60">: typing.Union[PIL.Image.Image, numpy.ndarray, ForwardRef(&#39;torch.Tensor&#39;), typing.List[PIL.Image.Image], typing.List[numpy.ndarray], typing.List[ForwardRef(&#39;torch.Tensor&#39;)]]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_tensors<span 
class="opacity-60">: typing.Union[str, transformers.file_utils.TensorType, NoneType] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/feature_extractor#transformers.BatchFeature" >BatchFeature</a></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageGPTFeatureExtractor.__call__.images" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageGPTFeatureExtractor.__call__.images"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>images</strong> (<code>PIL.Image.Image</code>, <code>np.ndarray</code>, <code>torch.Tensor</code>, <code>List[PIL.Image.Image]</code>, <code>List[np.ndarray]</code>, <code>List[torch.Tensor]</code>) &#x2014; The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch tensor. 
In the case of a NumPy array/PyTorch tensor, each image should be of shape (C, H, W), where C is the number of channels and H and W are the image height and width.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageGPTFeatureExtractor.__call__.return_tensors" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageGPTFeatureExtractor.__call__.return_tensors"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_tensors</strong> (<code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>, defaults to <code>&apos;np&apos;</code>) &#x2014; If set, will return tensors of a particular framework. Acceptable values are:</p> <ul> <li><code>&apos;tf&apos;</code>: Return TensorFlow <code>tf.constant</code> objects.</li> <li><code>&apos;pt&apos;</code>: Return PyTorch <code>torch.Tensor</code> objects.</li> <li><code>&apos;np&apos;</code>: Return NumPy <code>np.ndarray</code> objects.</li> <li><code>&apos;jax&apos;</code>: Return JAX <code>jnp.ndarray</code> objects.</li> </ul><!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.ImageGPTFeatureExtractor.__call__.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/feature_extractor#transformers.BatchFeature" >BatchFeature</a></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/feature_extractor#transformers.BatchFeature" >BatchFeature</a> with the following fields:</p> <ul> <li><strong>pixel_values</strong> — Pixel values to be fed to a model, of shape (batch_size, num_channels, height, width).</li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>Main method to prepare one or several image(s) for the model.</p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"><p>NumPy arrays and PyTorch tensors are converted to PIL images when resizing, so it is most efficient to pass PIL images.</p></div></div></div> <h2 class="relative group"><a id="transformers.ImageGPTModel" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 
with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageGPTModel"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>ImageGPTModel </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ImageGPTModel"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">ImageGPTModel</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.ImageGPTModel" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ImageGPTModel"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/imagegpt/modeling_imagegpt.py#L620" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm 
!leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60">: ImageGPTConfig</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageGPTModel.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageGPTModel.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/imagegpt#transformers.ImageGPTConfig">ImageGPTConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>The bare ImageGPT Model transformer outputting raw hidden-states without any specific head on top.</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)</p> <p>This model is also a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> subclass. 
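</p> <p>Because it is a plain <code>nn.Module</code>, calling the instance runs the <code>forward()</code> pass documented below. The snippet that follows is only a minimal sketch of how the <code>past_key_values</code> cache described under <code>forward()</code> can be reused for sequential decoding; the checkpoint name and the random cluster ids are assumptions for illustration, not part of this documentation.</p> <div class="code-block relative"><pre><code class="language-python">import torch
from transformers import ImageGPTModel

# Assumed checkpoint name, used here only for illustration
model = ImageGPTModel.from_pretrained("openai/imagegpt-small")

# Hypothetical pixel-cluster ids; the last vocabulary id is assumed to be the SOS token and is excluded
input_ids = torch.randint(0, model.config.vocab_size - 1, (1, 16))

# First pass: ask the model to return its key/value cache alongside the hidden states
outputs = model(input_ids, use_cache=True)
past = outputs.past_key_values

# Subsequent passes: feed only the new token together with the cache
next_token = torch.randint(0, model.config.vocab_size - 1, (1, 1))
outputs = model(next_token, past_key_values=past, use_cache=True)
print(outputs.last_hidden_state.shape)  # only the new position is returned: (1, 1, hidden_size)
</code></pre></div> <p>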
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ImageGPTModel.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.ImageGPTModel.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ImageGPTModel.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/imagegpt/modeling_imagegpt.py#L655" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60">: typing.Optional[typing.Tuple[typing.Tuple[torch.Tensor]]] = None</span></span> </span><span class="comma cursor-pointer"><span 
class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_hidden_states<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_attention_mask<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_cache<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60">: typing.Any</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPastAndCrossAttentions" >transformers.modeling_outputs.BaseModelOutputWithPastAndCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageGPTModel.forward.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 
with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageGPTModel.forward.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; <code>input_ids_length</code> = <code>sequence_length</code> if <code>past_key_values</code> is <code>None</code> else <code>past_key_values[0][0].shape[-2]</code> (<code>sequence_length</code> of input past key value states). Indices of input sequence tokens in the vocabulary.</p> <p>If <code>past_key_values</code> is used, only <code>input_ids</code> that do not have their past calculated should be passed as <code>input_ids</code>.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/imagegpt#transformers.ImageGPTFeatureExtractor">ImageGPTFeatureExtractor</a>. See <a href="/docs/transformers/pr_16143/en/model_doc/imagegpt#transformers.ImageGPTFeatureExtractor.__call__">ImageGPTFeatureExtractor.<strong>call</strong>()</a> for details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageGPTModel.forward.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageGPTModel.forward.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>Tuple[Tuple[torch.Tensor]]</code> of length <code>config.n_layers</code>) &#x2014; Contains precomputed hidden-states (key and values in the attention blocks) as computed by the model (see <code>past_key_values</code> output below). Can be used to speed up sequential decoding. 
The <code>input_ids</code> which have their past given to this model should not be passed as <code>input_ids</code> as they have already been computed.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageGPTModel.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageGPTModel.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageGPTModel.forward.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageGPTModel.forward.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageGPTModel.forward.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageGPTModel.forward.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageGPTModel.forward.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageGPTModel.forward.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageGPTModel.forward.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageGPTModel.forward.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.</p> <p>If <code>past_key_values</code> is used, optionally only the last <code>inputs_embeds</code> have to be input (see <code>past_key_values</code>).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageGPTModel.forward.use_cache" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageGPTModel.forward.use_cache"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 
items-start"><a id="transformers.ImageGPTModel.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageGPTModel.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageGPTModel.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageGPTModel.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageGPTModel.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageGPTModel.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageGPTModel.forward.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageGPTModel.forward.labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for language modeling. Note that the labels <strong>are shifted</strong> inside the model, i.e. 
you can set <code>labels = input_ids</code>. Indices are selected in <code>[-100, 0, ..., config.vocab_size]</code>. All labels set to <code>-100</code> are ignored (masked); the loss is only computed for labels in <code>[0, ..., config.vocab_size]</code>.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.ImageGPTModel.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPastAndCrossAttentions" >transformers.modeling_outputs.BaseModelOutputWithPastAndCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPastAndCrossAttentions" >transformers.modeling_outputs.BaseModelOutputWithPastAndCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/imagegpt#transformers.ImageGPTConfig" >ImageGPTConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) — Sequence of hidden-states at the output of the last layer of the model.</p> <p>If <code>past_key_values</code> is used, only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) — Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code> and optionally, if <code>config.is_encoder_decoder=True</code>, 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally, if <code>config.is_encoder_decoder=True</code>, in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, 
sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> and <code>config.add_cross_attention=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder’s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/imagegpt#transformers.ImageGPTModel">ImageGPTModel</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ImageGPTFeatureExtractor, ImageGPTModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> requests <span class="hljs-meta">&gt;&gt;&gt; </span>url = <span class="hljs-string">&quot;http://images.cocodataset.org/val2017/000000039769.jpg&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>image = Image.<span class="hljs-built_in">open</span>(requests.get(url, stream=<span class="hljs-literal">True</span>).raw) <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = ImageGPTFeatureExtractor.from_pretrained(<span 
class="hljs-string">&quot;openai/imagegpt-small&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = ImageGPTModel.from_pretrained(<span class="hljs-string">&quot;openai/imagegpt-small&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = feature_extractor(images=image, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.ImageGPTForCausalImageModeling" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageGPTForCausalImageModeling"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>ImageGPTForCausalImageModeling </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ImageGPTForCausalImageModeling"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">ImageGPTForCausalImageModeling</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.ImageGPTForCausalImageModeling" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ImageGPTForCausalImageModeling"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 
1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/imagegpt/modeling_imagegpt.py#L900" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60">: ImageGPTConfig</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageGPTForCausalImageModeling.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageGPTForCausalImageModeling.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/imagegpt#transformers.ImageGPTConfig">ImageGPTConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>The ImageGPT Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings).</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a>. 
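</p> <p>Because the class inherits the generation utilities from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a>, it is typically used to sample image tokens autoregressively. The snippet below is a minimal sketch only: the checkpoint name, the use of <code>vocab_size - 1</code> as the start-of-sequence token and the sampling settings are assumptions rather than part of this documentation.</p> <div class="code-block relative"><pre><code class="language-python">import torch
from transformers import ImageGPTForCausalImageModeling

# Assumed checkpoint name, used here only for illustration
model = ImageGPTForCausalImageModeling.from_pretrained("openai/imagegpt-small")

# Start each sequence from the assumed start-of-sequence token id (vocab_size - 1)
batch_size = 2
context = torch.full((batch_size, 1), model.config.vocab_size - 1, dtype=torch.long)

# generate() is inherited from PreTrainedModel; sample pixel-cluster ids autoregressively
generated = model.generate(input_ids=context, max_length=64, do_sample=True, top_k=40)
print(generated.shape)  # torch.Size([2, 64]), including the start token
</code></pre></div> <p>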
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)</p> <p>This model is also a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ImageGPTForCausalImageModeling.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.ImageGPTForCausalImageModeling.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ImageGPTForCausalImageModeling.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/imagegpt/modeling_imagegpt.py#L948" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span 
class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60">: typing.Optional[typing.Tuple[typing.Tuple[torch.Tensor]]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_hidden_states<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_attention_mask<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_cache<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60">: typing.Any</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.CausalLMOutputWithCrossAttentions" 
>transformers.modeling_outputs.CausalLMOutputWithCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageGPTForCausalImageModeling.forward.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageGPTForCausalImageModeling.forward.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; <code>input_ids_length</code> = <code>sequence_length</code> if <code>past_key_values</code> is <code>None</code> else <code>past_key_values[0][0].shape[-2]</code> (<code>sequence_length</code> of input past key value states). Indices of input sequence tokens in the vocabulary.</p> <p>If <code>past_key_values</code> is used, only <code>input_ids</code> that do not have their past calculated should be passed as <code>input_ids</code>.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/imagegpt#transformers.ImageGPTFeatureExtractor">ImageGPTFeatureExtractor</a>. 
See <a href="/docs/transformers/pr_16143/en/model_doc/imagegpt#transformers.ImageGPTFeatureExtractor.__call__">ImageGPTFeatureExtractor.<strong>call</strong>()</a> for details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageGPTForCausalImageModeling.forward.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageGPTForCausalImageModeling.forward.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>Tuple[Tuple[torch.Tensor]]</code> of length <code>config.n_layers</code>) &#x2014; Contains precomputed hidden-states (key and values in the attention blocks) as computed by the model (see <code>past_key_values</code> output below). Can be used to speed up sequential decoding. The <code>input_ids</code> which have their past given to this model should not be passed as <code>input_ids</code> as they have already been computed.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageGPTForCausalImageModeling.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageGPTForCausalImageModeling.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageGPTForCausalImageModeling.forward.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageGPTForCausalImageModeling.forward.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageGPTForCausalImageModeling.forward.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageGPTForCausalImageModeling.forward.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageGPTForCausalImageModeling.forward.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageGPTForCausalImageModeling.forward.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageGPTForCausalImageModeling.forward.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageGPTForCausalImageModeling.forward.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.</p> <p>If <code>past_key_values</code> is used, optionally only the last <code>inputs_embeds</code> have to be input (see <code>past_key_values</code>).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageGPTForCausalImageModeling.forward.use_cache" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageGPTForCausalImageModeling.forward.use_cache"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageGPTForCausalImageModeling.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageGPTForCausalImageModeling.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageGPTForCausalImageModeling.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageGPTForCausalImageModeling.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageGPTForCausalImageModeling.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageGPTForCausalImageModeling.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageGPTForCausalImageModeling.forward.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageGPTForCausalImageModeling.forward.labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for language modeling. Note that the labels <strong>are shifted</strong> inside the model, i.e. you can set <code>labels = input_ids</code> Indices are selected in <code>[-100, 0, ..., config.vocab_size]</code> All labels set to <code>-100</code> are ignored (masked), the loss is only computed for labels in <code>[0, ..., config.vocab_size]</code><!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.ImageGPTForCausalImageModeling.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.CausalLMOutputWithCrossAttentions" >transformers.modeling_outputs.CausalLMOutputWithCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.CausalLMOutputWithCrossAttentions" >transformers.modeling_outputs.CausalLMOutputWithCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/imagegpt#transformers.ImageGPTConfig" >ImageGPTConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) — Language modeling loss (for next-token prediction).</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) — Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when 
<code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Cross attentions weights after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) — Tuple of <code>torch.FloatTensor</code> tuples of length <code>config.n_layers</code>, with each tuple containing the cached key, value states of the self-attention and the cross-attention layers if model is used in encoder-decoder setting. Only relevant if <code>config.is_decoder = True</code>.</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/imagegpt#transformers.ImageGPTForCausalImageModeling">ImageGPTForCausalImageModeling</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span 
class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ImageGPTFeatureExtractor, ImageGPTForCausalImageModeling <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> matplotlib.pyplot <span class="hljs-keyword">as</span> plt <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> numpy <span class="hljs-keyword">as</span> np <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = ImageGPTFeatureExtractor.from_pretrained(<span class="hljs-string">&quot;openai/imagegpt-small&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = ImageGPTForCausalImageModeling.from_pretrained(<span class="hljs-string">&quot;openai/imagegpt-small&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>device = torch.device(<span class="hljs-string">&quot;cuda&quot;</span> <span class="hljs-keyword">if</span> torch.cuda.is_available() <span class="hljs-keyword">else</span> <span class="hljs-string">&quot;cpu&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model.to(device) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># unconditional generation of 8 images</span> <span class="hljs-meta">&gt;&gt;&gt; </span>batch_size = <span class="hljs-number">8</span> <span class="hljs-meta">&gt;&gt;&gt; </span>context = torch.full((batch_size, <span class="hljs-number">1</span>), model.config.vocab_size - <span class="hljs-number">1</span>) <span class="hljs-comment"># initialize with SOS token</span> <span class="hljs-meta">&gt;&gt;&gt; </span>context = torch.tensor(context).to(device) <span class="hljs-meta">&gt;&gt;&gt; </span>output = model.generate( <span class="hljs-meta">... </span> input_ids=context, max_length=model.config.n_positions + <span class="hljs-number">1</span>, temperature=<span class="hljs-number">1.0</span>, do_sample=<span class="hljs-literal">True</span>, top_k=<span class="hljs-number">40</span> <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>clusters = feature_extractor.clusters <span class="hljs-meta">&gt;&gt;&gt; </span>n_px = feature_extractor.size <span class="hljs-meta">&gt;&gt;&gt; </span>samples = output[:, <span class="hljs-number">1</span>:].cpu().detach().numpy() <span class="hljs-meta">&gt;&gt;&gt; </span>samples_img = [ <span class="hljs-meta">... </span> np.reshape(np.rint(<span class="hljs-number">127.5</span> * (clusters[s] + <span class="hljs-number">1.0</span>)), [n_px, n_px, <span class="hljs-number">3</span>]).astype(np.uint8) <span class="hljs-keyword">for</span> s <span class="hljs-keyword">in</span> samples <span class="hljs-meta">&gt;&gt;&gt; </span>] <span class="hljs-comment"># convert color cluster tokens back to pixels</span> <span class="hljs-meta">&gt;&gt;&gt; </span>f, axes = plt.subplots(<span class="hljs-number">1</span>, batch_size, dpi=<span class="hljs-number">300</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">for</span> img, ax <span class="hljs-keyword">in</span> <span class="hljs-built_in">zip</span>(samples_img, axes): <span class="hljs-meta">... </span> ax.axis(<span class="hljs-string">&quot;off&quot;</span>) <span class="hljs-meta">... 
</span> ax.imshow(img)<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.ImageGPTForImageClassification" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageGPTForImageClassification"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>ImageGPTForImageClassification </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ImageGPTForImageClassification"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">ImageGPTForImageClassification</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.ImageGPTForImageClassification" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ImageGPTForImageClassification"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto 
!text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/imagegpt/modeling_imagegpt.py#L1088" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60">: ImageGPTConfig</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageGPTForImageClassification.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageGPTForImageClassification.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/imagegpt#transformers.ImageGPTConfig">ImageGPTConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>The ImageGPT Model transformer with an image classification head on top (linear layer). <a href="/docs/transformers/pr_16143/en/model_doc/imagegpt#transformers.ImageGPTForImageClassification">ImageGPTForImageClassification</a> average-pools the hidden states in order to do the classification.</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads, etc.)</p> <p>This model is also a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> subclass.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and behavior.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ImageGPTForImageClassification.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.ImageGPTForImageClassification.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ImageGPTForImageClassification.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/imagegpt/modeling_imagegpt.py#L1100" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60">: typing.Optional[typing.Tuple[typing.Tuple[torch.Tensor]]] = None</span></span>
</span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_cache<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60">: typing.Any</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>transformers.modeling_outputs.SequenceClassifierOutputWithPast</code> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageGPTForImageClassification.forward.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageGPTForImageClassification.forward.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em"
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; <code>input_ids_length</code> = <code>sequence_length</code> if <code>past_key_values</code> is <code>None</code> else <code>past_key_values[0][0].shape[-2]</code> (<code>sequence_length</code> of input past key value states). Indices of input sequence tokens in the vocabulary.</p> <p>If <code>past_key_values</code> is used, only <code>input_ids</code> that do not have their past calculated should be passed as <code>input_ids</code>.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/imagegpt#transformers.ImageGPTFeatureExtractor">ImageGPTFeatureExtractor</a>. See <a href="/docs/transformers/pr_16143/en/model_doc/imagegpt#transformers.ImageGPTFeatureExtractor.__call__">ImageGPTFeatureExtractor.<strong>call</strong>()</a> for details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageGPTForImageClassification.forward.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageGPTForImageClassification.forward.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>Tuple[Tuple[torch.Tensor]]</code> of length <code>config.n_layers</code>) &#x2014; Contains precomputed hidden-states (key and values in the attention blocks) as computed by the model (see <code>past_key_values</code> output below). Can be used to speed up sequential decoding. 
The <code>input_ids</code> which have their past given to this model should not be passed as <code>input_ids</code> as they have already been computed.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageGPTForImageClassification.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageGPTForImageClassification.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageGPTForImageClassification.forward.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageGPTForImageClassification.forward.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageGPTForImageClassification.forward.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageGPTForImageClassification.forward.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageGPTForImageClassification.forward.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageGPTForImageClassification.forward.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageGPTForImageClassification.forward.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageGPTForImageClassification.forward.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.</p> <p>If <code>past_key_values</code> is used, optionally only the last <code>inputs_embeds</code> have to be input (see <code>past_key_values</code>).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageGPTForImageClassification.forward.use_cache" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageGPTForImageClassification.forward.use_cache"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).<!-- HTML_TAG_END --> </span></span> </li><li 
class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageGPTForImageClassification.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageGPTForImageClassification.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageGPTForImageClassification.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageGPTForImageClassification.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageGPTForImageClassification.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageGPTForImageClassification.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageGPTForImageClassification.forward.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageGPTForImageClassification.forward.labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. 
If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.ImageGPTForImageClassification.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>transformers.modeling_outputs.SequenceClassifierOutputWithPast</code>or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <code>transformers.modeling_outputs.SequenceClassifierOutputWithPast</code>or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/imagegpt#transformers.ImageGPTConfig" >ImageGPTConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) — Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) — Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) — Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>)</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/imagegpt#transformers.ImageGPTForImageClassification">ImageGPTForImageClassification</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 
dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ImageGPTFeatureExtractor, ImageGPTForImageClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> requests <span class="hljs-meta">&gt;&gt;&gt; </span>url = <span class="hljs-string">&quot;http://images.cocodataset.org/val2017/000000039769.jpg&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>image = Image.<span class="hljs-built_in">open</span>(requests.get(url, stream=<span class="hljs-literal">True</span>).raw) <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = ImageGPTFeatureExtractor.from_pretrained(<span class="hljs-string">&quot;openai/imagegpt-small&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = ImageGPTForImageClassification.from_pretrained(<span class="hljs-string">&quot;openai/imagegpt-small&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = feature_extractor(images=image, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits<!-- HTML_TAG_END --></pre></div></div></div> <script type="module" data-hydrate="1p9fllt"> import { start } from "/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"; start({ target: document.querySelector('[data-hydrate="1p9fllt"]').parentNode, paths: {"base":"/docs/transformers/pr_16143/en","assets":"/docs/transformers/pr_16143/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"), 
import("/docs/transformers/pr_16143/en/_app/pages/model_doc/imagegpt.mdx-5a1cdb73.js") ], params: {} } }); </script>
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;trocr&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;overview&quot;,&quot;title&quot;:&quot;Overview&quot;},{&quot;local&quot;:&quot;inference&quot;,&quot;title&quot;:&quot;Inference&quot;},{&quot;local&quot;:&quot;transformers.TrOCRConfig&quot;,&quot;title&quot;:&quot;TrOCRConfig&quot;},{&quot;local&quot;:&quot;transformers.TrOCRProcessor&quot;,&quot;title&quot;:&quot;TrOCRProcessor&quot;},{&quot;local&quot;:&quot;transformers.TrOCRForCausalLM&quot;,&quot;title&quot;:&quot;TrOCRForCausalLM&quot;}],&quot;title&quot;:&quot;TrOCR&quot;}" data-svelte="svelte-1phssyn"> <link rel="stylesheet" href="/docs/transformers/pr_16143/en/_app/assets/pages/__layout.svelte-a5c8879b.css"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/vendor-4833417e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/paths-4b3c6e7e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/model_doc/trocr.mdx-6100bb6b.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Tip-fffd6df1.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Docstring-4f315ed9.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/IconCopyLink-4b81c553.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CodeBlock-6a3d1b46.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CopyButton-dacfbfaf.js"> <h1 class="relative group"><a id="trocr" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#trocr"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TrOCR </span></h1> <h2 class="relative group"><a id="overview" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#overview"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 
79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Overview </span></h2> <p>The TrOCR model was proposed in <a href="https://arxiv.org/abs/2109.10282" rel="nofollow">TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models</a> by Minghao Li, Tengchao Lv, Lei Cui, Yijuan Lu, Dinei Florencio, Cha Zhang, Zhoujun Li, Furu Wei. TrOCR consists of an image Transformer encoder and an autoregressive text Transformer decoder to perform <a href="https://en.wikipedia.org/wiki/Optical_character_recognition" rel="nofollow">optical character recognition (OCR)</a>.</p> <p>The abstract from the paper is the following:</p> <p><em>Text recognition is a long-standing research problem for document digitalization. Existing approaches for text recognition are usually built based on CNN for image understanding and RNN for char-level text generation. In addition, another language model is usually needed to improve the overall accuracy as a post-processing step. In this paper, we propose an end-to-end text recognition approach with pre-trained image Transformer and text Transformer models, namely TrOCR, which leverages the Transformer architecture for both image understanding and wordpiece-level text generation. The TrOCR model is simple but effective, and can be pre-trained with large-scale synthetic data and fine-tuned with human-labeled datasets. Experiments show that the TrOCR model outperforms the current state-of-the-art models on both printed and handwritten text recognition tasks.</em></p> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/trocr_architecture.jpg" alt="drawing" width="600"> <small>TrOCR architecture. Taken from the <a href="https://arxiv.org/abs/2109.10282">original paper</a>. </small> <p>Please refer to the <code>VisionEncoderDecoder</code> class on how to use this model.</p> <p>This model was contributed by <a href="https://huggingface.co/nielsr" rel="nofollow">nielsr</a>. The original code can be found <a href="https://github.com/microsoft/unilm/tree/6f60612e7cc86a2a1ae85c47231507a587ab4e01/trocr" rel="nofollow">here</a>.</p> <p>Tips:</p> <ul><li>The quickest way to get started with TrOCR is by checking the <a href="https://github.com/NielsRogge/Transformers-Tutorials/tree/master/TrOCR" rel="nofollow">tutorial notebooks</a>, which show how to use the model at inference time as well as fine-tuning on custom data.</li> <li>TrOCR is pre-trained in 2 stages before being fine-tuned on downstream datasets. It achieves state-of-the-art results on both printed (e.g. the <a href="https://paperswithcode.com/dataset/sroie" rel="nofollow">SROIE dataset</a>) and handwritten (e.g. the <a href="https://fki.tic.heia-fr.ch/databases/iam-handwriting-database" rel="nofollow">IAM Handwriting dataset</a>) text recognition tasks. 
For more information, see the <a href="https://huggingface.co/models?other=trocr" rel="nofollow">official models</a>.</li> <li>TrOCR is always used within the <a href="vision-encoder-decoder">VisionEncoderDecoder</a> framework.</li></ul> <h2 class="relative group"><a id="inference" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#inference"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Inference </span></h2> <p>TrOCR’s <code>VisionEncoderDecoder</code> model accepts images as input and makes use of <a href="/docs/transformers/pr_16143/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.generate">generate()</a> to autoregressively generate text given the input image.</p> <p>The [<code>ViTFeatureExtractor</code>/<code>DeiTFeatureExtractor</code>] class is responsible for preprocessing the input image and [<code>RobertaTokenizer</code>/<code>XLMRobertaTokenizer</code>] decodes the generated target tokens to the target string. 
The <a href="/docs/transformers/pr_16143/en/model_doc/trocr#transformers.TrOCRProcessor">TrOCRProcessor</a> wraps [<code>ViTFeatureExtractor</code>/<code>DeiTFeatureExtractor</code>] and [<code>RobertaTokenizer</code>/<code>XLMRobertaTokenizer</code>] into a single instance to both extract the input features and decode the predicted token ids.</p> <ul><li>Step-by-step Optical Character Recognition (OCR)</li></ul> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TrOCRProcessor, VisionEncoderDecoderModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> requests <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image <span class="hljs-meta">&gt;&gt;&gt; </span>processor = TrOCRProcessor.from_pretrained(<span class="hljs-string">&quot;microsoft/trocr-base-handwritten&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = VisionEncoderDecoderModel.from_pretrained(<span class="hljs-string">&quot;microsoft/trocr-base-handwritten&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># load image from the IAM dataset </span> <span class="hljs-meta">&gt;&gt;&gt; </span>url = <span class="hljs-string">&quot;https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>image = Image.<span class="hljs-built_in">open</span>(requests.get(url, stream=<span class="hljs-literal">True</span>).raw).convert(<span class="hljs-string">&quot;RGB&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>pixel_values = processor(image, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).pixel_values <span class="hljs-meta">&gt;&gt;&gt; </span>generated_ids = model.generate(pixel_values) <span class="hljs-meta">&gt;&gt;&gt; </span>generated_text = processor.batch_decode(generated_ids, skip_special_tokens=<span class="hljs-literal">True</span>)[<span class="hljs-number">0</span>] <!-- HTML_TAG_END --></pre></div> <p>See the <a href="https://huggingface.co/models?filter=trocr" rel="nofollow">model hub</a> to look for TrOCR checkpoints.</p> <h2 class="relative group"><a id="transformers.TrOCRConfig" class="header-link block pr-1.5 text-lg 
no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrOCRConfig"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TrOCRConfig </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TrOCRConfig"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TrOCRConfig</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TrOCRConfig" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TrOCRConfig"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/trocr/configuration_trocr.py#L29" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p 
class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">vocab_size<span class="opacity-60"> = 50265</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">d_model<span class="opacity-60"> = 1024</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_layers<span class="opacity-60"> = 12</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_attention_heads<span class="opacity-60"> = 16</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_ffn_dim<span class="opacity-60"> = 4096</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">activation_function<span class="opacity-60"> = &#39;gelu&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_position_embeddings<span class="opacity-60"> = 512</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dropout<span class="opacity-60"> = 0.1</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_dropout<span class="opacity-60"> = 0.0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">activation_dropout<span class="opacity-60"> = 0.0</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_start_token_id<span class="opacity-60"> = 2</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">classifier_dropout<span class="opacity-60"> = 0.0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">init_std<span class="opacity-60"> = 0.02</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_layerdrop<span class="opacity-60"> = 0.0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_cache<span class="opacity-60"> = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scale_embedding<span class="opacity-60"> = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_learned_position_embeddings<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded 
hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">layernorm_embedding<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_token_id<span class="opacity-60"> = 1</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">bos_token_id<span class="opacity-60"> = 0</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eos_token_id<span class="opacity-60"> = 2</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrOCRConfig.vocab_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrOCRConfig.vocab_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 50265) &#x2014; Vocabulary size of the TrOCR model. 
Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed when calling <a href="/docs/transformers/pr_16143/en/model_doc/trocr#transformers.TrOCRForCausalLM">TrOCRForCausalLM</a>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrOCRConfig.d_model" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrOCRConfig.d_model"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>d_model</strong> (<code>int</code>, <em>optional</em>, defaults to 1024) &#x2014; Dimensionality of the layers and the pooler layer.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrOCRConfig.decoder_layers" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrOCRConfig.decoder_layers"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of decoder layers.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrOCRConfig.decoder_attention_heads" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrOCRConfig.decoder_attention_heads"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 
67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 16) &#x2014; Number of attention heads for each attention layer in the Transformer decoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrOCRConfig.decoder_ffn_dim" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrOCRConfig.decoder_ffn_dim"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_ffn_dim</strong> (<code>int</code>, <em>optional</em>, defaults to 4096) &#x2014; Dimensionality of the &#x201C;intermediate&#x201D; (often named feed-forward) layer in decoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrOCRConfig.activation_function" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrOCRConfig.activation_function"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>activation_function</strong> (<code>str</code> or <code>function</code>, <em>optional</em>, defaults to <code>&quot;gelu&quot;</code>) &#x2014; The non-linear activation function (function or string) in the pooler. 
If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;silu&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrOCRConfig.max_position_embeddings" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrOCRConfig.max_position_embeddings"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_position_embeddings</strong> (<code>int</code>, <em>optional</em>, defaults to 512) &#x2014; The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrOCRConfig.dropout" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrOCRConfig.dropout"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability for all fully connected layers in the embeddings, and pooler.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrOCRConfig.attention_dropout" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrOCRConfig.attention_dropout"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout ratio for the attention probabilities.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrOCRConfig.activation_dropout" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrOCRConfig.activation_dropout"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>activation_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout ratio for activations inside the fully connected layer.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrOCRConfig.classifier_dropout" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrOCRConfig.classifier_dropout"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>classifier_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout ratio for classifier.<!-- HTML_TAG_END --> </span></span> </li><li 
class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrOCRConfig.init_std" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrOCRConfig.init_std"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>init_std</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices. decoder_layerdrop &#x2014; (<code>float</code>, <em>optional</em>, defaults to 0.0): The LayerDrop probability for the decoder. See the [LayerDrop paper](see <a href="https://arxiv.org/abs/1909.11556" rel="nofollow">https://arxiv.org/abs/1909.11556</a>) for more details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrOCRConfig.use_cache" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrOCRConfig.use_cache"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not the model should return the last key/values attentions (not used by all models).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrOCRConfig.scale_embedding" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrOCRConfig.scale_embedding"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>scale_embedding</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to scale the word embeddings by sqrt(d_model).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrOCRConfig.use_learned_position_embeddings" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrOCRConfig.use_learned_position_embeddings"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_learned_position_embeddings</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to use learned position embeddings. 
If not, sinusoidal position embeddings will be used.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrOCRConfig.layernorm_embedding" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrOCRConfig.layernorm_embedding"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>layernorm_embedding</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to use a layernorm after the word + position embeddings.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>This is the configuration class to store the configuration of a <a href="/docs/transformers/pr_16143/en/model_doc/trocr#transformers.TrOCRForCausalLM">TrOCRForCausalLM</a>. It is used to instantiate a TrOCR model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the TrOCR <a href="https://huggingface.co/microsoft/trocr-base" rel="nofollow">microsoft/trocr-base</a> architecture.</p> <p>Configuration objects inherit from <a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a> and can be used to control the model outputs. 
Read the documentation from <a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a> for more information.</p> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TrOCRForCausalLM, TrOCRConfig <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a TrOCR-base style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = TrOCRConfig() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a model from the TrOCR-base style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = TrOCRForCausalLM(configuration) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = model.config<!-- HTML_TAG_END --></pre></div></div> <h2 class="relative group"><a id="transformers.TrOCRProcessor" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrOCRProcessor"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TrOCRProcessor </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TrOCRProcessor"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r 
px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TrOCRProcessor</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TrOCRProcessor" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TrOCRProcessor"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/trocr/processing_trocr.py#L23" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">feature_extractor<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tokenizer<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrOCRProcessor.feature_extractor" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrOCRProcessor.feature_extractor"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" 
height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>feature_extractor</strong> ([<code>ViTFeatureExtractor</code>/<code>DeiTFeatureExtractor</code>]) &#x2014; An instance of [<code>ViTFeatureExtractor</code>/<code>DeiTFeatureExtractor</code>]. The feature extractor is a required input.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrOCRProcessor.tokenizer" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrOCRProcessor.tokenizer"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tokenizer</strong> ([<code>RobertaTokenizer</code>/<code>XLMRobertaTokenizer</code>]) &#x2014; An instance of [<code>RobertaTokenizer</code>/<code>XLMRobertaTokenizer</code>]. The tokenizer is a required input.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Constructs a TrOCR processor which wraps a vision feature extractor and a TrOCR tokenizer into a single processor.</p> <p><a href="/docs/transformers/pr_16143/en/model_doc/trocr#transformers.TrOCRProcessor">TrOCRProcessor</a> offers all the functionalities of [<code>ViTFeatureExtractor</code>/<code>DeiTFeatureExtractor</code>] and [<code>RobertaTokenizer</code>/<code>XLMRobertaTokenizer</code>]. 
See the <a href="/docs/transformers/pr_16143/en/model_doc/trocr#transformers.TrOCRProcessor.__call__"><strong>call</strong>()</a> and <a href="/docs/transformers/pr_16143/en/model_doc/trocr#transformers.TrOCRProcessor.decode">decode()</a> for more information.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TrOCRProcessor.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.TrOCRProcessor.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TrOCRProcessor.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/trocr/processing_trocr.py#L44" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div 
class="!mb-10 relative docstring-details "> </div></div> <p>When used in normal mode, this method forwards all its arguments to AutoFeatureExtractor’s <code>__call__()</code>and returns its output. If used in the context <a href="/docs/transformers/pr_16143/en/model_doc/trocr#transformers.TrOCRProcessor.as_target_processor">as_target_processor()</a> this method forwards all its arguments to TrOCRTokenizer’s <code>__call__</code>. Please refer to the doctsring of the above two methods for more information.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ProcessorMixin.from_pretrained"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>from_pretrained</span></h4><!-- HTML_TAG_END --> <a id="transformers.ProcessorMixin.from_pretrained" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ProcessorMixin.from_pretrained"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/processing_utils.py#L157" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">pretrained_model_name_or_path<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProcessorMixin.from_pretrained.pretrained_model_name_or_path" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProcessorMixin.from_pretrained.pretrained_model_name_or_path"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pretrained_model_name_or_path</strong> (<code>str</code> or <code>os.PathLike</code>) &#x2014; This can be either:</p> <ul> <li>a string, the <em>model id</em> of a pretrained feature_extractor hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like <code>bert-base-uncased</code>, or namespaced under a user or organization name, like <code>dbmdz/bert-base-german-cased</code>.</li> <li>a path to a <em>directory</em> containing a feature extractor file saved using the <a href="/docs/transformers/pr_16143/en/main_classes/feature_extractor#transformers.FeatureExtractionMixin.save_pretrained">save_pretrained()</a> method, e.g., <code>./my_model_directory/</code>.</li> <li>a path or url to a saved feature extractor JSON <em>file</em>, e.g., <code>./my_model_directory/preprocessor_config.json</code>. **kwargs &#x2014; Additional keyword arguments passed along to both <a href="/docs/transformers/pr_16143/en/main_classes/feature_extractor#transformers.FeatureExtractionMixin.from_pretrained">from_pretrained()</a> and <code>from_pretrained</code>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Instantiate a processor associated with a pretrained model.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>This class method is simply calling the feature extractor <a href="/docs/transformers/pr_16143/en/main_classes/feature_extractor#transformers.FeatureExtractionMixin.from_pretrained">from_pretrained()</a> and the tokenizer <code>from_pretrained</code> methods. 
Please refer to the docstrings of the methods above for more information.</p></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ProcessorMixin.save_pretrained"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>save_pretrained</span></h4><!-- HTML_TAG_END --> <a id="transformers.ProcessorMixin.save_pretrained" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ProcessorMixin.save_pretrained"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/processing_utils.py#L95" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">save_directory<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">push_to_hub<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span 
class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProcessorMixin.save_pretrained.save_directory" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProcessorMixin.save_pretrained.save_directory"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>save_directory</strong> (<code>str</code> or <code>os.PathLike</code>) &#x2014; Directory where the feature extractor JSON file and the tokenizer files will be saved (directory will be created if it does not exist).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProcessorMixin.save_pretrained.push_to_hub" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProcessorMixin.save_pretrained.push_to_hub"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>push_to_hub</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to push your processor to the Hugging Face model hub after saving it.</p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"> <p>Using <code>push_to_hub=True</code> will synchronize the repository you are pushing to with <code>save_directory</code>, which requires 
<code>save_directory</code> to be a local clone of the repo you are pushing to if it&#x2019;s an existing folder. Pass along <code>temp_dir=True</code> to use a temporary directory instead.</p> </div> <p>kwargs &#x2014; Additional key word arguments passed along to the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.file_utils.PushToHubMixin.push_to_hub">push_to_hub()</a> method.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Saves the attributes of this processor (feature extractor, tokenizer…) in the specified directory so that it can be reloaded using the <a href="/docs/transformers/pr_16143/en/main_classes/processors#transformers.ProcessorMixin.from_pretrained">from_pretrained()</a> method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>This class method is simply calling <a href="/docs/transformers/pr_16143/en/main_classes/feature_extractor#transformers.FeatureExtractionMixin.save_pretrained">save_pretrained()</a> and <code>save_pretrained</code>. Please refer to the docstrings of the methods above for more information.</p></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TrOCRProcessor.batch_decode"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>batch_decode</span></h4><!-- HTML_TAG_END --> <a id="transformers.TrOCRProcessor.batch_decode" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TrOCRProcessor.batch_decode"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 
43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/trocr/processing_trocr.py#L53" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>This method forwards all its arguments to TrOCRTokenizer’s <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.batch_decode">batch_decode()</a>. Please refer to the docstring of this method for more information.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TrOCRProcessor.decode"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>decode</span></h4><!-- HTML_TAG_END --> <a id="transformers.TrOCRProcessor.decode" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TrOCRProcessor.decode"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 
79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/trocr/processing_trocr.py#L60" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>This method forwards all its arguments to TrOCRTokenizer’s <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.decode">decode()</a>. Please refer to the docstring of this method for more information.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TrOCRProcessor.as_target_processor"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>as_target_processor</span></h4><!-- HTML_TAG_END --> <a id="transformers.TrOCRProcessor.as_target_processor" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TrOCRProcessor.as_target_processor"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 
40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/trocr/processing_trocr.py#L67" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Temporarily sets the tokenizer for processing the input. Useful for encoding the labels when fine-tuning TrOCR.</p></div></div> <h2 class="relative group"><a id="transformers.TrOCRForCausalLM" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrOCRForCausalLM"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TrOCRForCausalLM </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TrOCRForCausalLM"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TrOCRForCausalLM</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TrOCRForCausalLM" class="header-link invisible 
with-hover:group-hover:visible pr-2" href="#transformers.TrOCRForCausalLM"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/trocr/modeling_trocr.py#L776" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrOCRForCausalLM.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrOCRForCausalLM.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/trocr#transformers.TrOCRConfig">TrOCRConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>The TrOCR Decoder with a language modeling head. 
It can be used as the decoder part of <a href="/docs/transformers/pr_16143/en/model_doc/encoder-decoder#transformers.EncoderDecoderModel">EncoderDecoderModel</a> and <code>VisionEncoderDecoder</code>. This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads, etc.)</p> <p>This model is also a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and behavior.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TrOCRForCausalLM.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.TrOCRForCausalLM.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TrOCRForCausalLM.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/trocr/modeling_trocr.py#L807" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 
hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cross_attn_head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_cache<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.CausalLMOutputWithCrossAttentions" >transformers.modeling_outputs.CausalLMOutputWithCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrOCRForCausalLM.forward.input_ids" class="header-link block pr-0.5 text-lg 
no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrOCRForCausalLM.forward.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <code>TrOCRTokenizer</code>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrOCRForCausalLM.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrOCRForCausalLM.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrOCRForCausalLM.forward.encoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrOCRForCausalLM.forward.encoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_hidden_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrOCRForCausalLM.forward.encoder_attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrOCRForCausalLM.forward.encoder_attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. 
Mask values selected in <code>[0, 1]</code>:<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrOCRForCausalLM.forward.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrOCRForCausalLM.forward.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrOCRForCausalLM.forward.cross_attn_head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrOCRForCausalLM.forward.cross_attn_head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cross_attn_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the cross-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrOCRForCausalLM.forward.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrOCRForCausalLM.forward.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>. 
The two additional tensors are only required when the model is used as a decoder in a Sequence to Sequence model.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrOCRForCausalLM.forward.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrOCRForCausalLM.forward.labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. Indices should either be in <code>[0, ..., config.vocab_size]</code> or -100 (see <code>input_ids</code> docstring). 
Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrOCRForCausalLM.forward.use_cache" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrOCRForCausalLM.forward.use_cache"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrOCRForCausalLM.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrOCRForCausalLM.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrOCRForCausalLM.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrOCRForCausalLM.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrOCRForCausalLM.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrOCRForCausalLM.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.TrOCRForCausalLM.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.CausalLMOutputWithCrossAttentions" >transformers.modeling_outputs.CausalLMOutputWithCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 
dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.CausalLMOutputWithCrossAttentions" >transformers.modeling_outputs.CausalLMOutputWithCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/trocr#transformers.TrOCRConfig" >TrOCRConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) — Language modeling loss (for next-token prediction).</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) — Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Cross attentions weights after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) — Tuple of <code>torch.FloatTensor</code> tuples of length <code>config.n_layers</code>, with each tuple containing the cached key, value states of the self-attention and the cross-attention layers if model is used in encoder-decoder setting. 
Only relevant if <code>config.is_decoder = True</code>.</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> VisionEncoderDecoderModel, TrOCRForCausalLM, ViTModel, TrOCRConfig, ViTConfig <span class="hljs-meta">&gt;&gt;&gt; </span>encoder = ViTModel(ViTConfig()) <span class="hljs-meta">&gt;&gt;&gt; </span>decoder = TrOCRForCausalLM(TrOCRConfig()) <span class="hljs-comment"># init vision2text model</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)<!-- HTML_TAG_END --></pre></div></div></div> <script type="module" data-hydrate="1v2vvr"> import { start } from "/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"; start({ target: document.querySelector('[data-hydrate="1v2vvr"]').parentNode, paths: {"base":"/docs/transformers/pr_16143/en","assets":"/docs/transformers/pr_16143/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"), import("/docs/transformers/pr_16143/en/_app/pages/model_doc/trocr.mdx-6100bb6b.js") ], params: {} } }); </script>
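<p>For reference, a minimal recognition sketch that uses a pretrained checkpoint instead of the freshly initialized pair above; it assumes the <code>microsoft/trocr-base-handwritten</code> checkpoint and a local text-line image saved as <code>handwriting.png</code> (a placeholder path):</p> <div class="code-block relative"><pre>from PIL import Image
from transformers import TrOCRProcessor, VisionEncoderDecoderModel

# assumption: a handwritten text-line crop stored locally as handwriting.png
processor = TrOCRProcessor.from_pretrained("microsoft/trocr-base-handwritten")
model = VisionEncoderDecoderModel.from_pretrained("microsoft/trocr-base-handwritten")

image = Image.open("handwriting.png").convert("RGB")
pixel_values = processor(images=image, return_tensors="pt").pixel_values

# autoregressively generate token ids, then decode them into a string
generated_ids = model.generate(pixel_values)
print(processor.batch_decode(generated_ids, skip_special_tokens=True)[0])</pre></div>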
218
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en
hf_public_repos/doc-build-dev/transformers/pr_16143/en/model_doc/dit.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;dit&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;overview&quot;,&quot;title&quot;:&quot;Overview&quot;}],&quot;title&quot;:&quot;DiT&quot;}" data-svelte="svelte-1phssyn"> <link rel="stylesheet" href="/docs/transformers/pr_16143/en/_app/assets/pages/__layout.svelte-a5c8879b.css"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/vendor-4833417e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/paths-4b3c6e7e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/model_doc/dit.mdx-28d290cd.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/IconCopyLink-4b81c553.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CodeBlock-6a3d1b46.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CopyButton-dacfbfaf.js"> <h1 class="relative group"><a id="dit" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#dit"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>DiT </span></h1> <h2 class="relative group"><a id="overview" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#overview"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Overview </span></h2> <p>DiT was proposed in <a href="https://arxiv.org/abs/2203.02378" rel="nofollow">DiT: Self-supervised Pre-training for Document Image Transformer</a> by Junlong Li, Yiheng Xu, Tengchao Lv, Lei Cui, Cha Zhang, Furu Wei. 
DiT applies the self-supervised objective of <a href="beit">BEiT</a> (BERT pre-training of Image Transformers) to 42 million document images, allowing for state-of-the-art results on tasks including:</p> <ul><li>document image classification: the <a href="https://www.cs.cmu.edu/~aharley/rvl-cdip/" rel="nofollow">RVL-CDIP</a> dataset (a collection of 400,000 images belonging to one of 16 classes).</li> <li>document layout analysis: the <a href="https://github.com/ibm-aur-nlp/PubLayNet" rel="nofollow">PubLayNet</a> dataset (a collection of more than 360,000 document images constructed by automatically parsing PubMed XML files).</li> <li>table detection: the <a href="https://github.com/cndplab-founder/ICDAR2019_cTDaR" rel="nofollow">ICDAR 2019 cTDaR</a> dataset (a collection of 600 training images and 240 testing images).</li></ul> <p>The abstract from the paper is the following:</p> <p><em>Image Transformer has recently achieved significant progress for natural image understanding, either using supervised (ViT, DeiT, etc.) or self-supervised (BEiT, MAE, etc.) pre-training techniques. In this paper, we propose DiT, a self-supervised pre-trained Document Image Transformer model using large-scale unlabeled text images for Document AI tasks, which is essential since no supervised counterparts ever exist due to the lack of human labeled document images. We leverage DiT as the backbone network in a variety of vision-based Document AI tasks, including document image classification, document layout analysis, as well as table detection. Experiment results have illustrated that the self-supervised pre-trained DiT model achieves new state-of-the-art results on these downstream tasks, e.g. document image classification (91.11 → 92.69), document layout analysis (91.0 → 94.9) and table detection (94.23 → 96.55). </em></p> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/dit_architecture.jpg" alt="drawing" width="600"> <small>Summary of the approach. Taken from the <a href="https://arxiv.org/abs/2203.02378" rel="nofollow">original paper</a>. 
</small> <p>One can directly use the weights of DiT with the AutoModel API:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModel model = AutoModel.from_pretrained(<span class="hljs-string">&quot;microsoft/dit-base&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>This will load the model pre-trained on masked image modeling. Note that this won’t include the language modeling head on top, used to predict visual tokens.</p> <p>To include the head, you can load the weights into a <code>BeitForMaskedImageModeling</code> model, like so:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BeitForMaskedImageModeling model = BeitForMaskedImageModeling.from_pretrained(<span class="hljs-string">&quot;microsoft/dit-base&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>You can also load a fine-tuned model from the <a href="https://huggingface.co/models?other=dit" rel="nofollow">hub</a>, like so:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 
cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModelForImageClassification model = AutoModelForImageClassification.from_pretrained(<span class="hljs-string">&quot;microsoft/dit-base-finetuned-rvlcdip&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>This particular checkpoint was fine-tuned on <a href="https://www.cs.cmu.edu/~aharley/rvl-cdip/" rel="nofollow">RVL-CDIP</a>, an important benchmark for document image classification. A notebook that illustrates inference for document image classification can be found <a href="https://github.com/NielsRogge/Transformers-Tutorials/blob/master/DiT/Inference_with_DiT_(Document_Image_Transformer)_for_document_image_classification.ipynb" rel="nofollow">here</a>.</p> <p>As DiT’s architecture is equivalent to that of BEiT, one can refer to <a href="beit">BEiT’s documentation page</a> for all tips, code examples and notebooks.</p> <p>This model was contributed by <a href="https://huggingface.co/nielsr" rel="nofollow">nielsr</a>. The original code can be found <a href="https://github.com/microsoft/unilm/tree/master/dit" rel="nofollow">here</a>.</p> <script type="module" data-hydrate="1opnpd3"> import { start } from "/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"; start({ target: document.querySelector('[data-hydrate="1opnpd3"]').parentNode, paths: {"base":"/docs/transformers/pr_16143/en","assets":"/docs/transformers/pr_16143/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"), import("/docs/transformers/pr_16143/en/_app/pages/model_doc/dit.mdx-28d290cd.js") ], params: {} } }); </script>
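<p>As a short illustration of that classification workflow, a minimal inference sketch with the fine-tuned checkpoint above; the file name <code>document.png</code> is only a placeholder for a local document scan:</p> <div class="code-block relative"><pre>from PIL import Image
from transformers import AutoFeatureExtractor, AutoModelForImageClassification

# assumption: a document scan stored locally as document.png
feature_extractor = AutoFeatureExtractor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")

image = Image.open("document.png").convert("RGB")
inputs = feature_extractor(images=image, return_tensors="pt")

# pick the highest-scoring RVL-CDIP class and map it back to its label
logits = model(**inputs).logits
predicted_class_idx = logits.argmax(-1).item()
print(model.config.id2label[predicted_class_idx])</pre></div>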
219
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en
hf_public_repos/doc-build-dev/transformers/pr_16143/en/model_doc/deberta.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;deberta&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;overview&quot;,&quot;title&quot;:&quot;Overview&quot;},{&quot;local&quot;:&quot;transformers.DebertaConfig&quot;,&quot;title&quot;:&quot;DebertaConfig&quot;},{&quot;local&quot;:&quot;transformers.DebertaTokenizer&quot;,&quot;title&quot;:&quot;DebertaTokenizer&quot;},{&quot;local&quot;:&quot;transformers.DebertaTokenizerFast&quot;,&quot;title&quot;:&quot;DebertaTokenizerFast&quot;},{&quot;local&quot;:&quot;transformers.DebertaModel&quot;,&quot;title&quot;:&quot;DebertaModel&quot;},{&quot;local&quot;:&quot;transformers.DebertaPreTrainedModel&quot;,&quot;title&quot;:&quot;DebertaPreTrainedModel&quot;},{&quot;local&quot;:&quot;transformers.DebertaForMaskedLM&quot;,&quot;title&quot;:&quot;DebertaForMaskedLM&quot;},{&quot;local&quot;:&quot;transformers.DebertaForSequenceClassification&quot;,&quot;title&quot;:&quot;DebertaForSequenceClassification&quot;},{&quot;local&quot;:&quot;transformers.DebertaForTokenClassification&quot;,&quot;title&quot;:&quot;DebertaForTokenClassification&quot;},{&quot;local&quot;:&quot;transformers.DebertaForQuestionAnswering&quot;,&quot;title&quot;:&quot;DebertaForQuestionAnswering&quot;},{&quot;local&quot;:&quot;transformers.TFDebertaModel&quot;,&quot;title&quot;:&quot;TFDebertaModel&quot;},{&quot;local&quot;:&quot;transformers.TFDebertaPreTrainedModel&quot;,&quot;title&quot;:&quot;TFDebertaPreTrainedModel&quot;},{&quot;local&quot;:&quot;transformers.TFDebertaForMaskedLM&quot;,&quot;title&quot;:&quot;TFDebertaForMaskedLM&quot;},{&quot;local&quot;:&quot;transformers.TFDebertaForSequenceClassification&quot;,&quot;title&quot;:&quot;TFDebertaForSequenceClassification&quot;},{&quot;local&quot;:&quot;transformers.TFDebertaForTokenClassification&quot;,&quot;title&quot;:&quot;TFDebertaForTokenClassification&quot;},{&quot;local&quot;:&quot;transformers.TFDebertaForQuestionAnswering&quot;,&quot;title&quot;:&quot;TFDebertaForQuestionAnswering&quot;}],&quot;title&quot;:&quot;DeBERTa&quot;}" data-svelte="svelte-1phssyn"> <link rel="stylesheet" href="/docs/transformers/pr_16143/en/_app/assets/pages/__layout.svelte-a5c8879b.css"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/vendor-4833417e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/paths-4b3c6e7e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/model_doc/deberta.mdx-e5952704.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Tip-fffd6df1.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Docstring-4f315ed9.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/IconCopyLink-4b81c553.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CodeBlock-6a3d1b46.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CopyButton-dacfbfaf.js"> <h1 class="relative group"><a id="deberta" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#deberta"><span><svg class="" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>DeBERTa </span></h1> <h2 class="relative group"><a id="overview" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#overview"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Overview </span></h2> <p>The DeBERTa model was proposed in <a href="https://arxiv.org/abs/2006.03654" rel="nofollow">DeBERTa: Decoding-enhanced BERT with Disentangled Attention</a> by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It is based on Google’s BERT model released in 2018 and Facebook’s RoBERTa model released in 2019.</p> <p>It builds on RoBERTa with disentangled attention and enhanced mask decoder training with half of the data used in RoBERTa.</p> <p>The abstract from the paper is the following:</p> <p><em>Recent progress in pre-trained neural language models has significantly improved the performance of many natural language processing (NLP) tasks. In this paper we propose a new model architecture DeBERTa (Decoding-enhanced BERT with disentangled attention) that improves the BERT and RoBERTa models using two novel techniques. The first is the disentangled attention mechanism, where each word is represented using two vectors that encode its content and position, respectively, and the attention weights among words are computed using disentangled matrices on their contents and relative positions. Second, an enhanced mask decoder is used to replace the output softmax layer to predict the masked tokens for model pretraining. We show that these two techniques significantly improve the efficiency of model pretraining and performance of downstream tasks. Compared to RoBERTa-Large, a DeBERTa model trained on half of the training data performs consistently better on a wide range of NLP tasks, achieving improvements on MNLI by +0.9% (90.2% vs. 91.1%), on SQuAD v2.0 by +2.3% (88.4% vs. 90.7%) and RACE by +3.6% (83.2% vs. 86.8%). 
The DeBERTa code and pre-trained models will be made publicly available at <a href="https://github.com/microsoft/DeBERTa" rel="nofollow">https://github.com/microsoft/DeBERTa</a>.</em></p> <p>This model was contributed by <a href="https://huggingface.co/DeBERTa" rel="nofollow">DeBERTa</a>. This model TF 2.0 implementation was contributed by <a href="https://huggingface.co/kamalkraj" rel="nofollow">kamalkraj</a> . The original code can be found <a href="https://github.com/microsoft/DeBERTa" rel="nofollow">here</a>.</p> <h2 class="relative group"><a id="transformers.DebertaConfig" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaConfig"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>DebertaConfig </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.DebertaConfig"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">DebertaConfig</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.DebertaConfig" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.DebertaConfig"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 
56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/deberta/configuration_deberta.py#L33" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">vocab_size<span class="opacity-60"> = 50265</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_size<span class="opacity-60"> = 768</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_hidden_layers<span class="opacity-60"> = 12</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_attention_heads<span class="opacity-60"> = 12</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">intermediate_size<span class="opacity-60"> = 3072</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_act<span class="opacity-60"> = &#39;gelu&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_dropout_prob<span class="opacity-60"> = 0.1</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_probs_dropout_prob<span class="opacity-60"> = 0.1</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_position_embeddings<span class="opacity-60"> = 512</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">type_vocab_size<span class="opacity-60"> = 0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">initializer_range<span class="opacity-60"> = 0.02</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">layer_norm_eps<span class="opacity-60"> = 1e-07</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">relative_attention<span class="opacity-60"> = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_relative_positions<span class="opacity-60"> = -1</span></span> </span><span 
class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_token_id<span class="opacity-60"> = 0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_biased_input<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pos_att_type<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pooler_dropout<span class="opacity-60"> = 0</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pooler_hidden_act<span class="opacity-60"> = &#39;gelu&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DebertaConfig.vocab_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaConfig.vocab_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 30522) &#x2014; Vocabulary size of the DeBERTa model. 
Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed when calling <a href="/docs/transformers/pr_16143/en/model_doc/deberta#transformers.DebertaModel">DebertaModel</a> or <a href="/docs/transformers/pr_16143/en/model_doc/deberta#transformers.TFDebertaModel">TFDebertaModel</a>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DebertaConfig.hidden_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaConfig.hidden_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_size</strong> (<code>int</code>, <em>optional</em>, defaults to 768) &#x2014; Dimensionality of the encoder layers and the pooler layer.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DebertaConfig.num_hidden_layers" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaConfig.num_hidden_layers"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_hidden_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of hidden layers in the Transformer encoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DebertaConfig.num_attention_heads" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaConfig.num_attention_heads"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" 
aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DebertaConfig.intermediate_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaConfig.intermediate_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>intermediate_size</strong> (<code>int</code>, <em>optional</em>, defaults to 3072) &#x2014; Dimensionality of the &#x201C;intermediate&#x201D; (often named feed-forward) layer in the Transformer encoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DebertaConfig.hidden_act" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaConfig.hidden_act"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_act</strong> (<code>str</code> or 
<code>Callable</code>, <em>optional</em>, defaults to <code>&quot;gelu&quot;</code>) &#x2014; The non-linear activation function (function or string) in the encoder and pooler. If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;silu&quot;</code>, <code>&quot;gelu&quot;</code>, <code>&quot;tanh&quot;</code>, <code>&quot;gelu_fast&quot;</code>, <code>&quot;mish&quot;</code>, <code>&quot;linear&quot;</code>, <code>&quot;sigmoid&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DebertaConfig.hidden_dropout_prob" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaConfig.hidden_dropout_prob"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DebertaConfig.attention_probs_dropout_prob" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaConfig.attention_probs_dropout_prob"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_probs_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout ratio for the attention probabilities.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DebertaConfig.max_position_embeddings" class="header-link block pr-0.5 text-lg 
no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaConfig.max_position_embeddings"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_position_embeddings</strong> (<code>int</code>, <em>optional</em>, defaults to 512) &#x2014; The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DebertaConfig.type_vocab_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaConfig.type_vocab_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>type_vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; The vocabulary size of the <code>token_type_ids</code> passed when calling <a href="/docs/transformers/pr_16143/en/model_doc/deberta#transformers.DebertaModel">DebertaModel</a> or <a href="/docs/transformers/pr_16143/en/model_doc/deberta#transformers.TFDebertaModel">TFDebertaModel</a>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DebertaConfig.initializer_range" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaConfig.initializer_range"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 
1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>initializer_range</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DebertaConfig.layer_norm_eps" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaConfig.layer_norm_eps"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>layer_norm_eps</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-12) &#x2014; The epsilon used by the layer normalization layers.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DebertaConfig.relative_attention" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaConfig.relative_attention"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>relative_attention</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to use relative position encoding.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a 
id="transformers.DebertaConfig.max_relative_positions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaConfig.max_relative_positions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_relative_positions</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; The range of relative positions <code>[-max_position_embeddings, max_position_embeddings]</code>. Use the same value as <code>max_position_embeddings</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DebertaConfig.pad_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaConfig.pad_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pad_token_id</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; The value used to pad input_ids.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DebertaConfig.position_biased_input" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaConfig.position_biased_input"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 
79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_biased_input</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether to add absolute position embeddings to the content embeddings.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DebertaConfig.pos_att_type" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaConfig.pos_att_type"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pos_att_type</strong> (<code>List[str]</code>, <em>optional</em>) &#x2014; The type of relative position attention. It can be a combination of <code>[&quot;p2c&quot;, &quot;c2p&quot;]</code>, e.g. 
<code>[&quot;p2c&quot;]</code>, <code>[&quot;p2c&quot;, &quot;c2p&quot;]</code>.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>This is the configuration class to store the configuration of a <a href="/docs/transformers/pr_16143/en/model_doc/deberta#transformers.DebertaModel">DebertaModel</a> or a <a href="/docs/transformers/pr_16143/en/model_doc/deberta#transformers.TFDebertaModel">TFDebertaModel</a>. It is used to instantiate a DeBERTa model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the DeBERTa <a href="https://huggingface.co/microsoft/deberta-base" rel="nofollow">microsoft/deberta-base</a> architecture.</p> <p>Configuration objects inherit from <a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a> and can be used to control the model outputs. 
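<p>For example, a minimal usage sketch (instantiating the configuration directly and building a randomly initialized model from it; only classes already documented on this page are used):</p> <div class="code-block relative"> <pre><!-- HTML_TAG_START -->from transformers import DebertaConfig, DebertaModel

# Initializing a DeBERTa configuration with default values
# (yields a configuration similar to microsoft/deberta-base)
configuration = DebertaConfig()

# Initializing a model from the configuration (weights are randomly initialized)
model = DebertaModel(configuration)

# Accessing the configuration attached to the model
configuration = model.config<!-- HTML_TAG_END --></pre></div>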
Read the documentation from <a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a> for more information.</p></div> <h2 class="relative group"><a id="transformers.DebertaTokenizer" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaTokenizer"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>DebertaTokenizer </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.DebertaTokenizer"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">DebertaTokenizer</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.DebertaTokenizer" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.DebertaTokenizer"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a 
class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/deberta/tokenization_deberta.py#L62" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">vocab_file<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">merges_file<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">errors<span class="opacity-60"> = &#39;replace&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">bos_token<span class="opacity-60"> = &#39;[CLS]&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eos_token<span class="opacity-60"> = &#39;[SEP]&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">sep_token<span class="opacity-60"> = &#39;[SEP]&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cls_token<span class="opacity-60"> = &#39;[CLS]&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">unk_token<span class="opacity-60"> = &#39;[UNK]&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_token<span class="opacity-60"> = &#39;[PAD]&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">mask_token<span class="opacity-60"> = &#39;[MASK]&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">add_prefix_space<span class="opacity-60"> = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DebertaTokenizer.vocab_file" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaTokenizer.vocab_file"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" 
role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>vocab_file</strong> (<code>str</code>) &#x2014; File containing the vocabulary.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DebertaTokenizer.do_lower_case" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaTokenizer.do_lower_case"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>do_lower_case</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to lowercase the input when tokenizing.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DebertaTokenizer.unk_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaTokenizer.unk_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[UNK]&quot;</code>) &#x2014; The unknown token. 
A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DebertaTokenizer.sep_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaTokenizer.sep_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>sep_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[SEP]&quot;</code>) &#x2014; The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DebertaTokenizer.pad_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaTokenizer.pad_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pad_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[PAD]&quot;</code>) &#x2014; The token used for padding, for example when batching sequences of different lengths.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DebertaTokenizer.cls_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaTokenizer.cls_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cls_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[CLS]&quot;</code>) &#x2014; The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DebertaTokenizer.mask_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaTokenizer.mask_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>mask_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[MASK]&quot;</code>) &#x2014; The token used for masking values. This is the token used when training this model with masked language modeling. 
This is the token which the model will try to predict.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Constructs a DeBERTa tokenizer, which runs end-to-end tokenization: punctuation splitting + wordpiece</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.DebertaTokenizer.build_inputs_with_special_tokens"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>build_inputs_with_special_tokens</span></h4><!-- HTML_TAG_END --> <a id="transformers.DebertaTokenizer.build_inputs_with_special_tokens" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.DebertaTokenizer.build_inputs_with_special_tokens"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/deberta/tokenization_deberta.py#L133" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_0<span class="opacity-60">: typing.List[int]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">token_ids_1<span class="opacity-60">: typing.Optional[typing.List[int]] = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>List[int]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DebertaTokenizer.build_inputs_with_special_tokens.token_ids_0" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaTokenizer.build_inputs_with_special_tokens.token_ids_0"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs to which the special tokens will be added.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DebertaTokenizer.build_inputs_with_special_tokens.token_ids_1" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaTokenizer.build_inputs_with_special_tokens.token_ids_1"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.DebertaTokenizer.build_inputs_with_special_tokens.returns"><p class="text-base">Returns</p> <!-- 
HTML_TAG_START --> <p><code>List[int]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>List of <a href="../glossary#input-ids">input IDs</a> with the appropriate special tokens.</p> <!-- HTML_TAG_END --></p></div></div> <p>Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A DeBERTa sequence has the following format:</p> <ul><li>single sequence: [CLS] X [SEP]</li> <li>pair of sequences: [CLS] A [SEP] B [SEP]</li></ul></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.DebertaTokenizer.get_special_tokens_mask"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>get_special_tokens_mask</span></h4><!-- HTML_TAG_END --> <a id="transformers.DebertaTokenizer.get_special_tokens_mask" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.DebertaTokenizer.get_special_tokens_mask"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/deberta/tokenization_deberta.py#L158" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm 
!leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_0<span class="opacity-60">: typing.List[int]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_1<span class="opacity-60">: typing.Optional[typing.List[int]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">already_has_special_tokens<span class="opacity-60">: bool = False</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>List[int]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DebertaTokenizer.get_special_tokens_mask.token_ids_0" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaTokenizer.get_special_tokens_mask.token_ids_0"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DebertaTokenizer.get_special_tokens_mask.token_ids_1" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaTokenizer.get_special_tokens_mask.token_ids_1"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 
11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DebertaTokenizer.get_special_tokens_mask.already_has_special_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaTokenizer.get_special_tokens_mask.already_has_special_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>already_has_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the token list is already formatted with special tokens for the model.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.DebertaTokenizer.get_special_tokens_mask.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>List[int]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.</p> <!-- HTML_TAG_END --></p></div></div> <p>Retrieves sequence ids from a token list that has no special tokens added. 
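<p>As a rough illustration (a sketch only, assuming the <code>microsoft/deberta-base</code> checkpoint referenced above is available and using the <code>already_has_special_tokens</code> argument documented here):</p> <div class="code-block relative"> <pre><!-- HTML_TAG_START -->from transformers import DebertaTokenizer

tokenizer = DebertaTokenizer.from_pretrained("microsoft/deberta-base")

# Encode a sentence; special tokens ([CLS], [SEP]) are added by default
ids = tokenizer.encode("Hello world")

# 1 marks special tokens, 0 marks ordinary sequence tokens
mask = tokenizer.get_special_tokens_mask(ids, already_has_special_tokens=True)<!-- HTML_TAG_END --></pre></div>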
This method is called when adding special tokens using the tokenizer <code>prepare_for_model</code> or <code>encode_plus</code> methods.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.DebertaTokenizer.create_token_type_ids_from_sequences"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>create_token_type_ids_from_sequences</span></h4><!-- HTML_TAG_END --> <a id="transformers.DebertaTokenizer.create_token_type_ids_from_sequences" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.DebertaTokenizer.create_token_type_ids_from_sequences"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/deberta/tokenization_deberta.py#L185" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_0<span class="opacity-60">: typing.List[int]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_1<span class="opacity-60">: 
typing.Optional[typing.List[int]] = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>List[int]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DebertaTokenizer.create_token_type_ids_from_sequences.token_ids_0" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaTokenizer.create_token_type_ids_from_sequences.token_ids_0"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DebertaTokenizer.create_token_type_ids_from_sequences.token_ids_1" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaTokenizer.create_token_type_ids_from_sequences.token_ids_1"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.DebertaTokenizer.create_token_type_ids_from_sequences.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>List[int]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto 
border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>List of <a href="../glossary#token-type-ids">token type IDs</a> according to the given sequence(s).</p> <!-- HTML_TAG_END --></p></div></div> <p>Create a mask from the two sequences passed to be used in a sequence-pair classification task. A DeBERTa sequence pair mask has the following format:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
| first sequence    | second sequence |<!-- HTML_TAG_END --></pre></div> <p>If <code>token_ids_1</code> is <code>None</code>, this method only returns the first portion of the mask (0s).</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.GPT2Tokenizer.save_vocabulary"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 
13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>save_vocabulary</span></h4><!-- HTML_TAG_END --> <a id="transformers.GPT2Tokenizer.save_vocabulary" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.GPT2Tokenizer.save_vocabulary"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/gpt2/tokenization_gpt2.py#L269" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">save_directory<span class="opacity-60">: str</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">filename_prefix<span class="opacity-60">: typing.Optional[str] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div></div></div> <h2 class="relative group"><a id="transformers.DebertaTokenizerFast" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaTokenizerFast"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>DebertaTokenizerFast </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.DebertaTokenizerFast"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 
inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">DebertaTokenizerFast</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.DebertaTokenizerFast" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.DebertaTokenizerFast"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/deberta/tokenization_deberta_fast.py#L63" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">vocab_file<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">merges_file<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tokenizer_file<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">errors<span class="opacity-60"> = &#39;replace&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">bos_token<span class="opacity-60"> = &#39;[CLS]&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eos_token<span class="opacity-60"> = &#39;[SEP]&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black 
hover:text-white dark:hover:bg-white dark:hover:text-black">sep_token<span class="opacity-60"> = &#39;[SEP]&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cls_token<span class="opacity-60"> = &#39;[CLS]&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">unk_token<span class="opacity-60"> = &#39;[UNK]&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_token<span class="opacity-60"> = &#39;[PAD]&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">mask_token<span class="opacity-60"> = &#39;[MASK]&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">add_prefix_space<span class="opacity-60"> = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DebertaTokenizerFast.vocab_file" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaTokenizerFast.vocab_file"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>vocab_file</strong> (<code>str</code>) &#x2014; File containing the vocabulary.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DebertaTokenizerFast.do_lower_case" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaTokenizerFast.do_lower_case"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 
1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>do_lower_case</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to lowercase the input when tokenizing.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DebertaTokenizerFast.unk_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaTokenizerFast.unk_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[UNK]&quot;</code>) &#x2014; The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DebertaTokenizerFast.sep_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaTokenizerFast.sep_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>sep_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[SEP]&quot;</code>) &#x2014; The separator token, which is used when building a sequence from multiple sequences, e.g. 
two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DebertaTokenizerFast.pad_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaTokenizerFast.pad_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pad_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[PAD]&quot;</code>) &#x2014; The token used for padding, for example when batching sequences of different lengths.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DebertaTokenizerFast.cls_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaTokenizerFast.cls_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cls_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[CLS]&quot;</code>) &#x2014; The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). 
It is the first token of the sequence when built with special tokens.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DebertaTokenizerFast.mask_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaTokenizerFast.mask_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>mask_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[MASK]&quot;</code>) &#x2014; The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Constructs a “fast” DeBERTa tokenizer, which runs end-to-end tokenization based on byte-level Byte-Pair-Encoding. It is backed by HuggingFace’s <em>tokenizers</em> library.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.DebertaTokenizerFast.build_inputs_with_special_tokens"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>build_inputs_with_special_tokens</span></h4><!-- HTML_TAG_END --> <a id="transformers.DebertaTokenizerFast.build_inputs_with_special_tokens" class="header-link invisible 
with-hover:group-hover:visible pr-2" href="#transformers.DebertaTokenizerFast.build_inputs_with_special_tokens"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/deberta/tokenization_deberta_fast.py#L153" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_0<span class="opacity-60">: typing.List[int]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_1<span class="opacity-60">: typing.Optional[typing.List[int]] = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>List[int]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DebertaTokenizerFast.build_inputs_with_special_tokens.token_ids_0" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaTokenizerFast.build_inputs_with_special_tokens.token_ids_0"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs to which the special tokens will be added.<!-- HTML_TAG_END --> </span></span> </li><li 
class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DebertaTokenizerFast.build_inputs_with_special_tokens.token_ids_1" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaTokenizerFast.build_inputs_with_special_tokens.token_ids_1"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.DebertaTokenizerFast.build_inputs_with_special_tokens.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>List[int]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>List of <a href="../glossary#input-ids">input IDs</a> with the appropriate special tokens.</p> <!-- HTML_TAG_END --></p></div></div> <p>Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. 
A DeBERTa sequence has the following format:</p> <ul><li>single sequence: [CLS] X [SEP]</li> <li>pair of sequences: [CLS] A [SEP] B [SEP]</li></ul></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.DebertaTokenizerFast.create_token_type_ids_from_sequences"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>create_token_type_ids_from_sequences</span></h4><!-- HTML_TAG_END --> <a id="transformers.DebertaTokenizerFast.create_token_type_ids_from_sequences" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.DebertaTokenizerFast.create_token_type_ids_from_sequences"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/deberta/tokenization_deberta_fast.py#L178" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_0<span class="opacity-60">: typing.List[int]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_1<span 
class="opacity-60">: typing.Optional[typing.List[int]] = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>List[int]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DebertaTokenizerFast.create_token_type_ids_from_sequences.token_ids_0" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaTokenizerFast.create_token_type_ids_from_sequences.token_ids_0"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DebertaTokenizerFast.create_token_type_ids_from_sequences.token_ids_1" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaTokenizerFast.create_token_type_ids_from_sequences.token_ids_1"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.DebertaTokenizerFast.create_token_type_ids_from_sequences.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>List[int]</code></p> 
<!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>List of <a href="../glossary#token-type-ids">token type IDs</a> according to the given sequence(s).</p> <!-- HTML_TAG_END --></p></div></div> <p>Create a mask from the two sequences passed to be used in a sequence-pair classification task. A DeBERTa sequence pair mask has the following format:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0 0
| first sequence | second sequence |<!-- HTML_TAG_END --></pre></div> <p>If <code>token_ids_1</code> is <code>None</code>, this method only returns the first portion of the mask (0s).</p></div></div> <h2 class="relative group"><a id="transformers.DebertaModel" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaModel"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>DebertaModel </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.DebertaModel"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 
rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">DebertaModel</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.DebertaModel" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.DebertaModel"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/deberta/modeling_deberta.py#L883" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DebertaModel.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaModel.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 
1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/deberta#transformers.DebertaConfig">DebertaConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>The bare DeBERTa Model transformer outputting raw hidden-states without any specific head on top. The DeBERTa model was proposed in <a href="https://arxiv.org/abs/2006.03654" rel="nofollow">DeBERTa: Decoding-enhanced BERT with Disentangled Attention</a> by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It’s built on top of BERT/RoBERTa with two improvements, i.e. disentangled attention and an enhanced mask decoder. With those two improvements, it outperforms BERT/RoBERTa on a majority of tasks with 80GB of pretraining data.</p> <p>This model is also a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and behavior.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.DebertaModel.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.DebertaModel.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.DebertaModel.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/deberta/modeling_deberta.py#L907" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.BaseModelOutput" >transformers.modeling_outputs.BaseModelOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base 
!pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DebertaModel.forward.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaModel.forward.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/deberta#transformers.DebertaTokenizer">DebertaTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DebertaModel.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaModel.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DebertaModel.forward.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaModel.forward.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DebertaModel.forward.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaModel.forward.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DebertaModel.forward.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaModel.forward.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DebertaModel.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaModel.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DebertaModel.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaModel.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DebertaModel.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaModel.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.DebertaModel.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.BaseModelOutput" >transformers.modeling_outputs.BaseModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- 
HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.BaseModelOutput" >transformers.modeling_outputs.BaseModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/deberta#transformers.DebertaConfig" >DebertaConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) — Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/deberta#transformers.DebertaModel">DebertaModel</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" 
style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DebertaTokenizer, DebertaModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = DebertaTokenizer.from_pretrained(<span class="hljs-string">&quot;microsoft/deberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = DebertaModel.from_pretrained(<span class="hljs-string">&quot;microsoft/deberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.DebertaPreTrainedModel" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaPreTrainedModel"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>DebertaPreTrainedModel </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.DebertaPreTrainedModel"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span 
class="font-semibold">DebertaPreTrainedModel</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.DebertaPreTrainedModel" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.DebertaPreTrainedModel"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/deberta/modeling_deberta.py#L788" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60">: PretrainedConfig</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*inputs<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models.</p></div> <h2 class="relative group"><a id="transformers.DebertaForMaskedLM" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaForMaskedLM"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>DebertaForMaskedLM </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.DebertaForMaskedLM"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg 
bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">DebertaForMaskedLM</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.DebertaForMaskedLM" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.DebertaForMaskedLM"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/deberta/modeling_deberta.py#L995" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DebertaForMaskedLM.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaForMaskedLM.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 
0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/deberta#transformers.DebertaConfig">DebertaConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>DeBERTa Model with a <code>language modeling</code> head on top. The DeBERTa model was proposed in <a href="https://arxiv.org/abs/2006.03654" rel="nofollow">DeBERTa: Decoding-enhanced BERT with Disentangled Attention</a> by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It’s built on top of BERT/RoBERTa with two improvements, i.e. disentangled attention and enhanced mask decoder. With those two improvements, it outperforms BERT/RoBERTa on a majority of tasks with 80GB of pretraining data.</p> <p>This model is also a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and behavior.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.DebertaForMaskedLM.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.DebertaForMaskedLM.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.DebertaForMaskedLM.forward"><svg class="" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/deberta/modeling_deberta.py#L1014" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.MaskedLMOutput" >transformers.modeling_outputs.MaskedLMOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> 
<div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DebertaForMaskedLM.forward.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaForMaskedLM.forward.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/deberta#transformers.DebertaTokenizer">DebertaTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DebertaForMaskedLM.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaForMaskedLM.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DebertaForMaskedLM.forward.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaForMaskedLM.forward.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DebertaForMaskedLM.forward.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaForMaskedLM.forward.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DebertaForMaskedLM.forward.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaForMaskedLM.forward.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DebertaForMaskedLM.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaForMaskedLM.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DebertaForMaskedLM.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaForMaskedLM.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DebertaForMaskedLM.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaForMaskedLM.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DebertaForMaskedLM.forward.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaForMaskedLM.forward.labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code><!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.DebertaForMaskedLM.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.MaskedLMOutput" >transformers.modeling_outputs.MaskedLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.MaskedLMOutput" >transformers.modeling_outputs.MaskedLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/deberta#transformers.DebertaConfig" >DebertaConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) — Masked language modeling (MLM) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) — Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention 
softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/deberta#transformers.DebertaForMaskedLM">DebertaForMaskedLM</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DebertaTokenizer, DebertaForMaskedLM <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = DebertaTokenizer.from_pretrained(<span class="hljs-string">&quot;microsoft/deberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = DebertaForMaskedLM.from_pretrained(<span class="hljs-string">&quot;microsoft/deberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;The capital of France is [MASK].&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = tokenizer(<span class="hljs-string">&quot;The capital of France is Paris.&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>)[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.DebertaForSequenceClassification" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 
with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaForSequenceClassification"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>DebertaForSequenceClassification </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.DebertaForSequenceClassification"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">DebertaForSequenceClassification</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.DebertaForSequenceClassification" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.DebertaForSequenceClassification"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/deberta/modeling_deberta.py#L1130" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 
hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DebertaForSequenceClassification.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaForSequenceClassification.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/deberta#transformers.DebertaConfig">DebertaConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>DeBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.</p> <p>The DeBERTa model was proposed in <a href="https://arxiv.org/abs/2006.03654" rel="nofollow">DeBERTa: Decoding-enhanced BERT with Disentangled Attention</a> by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It’s build on top of BERT/RoBERTa with two improvements, i.e. disentangled attention and enhanced mask decoder. With those two improvements, it out perform BERT/RoBERTa on a majority of tasks with 80GB pretraining data.</p> <p>This model is also a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and behavior.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.DebertaForSequenceClassification.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.DebertaForSequenceClassification.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.DebertaForSequenceClassification.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/deberta/modeling_deberta.py#L1155" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma 
cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DebertaForSequenceClassification.forward.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaForSequenceClassification.forward.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the 
vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/deberta#transformers.DebertaTokenizer">DebertaTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DebertaForSequenceClassification.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaForSequenceClassification.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DebertaForSequenceClassification.forward.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaForSequenceClassification.forward.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DebertaForSequenceClassification.forward.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaForSequenceClassification.forward.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DebertaForSequenceClassification.forward.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaForSequenceClassification.forward.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DebertaForSequenceClassification.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaForSequenceClassification.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DebertaForSequenceClassification.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaForSequenceClassification.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DebertaForSequenceClassification.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaForSequenceClassification.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DebertaForSequenceClassification.forward.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaForSequenceClassification.forward.labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.DebertaForSequenceClassification.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/deberta#transformers.DebertaConfig" >DebertaConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) — Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) — Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape 
<code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/deberta#transformers.DebertaForSequenceClassification">DebertaForSequenceClassification</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example of single-label classification:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DebertaTokenizer, DebertaForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span>torch.manual_seed(<span class="hljs-number">0</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = DebertaTokenizer.from_pretrained(<span class="hljs-string">&quot;microsoft/deberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = DebertaForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;microsoft/deberta-base&quot;</span>, num_labels=<span class="hljs-number">2</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([<span class="hljs-number">1</span>]).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span 
class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">list</span>(logits.shape) <!-- HTML_TAG_END --></pre></div> <p>Example of multi-label classification:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DebertaTokenizer, DebertaForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span>torch.manual_seed(<span class="hljs-number">0</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = DebertaTokenizer.from_pretrained(<span class="hljs-string">&quot;microsoft/deberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = DebertaForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;microsoft/deberta-base&quot;</span>, problem_type=<span class="hljs-string">&quot;multi_label_classification&quot;</span>, num_labels=<span class="hljs-number">2</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([[<span class="hljs-number">1</span>, <span class="hljs-number">1</span>]], dtype=torch.<span class="hljs-built_in">float</span>) <span class="hljs-comment"># need dtype=float for BCEWithLogitsLoss</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">list</span>(logits.shape) <!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.DebertaForTokenClassification" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaForTokenClassification"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path 
d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>DebertaForTokenClassification </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.DebertaForTokenClassification"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">DebertaForTokenClassification</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.DebertaForTokenClassification" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.DebertaForTokenClassification"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/deberta/modeling_deberta.py#L1249" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div 
class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DebertaForTokenClassification.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaForTokenClassification.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/deberta#transformers.DebertaConfig">DebertaConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>DeBERTa Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.</p> <p>The DeBERTa model was proposed in <a href="https://arxiv.org/abs/2006.03654" rel="nofollow">DeBERTa: Decoding-enhanced BERT with Disentangled Attention</a> by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It’s build on top of BERT/RoBERTa with two improvements, i.e. disentangled attention and enhanced mask decoder. With those two improvements, it out perform BERT/RoBERTa on a majority of tasks with 80GB pretraining data.</p> <p>This model is also a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and behavior.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.DebertaForTokenClassification.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.DebertaForTokenClassification.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.DebertaForTokenClassification.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/deberta/modeling_deberta.py#L1263" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma 
cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DebertaForTokenClassification.forward.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaForTokenClassification.forward.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> 
<p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/deberta#transformers.DebertaTokenizer">DebertaTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DebertaForTokenClassification.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaForTokenClassification.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DebertaForTokenClassification.forward.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaForTokenClassification.forward.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DebertaForTokenClassification.forward.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaForTokenClassification.forward.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DebertaForTokenClassification.forward.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaForTokenClassification.forward.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DebertaForTokenClassification.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaForTokenClassification.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DebertaForTokenClassification.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaForTokenClassification.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DebertaForTokenClassification.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaForTokenClassification.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DebertaForTokenClassification.forward.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaForTokenClassification.forward.labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the token classification loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.DebertaForTokenClassification.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/deberta#transformers.DebertaConfig" >DebertaConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) — Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.num_labels)</code>) — Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a 
href="/docs/transformers/pr_16143/en/model_doc/deberta#transformers.DebertaForTokenClassification">DebertaForTokenClassification</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DebertaTokenizer, DebertaForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = DebertaTokenizer.from_pretrained(<span class="hljs-string">&quot;microsoft/deberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = DebertaForTokenClassification.from_pretrained(<span class="hljs-string">&quot;microsoft/deberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([<span class="hljs-number">1</span>] * inputs[<span class="hljs-string">&quot;input_ids&quot;</span>].size(<span class="hljs-number">1</span>)).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.DebertaForQuestionAnswering" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.DebertaForQuestionAnswering"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>DebertaForQuestionAnswering </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.DebertaForQuestionAnswering"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">DebertaForQuestionAnswering</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.DebertaForQuestionAnswering" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.DebertaForQuestionAnswering"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/deberta/modeling_deberta.py#L1325" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm 
!leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DebertaForQuestionAnswering.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaForQuestionAnswering.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/deberta#transformers.DebertaConfig">DebertaConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>DeBERTa Model with a span classification head on top for extractive question-answering tasks like SQuAD (linear layers on top of the hidden-states output to compute <code>span start logits</code> and <code>span end logits</code>).</p> <p>The DeBERTa model was proposed in <a href="https://arxiv.org/abs/2006.03654" rel="nofollow">DeBERTa: Decoding-enhanced BERT with Disentangled Attention</a> by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It’s built on top of BERT/RoBERTa with two improvements, i.e. disentangled attention and an enhanced mask decoder. With those two improvements, it outperforms BERT/RoBERTa on a majority of tasks with 80GB of pretraining data.</p> <p>This model is also a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and behavior.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.DebertaForQuestionAnswering.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.DebertaForQuestionAnswering.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.DebertaForQuestionAnswering.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/deberta/modeling_deberta.py#L1338" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma 
cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">start_positions<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">end_positions<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.QuestionAnsweringModelOutput" >transformers.modeling_outputs.QuestionAnsweringModelOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DebertaForQuestionAnswering.forward.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaForQuestionAnswering.forward.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/deberta#transformers.DebertaTokenizer">DebertaTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DebertaForQuestionAnswering.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaForQuestionAnswering.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DebertaForQuestionAnswering.forward.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaForQuestionAnswering.forward.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DebertaForQuestionAnswering.forward.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaForQuestionAnswering.forward.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DebertaForQuestionAnswering.forward.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaForQuestionAnswering.forward.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DebertaForQuestionAnswering.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaForQuestionAnswering.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DebertaForQuestionAnswering.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaForQuestionAnswering.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DebertaForQuestionAnswering.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaForQuestionAnswering.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DebertaForQuestionAnswering.forward.start_positions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaForQuestionAnswering.forward.start_positions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>start_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). Position outside of the sequence are not taken into account for computing the loss.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DebertaForQuestionAnswering.forward.end_positions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DebertaForQuestionAnswering.forward.end_positions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>end_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). 
Position outside of the sequence are not taken into account for computing the loss.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.DebertaForQuestionAnswering.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.QuestionAnsweringModelOutput" >transformers.modeling_outputs.QuestionAnsweringModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.QuestionAnsweringModelOutput" >transformers.modeling_outputs.QuestionAnsweringModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/deberta#transformers.DebertaConfig" >DebertaConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) — Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.</p> </li> <li> <p><strong>start_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) — Span-start scores (before SoftMax).</p> </li> <li> <p><strong>end_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) — Span-end scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/deberta#transformers.DebertaForQuestionAnswering">DebertaForQuestionAnswering</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the 
latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DebertaTokenizer, DebertaForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>torch.manual_seed(<span class="hljs-number">0</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = DebertaTokenizer.from_pretrained(<span class="hljs-string">&quot;microsoft/deberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = DebertaForQuestionAnswering.from_pretrained(<span class="hljs-string">&quot;microsoft/deberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>question, text = <span class="hljs-string">&quot;Who was Jim Henson?&quot;</span>, <span class="hljs-string">&quot;Jim Henson was a nice puppet&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(question, text, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>start_positions = torch.tensor([<span class="hljs-number">1</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>end_positions = torch.tensor([<span class="hljs-number">3</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, start_positions=start_positions, end_positions=end_positions) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">round</span>(loss.item(), <span class="hljs-number">2</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>start_scores = outputs.start_logits <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">list</span>(start_scores.shape) <span class="hljs-meta">&gt;&gt;&gt; </span>end_scores = outputs.end_logits <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">list</span>(end_scores.shape) <!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.TFDebertaModel" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFDebertaModel"><span><svg class="" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TFDebertaModel </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFDebertaModel"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TFDebertaModel</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TFDebertaModel" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFDebertaModel"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/deberta/modeling_tf_deberta.py#L1088" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white 
dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFDebertaModel.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFDebertaModel.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/deberta#transformers.DebertaConfig">DebertaConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>The bare DeBERTa Model transformer outputting raw hidden-states without any specific head on top. The DeBERTa model was proposed in <a href="https://arxiv.org/abs/2006.03654" rel="nofollow">DeBERTa: Decoding-enhanced BERT with Disentangled Attention</a> by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It’s built on top of BERT/RoBERTa with two improvements, i.e. disentangled attention and an enhanced mask decoder. With those two improvements, it outperforms BERT/RoBERTa on a majority of tasks with 80GB of pretraining data.</p> <p>This model is also a <a href="https://www.tensorflow.org/api_docs/python/tf/keras/Model" rel="nofollow">tf.keras.Model</a> subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and behavior.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>TF 2.0 models accept two formats as inputs:</p> <ul><li>having all inputs as keyword arguments (like PyTorch models), or</li> <li>having all inputs as a list, tuple or dict in the first positional argument.</li></ul> <p>This second option is useful when using the <code>tf.keras.Model.fit</code> method, which currently requires having all the tensors in the first argument of the model call function: <code>model(inputs)</code>.</p> <p>If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument:</p> <ul><li>a single Tensor with <code>input_ids</code> only and nothing else: <code>model(input_ids)</code></li> <li>a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: <code>model([input_ids, attention_mask])</code> or <code>model([input_ids, attention_mask, token_type_ids])</code></li> <li>a dictionary with one or several input Tensors associated with the input names given in the docstring: <code>model({&quot;input_ids&quot;: input_ids, &quot;token_type_ids&quot;: token_type_ids})</code></li></ul></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFDebertaModel.call"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>call</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFDebertaModel.call" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFDebertaModel.call"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 
0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/deberta/modeling_tf_deberta.py#L1094" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">training<span class="opacity-60">: typing.Optional[bool] = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black 
hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFBaseModelOutput" >transformers.modeling_tf_outputs.TFBaseModelOutput</a> or <code>tuple(tf.Tensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFDebertaModel.call.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFDebertaModel.call.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>np.ndarray</code>, <code>tf.Tensor</code>, <code>List[tf.Tensor]</code> `<code>Dict[str, tf.Tensor]</code> or <code>Dict[str, np.ndarray]</code> and each example must have the shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/deberta#transformers.DebertaTokenizer">DebertaTokenizer</a>. 
See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFDebertaModel.call.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFDebertaModel.call.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFDebertaModel.call.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFDebertaModel.call.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFDebertaModel.call.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFDebertaModel.call.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFDebertaModel.call.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFDebertaModel.call.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFDebertaModel.call.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFDebertaModel.call.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFDebertaModel.call.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFDebertaModel.call.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFDebertaModel.call.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFDebertaModel.call.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <code>ModelOutput</code> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.TFDebertaModel.call.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFBaseModelOutput" >transformers.modeling_tf_outputs.TFBaseModelOutput</a> or <code>tuple(tf.Tensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFBaseModelOutput" >transformers.modeling_tf_outputs.TFBaseModelOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/deberta#transformers.DebertaConfig" >DebertaConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) — Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads,
sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/deberta#transformers.TFDebertaModel">TFDebertaModel</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DebertaTokenizer, TFDebertaModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = DebertaTokenizer.from_pretrained(<span class="hljs-string">&quot;kamalkraj/deberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFDebertaModel.from_pretrained(<span class="hljs-string">&quot;kamalkraj/deberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.TFDebertaPreTrainedModel" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFDebertaPreTrainedModel"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" 
role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TFDebertaPreTrainedModel </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFDebertaPreTrainedModel"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TFDebertaPreTrainedModel</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TFDebertaPreTrainedModel" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFDebertaPreTrainedModel"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/deberta/modeling_tf_deberta.py#L993" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="None"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>call</span></h4><!-- HTML_TAG_END --> <a id="None" class="header-link invisible with-hover:group-hover:visible pr-2" href="#None"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/keras/engine/training.py#L450" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">training<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">mask<span class="opacity-60"> = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Calls the model on new inputs and returns the outputs as tensors.</p> <p>In this case <code>call()</code> just reapplies all ops in the graph to the new inputs (e.g. build a new computational graph from the provided inputs).</p> <p>Note: This method should not be called directly. It is only meant to be overridden when subclassing <code>tf.keras.Model</code>. To call a model on an input, always use the <code>__call__()</code> method, i.e. <code>model(inputs)</code>, which relies on the underlying <code>call()</code> method.</p></div></div> <h2 class="relative group"><a id="transformers.TFDebertaForMaskedLM" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFDebertaForMaskedLM"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TFDebertaForMaskedLM </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFDebertaForMaskedLM"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TFDebertaForMaskedLM</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TFDebertaForMaskedLM" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFDebertaForMaskedLM"><svg class="" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/deberta/modeling_tf_deberta.py#L1150" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFDebertaForMaskedLM.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFDebertaForMaskedLM.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/deberta#transformers.DebertaConfig">DebertaConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>DeBERTa Model with a <code>language modeling</code> head on top. 
The DeBERTa model was proposed in <a href="https://arxiv.org/abs/2006.03654" rel="nofollow">DeBERTa: Decoding-enhanced BERT with Disentangled Attention</a> by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It’s built on top of BERT/RoBERTa with two improvements, i.e. disentangled attention and an enhanced mask decoder. With those two improvements, it outperforms BERT/RoBERTa on a majority of tasks with 80GB of pretraining data.</p> <p>This model is also a <a href="https://www.tensorflow.org/api_docs/python/tf/keras/Model" rel="nofollow">tf.keras.Model</a> subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and behavior.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>TF 2.0 models accept two formats as inputs:</p> <ul><li>having all inputs as keyword arguments (like PyTorch models), or</li> <li>having all inputs as a list, tuple or dict in the first positional argument.</li></ul> <p>This second option is useful when using the <code>tf.keras.Model.fit</code> method, which currently requires having all the tensors in the first argument of the model call function: <code>model(inputs)</code>.</p> <p>If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument:</p> <ul><li>a single Tensor with <code>input_ids</code> only and nothing else: <code>model(input_ids)</code></li> <li>a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: <code>model([input_ids, attention_mask])</code> or <code>model([input_ids, attention_mask, token_type_ids])</code></li> <li>a dictionary with one or several input Tensors associated with the input names given in the docstring: <code>model({&quot;input_ids&quot;: input_ids, &quot;token_type_ids&quot;: token_type_ids})</code></li></ul></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFDebertaForMaskedLM.call"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266
13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>call</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFDebertaForMaskedLM.call" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFDebertaForMaskedLM.call"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/deberta/modeling_tf_deberta.py#L1166" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black 
hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">training<span class="opacity-60">: typing.Optional[bool] = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFMaskedLMOutput" >transformers.modeling_tf_outputs.TFMaskedLMOutput</a> or <code>tuple(tf.Tensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFDebertaForMaskedLM.call.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFDebertaForMaskedLM.call.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>np.ndarray</code>, <code>tf.Tensor</code>, <code>List[tf.Tensor]</code> `<code>Dict[str, tf.Tensor]</code> or <code>Dict[str, np.ndarray]</code> and each example must have the shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/deberta#transformers.DebertaTokenizer">DebertaTokenizer</a>. 
See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFDebertaForMaskedLM.call.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFDebertaForMaskedLM.call.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFDebertaForMaskedLM.call.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFDebertaForMaskedLM.call.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFDebertaForMaskedLM.call.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFDebertaForMaskedLM.call.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFDebertaForMaskedLM.call.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFDebertaForMaskedLM.call.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFDebertaForMaskedLM.call.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFDebertaForMaskedLM.call.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFDebertaForMaskedLM.call.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFDebertaForMaskedLM.call.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFDebertaForMaskedLM.call.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFDebertaForMaskedLM.call.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <code>ModelOutput</code> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFDebertaForMaskedLM.call.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFDebertaForMaskedLM.call.labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>tf.Tensor</code> or <code>np.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss.
Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code><!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.TFDebertaForMaskedLM.call.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFMaskedLMOutput" >transformers.modeling_tf_outputs.TFMaskedLMOutput</a> or <code>tuple(tf.Tensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFMaskedLMOutput" >transformers.modeling_tf_outputs.TFMaskedLMOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/deberta#transformers.DebertaConfig" >DebertaConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(n,)</code>, <em>optional</em>, where n is the number of non-masked labels, returned when <code>labels</code> is provided) — Masked language modeling (MLM) loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) — Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/deberta#transformers.TFDebertaForMaskedLM">TFDebertaForMaskedLM</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> 
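<p>The example below shows a full masked-language-modeling forward pass. As a complementary, hedged sketch (the <code>kamalkraj/deberta-base</code> checkpoint name and the <code>[MASK]</code> prompt are simply carried over from that example, not a recommendation), the token predicted at the masked position can be read out of the returned logits roughly like this:</p> <div class="code-block relative"><pre>&gt;&gt;&gt; import tensorflow as tf
&gt;&gt;&gt; from transformers import DebertaTokenizer, TFDebertaForMaskedLM

&gt;&gt;&gt; # checkpoint name is an assumption taken from the example below
&gt;&gt;&gt; tokenizer = DebertaTokenizer.from_pretrained("kamalkraj/deberta-base")
&gt;&gt;&gt; model = TFDebertaForMaskedLM.from_pretrained("kamalkraj/deberta-base")

&gt;&gt;&gt; inputs = tokenizer("The capital of France is [MASK].", return_tensors="tf")
&gt;&gt;&gt; logits = model(**inputs).logits

&gt;&gt;&gt; # locate the [MASK] position, then take the highest-scoring vocabulary id there
&gt;&gt;&gt; mask_index = int(tf.where(inputs["input_ids"][0] == tokenizer.mask_token_id)[0, 0])
&gt;&gt;&gt; predicted_id = int(tf.math.argmax(logits[0, mask_index]))
&gt;&gt;&gt; tokenizer.decode([predicted_id])</pre></div> <p>Passing <code>labels</code>, as in the example below, additionally returns the masked language modeling loss alongside the logits.</p>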
<p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DebertaTokenizer, TFDebertaForMaskedLM <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = DebertaTokenizer.from_pretrained(<span class="hljs-string">&quot;kamalkraj/deberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFDebertaForMaskedLM.from_pretrained(<span class="hljs-string">&quot;kamalkraj/deberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;The capital of France is [MASK].&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs[<span class="hljs-string">&quot;labels&quot;</span>] = tokenizer(<span class="hljs-string">&quot;The capital of France is Paris.&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>)[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.TFDebertaForSequenceClassification" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFDebertaForSequenceClassification"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 
56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TFDebertaForSequenceClassification </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFDebertaForSequenceClassification"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TFDebertaForSequenceClassification</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TFDebertaForSequenceClassification" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFDebertaForSequenceClassification"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/deberta/modeling_tf_deberta.py#L1252" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 
ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFDebertaForSequenceClassification.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFDebertaForSequenceClassification.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/deberta#transformers.DebertaConfig">DebertaConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>DeBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.</p> <p>The DeBERTa model was proposed in <a href="https://arxiv.org/abs/2006.03654" rel="nofollow">DeBERTa: Decoding-enhanced BERT with Disentangled Attention</a> by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It’s build on top of BERT/RoBERTa with two improvements, i.e. disentangled attention and enhanced mask decoder. With those two improvements, it out perform BERT/RoBERTa on a majority of tasks with 80GB pretraining data.</p> <p>This model is also a <a href="https://www.tensorflow.org/api_docs/python/tf/keras/Model" rel="nofollow">tf.keras.Model</a> subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and behavior.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>TF 2.0 models accept two formats as inputs:</p> <ul><li>having all inputs as keyword arguments (like PyTorch models), or</li> <li>having all inputs as a list, tuple or dict in the first positional argument.</li></ul> <p>This second option is useful when using the <code>tf.keras.Model.fit</code> method, which currently requires having all the tensors in the first argument of the model call function: <code>model(inputs)</code>.</p> <p>If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument:</p> <ul><li>a single Tensor with <code>input_ids</code> only and nothing else: <code>model(input_ids)</code></li> <li>a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: <code>model([input_ids, attention_mask])</code> or <code>model([input_ids, attention_mask, token_type_ids])</code></li> <li>a dictionary with one or several input Tensors associated with the input names given in the docstring: <code>model({&quot;input_ids&quot;: input_ids, &quot;token_type_ids&quot;: token_type_ids})</code></li></ul></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFDebertaForSequenceClassification.call"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>call</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFDebertaForSequenceClassification.call" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFDebertaForSequenceClassification.call"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 
1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/deberta/modeling_tf_deberta.py#L1270" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] 
= None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">training<span class="opacity-60">: typing.Optional[bool] = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFSequenceClassifierOutput" >transformers.modeling_tf_outputs.TFSequenceClassifierOutput</a> or <code>tuple(tf.Tensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFDebertaForSequenceClassification.call.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFDebertaForSequenceClassification.call.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>np.ndarray</code>, <code>tf.Tensor</code>, <code>List[tf.Tensor]</code> `<code>Dict[str, tf.Tensor]</code> or <code>Dict[str, np.ndarray]</code> and each example must have the shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/deberta#transformers.DebertaTokenizer">DebertaTokenizer</a>. 
See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFDebertaForSequenceClassification.call.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFDebertaForSequenceClassification.call.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFDebertaForSequenceClassification.call.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFDebertaForSequenceClassification.call.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFDebertaForSequenceClassification.call.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFDebertaForSequenceClassification.call.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFDebertaForSequenceClassification.call.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFDebertaForSequenceClassification.call.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFDebertaForSequenceClassification.call.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFDebertaForSequenceClassification.call.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFDebertaForSequenceClassification.call.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFDebertaForSequenceClassification.call.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFDebertaForSequenceClassification.call.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFDebertaForSequenceClassification.call.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a [`~transformers.file_utils.ModelOutput&#x201C;] instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFDebertaForSequenceClassification.call.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFDebertaForSequenceClassification.call.labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" 
height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>tf.Tensor</code> or <code>np.ndarray</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.TFDebertaForSequenceClassification.call.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFSequenceClassifierOutput" >transformers.modeling_tf_outputs.TFSequenceClassifierOutput</a> or <code>tuple(tf.Tensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFSequenceClassifierOutput" >transformers.modeling_tf_outputs.TFSequenceClassifierOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/deberta#transformers.DebertaConfig" >DebertaConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, )</code>, <em>optional</em>, returned when <code>labels</code> is provided) — Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, config.num_labels)</code>) — Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the 
attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/deberta#transformers.TFDebertaForSequenceClassification">TFDebertaForSequenceClassification</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DebertaTokenizer, TFDebertaForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = DebertaTokenizer.from_pretrained(<span class="hljs-string">&quot;kamalkraj/deberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFDebertaForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;kamalkraj/deberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs[<span class="hljs-string">&quot;labels&quot;</span>] = tf.reshape(tf.constant(<span class="hljs-number">1</span>), (-<span class="hljs-number">1</span>, <span class="hljs-number">1</span>)) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a 
id="transformers.TFDebertaForTokenClassification" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFDebertaForTokenClassification"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TFDebertaForTokenClassification </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFDebertaForTokenClassification"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TFDebertaForTokenClassification</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TFDebertaForTokenClassification" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFDebertaForTokenClassification"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" 
href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/deberta/modeling_tf_deberta.py#L1355" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFDebertaForTokenClassification.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFDebertaForTokenClassification.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/deberta#transformers.DebertaConfig">DebertaConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>DeBERTa Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.</p> <p>The DeBERTa model was proposed in <a href="https://arxiv.org/abs/2006.03654" rel="nofollow">DeBERTa: Decoding-enhanced BERT with Disentangled Attention</a> by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It’s built on top of BERT/RoBERTa with two improvements, i.e. disentangled attention and an enhanced mask decoder. With those two improvements, it outperforms BERT/RoBERTa on a majority of tasks with 80GB pretraining data.</p> <p>This model is also a <a href="https://www.tensorflow.org/api_docs/python/tf/keras/Model" rel="nofollow">tf.keras.Model</a> subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and behavior.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>TF 2.0 models accept two formats as inputs:</p> <ul><li>having all inputs as keyword arguments (like PyTorch models), or</li> <li>having all inputs as a list, tuple or dict in the first positional argument.</li></ul> <p>This second option is useful when using the <code>tf.keras.Model.fit</code> method, which currently requires having all the tensors in the first argument of the model call function: <code>model(inputs)</code>.</p> <p>If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument:</p> <ul><li>a single Tensor with <code>input_ids</code> only and nothing else: <code>model(input_ids)</code></li> <li>a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: <code>model([input_ids, attention_mask])</code> or <code>model([input_ids, attention_mask, token_type_ids])</code></li> <li>a dictionary with one or several input Tensors associated with the input names given in the docstring: <code>model({&quot;input_ids&quot;: input_ids, &quot;token_type_ids&quot;: token_type_ids})</code></li></ul></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFDebertaForTokenClassification.call"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>call</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFDebertaForTokenClassification.call" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFDebertaForTokenClassification.call"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 
1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/deberta/modeling_tf_deberta.py#L1367" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] 
= None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">training<span class="opacity-60">: typing.Optional[bool] = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFTokenClassifierOutput" >transformers.modeling_tf_outputs.TFTokenClassifierOutput</a> or <code>tuple(tf.Tensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFDebertaForTokenClassification.call.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFDebertaForTokenClassification.call.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>np.ndarray</code>, <code>tf.Tensor</code>, <code>List[tf.Tensor]</code> `<code>Dict[str, tf.Tensor]</code> or <code>Dict[str, np.ndarray]</code> and each example must have the shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/deberta#transformers.DebertaTokenizer">DebertaTokenizer</a>. 
See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFDebertaForTokenClassification.call.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFDebertaForTokenClassification.call.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFDebertaForTokenClassification.call.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFDebertaForTokenClassification.call.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFDebertaForTokenClassification.call.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFDebertaForTokenClassification.call.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFDebertaForTokenClassification.call.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFDebertaForTokenClassification.call.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFDebertaForTokenClassification.call.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFDebertaForTokenClassification.call.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFDebertaForTokenClassification.call.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFDebertaForTokenClassification.call.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFDebertaForTokenClassification.call.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFDebertaForTokenClassification.call.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a [`~transformers.file_utils.ModelOutput&#x201C;] instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFDebertaForTokenClassification.call.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFDebertaForTokenClassification.call.labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>tf.Tensor</code> or <code>np.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the token classification loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.TFDebertaForTokenClassification.call.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFTokenClassifierOutput" >transformers.modeling_tf_outputs.TFTokenClassifierOutput</a> or <code>tuple(tf.Tensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFTokenClassifierOutput" >transformers.modeling_tf_outputs.TFTokenClassifierOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/deberta#transformers.DebertaConfig" >DebertaConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(n,)</code>, <em>optional</em>, where n is the number of unmasked labels, returned when <code>labels</code> is provided) — Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.num_labels)</code>) — Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a 
href="/docs/transformers/pr_16143/en/model_doc/deberta#transformers.TFDebertaForTokenClassification">TFDebertaForTokenClassification</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DebertaTokenizer, TFDebertaForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = DebertaTokenizer.from_pretrained(<span class="hljs-string">&quot;kamalkraj/deberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFDebertaForTokenClassification.from_pretrained(<span class="hljs-string">&quot;kamalkraj/deberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = inputs[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>inputs[<span class="hljs-string">&quot;labels&quot;</span>] = tf.reshape( <span class="hljs-meta">... 
</span> tf.constant([<span class="hljs-number">1</span>] * tf.size(input_ids).numpy()), (-<span class="hljs-number">1</span>, tf.size(input_ids)) <span class="hljs-meta">&gt;&gt;&gt; </span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.TFDebertaForQuestionAnswering" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFDebertaForQuestionAnswering"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TFDebertaForQuestionAnswering </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFDebertaForQuestionAnswering"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TFDebertaForQuestionAnswering</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TFDebertaForQuestionAnswering" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFDebertaForQuestionAnswering"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 
0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/deberta/modeling_tf_deberta.py#L1448" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFDebertaForQuestionAnswering.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFDebertaForQuestionAnswering.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/deberta#transformers.DebertaConfig">DebertaConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>DeBERTa Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layer on top of the hidden-states output to compute <code>span start logits</code> and <code>span end logits</code>).</p> <p>The DeBERTa model was proposed in <a href="https://arxiv.org/abs/2006.03654" rel="nofollow">DeBERTa: Decoding-enhanced BERT with Disentangled Attention</a> by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It’s built on top of BERT/RoBERTa with two improvements, i.e. disentangled attention and an enhanced mask decoder. With those two improvements, it outperforms BERT/RoBERTa on a majority of tasks with 80GB of pretraining data.</p> <p>This model is also a <a href="https://www.tensorflow.org/api_docs/python/tf/keras/Model" rel="nofollow">tf.keras.Model</a> subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and behavior.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>TF 2.0 models accept two formats as inputs:</p> <ul><li>having all inputs as keyword arguments (like PyTorch models), or</li> <li>having all inputs as a list, tuple or dict in the first positional argument.</li></ul> <p>This second option is useful when using the <code>tf.keras.Model.fit</code> method, which currently requires having all the tensors in the first argument of the model call function: <code>model(inputs)</code>.</p> <p>If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument (see the short example at the end of this page):</p> <ul><li>a single Tensor with <code>input_ids</code> only and nothing else: <code>model(input_ids)</code></li> <li>a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: <code>model([input_ids, attention_mask])</code> or <code>model([input_ids, attention_mask, token_type_ids])</code></li> <li>a dictionary with one or several input Tensors associated with the input names given in the docstring: <code>model({&quot;input_ids&quot;: input_ids, &quot;token_type_ids&quot;: token_type_ids})</code></li></ul></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFDebertaForQuestionAnswering.call"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 
11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>call</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFDebertaForQuestionAnswering.call" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFDebertaForQuestionAnswering.call"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/deberta/modeling_tf_deberta.py#L1459" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span 
class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">start_positions<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">end_positions<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">training<span class="opacity-60">: typing.Optional[bool] = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput" >transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput</a> or <code>tuple(tf.Tensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFDebertaForQuestionAnswering.call.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFDebertaForQuestionAnswering.call.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> 
<span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>np.ndarray</code>, <code>tf.Tensor</code>, <code>List[tf.Tensor]</code> `<code>Dict[str, tf.Tensor]</code> or <code>Dict[str, np.ndarray]</code> and each example must have the shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/deberta#transformers.DebertaTokenizer">DebertaTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFDebertaForQuestionAnswering.call.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFDebertaForQuestionAnswering.call.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFDebertaForQuestionAnswering.call.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFDebertaForQuestionAnswering.call.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFDebertaForQuestionAnswering.call.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFDebertaForQuestionAnswering.call.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFDebertaForQuestionAnswering.call.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFDebertaForQuestionAnswering.call.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFDebertaForQuestionAnswering.call.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFDebertaForQuestionAnswering.call.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFDebertaForQuestionAnswering.call.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFDebertaForQuestionAnswering.call.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFDebertaForQuestionAnswering.call.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFDebertaForQuestionAnswering.call.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a [`~transformers.file_utils.ModelOutput&#x201C;] instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFDebertaForQuestionAnswering.call.start_positions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFDebertaForQuestionAnswering.call.start_positions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>start_positions</strong> (<code>tf.Tensor</code> or <code>np.ndarray</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). Position outside of the sequence are not taken into account for computing the loss.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFDebertaForQuestionAnswering.call.end_positions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFDebertaForQuestionAnswering.call.end_positions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>end_positions</strong> (<code>tf.Tensor</code> or <code>np.ndarray</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). 
Position outside of the sequence are not taken into account for computing the loss.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.TFDebertaForQuestionAnswering.call.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput" >transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput</a> or <code>tuple(tf.Tensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput" >transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/deberta#transformers.DebertaConfig" >DebertaConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, )</code>, <em>optional</em>, returned when <code>start_positions</code> and <code>end_positions</code> are provided) — Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.</p> </li> <li> <p><strong>start_logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) — Span-start scores (before SoftMax).</p> </li> <li> <p><strong>end_logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) — Span-end scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/deberta#transformers.TFDebertaForQuestionAnswering">TFDebertaForQuestionAnswering</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the 
latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DebertaTokenizer, TFDebertaForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = DebertaTokenizer.from_pretrained(<span class="hljs-string">&quot;kamalkraj/deberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFDebertaForQuestionAnswering.from_pretrained(<span class="hljs-string">&quot;kamalkraj/deberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>question, text = <span class="hljs-string">&quot;Who was Jim Henson?&quot;</span>, <span class="hljs-string">&quot;Jim Henson was a nice puppet&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>input_dict = tokenizer(question, text, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_dict) <span class="hljs-meta">&gt;&gt;&gt; </span>start_logits = outputs.start_logits <span class="hljs-meta">&gt;&gt;&gt; </span>end_logits = outputs.end_logits <span class="hljs-meta">&gt;&gt;&gt; </span>all_tokens = tokenizer.convert_ids_to_tokens(input_dict[<span class="hljs-string">&quot;input_ids&quot;</span>].numpy()[<span class="hljs-number">0</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>answer = <span class="hljs-string">&quot; &quot;</span>.join(all_tokens[tf.math.argmax(start_logits, <span class="hljs-number">1</span>)[<span class="hljs-number">0</span>] : tf.math.argmax(end_logits, <span class="hljs-number">1</span>)[<span class="hljs-number">0</span>] + <span class="hljs-number">1</span>])<!-- HTML_TAG_END --></pre></div></div></div> <script type="module" data-hydrate="xdp5w0"> import { start } from "/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"; start({ target: document.querySelector('[data-hydrate="xdp5w0"]').parentNode, paths: {"base":"/docs/transformers/pr_16143/en","assets":"/docs/transformers/pr_16143/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ 
import("/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"), import("/docs/transformers/pr_16143/en/_app/pages/model_doc/deberta.mdx-e5952704.js") ], params: {} } }); </script>
220
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en
hf_public_repos/doc-build-dev/transformers/pr_16143/en/model_doc/convbert.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;convbert&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;overview&quot;,&quot;title&quot;:&quot;Overview&quot;},{&quot;local&quot;:&quot;transformers.ConvBertConfig&quot;,&quot;title&quot;:&quot;ConvBertConfig&quot;},{&quot;local&quot;:&quot;transformers.ConvBertTokenizer&quot;,&quot;title&quot;:&quot;ConvBertTokenizer&quot;},{&quot;local&quot;:&quot;transformers.ConvBertTokenizerFast&quot;,&quot;title&quot;:&quot;ConvBertTokenizerFast&quot;},{&quot;local&quot;:&quot;transformers.ConvBertModel&quot;,&quot;title&quot;:&quot;ConvBertModel&quot;},{&quot;local&quot;:&quot;transformers.ConvBertForMaskedLM&quot;,&quot;title&quot;:&quot;ConvBertForMaskedLM&quot;},{&quot;local&quot;:&quot;transformers.ConvBertForSequenceClassification&quot;,&quot;title&quot;:&quot;ConvBertForSequenceClassification&quot;},{&quot;local&quot;:&quot;transformers.ConvBertForMultipleChoice&quot;,&quot;title&quot;:&quot;ConvBertForMultipleChoice&quot;},{&quot;local&quot;:&quot;transformers.ConvBertForTokenClassification&quot;,&quot;title&quot;:&quot;ConvBertForTokenClassification&quot;},{&quot;local&quot;:&quot;transformers.ConvBertForQuestionAnswering&quot;,&quot;title&quot;:&quot;ConvBertForQuestionAnswering&quot;},{&quot;local&quot;:&quot;transformers.TFConvBertModel&quot;,&quot;title&quot;:&quot;TFConvBertModel&quot;},{&quot;local&quot;:&quot;transformers.TFConvBertForMaskedLM&quot;,&quot;title&quot;:&quot;TFConvBertForMaskedLM&quot;},{&quot;local&quot;:&quot;transformers.TFConvBertForSequenceClassification&quot;,&quot;title&quot;:&quot;TFConvBertForSequenceClassification&quot;},{&quot;local&quot;:&quot;transformers.TFConvBertForMultipleChoice&quot;,&quot;title&quot;:&quot;TFConvBertForMultipleChoice&quot;},{&quot;local&quot;:&quot;transformers.TFConvBertForTokenClassification&quot;,&quot;title&quot;:&quot;TFConvBertForTokenClassification&quot;},{&quot;local&quot;:&quot;transformers.TFConvBertForQuestionAnswering&quot;,&quot;title&quot;:&quot;TFConvBertForQuestionAnswering&quot;}],&quot;title&quot;:&quot;ConvBERT&quot;}" data-svelte="svelte-1phssyn"> <link rel="stylesheet" href="/docs/transformers/pr_16143/en/_app/assets/pages/__layout.svelte-a5c8879b.css"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/vendor-4833417e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/paths-4b3c6e7e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/model_doc/convbert.mdx-b107085e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Tip-fffd6df1.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Docstring-4f315ed9.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/IconCopyLink-4b81c553.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CodeBlock-6a3d1b46.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CopyButton-dacfbfaf.js"> <h1 class="relative group"><a id="convbert" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#convbert"><span><svg class="" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>ConvBERT </span></h1> <h2 class="relative group"><a id="overview" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#overview"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Overview </span></h2> <p>The ConvBERT model was proposed in <a href="https://arxiv.org/abs/2008.02496" rel="nofollow">ConvBERT: Improving BERT with Span-based Dynamic Convolution</a> by Zihang Jiang, Weihao Yu, Daquan Zhou, Yunpeng Chen, Jiashi Feng, Shuicheng Yan.</p> <p>The abstract from the paper is the following:</p> <p><em>Pre-trained language models like BERT and its variants have recently achieved impressive performance in various natural language understanding tasks. However, BERT heavily relies on the global self-attention block and thus suffers large memory footprint and computation cost. Although all its attention heads query on the whole input sequence for generating the attention map from a global perspective, we observe some heads only need to learn local dependencies, which means the existence of computation redundancy. We therefore propose a novel span-based dynamic convolution to replace these self-attention heads to directly model local dependencies. The novel convolution heads, together with the rest self-attention heads, form a new mixed attention block that is more efficient at both global and local context learning. We equip BERT with this mixed attention design and build a ConvBERT model. Experiments have shown that ConvBERT significantly outperforms BERT and its variants in various downstream tasks, with lower training cost and fewer model parameters. Remarkably, ConvBERTbase model achieves 86.4 GLUE score, 0.7 higher than ELECTRAbase, while using less than 1/4 training cost. Code and pre-trained models will be released.</em></p> <p>ConvBERT training tips are similar to those of BERT.</p> <p>This model was contributed by <a href="https://huggingface.co/abhishek" rel="nofollow">abhishek</a>. 
The original implementation can be found here: <a href="https://github.com/yitu-opensource/ConvBert" rel="nofollow">https://github.com/yitu-opensource/ConvBert</a></p> <h2 class="relative group"><a id="transformers.ConvBertConfig" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConvBertConfig"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>ConvBertConfig </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ConvBertConfig"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">ConvBertConfig</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.ConvBertConfig" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ConvBertConfig"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 
!no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/convbert/configuration_convbert.py#L31" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">vocab_size<span class="opacity-60"> = 30522</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_size<span class="opacity-60"> = 768</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">is_encoder_decoder<span class="opacity-60"> = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_hidden_layers<span class="opacity-60"> = 12</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_attention_heads<span class="opacity-60"> = 12</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">intermediate_size<span class="opacity-60"> = 3072</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_act<span class="opacity-60"> = &#39;gelu&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_dropout_prob<span class="opacity-60"> = 0.1</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_probs_dropout_prob<span class="opacity-60"> = 0.1</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_position_embeddings<span class="opacity-60"> = 512</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">type_vocab_size<span class="opacity-60"> = 2</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">initializer_range<span class="opacity-60"> = 0.02</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">layer_norm_eps<span class="opacity-60"> = 1e-12</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_token_id<span class="opacity-60"> = 1</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">bos_token_id<span class="opacity-60"> = 0</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eos_token_id<span 
class="opacity-60"> = 2</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">embedding_size<span class="opacity-60"> = 768</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_ratio<span class="opacity-60"> = 2</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">conv_kernel_size<span class="opacity-60"> = 9</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_groups<span class="opacity-60"> = 1</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">classifier_dropout<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ConvBertConfig.vocab_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConvBertConfig.vocab_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 30522) &#x2014; Vocabulary size of the ConvBERT model. 
Defines the number of different tokens that can be represented by the <code>input_ids</code> passed when calling <a href="/docs/transformers/pr_16143/en/model_doc/convbert#transformers.ConvBertModel">ConvBertModel</a> or <a href="/docs/transformers/pr_16143/en/model_doc/convbert#transformers.TFConvBertModel">TFConvBertModel</a>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ConvBertConfig.hidden_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConvBertConfig.hidden_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_size</strong> (<code>int</code>, <em>optional</em>, defaults to 768) &#x2014; Dimensionality of the encoder layers and the pooler layer.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ConvBertConfig.num_hidden_layers" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConvBertConfig.num_hidden_layers"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_hidden_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of hidden layers in the Transformer encoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ConvBertConfig.num_attention_heads" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConvBertConfig.num_attention_heads"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink"
aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ConvBertConfig.intermediate_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConvBertConfig.intermediate_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>intermediate_size</strong> (<code>int</code>, <em>optional</em>, defaults to 3072) &#x2014; Dimensionality of the &#x201C;intermediate&#x201D; (i.e., feed-forward) layer in the Transformer encoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ConvBertConfig.hidden_act" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConvBertConfig.hidden_act"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_act</strong> (<code>str</code> or 
<code>function</code>, <em>optional</em>, defaults to <code>&quot;gelu&quot;</code>) &#x2014; The non-linear activation function (function or string) in the encoder and pooler. If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;selu&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ConvBertConfig.hidden_dropout_prob" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConvBertConfig.hidden_dropout_prob"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ConvBertConfig.attention_probs_dropout_prob" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConvBertConfig.attention_probs_dropout_prob"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_probs_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout ratio for the attention probabilities.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ConvBertConfig.max_position_embeddings" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"
href="#transformers.ConvBertConfig.max_position_embeddings"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_position_embeddings</strong> (<code>int</code>, <em>optional</em>, defaults to 512) &#x2014; The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ConvBertConfig.type_vocab_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConvBertConfig.type_vocab_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>type_vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 2) &#x2014; The vocabulary size of the <code>token_type_ids</code> passed when calling <a href="/docs/transformers/pr_16143/en/model_doc/convbert#transformers.ConvBertModel">ConvBertModel</a> or <a href="/docs/transformers/pr_16143/en/model_doc/convbert#transformers.TFConvBertModel">TFConvBertModel</a>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ConvBertConfig.initializer_range" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConvBertConfig.initializer_range"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 
0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>initializer_range</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ConvBertConfig.layer_norm_eps" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConvBertConfig.layer_norm_eps"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>layer_norm_eps</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-12) &#x2014; The epsilon used by the layer normalization layers.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ConvBertConfig.head_ratio" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConvBertConfig.head_ratio"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_ratio</strong> (<code>int</code>, <em>optional</em>, defaults to 2) &#x2014; Ratio gamma to reduce the number of attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ConvBertConfig.num_groups" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 
with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConvBertConfig.num_groups"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_groups</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; The number of groups for grouped linear layers in the ConvBERT model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ConvBertConfig.conv_kernel_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConvBertConfig.conv_kernel_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>conv_kernel_size</strong> (<code>int</code>, <em>optional</em>, defaults to 9) &#x2014; The size of the convolutional kernel.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ConvBertConfig.classifier_dropout" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConvBertConfig.classifier_dropout"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z"
fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>classifier_dropout</strong> (<code>float</code>, <em>optional</em>) &#x2014; The dropout ratio for the classification head.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>This is the configuration class to store the configuration of a <a href="/docs/transformers/pr_16143/en/model_doc/convbert#transformers.ConvBertModel">ConvBertModel</a>. It is used to instantiate a ConvBERT model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the ConvBERT <a href="https://huggingface.co/YituTech/conv-bert-base" rel="nofollow">conv-bert-base</a> architecture. Configuration objects inherit from <a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a> and can be used to control the model outputs. Read the documentation from <a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a> for more information.</p> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ConvBertModel, ConvBertConfig <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a ConvBERT convbert-base-uncased style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = ConvBertConfig() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a model from the convbert-base-uncased style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = ConvBertModel(configuration) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = model.config<!-- HTML_TAG_END --></pre></div></div> <h2 class="relative group"><a id="transformers.ConvBertTokenizer" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConvBertTokenizer"><span><svg class="" xmlns="http://www.w3.org/2000/svg"
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>ConvBertTokenizer </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ConvBertTokenizer"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">ConvBertTokenizer</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.ConvBertTokenizer" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ConvBertTokenizer"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/convbert/tokenization_convbert.py#L46" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">vocab_file<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">do_lower_case<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">do_basic_tokenize<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">never_split<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">unk_token<span class="opacity-60"> = &#39;[UNK]&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">sep_token<span class="opacity-60"> = &#39;[SEP]&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_token<span class="opacity-60"> = &#39;[PAD]&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cls_token<span class="opacity-60"> = &#39;[CLS]&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">mask_token<span class="opacity-60"> = &#39;[MASK]&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tokenize_chinese_chars<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">strip_accents<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Construct a ConvBERT tokenizer. <a href="/docs/transformers/pr_16143/en/model_doc/convbert#transformers.ConvBertTokenizer">ConvBertTokenizer</a> is identical to <a href="/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a> and runs end-to-end tokenization: punctuation splitting and wordpiece. 
Refer to superclass <a href="/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a> for usage examples and documentation concerning parameters.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.BertTokenizer.build_inputs_with_special_tokens"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>build_inputs_with_special_tokens</span></h4><!-- HTML_TAG_END --> <a id="transformers.BertTokenizer.build_inputs_with_special_tokens" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.BertTokenizer.build_inputs_with_special_tokens"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/bert/tokenization_bert.py#L248" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_0<span class="opacity-60">: typing.List[int]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_1<span class="opacity-60">: 
typing.Optional[typing.List[int]] = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>List[int]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BertTokenizer.build_inputs_with_special_tokens.token_ids_0" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BertTokenizer.build_inputs_with_special_tokens.token_ids_0"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs to which the special tokens will be added.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BertTokenizer.build_inputs_with_special_tokens.token_ids_1" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BertTokenizer.build_inputs_with_special_tokens.token_ids_1"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.BertTokenizer.build_inputs_with_special_tokens.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>List[int]</code></p> <!-- HTML_TAG_END --> <span 
class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>List of <a href="../glossary#input-ids">input IDs</a> with the appropriate special tokens.</p> <!-- HTML_TAG_END --></p></div></div> <p>Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and adding special tokens. A BERT sequence has the following format:</p> <ul><li>single sequence: <code>[CLS] X [SEP]</code></li> <li>pair of sequences: <code>[CLS] A [SEP] B [SEP]</code></li></ul></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.BertTokenizer.get_special_tokens_mask"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>get_special_tokens_mask</span></h4><!-- HTML_TAG_END --> <a id="transformers.BertTokenizer.get_special_tokens_mask" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.BertTokenizer.get_special_tokens_mask"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/bert/tokenization_bert.py#L273" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma
cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_0<span class="opacity-60">: typing.List[int]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_1<span class="opacity-60">: typing.Optional[typing.List[int]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">already_has_special_tokens<span class="opacity-60">: bool = False</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>List[int]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BertTokenizer.get_special_tokens_mask.token_ids_0" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BertTokenizer.get_special_tokens_mask.token_ids_0"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BertTokenizer.get_special_tokens_mask.token_ids_1" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BertTokenizer.get_special_tokens_mask.token_ids_1"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BertTokenizer.get_special_tokens_mask.already_has_special_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BertTokenizer.get_special_tokens_mask.already_has_special_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>already_has_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the token list is already formatted with special tokens for the model.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.BertTokenizer.get_special_tokens_mask.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>List[int]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.</p> <!-- HTML_TAG_END --></p></div></div> <p>Retrieve sequence ids from a token list that has no special tokens added. 
This method is called when adding special tokens using the tokenizer <code>prepare_for_model</code> method.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.BertTokenizer.create_token_type_ids_from_sequences"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>create_token_type_ids_from_sequences</span></h4><!-- HTML_TAG_END --> <a id="transformers.BertTokenizer.create_token_type_ids_from_sequences" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.BertTokenizer.create_token_type_ids_from_sequences"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/bert/tokenization_bert.py#L301" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_0<span class="opacity-60">: typing.List[int]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_1<span class="opacity-60">: typing.Optional[typing.List[int]] = None</span></span> </span> 
<span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>List[int]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BertTokenizer.create_token_type_ids_from_sequences.token_ids_0" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BertTokenizer.create_token_type_ids_from_sequences.token_ids_0"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BertTokenizer.create_token_type_ids_from_sequences.token_ids_1" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BertTokenizer.create_token_type_ids_from_sequences.token_ids_1"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.BertTokenizer.create_token_type_ids_from_sequences.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>List[int]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p 
class="text-base"><!-- HTML_TAG_START --> <p>List of <a href="../glossary#token-type-ids">token type IDs</a> according to the given sequence(s).</p> <!-- HTML_TAG_END --></p></div></div> <p>Create a mask from the two sequences passed to be used in a sequence-pair classification task. A BERT sequence pair mask has the following format:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 1 </span>1<span class="hljs-number"> 1 </span>1<span class="hljs-number"> 1 </span>1<span class="hljs-number"> 1 </span>1 1 | first sequence | second sequence |<!-- HTML_TAG_END --></pre></div> <p>If <code>token_ids_1</code> is <code>None</code>, this method only returns the first portion of the mask (0s).</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.BertTokenizer.save_vocabulary"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 
13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>save_vocabulary</span></h4><!-- HTML_TAG_END --> <a id="transformers.BertTokenizer.save_vocabulary" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.BertTokenizer.save_vocabulary"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/bert/tokenization_bert.py#L330" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">save_directory<span class="opacity-60">: str</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">filename_prefix<span class="opacity-60">: typing.Optional[str] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div></div></div> <h2 class="relative group"><a id="transformers.ConvBertTokenizerFast" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConvBertTokenizerFast"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>ConvBertTokenizerFast </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ConvBertTokenizerFast"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 
inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">ConvBertTokenizerFast</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.ConvBertTokenizerFast" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ConvBertTokenizerFast"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/convbert/tokenization_convbert_fast.py#L47" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">vocab_file<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tokenizer_file<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">do_lower_case<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">unk_token<span class="opacity-60"> = &#39;[UNK]&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">sep_token<span class="opacity-60"> = &#39;[SEP]&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_token<span class="opacity-60"> = &#39;[PAD]&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black 
hover:text-white dark:hover:bg-white dark:hover:text-black">cls_token<span class="opacity-60"> = &#39;[CLS]&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">mask_token<span class="opacity-60"> = &#39;[MASK]&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tokenize_chinese_chars<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">strip_accents<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Construct a “fast” ConvBERT tokenizer (backed by HuggingFace’s <em>tokenizers</em> library).</p> <p><a href="/docs/transformers/pr_16143/en/model_doc/convbert#transformers.ConvBertTokenizerFast">ConvBertTokenizerFast</a> is identical to <a href="/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertTokenizerFast">BertTokenizerFast</a> and runs end-to-end tokenization: punctuation splitting and wordpiece.</p> <p>Refer to superclass <a href="/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertTokenizerFast">BertTokenizerFast</a> for usage examples and documentation concerning parameters.</p></div> <h2 class="relative group"><a id="transformers.ConvBertModel" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConvBertModel"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>ConvBertModel </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ConvBertModel"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" 
fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">ConvBertModel</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.ConvBertModel" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ConvBertModel"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/convbert/modeling_convbert.py#L766" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ConvBertModel.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConvBertModel.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a 
href="/docs/transformers/pr_16143/en/model_doc/convbert#transformers.ConvBertConfig">ConvBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>The bare ConvBERT Model transformer outputting raw hidden-states without any specific head on top. This model is a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ConvBertModel.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.ConvBertModel.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ConvBertModel.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" 
href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/convbert/modeling_convbert.py#L793" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithCrossAttentions" >transformers.modeling_outputs.BaseModelOutputWithCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ConvBertModel.forward.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConvBertModel.forward.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 
0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/convbert#transformers.ConvBertTokenizer">ConvBertTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.__call__()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ConvBertModel.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConvBertModel.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.ConvBertModel.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithCrossAttentions" >transformers.modeling_outputs.BaseModelOutputWithCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithCrossAttentions" >transformers.modeling_outputs.BaseModelOutputWithCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/convbert#transformers.ConvBertConfig" >ConvBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) — Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attention weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> and <code>config.add_cross_attention=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attention weights of the decoder’s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/convbert#transformers.ConvBertModel">ConvBertModel</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for the forward pass needs to be defined within this function, one 
should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ConvBertTokenizer, ConvBertModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = ConvBertTokenizer.from_pretrained(<span class="hljs-string">&quot;YituTech/conv-bert-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = ConvBertModel.from_pretrained(<span class="hljs-string">&quot;YituTech/conv-bert-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.ConvBertForMaskedLM" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConvBertForMaskedLM"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>ConvBertForMaskedLM </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center 
text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ConvBertForMaskedLM"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">ConvBertForMaskedLM</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.ConvBertForMaskedLM" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ConvBertForMaskedLM"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/convbert/modeling_convbert.py#L880" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ConvBertForMaskedLM.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConvBertForMaskedLM.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" 
width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/convbert#transformers.ConvBertConfig">ConvBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>ConvBERT Model with a <code>language modeling</code> head on top. This model is a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ConvBertForMaskedLM.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.ConvBertForMaskedLM.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ConvBertForMaskedLM.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 
11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/convbert/modeling_convbert.py#L897" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.MaskedLMOutput" >transformers.modeling_outputs.MaskedLMOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex 
space-x-1.5 items-start"><a id="transformers.ConvBertForMaskedLM.forward.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConvBertForMaskedLM.forward.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/convbert#transformers.ConvBertTokenizer">ConvBertTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.__call__()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ConvBertForMaskedLM.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConvBertForMaskedLM.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.ConvBertForMaskedLM.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.MaskedLMOutput" >transformers.modeling_outputs.MaskedLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.MaskedLMOutput" >transformers.modeling_outputs.MaskedLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/convbert#transformers.ConvBertConfig" >ConvBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) — Masked language modeling (MLM) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) — Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attention weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/convbert#transformers.ConvBertForMaskedLM">ConvBertForMaskedLM</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for the forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition 
duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ConvBertTokenizer, ConvBertForMaskedLM <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = ConvBertTokenizer.from_pretrained(<span class="hljs-string">&quot;YituTech/conv-bert-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = ConvBertForMaskedLM.from_pretrained(<span class="hljs-string">&quot;YituTech/conv-bert-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;The capital of France is [MASK].&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = tokenizer(<span class="hljs-string">&quot;The capital of France is Paris.&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>)[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.ConvBertForSequenceClassification" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConvBertForSequenceClassification"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>ConvBertForSequenceClassification </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r 
rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ConvBertForSequenceClassification"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">ConvBertForSequenceClassification</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.ConvBertForSequenceClassification" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ConvBertForSequenceClassification"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/convbert/modeling_convbert.py#L990" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ConvBertForSequenceClassification.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConvBertForSequenceClassification.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/convbert#transformers.ConvBertConfig">ConvBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>ConvBERT Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.</p> <p>This model is a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ConvBertForSequenceClassification.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.ConvBertForSequenceClassification.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ConvBertForSequenceClassification.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/convbert/modeling_convbert.py#L1001" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex 
items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ConvBertForSequenceClassification.forward.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConvBertForSequenceClassification.forward.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/convbert#transformers.ConvBertTokenizer">ConvBertTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ConvBertForSequenceClassification.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConvBertForSequenceClassification.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.ConvBertForSequenceClassification.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/convbert#transformers.ConvBertConfig" >ConvBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) — Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) — Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/convbert#transformers.ConvBertForSequenceClassification">ConvBertForSequenceClassification</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example of single-label classification:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex 
items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ConvBertTokenizer, ConvBertForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span>torch.manual_seed(<span class="hljs-number">0</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = ConvBertTokenizer.from_pretrained(<span class="hljs-string">&quot;YituTech/conv-bert-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = ConvBertForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;YituTech/conv-bert-base&quot;</span>, num_labels=<span class="hljs-number">2</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([<span class="hljs-number">1</span>]).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">list</span>(logits.shape) <!-- HTML_TAG_END --></pre></div> <p>Example of multi-label classification:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform 
-translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ConvBertTokenizer, ConvBertForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span>torch.manual_seed(<span class="hljs-number">0</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = ConvBertTokenizer.from_pretrained(<span class="hljs-string">&quot;YituTech/conv-bert-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = ConvBertForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;YituTech/conv-bert-base&quot;</span>, problem_type=<span class="hljs-string">&quot;multi_label_classification&quot;</span>, num_labels=<span class="hljs-number">2</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([[<span class="hljs-number">1</span>, <span class="hljs-number">1</span>]], dtype=torch.<span class="hljs-built_in">float</span>) <span class="hljs-comment"># need dtype=float for BCEWithLogitsLoss</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">list</span>(logits.shape) <!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.ConvBertForMultipleChoice" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConvBertForMultipleChoice"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>ConvBertForMultipleChoice </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ConvBertForMultipleChoice"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" 
aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">ConvBertForMultipleChoice</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.ConvBertForMultipleChoice" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ConvBertForMultipleChoice"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/convbert/modeling_convbert.py#L1086" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ConvBertForMultipleChoice.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConvBertForMultipleChoice.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 
28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/convbert#transformers.ConvBertConfig">ConvBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>ConvBERT Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.</p> <p>This model is a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ConvBertForMultipleChoice.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.ConvBertForMultipleChoice.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ConvBertForMultipleChoice.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 
11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/convbert/modeling_convbert.py#L1097" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.MultipleChoiceModelOutput" >transformers.modeling_outputs.MultipleChoiceModelOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ConvBertForMultipleChoice.forward.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.ConvBertForMultipleChoice.forward.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/convbert#transformers.ConvBertTokenizer">ConvBertTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ConvBertForMultipleChoice.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConvBertForMultipleChoice.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.ConvBertForMultipleChoice.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.MultipleChoiceModelOutput" >transformers.modeling_outputs.MultipleChoiceModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.MultipleChoiceModelOutput" >transformers.modeling_outputs.MultipleChoiceModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/convbert#transformers.ConvBertConfig" >ConvBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <em>(1,)</em>, <em>optional</em>, returned when <code>labels</code> is provided) — Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices)</code>) — <em>num_choices</em> is the second dimension of the input tensors. (see <em>input_ids</em> above).</p> <p>Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/convbert#transformers.ConvBertForMultipleChoice">ConvBertForMultipleChoice</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm 
focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ConvBertTokenizer, ConvBertForMultipleChoice <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = ConvBertTokenizer.from_pretrained(<span class="hljs-string">&quot;YituTech/conv-bert-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = ConvBertForMultipleChoice.from_pretrained(<span class="hljs-string">&quot;YituTech/conv-bert-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice0 = <span class="hljs-string">&quot;It is eaten with a fork and a knife.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice1 = <span class="hljs-string">&quot;It is eaten while held in the hand.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor(<span class="hljs-number">0</span>).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># choice0 is correct (according to Wikipedia ;)), batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors=<span class="hljs-string">&quot;pt&quot;</span>, padding=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**{k: v.unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-keyword">for</span> k, v <span class="hljs-keyword">in</span> encoding.items()}, labels=labels) <span class="hljs-comment"># batch size is 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># the linear classifier still needs to be trained</span> <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.ConvBertForTokenClassification" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConvBertForTokenClassification"><span><svg class="" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>ConvBertForTokenClassification </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ConvBertForTokenClassification"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">ConvBertForTokenClassification</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.ConvBertForTokenClassification" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ConvBertForTokenClassification"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/convbert/modeling_convbert.py#L1180" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span 
class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ConvBertForTokenClassification.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConvBertForTokenClassification.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/convbert#transformers.ConvBertConfig">ConvBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>ConvBERT Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.</p> <p>This model is a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ConvBertForTokenClassification.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.ConvBertForTokenClassification.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ConvBertForTokenClassification.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/convbert/modeling_convbert.py#L1195" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white 
dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ConvBertForTokenClassification.forward.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConvBertForTokenClassification.forward.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a 
href="/docs/transformers/pr_16143/en/model_doc/convbert#transformers.ConvBertTokenizer">ConvBertTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ConvBertForTokenClassification.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConvBertForTokenClassification.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.ConvBertForTokenClassification.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/convbert#transformers.ConvBertConfig" >ConvBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) — Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.num_labels)</code>) — Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/convbert#transformers.ConvBertForTokenClassification">ConvBertForTokenClassification</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out 
opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ConvBertTokenizer, ConvBertForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = ConvBertTokenizer.from_pretrained(<span class="hljs-string">&quot;YituTech/conv-bert-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = ConvBertForTokenClassification.from_pretrained(<span class="hljs-string">&quot;YituTech/conv-bert-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([<span class="hljs-number">1</span>] * inputs[<span class="hljs-string">&quot;input_ids&quot;</span>].size(<span class="hljs-number">1</span>)).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.ConvBertForQuestionAnswering" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConvBertForQuestionAnswering"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>ConvBertForQuestionAnswering </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 
bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ConvBertForQuestionAnswering"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">ConvBertForQuestionAnswering</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.ConvBertForQuestionAnswering" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ConvBertForQuestionAnswering"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/convbert/modeling_convbert.py#L1262" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ConvBertForQuestionAnswering.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConvBertForQuestionAnswering.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/convbert#transformers.ConvBertConfig">ConvBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>ConvBERT Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute <code>span start logits</code> and <code>span end logits</code>).</p> <p>This model is a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ConvBertForQuestionAnswering.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.ConvBertForQuestionAnswering.forward" class="header-link invisible with-hover:group-hover:visible pr-2" 
href="#transformers.ConvBertForQuestionAnswering.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/convbert/modeling_convbert.py#L1273" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">start_positions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">end_positions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a 
href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.QuestionAnsweringModelOutput" >transformers.modeling_outputs.QuestionAnsweringModelOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ConvBertForQuestionAnswering.forward.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConvBertForQuestionAnswering.forward.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/convbert#transformers.ConvBertTokenizer">ConvBertTokenizer</a>. 
See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ConvBertForQuestionAnswering.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConvBertForQuestionAnswering.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.ConvBertForQuestionAnswering.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.QuestionAnsweringModelOutput" >transformers.modeling_outputs.QuestionAnsweringModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.QuestionAnsweringModelOutput" >transformers.modeling_outputs.QuestionAnsweringModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/convbert#transformers.ConvBertConfig" >ConvBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) — Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.</p> </li> <li> <p><strong>start_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) — Span-start scores (before SoftMax).</p> </li> <li> <p><strong>end_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) — Span-end scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/convbert#transformers.ConvBertForQuestionAnswering">ConvBertForQuestionAnswering</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores 
them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ConvBertTokenizer, ConvBertForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>torch.manual_seed(<span class="hljs-number">0</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = ConvBertTokenizer.from_pretrained(<span class="hljs-string">&quot;YituTech/conv-bert-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = ConvBertForQuestionAnswering.from_pretrained(<span class="hljs-string">&quot;YituTech/conv-bert-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>question, text = <span class="hljs-string">&quot;Who was Jim Henson?&quot;</span>, <span class="hljs-string">&quot;Jim Henson was a nice puppet&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(question, text, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>start_positions = torch.tensor([<span class="hljs-number">1</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>end_positions = torch.tensor([<span class="hljs-number">3</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, start_positions=start_positions, end_positions=end_positions) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">round</span>(loss.item(), <span class="hljs-number">2</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>start_scores = outputs.start_logits <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">list</span>(start_scores.shape) <span class="hljs-meta">&gt;&gt;&gt; </span>end_scores = outputs.end_logits <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">list</span>(end_scores.shape) <!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.TFConvBertModel" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFConvBertModel"><span><svg class="" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TFConvBertModel </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFConvBertModel"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TFConvBertModel</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TFConvBertModel" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFConvBertModel"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/convbert/modeling_tf_convbert.py#L747" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black 
hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFConvBertModel.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFConvBertModel.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/convbert#transformers.ConvBertConfig">ConvBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>The bare ConvBERT Model transformer outputting raw hidden-states without any specific head on top.</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads etc.)</p> <p>This model is also a <a href="https://www.tensorflow.org/api_docs/python/tf/keras/Model" rel="nofollow">tf.keras.Model</a> subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and behavior.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>TF 2.0 models accept two formats as inputs:</p> <ul><li>having all inputs as keyword arguments (like PyTorch models), or</li> <li>having all inputs as a list, tuple or dict in the first positional argument.</li></ul> <p>This second option is useful when using the <code>tf.keras.Model.fit</code> method, which currently requires having all the tensors in the first argument of the model call function: <code>model(inputs)</code>.</p> <p>If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument, as shown in the example below:</p> <ul><li>a single Tensor with <code>input_ids</code> only and nothing else: <code>model(input_ids)</code></li> <li>a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: <code>model([input_ids, attention_mask])</code> or <code>model([input_ids, attention_mask, token_type_ids])</code></li> <li>a dictionary with one or several input Tensors associated with the input names given in the docstring: <code>model({&quot;input_ids&quot;: input_ids, &quot;token_type_ids&quot;: token_type_ids})</code></li></ul></div>
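<p>Example (a minimal sketch, not part of the upstream docstrings, illustrating the three input formats described in the note above; it assumes TensorFlow weights are available for the <code>YituTech/conv-bert-base</code> checkpoint used elsewhere on this page):</p> <div class="code-block relative"><pre>&gt;&gt;&gt; from transformers import ConvBertTokenizer, TFConvBertModel

&gt;&gt;&gt; tokenizer = ConvBertTokenizer.from_pretrained(&quot;YituTech/conv-bert-base&quot;)
&gt;&gt;&gt; # hypothetical: assumes TF weights exist for this checkpoint; otherwise add from_pt=True
&gt;&gt;&gt; model = TFConvBertModel.from_pretrained(&quot;YituTech/conv-bert-base&quot;)

&gt;&gt;&gt; # tokenize once; return_tensors=&quot;tf&quot; yields tf.Tensor inputs
&gt;&gt;&gt; encoded = tokenizer(&quot;Hello, my dog is cute&quot;, return_tensors=&quot;tf&quot;)

&gt;&gt;&gt; # 1. all inputs as keyword arguments (like PyTorch models)
&gt;&gt;&gt; outputs = model(input_ids=encoded[&quot;input_ids&quot;], attention_mask=encoded[&quot;attention_mask&quot;])

&gt;&gt;&gt; # 2. all inputs as a list/tuple in the first positional argument, in the documented order
&gt;&gt;&gt; outputs = model([encoded[&quot;input_ids&quot;], encoded[&quot;attention_mask&quot;]])

&gt;&gt;&gt; # 3. all inputs as a dictionary keyed by input name
&gt;&gt;&gt; outputs = model({&quot;input_ids&quot;: encoded[&quot;input_ids&quot;], &quot;attention_mask&quot;: encoded[&quot;attention_mask&quot;]})

&gt;&gt;&gt; last_hidden_state = outputs.last_hidden_state</pre></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFConvBertModel.call"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>call</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFConvBertModel.call" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFConvBertModel.call"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 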
0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/convbert/modeling_tf_convbert.py#L753" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">training<span class="opacity-60"> = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFBaseModelOutput" >transformers.modeling_tf_outputs.TFBaseModelOutput</a> or <code>tuple(tf.Tensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 
dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFConvBertModel.call.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFConvBertModel.call.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/convbert#transformers.ConvBertTokenizer">ConvBertTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFConvBertModel.call.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFConvBertModel.call.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFConvBertModel.call.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFConvBertModel.call.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFConvBertModel.call.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFConvBertModel.call.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFConvBertModel.call.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFConvBertModel.call.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFConvBertModel.call.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFConvBertModel.call.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFConvBertModel.call.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFConvBertModel.call.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFConvBertModel.call.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFConvBertModel.call.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFConvBertModel.call.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFConvBertModel.call.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFConvBertModel.call.training" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFConvBertModel.call.training"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.TFConvBertModel.call.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFBaseModelOutput" >transformers.modeling_tf_outputs.TFBaseModelOutput</a> or <code>tuple(tf.Tensor)</code></p> 
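<p class="text-base">For instance (a minimal sketch, assuming <code>model</code> and <code>inputs</code> are built as in the example below), the optional <code>hidden_states</code> and <code>attentions</code> fields of the returned output can be requested and inspected like this:</p> <pre><span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs, output_hidden_states=True, output_attentions=True)  # flags documented above (eager mode)
<span class="hljs-meta">&gt;&gt;&gt; </span>len(outputs.hidden_states)  # embedding output + one entry per layer
<span class="hljs-meta">&gt;&gt;&gt; </span>outputs.attentions[0].shape  # (batch_size, num_heads, sequence_length, sequence_length)</pre>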
<!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFBaseModelOutput" >transformers.modeling_tf_outputs.TFBaseModelOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/convbert#transformers.ConvBertConfig" >ConvBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) — Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/convbert#transformers.TFConvBertModel">TFConvBertModel</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute 
bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ConvBertTokenizer, TFConvBertModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = ConvBertTokenizer.from_pretrained(<span class="hljs-string">&quot;YituTech/conv-bert-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFConvBertModel.from_pretrained(<span class="hljs-string">&quot;YituTech/conv-bert-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.TFConvBertForMaskedLM" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFConvBertForMaskedLM"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TFConvBertForMaskedLM </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFConvBertForMaskedLM"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" 
fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TFConvBertForMaskedLM</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TFConvBertForMaskedLM" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFConvBertForMaskedLM"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/convbert/modeling_tf_convbert.py#L864" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFConvBertForMaskedLM.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFConvBertForMaskedLM.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/convbert#transformers.ConvBertConfig">ConvBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. 
Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>ConvBERT Model with a <code>language modeling</code> head on top.</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads, etc.)</p> <p>This model is also a <a href="https://www.tensorflow.org/api_docs/python/tf/keras/Model" rel="nofollow">tf.keras.Model</a> subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and behavior.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>TF 2.0 models accept two formats as inputs:</p> <ul><li>having all inputs as keyword arguments (like PyTorch models), or</li> <li>having all inputs as a list, tuple or dict in the first positional argument.</li></ul> <p>This second option is useful when using the <code>tf.keras.Model.fit</code> method, which currently requires having all the tensors in the first argument of the model call function: <code>model(inputs)</code>.</p> <p>If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument:</p> <ul><li>a single Tensor with <code>input_ids</code> only and nothing else: <code>model(input_ids)</code></li> <li>a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: <code>model([input_ids, attention_mask])</code> or <code>model([input_ids, attention_mask, token_type_ids])</code></li> <li>a dictionary with one or several input Tensors associated with the input names given in the docstring: <code>model({&quot;input_ids&quot;: input_ids, &quot;token_type_ids&quot;: token_type_ids})</code></li></ul></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFConvBertForMaskedLM.call"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 
11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>call</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFConvBertForMaskedLM.call" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFConvBertForMaskedLM.call"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/convbert/modeling_tf_convbert.py#L885" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span><span class="comma 
cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">training<span class="opacity-60"> = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFMaskedLMOutput" >transformers.modeling_tf_outputs.TFMaskedLMOutput</a> or <code>tuple(tf.Tensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFConvBertForMaskedLM.call.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFConvBertForMaskedLM.call.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/convbert#transformers.ConvBertTokenizer">ConvBertTokenizer</a>. 
See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFConvBertForMaskedLM.call.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFConvBertForMaskedLM.call.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFConvBertForMaskedLM.call.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFConvBertForMaskedLM.call.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFConvBertForMaskedLM.call.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFConvBertForMaskedLM.call.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFConvBertForMaskedLM.call.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFConvBertForMaskedLM.call.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFConvBertForMaskedLM.call.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFConvBertForMaskedLM.call.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFConvBertForMaskedLM.call.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFConvBertForMaskedLM.call.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFConvBertForMaskedLM.call.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFConvBertForMaskedLM.call.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFConvBertForMaskedLM.call.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFConvBertForMaskedLM.call.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFConvBertForMaskedLM.call.training" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFConvBertForMaskedLM.call.training"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFConvBertForMaskedLM.call.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFConvBertForMaskedLM.call.labels"><span><svg 
class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see the <code>input_ids</code> docstring). Tokens with indices set to <code>-100</code> are ignored (masked); the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.TFConvBertForMaskedLM.call.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFMaskedLMOutput" >transformers.modeling_tf_outputs.TFMaskedLMOutput</a> or <code>tuple(tf.Tensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFMaskedLMOutput" >transformers.modeling_tf_outputs.TFMaskedLMOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/convbert#transformers.ConvBertConfig" >ConvBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(n,)</code>, <em>optional</em>, where n is the number of non-masked labels, returned when <code>labels</code> is provided) — Masked language modeling (MLM) loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) — Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>tf.Tensor</code> (one 
for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/convbert#transformers.TFConvBertForMaskedLM">TFConvBertForMaskedLM</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ConvBertTokenizer, TFConvBertForMaskedLM <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = ConvBertTokenizer.from_pretrained(<span class="hljs-string">&quot;YituTech/conv-bert-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFConvBertForMaskedLM.from_pretrained(<span class="hljs-string">&quot;YituTech/conv-bert-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;The capital of France is [MASK].&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs[<span class="hljs-string">&quot;labels&quot;</span>] = tokenizer(<span class="hljs-string">&quot;The capital of France is Paris.&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>)[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = 
outputs.logits<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.TFConvBertForSequenceClassification" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFConvBertForSequenceClassification"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TFConvBertForSequenceClassification </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFConvBertForSequenceClassification"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TFConvBertForSequenceClassification</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TFConvBertForSequenceClassification" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFConvBertForSequenceClassification"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a 
class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/convbert/modeling_tf_convbert.py#L1002" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFConvBertForSequenceClassification.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFConvBertForSequenceClassification.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/convbert#transformers.ConvBertConfig">ConvBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>ConvBERT Model transformer with a sequence classification/regression head on top, e.g., for GLUE tasks.</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads, etc.)</p> <p>This model is also a <a href="https://www.tensorflow.org/api_docs/python/tf/keras/Model" rel="nofollow">tf.keras.Model</a> subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and behavior.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>TF 2.0 models accept two formats as inputs:</p> <ul><li>having all inputs as keyword arguments (like PyTorch models), or</li> <li>having all inputs as a list, tuple or dict in the first positional argument.</li></ul> <p>This second option is useful when using the <code>tf.keras.Model.fit</code> method, which currently requires having all the tensors in the first argument of the model call function: <code>model(inputs)</code>.</p> <p>If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument:</p> <ul><li>a single Tensor with <code>input_ids</code> only and nothing else: <code>model(input_ids)</code></li> <li>a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: <code>model([input_ids, attention_mask])</code> or <code>model([input_ids, attention_mask, token_type_ids])</code></li> <li>a dictionary with one or several input Tensors associated with the input names given in the docstring: <code>model({&quot;input_ids&quot;: input_ids, &quot;token_type_ids&quot;: token_type_ids})</code></li></ul></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFConvBertForSequenceClassification.call"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>call</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFConvBertForSequenceClassification.call" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFConvBertForSequenceClassification.call"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 
0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/convbert/modeling_tf_convbert.py#L1009" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">training<span class="opacity-60"> = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFSequenceClassifierOutput" 
>transformers.modeling_tf_outputs.TFSequenceClassifierOutput</a> or <code>tuple(tf.Tensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFConvBertForSequenceClassification.call.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFConvBertForSequenceClassification.call.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/convbert#transformers.ConvBertTokenizer">ConvBertTokenizer</a>. 
See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFConvBertForSequenceClassification.call.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFConvBertForSequenceClassification.call.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFConvBertForSequenceClassification.call.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFConvBertForSequenceClassification.call.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFConvBertForSequenceClassification.call.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFConvBertForSequenceClassification.call.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFConvBertForSequenceClassification.call.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFConvBertForSequenceClassification.call.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFConvBertForSequenceClassification.call.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFConvBertForSequenceClassification.call.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFConvBertForSequenceClassification.call.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFConvBertForSequenceClassification.call.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFConvBertForSequenceClassification.call.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFConvBertForSequenceClassification.call.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFConvBertForSequenceClassification.call.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFConvBertForSequenceClassification.call.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFConvBertForSequenceClassification.call.training" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFConvBertForSequenceClassification.call.training"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFConvBertForSequenceClassification.call.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.TFConvBertForSequenceClassification.call.labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>tf.Tensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.TFConvBertForSequenceClassification.call.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFSequenceClassifierOutput" >transformers.modeling_tf_outputs.TFSequenceClassifierOutput</a> or <code>tuple(tf.Tensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFSequenceClassifierOutput" >transformers.modeling_tf_outputs.TFSequenceClassifierOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/convbert#transformers.ConvBertConfig" >ConvBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, )</code>, <em>optional</em>, returned when <code>labels</code> is provided) — Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, config.num_labels)</code>) — Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when 
<code>config.output_attentions=True</code>) — Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/convbert#transformers.TFConvBertForSequenceClassification">TFConvBertForSequenceClassification</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ConvBertTokenizer, TFConvBertForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = ConvBertTokenizer.from_pretrained(<span class="hljs-string">&quot;YituTech/conv-bert-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFConvBertForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;YituTech/conv-bert-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs[<span class="hljs-string">&quot;labels&quot;</span>] = tf.reshape(tf.constant(<span class="hljs-number">1</span>), (-<span class="hljs-number">1</span>, <span class="hljs-number">1</span>)) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span 
class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.TFConvBertForMultipleChoice" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFConvBertForMultipleChoice"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TFConvBertForMultipleChoice </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFConvBertForMultipleChoice"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TFConvBertForMultipleChoice</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TFConvBertForMultipleChoice" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFConvBertForMultipleChoice"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 
0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/convbert/modeling_tf_convbert.py#L1094" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFConvBertForMultipleChoice.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFConvBertForMultipleChoice.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/convbert#transformers.ConvBertConfig">ConvBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>ConvBERT Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)</p> <p>This model is also a <a href="https://www.tensorflow.org/api_docs/python/tf/keras/Model" rel="nofollow">tf.keras.Model</a> subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and behavior.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>TF 2.0 models accept two formats as inputs:</p> <ul><li>having all inputs as keyword arguments (like PyTorch models), or</li> <li>having all inputs as a list, tuple or dict in the first positional argument.</li></ul> <p>This second option is useful when using the <code>tf.keras.Model.fit</code> method, which currently requires having all the tensors in the first argument of the model call function: <code>model(inputs)</code>.</p> <p>If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument:</p> <ul><li>a single Tensor with <code>input_ids</code> only and nothing else: <code>model(input_ids)</code></li> <li>a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: <code>model([input_ids, attention_mask])</code> or <code>model([input_ids, attention_mask, token_type_ids])</code></li> <li>a dictionary with one or several input Tensors associated with the input names given in the docstring: <code>model({&quot;input_ids&quot;: input_ids, &quot;token_type_ids&quot;: token_type_ids})</code></li></ul></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFConvBertForMultipleChoice.call"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>call</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFConvBertForMultipleChoice.call" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFConvBertForMultipleChoice.call"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 
1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/convbert/modeling_tf_convbert.py#L1116" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">training<span class="opacity-60"> = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput" 
>transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput</a> or <code>tuple(tf.Tensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFConvBertForMultipleChoice.call.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFConvBertForMultipleChoice.call.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/convbert#transformers.ConvBertTokenizer">ConvBertTokenizer</a>. 
See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFConvBertForMultipleChoice.call.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFConvBertForMultipleChoice.call.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFConvBertForMultipleChoice.call.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFConvBertForMultipleChoice.call.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFConvBertForMultipleChoice.call.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFConvBertForMultipleChoice.call.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFConvBertForMultipleChoice.call.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFConvBertForMultipleChoice.call.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFConvBertForMultipleChoice.call.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFConvBertForMultipleChoice.call.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, num_choices, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFConvBertForMultipleChoice.call.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFConvBertForMultipleChoice.call.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFConvBertForMultipleChoice.call.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFConvBertForMultipleChoice.call.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFConvBertForMultipleChoice.call.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFConvBertForMultipleChoice.call.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFConvBertForMultipleChoice.call.training" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFConvBertForMultipleChoice.call.training"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFConvBertForMultipleChoice.call.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.TFConvBertForMultipleChoice.call.labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>tf.Tensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the multiple choice classification loss. Indices should be in <code>[0, ..., num_choices]</code> where <code>num_choices</code> is the size of the second dimension of the input tensors. (See <code>input_ids</code> above)<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.TFConvBertForMultipleChoice.call.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput" >transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput</a> or <code>tuple(tf.Tensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput" >transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/convbert#transformers.ConvBertConfig" >ConvBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <em>(batch_size, )</em>, <em>optional</em>, returned when <code>labels</code> is provided) — Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, num_choices)</code>) — <em>num_choices</em> is the second dimension of the input tensors. 
(see <em>input_ids</em> above).</p> <p>Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/convbert#transformers.TFConvBertForMultipleChoice">TFConvBertForMultipleChoice</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ConvBertTokenizer, TFConvBertForMultipleChoice <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = ConvBertTokenizer.from_pretrained(<span class="hljs-string">&quot;YituTech/conv-bert-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; 
</span>model = TFConvBertForMultipleChoice.from_pretrained(<span class="hljs-string">&quot;YituTech/conv-bert-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice0 = <span class="hljs-string">&quot;It is eaten with a fork and a knife.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice1 = <span class="hljs-string">&quot;It is eaten while held in the hand.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors=<span class="hljs-string">&quot;tf&quot;</span>, padding=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = {k: tf.expand_dims(v, <span class="hljs-number">0</span>) <span class="hljs-keyword">for</span> k, v <span class="hljs-keyword">in</span> encoding.items()} <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-comment"># batch size is 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># the linear classifier still needs to be trained</span> <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.TFConvBertForTokenClassification" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFConvBertForTokenClassification"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TFConvBertForTokenClassification </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFConvBertForTokenClassification"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" 
fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TFConvBertForTokenClassification</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TFConvBertForTokenClassification" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFConvBertForTokenClassification"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/convbert/modeling_tf_convbert.py#L1241" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFConvBertForTokenClassification.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFConvBertForTokenClassification.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START 
--><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/convbert#transformers.ConvBertConfig">ConvBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>ConvBERT Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)</p> <p>This model is also a <a href="https://www.tensorflow.org/api_docs/python/tf/keras/Model" rel="nofollow">tf.keras.Model</a> subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>TF 2.0 models accepts two formats as inputs:</p> <ul><li>having all inputs as keyword arguments (like PyTorch models), or</li> <li>having all inputs as a list, tuple or dict in the first positional arguments.</li></ul> <p>This second option is useful when using <code>tf.keras.Model.fit</code> method which currently requires having all the tensors in the first argument of the model call function: <code>model(inputs)</code>.</p> <p>If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :</p> <ul><li>a single Tensor with <code>input_ids</code> only and nothing else: <code>model(inputs_ids)</code></li> <li>a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: <code>model([input_ids, attention_mask])</code> or <code>model([input_ids, attention_mask, token_type_ids])</code></li> <li>a dictionary with one or several input Tensors associated to the input names given in the docstring: <code>model({&quot;input_ids&quot;: input_ids, &quot;token_type_ids&quot;: token_type_ids})</code></li></ul></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFConvBertForTokenClassification.call"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path 
fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>call</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFConvBertForTokenClassification.call" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFConvBertForTokenClassification.call"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/convbert/modeling_tf_convbert.py#L1255" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">training<span class="opacity-60"> = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFTokenClassifierOutput" >transformers.modeling_tf_outputs.TFTokenClassifierOutput</a> or <code>tuple(tf.Tensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFConvBertForTokenClassification.call.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFConvBertForTokenClassification.call.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/convbert#transformers.ConvBertTokenizer">ConvBertTokenizer</a>. 
See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFConvBertForTokenClassification.call.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFConvBertForTokenClassification.call.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFConvBertForTokenClassification.call.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFConvBertForTokenClassification.call.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFConvBertForTokenClassification.call.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFConvBertForTokenClassification.call.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFConvBertForTokenClassification.call.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFConvBertForTokenClassification.call.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFConvBertForTokenClassification.call.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFConvBertForTokenClassification.call.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFConvBertForTokenClassification.call.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFConvBertForTokenClassification.call.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFConvBertForTokenClassification.call.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFConvBertForTokenClassification.call.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFConvBertForTokenClassification.call.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFConvBertForTokenClassification.call.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFConvBertForTokenClassification.call.training" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFConvBertForTokenClassification.call.training"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFConvBertForTokenClassification.call.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.TFConvBertForTokenClassification.call.labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the token classification loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.TFConvBertForTokenClassification.call.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFTokenClassifierOutput" >transformers.modeling_tf_outputs.TFTokenClassifierOutput</a> or <code>tuple(tf.Tensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFTokenClassifierOutput" >transformers.modeling_tf_outputs.TFTokenClassifierOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/convbert#transformers.ConvBertConfig" >ConvBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(n,)</code>, <em>optional</em>, where n is the number of unmasked labels, returned when <code>labels</code> is provided) — Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.num_labels)</code>) — Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute 
the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/convbert#transformers.TFConvBertForTokenClassification">TFConvBertForTokenClassification</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ConvBertTokenizer, TFConvBertForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = ConvBertTokenizer.from_pretrained(<span class="hljs-string">&quot;YituTech/conv-bert-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFConvBertForTokenClassification.from_pretrained(<span class="hljs-string">&quot;YituTech/conv-bert-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = inputs[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>inputs[<span class="hljs-string">&quot;labels&quot;</span>] = tf.reshape( <span class="hljs-meta">... 
</span> tf.constant([<span class="hljs-number">1</span>] * tf.size(input_ids).numpy()), (-<span class="hljs-number">1</span>, tf.size(input_ids)) <span class="hljs-meta">&gt;&gt;&gt; </span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.TFConvBertForQuestionAnswering" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFConvBertForQuestionAnswering"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TFConvBertForQuestionAnswering </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFConvBertForQuestionAnswering"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TFConvBertForQuestionAnswering</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TFConvBertForQuestionAnswering" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFConvBertForQuestionAnswering"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 
28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/convbert/modeling_tf_convbert.py#L1339" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFConvBertForQuestionAnswering.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFConvBertForQuestionAnswering.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/convbert#transformers.ConvBertConfig">ConvBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>ConvBERT Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layer on top of the hidden-states output to compute <code>span start logits</code> and <code>span end logits</code>).</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>. 
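</p> <p>A minimal usage sketch for the span head, assuming the base <code>&quot;YituTech/conv-bert-base&quot;</code> checkpoint (whose question-answering head is randomly initialized, so the extracted span is only meaningful after fine-tuning) and an illustrative question/context pair:</p> <pre>&gt;&gt;&gt; from transformers import ConvBertTokenizer, TFConvBertForQuestionAnswering
&gt;&gt;&gt; import tensorflow as tf

&gt;&gt;&gt; tokenizer = ConvBertTokenizer.from_pretrained(&quot;YituTech/conv-bert-base&quot;)
&gt;&gt;&gt; model = TFConvBertForQuestionAnswering.from_pretrained(&quot;YituTech/conv-bert-base&quot;)

&gt;&gt;&gt; question, context = &quot;Who was Jim Henson?&quot;, &quot;Jim Henson was a nice puppet&quot;
&gt;&gt;&gt; inputs = tokenizer(question, context, return_tensors=&quot;tf&quot;)
&gt;&gt;&gt; outputs = model(inputs)

&gt;&gt;&gt; # pick the most likely start/end token indices for the answer span
&gt;&gt;&gt; start_index = int(tf.math.argmax(outputs.start_logits, axis=-1)[0])
&gt;&gt;&gt; end_index = int(tf.math.argmax(outputs.end_logits, axis=-1)[0])
&gt;&gt;&gt; answer = tokenizer.decode(inputs[&quot;input_ids&quot;][0, start_index : end_index + 1])</pre> <p>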
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)</p> <p>This model is also a <a href="https://www.tensorflow.org/api_docs/python/tf/keras/Model" rel="nofollow">tf.keras.Model</a> subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>TF 2.0 models accepts two formats as inputs:</p> <ul><li>having all inputs as keyword arguments (like PyTorch models), or</li> <li>having all inputs as a list, tuple or dict in the first positional arguments.</li></ul> <p>This second option is useful when using <code>tf.keras.Model.fit</code> method which currently requires having all the tensors in the first argument of the model call function: <code>model(inputs)</code>.</p> <p>If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :</p> <ul><li>a single Tensor with <code>input_ids</code> only and nothing else: <code>model(inputs_ids)</code></li> <li>a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: <code>model([input_ids, attention_mask])</code> or <code>model([input_ids, attention_mask, token_type_ids])</code></li> <li>a dictionary with one or several input Tensors associated to the input names given in the docstring: <code>model({&quot;input_ids&quot;: input_ids, &quot;token_type_ids&quot;: token_type_ids})</code></li></ul></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFConvBertForQuestionAnswering.call"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>call</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFConvBertForQuestionAnswering.call" class="header-link invisible with-hover:group-hover:visible pr-2" 
href="#transformers.TFConvBertForQuestionAnswering.call"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/convbert/modeling_tf_convbert.py#L1349" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">start_positions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">end_positions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">training<span 
class="opacity-60"> = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput" >transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput</a> or <code>tuple(tf.Tensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFConvBertForQuestionAnswering.call.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFConvBertForQuestionAnswering.call.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/convbert#transformers.ConvBertTokenizer">ConvBertTokenizer</a>. 
See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFConvBertForQuestionAnswering.call.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFConvBertForQuestionAnswering.call.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFConvBertForQuestionAnswering.call.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFConvBertForQuestionAnswering.call.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFConvBertForQuestionAnswering.call.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFConvBertForQuestionAnswering.call.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFConvBertForQuestionAnswering.call.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFConvBertForQuestionAnswering.call.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFConvBertForQuestionAnswering.call.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFConvBertForQuestionAnswering.call.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFConvBertForQuestionAnswering.call.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFConvBertForQuestionAnswering.call.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFConvBertForQuestionAnswering.call.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFConvBertForQuestionAnswering.call.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFConvBertForQuestionAnswering.call.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFConvBertForQuestionAnswering.call.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFConvBertForQuestionAnswering.call.training" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFConvBertForQuestionAnswering.call.training"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFConvBertForQuestionAnswering.call.start_positions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.TFConvBertForQuestionAnswering.call.start_positions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>start_positions</strong> (<code>tf.Tensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). Position outside of the sequence are not taken into account for computing the loss.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFConvBertForQuestionAnswering.call.end_positions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFConvBertForQuestionAnswering.call.end_positions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>end_positions</strong> (<code>tf.Tensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). 
Positions outside of the sequence are not taken into account for computing the loss.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.TFConvBertForQuestionAnswering.call.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput" >transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput</a> or <code>tuple(tf.Tensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput" >transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/convbert#transformers.ConvBertConfig" >ConvBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, )</code>, <em>optional</em>, returned when <code>start_positions</code> and <code>end_positions</code> are provided) — Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.</p> </li> <li> <p><strong>start_logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) — Span-start scores (before SoftMax).</p> </li> <li> <p><strong>end_logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) — Span-end scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attention weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/convbert#transformers.TFConvBertForQuestionAnswering">TFConvBertForQuestionAnswering</a> forward method overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for the forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this, since the former takes care of running the pre- and post-processing steps 
while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ConvBertTokenizer, TFConvBertForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = ConvBertTokenizer.from_pretrained(<span class="hljs-string">&quot;YituTech/conv-bert-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFConvBertForQuestionAnswering.from_pretrained(<span class="hljs-string">&quot;YituTech/conv-bert-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>question, text = <span class="hljs-string">&quot;Who was Jim Henson?&quot;</span>, <span class="hljs-string">&quot;Jim Henson was a nice puppet&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>input_dict = tokenizer(question, text, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_dict) <span class="hljs-meta">&gt;&gt;&gt; </span>start_logits = outputs.start_logits <span class="hljs-meta">&gt;&gt;&gt; </span>end_logits = outputs.end_logits <span class="hljs-meta">&gt;&gt;&gt; </span>all_tokens = tokenizer.convert_ids_to_tokens(input_dict[<span class="hljs-string">&quot;input_ids&quot;</span>].numpy()[<span class="hljs-number">0</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>answer = <span class="hljs-string">&quot; &quot;</span>.join(all_tokens[tf.math.argmax(start_logits, <span class="hljs-number">1</span>)[<span class="hljs-number">0</span>] : tf.math.argmax(end_logits, <span class="hljs-number">1</span>)[<span class="hljs-number">0</span>] + <span class="hljs-number">1</span>])<!-- HTML_TAG_END --></pre></div></div></div> <script type="module" data-hydrate="1qitotk"> import { start } from "/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"; start({ target: document.querySelector('[data-hydrate="1qitotk"]').parentNode, paths: {"base":"/docs/transformers/pr_16143/en","assets":"/docs/transformers/pr_16143/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ 
import("/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"), import("/docs/transformers/pr_16143/en/_app/pages/model_doc/convbert.mdx-b107085e.js") ], params: {} } }); </script>
221
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en
hf_public_repos/doc-build-dev/transformers/pr_16143/en/model_doc/roformer.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;roformer&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;overview&quot;,&quot;title&quot;:&quot;Overview&quot;},{&quot;local&quot;:&quot;transformers.RoFormerConfig&quot;,&quot;title&quot;:&quot;RoFormerConfig&quot;},{&quot;local&quot;:&quot;transformers.RoFormerTokenizer&quot;,&quot;title&quot;:&quot;RoFormerTokenizer&quot;},{&quot;local&quot;:&quot;transformers.RoFormerTokenizerFast&quot;,&quot;title&quot;:&quot;RoFormerTokenizerFast&quot;},{&quot;local&quot;:&quot;transformers.RoFormerModel&quot;,&quot;title&quot;:&quot;RoFormerModel&quot;},{&quot;local&quot;:&quot;transformers.RoFormerForCausalLM&quot;,&quot;title&quot;:&quot;RoFormerForCausalLM&quot;},{&quot;local&quot;:&quot;transformers.RoFormerForMaskedLM&quot;,&quot;title&quot;:&quot;RoFormerForMaskedLM&quot;},{&quot;local&quot;:&quot;transformers.RoFormerForSequenceClassification&quot;,&quot;title&quot;:&quot;RoFormerForSequenceClassification&quot;},{&quot;local&quot;:&quot;transformers.RoFormerForMultipleChoice&quot;,&quot;title&quot;:&quot;RoFormerForMultipleChoice&quot;},{&quot;local&quot;:&quot;transformers.RoFormerForTokenClassification&quot;,&quot;title&quot;:&quot;RoFormerForTokenClassification&quot;},{&quot;local&quot;:&quot;transformers.RoFormerForQuestionAnswering&quot;,&quot;title&quot;:&quot;RoFormerForQuestionAnswering&quot;},{&quot;local&quot;:&quot;transformers.TFRoFormerModel&quot;,&quot;title&quot;:&quot;TFRoFormerModel&quot;},{&quot;local&quot;:&quot;transformers.TFRoFormerForMaskedLM&quot;,&quot;title&quot;:&quot;TFRoFormerForMaskedLM&quot;},{&quot;local&quot;:&quot;transformers.TFRoFormerForCausalLM&quot;,&quot;title&quot;:&quot;TFRoFormerForCausalLM&quot;},{&quot;local&quot;:&quot;transformers.TFRoFormerForSequenceClassification&quot;,&quot;title&quot;:&quot;TFRoFormerForSequenceClassification&quot;},{&quot;local&quot;:&quot;transformers.TFRoFormerForMultipleChoice&quot;,&quot;title&quot;:&quot;TFRoFormerForMultipleChoice&quot;},{&quot;local&quot;:&quot;transformers.TFRoFormerForTokenClassification&quot;,&quot;title&quot;:&quot;TFRoFormerForTokenClassification&quot;},{&quot;local&quot;:&quot;transformers.TFRoFormerForQuestionAnswering&quot;,&quot;title&quot;:&quot;TFRoFormerForQuestionAnswering&quot;},{&quot;local&quot;:&quot;transformers.FlaxRoFormerModel&quot;,&quot;title&quot;:&quot;FlaxRoFormerModel&quot;},{&quot;local&quot;:&quot;transformers.FlaxRoFormerForMaskedLM&quot;,&quot;title&quot;:&quot;FlaxRoFormerForMaskedLM&quot;},{&quot;local&quot;:&quot;transformers.FlaxRoFormerForSequenceClassification&quot;,&quot;title&quot;:&quot;FlaxRoFormerForSequenceClassification&quot;},{&quot;local&quot;:&quot;transformers.FlaxRoFormerForMultipleChoice&quot;,&quot;title&quot;:&quot;FlaxRoFormerForMultipleChoice&quot;},{&quot;local&quot;:&quot;transformers.FlaxRoFormerForTokenClassification&quot;,&quot;title&quot;:&quot;FlaxRoFormerForTokenClassification&quot;},{&quot;local&quot;:&quot;transformers.FlaxRoFormerForQuestionAnswering&quot;,&quot;title&quot;:&quot;FlaxRoFormerForQuestionAnswering&quot;}],&quot;title&quot;:&quot;RoFormer&quot;}" data-svelte="svelte-1phssyn"> <link rel="stylesheet" href="/docs/transformers/pr_16143/en/_app/assets/pages/__layout.svelte-a5c8879b.css"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/vendor-4833417e.js"> <link 
rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/paths-4b3c6e7e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/model_doc/roformer.mdx-0f80020b.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Tip-fffd6df1.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Docstring-4f315ed9.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/IconCopyLink-4b81c553.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CodeBlock-6a3d1b46.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CopyButton-dacfbfaf.js"> <h1 class="relative group"><a id="roformer" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#roformer"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>RoFormer </span></h1> <h2 class="relative group"><a id="overview" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#overview"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Overview </span></h2> <p>The RoFormer model was proposed in <a href="https://arxiv.org/pdf/2104.09864v1.pdf" rel="nofollow">RoFormer: Enhanced Transformer with Rotary Position Embedding</a> by Jianlin Su and Yu Lu and Shengfeng Pan and Bo Wen and Yunfeng Liu.</p> <p>The abstract from the paper is the following:</p> <p><em>Position encoding in transformer architecture provides supervision for dependency modeling between elements at different positions in the sequence. We investigate various methods to encode positional information in transformer-based language models and propose a novel implementation named Rotary Position Embedding(RoPE). 
The proposed RoPE encodes absolute positional information with rotation matrix and naturally incorporates explicit relative position dependency in self-attention formulation. Notably, RoPE comes with valuable properties such as flexibility of being expand to any sequence lengths, decaying inter-token dependency with increasing relative distances, and capability of equipping the linear self-attention with relative position encoding. As a result, the enhanced transformer with rotary position embedding, or RoFormer, achieves superior performance in tasks with long texts. We release the theoretical analysis along with some preliminary experiment results on Chinese data. The undergoing experiment for English benchmark will soon be updated.</em></p> <p>Tips:</p> <ul><li>RoFormer is a BERT-like autoencoding model with rotary position embeddings. Rotary position embeddings have shown improved performance on classification tasks with long texts.</li></ul> <p>This model was contributed by <a href="https://huggingface.co/junnyu" rel="nofollow">junnyu</a>. The original code can be found <a href="https://github.com/ZhuiyiTechnology/roformer" rel="nofollow">here</a>.</p> <h2 class="relative group"><a id="transformers.RoFormerConfig" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerConfig"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>RoFormerConfig </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.RoFormerConfig"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span 
class="font-semibold">RoFormerConfig</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.RoFormerConfig" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.RoFormerConfig"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roformer/configuration_roformer.py#L34" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">vocab_size<span class="opacity-60"> = 50000</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">embedding_size<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_size<span class="opacity-60"> = 768</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_hidden_layers<span class="opacity-60"> = 12</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_attention_heads<span class="opacity-60"> = 12</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">intermediate_size<span class="opacity-60"> = 3072</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_act<span class="opacity-60"> = &#39;gelu&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_dropout_prob<span class="opacity-60"> = 0.1</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_probs_dropout_prob<span class="opacity-60"> = 0.1</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_position_embeddings<span class="opacity-60"> = 1536</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">type_vocab_size<span class="opacity-60"> = 2</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">initializer_range<span class="opacity-60"> = 0.02</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">layer_norm_eps<span class="opacity-60"> = 1e-12</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_token_id<span class="opacity-60"> = 0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">rotary_value<span class="opacity-60"> = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_cache<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerConfig.vocab_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerConfig.vocab_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 50000) &#x2014; Vocabulary size of the RoFormer model. 
Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed when calling <a href="/docs/transformers/pr_16143/en/model_doc/roformer#transformers.RoFormerModel">RoFormerModel</a> or <a href="/docs/transformers/pr_16143/en/model_doc/roformer#transformers.TFRoFormerModel">TFRoFormerModel</a>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerConfig.embedding_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerConfig.embedding_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>embedding_size</strong> (<code>int</code>, <em>optional</em>, defaults to None) &#x2014; Dimensionality of the encoder layers and the pooler layer. Defaults to the <code>hidden_size</code> if not provided.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerConfig.hidden_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerConfig.hidden_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_size</strong> (<code>int</code>, <em>optional</em>, defaults to 768) &#x2014; Dimension of the encoder layers and the pooler layer.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerConfig.num_hidden_layers" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerConfig.num_hidden_layers"><span><svg class="text-smd" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_hidden_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of hidden layers in the Transformer encoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerConfig.num_attention_heads" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerConfig.num_attention_heads"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerConfig.intermediate_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerConfig.intermediate_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START 
--><strong>intermediate_size</strong> (<code>int</code>, <em>optional</em>, defaults to 3072) &#x2014; Dimension of the &#x201C;intermediate&#x201D; (i.e., feed-forward) layer in the Transformer encoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerConfig.hidden_act" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerConfig.hidden_act"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_act</strong> (<code>str</code> or <code>function</code>, <em>optional</em>, defaults to <code>&quot;gelu&quot;</code>) &#x2014; The non-linear activation function (function or string) in the encoder and pooler. If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;selu&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerConfig.hidden_dropout_prob" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerConfig.hidden_dropout_prob"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerConfig.attention_probs_dropout_prob" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.RoFormerConfig.attention_probs_dropout_prob"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_probs_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout ratio for the attention probabilities.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerConfig.max_position_embeddings" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerConfig.max_position_embeddings"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_position_embeddings</strong> (<code>int</code>, <em>optional</em>, defaults to 1536) &#x2014; The maximum sequence length that this model might ever be used with. 
Typically set this to something large just in case (e.g., 512 or 1024 or 1536).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerConfig.type_vocab_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerConfig.type_vocab_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>type_vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 2) &#x2014; The vocabulary size of the <code>token_type_ids</code> passed when calling <a href="/docs/transformers/pr_16143/en/model_doc/roformer#transformers.RoFormerModel">RoFormerModel</a> or <a href="/docs/transformers/pr_16143/en/model_doc/roformer#transformers.TFRoFormerModel">TFRoFormerModel</a>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerConfig.initializer_range" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerConfig.initializer_range"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>initializer_range</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerConfig.layer_norm_eps" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerConfig.layer_norm_eps"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>layer_norm_eps</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-12) &#x2014; The epsilon used by the layer normalization layers.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerConfig.use_cache" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerConfig.use_cache"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not the model should return the last key/values attentions (not used by all models). 
Only relevant if <code>config.is_decoder=True</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerConfig.rotary_value" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerConfig.rotary_value"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>rotary_value</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to apply rotary position embeddings on the value layer.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>This is the configuration class to store the configuration of a <a href="/docs/transformers/pr_16143/en/model_doc/roformer#transformers.RoFormerModel">RoFormerModel</a>. It is used to instantiate a RoFormer model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the RoFormer <a href="https://huggingface.co/junnyu/roformer_chinese_base" rel="nofollow">junnyu/roformer_chinese_base</a> architecture.</p> <p>Configuration objects inherit from <a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a> and can be used to control the model outputs. 
Read the documentation from <a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a> for more information.</p> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RoFormerModel, RoFormerConfig <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a RoFormer junnyu/roformer_chinese_base style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = RoFormerConfig() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a model from the junnyu/roformer_chinese_base style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = RoFormerModel(configuration) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = model.config<!-- HTML_TAG_END --></pre></div></div> <h2 class="relative group"><a id="transformers.RoFormerTokenizer" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerTokenizer"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>RoFormerTokenizer </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.RoFormerTokenizer"><!-- HTML_TAG_START --><h3 
class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">RoFormerTokenizer</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.RoFormerTokenizer" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.RoFormerTokenizer"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roformer/tokenization_roformer.py#L61" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">vocab_file<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">do_lower_case<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">do_basic_tokenize<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">never_split<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">unk_token<span class="opacity-60"> = &#39;[UNK]&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black 
hover:text-white dark:hover:bg-white dark:hover:text-black">sep_token<span class="opacity-60"> = &#39;[SEP]&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_token<span class="opacity-60"> = &#39;[PAD]&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cls_token<span class="opacity-60"> = &#39;[CLS]&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">mask_token<span class="opacity-60"> = &#39;[MASK]&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tokenize_chinese_chars<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">strip_accents<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerTokenizer.vocab_file" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerTokenizer.vocab_file"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>vocab_file</strong> (<code>str</code>) &#x2014; File containing the vocabulary.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerTokenizer.do_lower_case" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerTokenizer.do_lower_case"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 
1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>do_lower_case</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to lowercase the input when tokenizing.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerTokenizer.do_basic_tokenize" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerTokenizer.do_basic_tokenize"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>do_basic_tokenize</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to do basic tokenization before WordPiece.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerTokenizer.never_split" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerTokenizer.never_split"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>never_split</strong> (<code>Iterable</code>, <em>optional</em>) &#x2014; Collection of tokens which will never be split during tokenization. 
Only has an effect when <code>do_basic_tokenize=True</code><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerTokenizer.unk_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerTokenizer.unk_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[UNK]&quot;</code>) &#x2014; The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerTokenizer.sep_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerTokenizer.sep_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>sep_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[SEP]&quot;</code>) &#x2014; The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. 
It is also used as the last token of a sequence built with special tokens.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerTokenizer.pad_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerTokenizer.pad_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pad_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[PAD]&quot;</code>) &#x2014; The token used for padding, for example when batching sequences of different lengths.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerTokenizer.cls_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerTokenizer.cls_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cls_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[CLS]&quot;</code>) &#x2014; The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). 
It is the first token of the sequence when built with special tokens.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerTokenizer.mask_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerTokenizer.mask_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>mask_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[MASK]&quot;</code>) &#x2014; The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerTokenizer.tokenize_chinese_chars" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerTokenizer.tokenize_chinese_chars"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tokenize_chinese_chars</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to tokenize Chinese characters.</p> <p>This should likely be deactivated for Japanese (see this <a href="https://github.com/huggingface/transformers/issues/328" rel="nofollow">issue</a>). strip_accents &#x2014; (<code>bool</code>, <em>optional</em>): Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for <code>lowercase</code> (as in the original BERT).<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Construct a RoFormer tokenizer. 
Based on <a href="https://pypi.org/project/rjieba/" rel="nofollow">Rust Jieba</a>.</p> <p>This tokenizer inherits from <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a> which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.</p> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RoFormerTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RoFormerTokenizer.from_pretrained(<span class="hljs-string">&quot;junnyu/roformer_chinese_base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.tokenize(<span class="hljs-string">&quot;今天天气非常好。&quot;</span>) <span class="hljs-comment"># [&#x27;今&#x27;, &#x27;天&#x27;, &#x27;天&#x27;, &#x27;气&#x27;, &#x27;非常&#x27;, &#x27;好&#x27;, &#x27;。&#x27;]</span><!-- HTML_TAG_END --></pre></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.RoFormerTokenizer.build_inputs_with_special_tokens"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 
7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>build_inputs_with_special_tokens</span></h4><!-- HTML_TAG_END --> <a id="transformers.RoFormerTokenizer.build_inputs_with_special_tokens" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.RoFormerTokenizer.build_inputs_with_special_tokens"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roformer/tokenization_roformer.py#L227" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_0<span class="opacity-60">: typing.List[int]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_1<span class="opacity-60">: typing.Optional[typing.List[int]] = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>List[int]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerTokenizer.build_inputs_with_special_tokens.token_ids_0" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerTokenizer.build_inputs_with_special_tokens.token_ids_0"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 
11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs to which the special tokens will be added.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerTokenizer.build_inputs_with_special_tokens.token_ids_1" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerTokenizer.build_inputs_with_special_tokens.token_ids_1"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.RoFormerTokenizer.build_inputs_with_special_tokens.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>List[int]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>List of <a href="../glossary#input-ids">input IDs</a> with the appropriate special tokens.</p> <!-- HTML_TAG_END --></p></div></div> <p>Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. 
A RoFormer sequence has the following format:</p> <ul><li>single sequence: <code>[CLS] X [SEP]</code></li> <li>pair of sequences: <code>[CLS] A [SEP] B [SEP]</code></li></ul></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.RoFormerTokenizer.get_special_tokens_mask"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>get_special_tokens_mask</span></h4><!-- HTML_TAG_END --> <a id="transformers.RoFormerTokenizer.get_special_tokens_mask" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.RoFormerTokenizer.get_special_tokens_mask"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roformer/tokenization_roformer.py#L252" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_0<span class="opacity-60">: typing.List[int]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_1<span class="opacity-60">: typing.Optional[typing.List[int]] 
= None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">already_has_special_tokens<span class="opacity-60">: bool = False</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>List[int]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerTokenizer.get_special_tokens_mask.token_ids_0" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerTokenizer.get_special_tokens_mask.token_ids_0"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerTokenizer.get_special_tokens_mask.token_ids_1" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerTokenizer.get_special_tokens_mask.token_ids_1"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerTokenizer.get_special_tokens_mask.already_has_special_tokens" 
class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerTokenizer.get_special_tokens_mask.already_has_special_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>already_has_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the token list is already formatted with special tokens for the model.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.RoFormerTokenizer.get_special_tokens_mask.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>List[int]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.</p> <!-- HTML_TAG_END --></p></div></div> <p>Retrieve sequence ids from a token list that has no special tokens added. 
This method is called when adding special tokens using the tokenizer <code>prepare_for_model</code> method.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.RoFormerTokenizer.create_token_type_ids_from_sequences"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>create_token_type_ids_from_sequences</span></h4><!-- HTML_TAG_END --> <a id="transformers.RoFormerTokenizer.create_token_type_ids_from_sequences" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.RoFormerTokenizer.create_token_type_ids_from_sequences"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roformer/tokenization_roformer.py#L280" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_0<span class="opacity-60">: typing.List[int]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_1<span class="opacity-60">: typing.Optional[typing.List[int]] = 
None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>List[int]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerTokenizer.create_token_type_ids_from_sequences.token_ids_0" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerTokenizer.create_token_type_ids_from_sequences.token_ids_0"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerTokenizer.create_token_type_ids_from_sequences.token_ids_1" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerTokenizer.create_token_type_ids_from_sequences.token_ids_1"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.RoFormerTokenizer.create_token_type_ids_from_sequences.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>List[int]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 
dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>List of <a href="../glossary#token-type-ids">token type IDs</a> according to the given sequence(s).</p> <!-- HTML_TAG_END --></p></div></div> <p>Create a mask from the two sequences passed to be used in a sequence-pair classification task. A RoFormer sequence pair mask has the following format:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 1 </span>1<span class="hljs-number"> 1 </span>1<span class="hljs-number"> 1 </span>1<span class="hljs-number"> 1 </span>1 1 | first sequence | second sequence |<!-- HTML_TAG_END --></pre></div> <p>If <code>token_ids_1</code> is <code>None</code>, this method only returns the first portion of the mask (0s).</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.RoFormerTokenizer.save_vocabulary"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 
13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>save_vocabulary</span></h4><!-- HTML_TAG_END --> <a id="transformers.RoFormerTokenizer.save_vocabulary" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.RoFormerTokenizer.save_vocabulary"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roformer/tokenization_roformer.py#L309" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">save_directory<span class="opacity-60">: str</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">filename_prefix<span class="opacity-60">: typing.Optional[str] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div></div></div> <h2 class="relative group"><a id="transformers.RoFormerTokenizerFast" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerTokenizerFast"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>RoFormerTokenizerFast </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.RoFormerTokenizerFast"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 
text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">RoFormerTokenizerFast</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.RoFormerTokenizerFast" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.RoFormerTokenizerFast"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roformer/tokenization_roformer_fast.py#L63" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">vocab_file<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tokenizer_file<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">do_lower_case<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">unk_token<span class="opacity-60"> = &#39;[UNK]&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">sep_token<span class="opacity-60"> = &#39;[SEP]&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_token<span class="opacity-60"> = &#39;[PAD]&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded 
hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cls_token<span class="opacity-60"> = &#39;[CLS]&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">mask_token<span class="opacity-60"> = &#39;[MASK]&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tokenize_chinese_chars<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">strip_accents<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Construct a “fast” RoFormer tokenizer (backed by HuggingFace’s <em>tokenizers</em> library).</p> <p><a href="/docs/transformers/pr_16143/en/model_doc/roformer#transformers.RoFormerTokenizerFast">RoFormerTokenizerFast</a> is almost identical to <a href="/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertTokenizerFast">BertTokenizerFast</a> and runs end-to-end tokenization: punctuation splitting and wordpiece. There are some differences between them when tokenizing Chinese.</p> <p>This tokenizer inherits from <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast">PreTrainedTokenizerFast</a> which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.</p> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RoFormerTokenizerFast <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RoFormerTokenizerFast.from_pretrained(<span class="hljs-string">&quot;junnyu/roformer_chinese_base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.tokenize(<span class="hljs-string">&quot;今天天气非常好。&quot;</span>) <span class="hljs-comment"># [&#x27;今&#x27;, &#x27;天&#x27;, &#x27;天&#x27;, 
&#x27;气&#x27;, &#x27;非常&#x27;, &#x27;好&#x27;, &#x27;。&#x27;]</span><!-- HTML_TAG_END --></pre></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.RoFormerTokenizerFast.build_inputs_with_special_tokens"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>build_inputs_with_special_tokens</span></h4><!-- HTML_TAG_END --> <a id="transformers.RoFormerTokenizerFast.build_inputs_with_special_tokens" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.RoFormerTokenizerFast.build_inputs_with_special_tokens"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roformer/tokenization_roformer_fast.py#L139" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_0<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_1<span class="opacity-60"> = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> 
<span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>List[int]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerTokenizerFast.build_inputs_with_special_tokens.token_ids_0" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerTokenizerFast.build_inputs_with_special_tokens.token_ids_0"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs to which the special tokens will be added.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerTokenizerFast.build_inputs_with_special_tokens.token_ids_1" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerTokenizerFast.build_inputs_with_special_tokens.token_ids_1"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.RoFormerTokenizerFast.build_inputs_with_special_tokens.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>List[int]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> 
<p class="text-base"><!-- HTML_TAG_START --> <p>List of <a href="../glossary#input-ids">input IDs</a> with the appropriate special tokens.</p> <!-- HTML_TAG_END --></p></div></div> <p>Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A RoFormer sequence has the following format:</p> <ul><li>single sequence: <code>[CLS] X [SEP]</code></li> <li>pair of sequences: <code>[CLS] A [SEP] B [SEP]</code></li></ul></div></div> <h2 class="relative group"><a id="transformers.RoFormerModel" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerModel"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>RoFormerModel </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.RoFormerModel"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">RoFormerModel</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.RoFormerModel" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.RoFormerModel"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 
79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roformer/modeling_roformer.py#L794" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerModel.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerModel.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/roformer#transformers.RoFormerConfig">RoFormerConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>The bare RoFormer Model transformer outputting raw hidden-states without any specific head on top. This model is a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and behavior.</p> <p>The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of cross-attention is added between the self-attention layers, following the architecture described in <a href="https://arxiv.org/abs/1706.03762" rel="nofollow">Attention is all you need</a> by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.</p> <p>To behave as a decoder the model needs to be initialized with the <code>is_decoder</code> argument of the configuration set to <code>True</code>. To be used in a Seq2Seq model, the model needs to be initialized with both the <code>is_decoder</code> argument and <code>add_cross_attention</code> set to <code>True</code>; <code>encoder_hidden_states</code> is then expected as an input to the forward pass.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.RoFormerModel.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.RoFormerModel.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.RoFormerModel.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" 
href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roformer/modeling_roformer.py#L834" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_cache<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPastAndCrossAttentions" >transformers.modeling_outputs.BaseModelOutputWithPastAndCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerModel.forward.input_ids" class="header-link block pr-0.5 
text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerModel.forward.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/roformer#transformers.RoFormerTokenizer">RoFormerTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerModel.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerModel.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerModel.forward.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerModel.forward.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerModel.forward.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerModel.forward.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerModel.forward.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerModel.forward.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerModel.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerModel.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerModel.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerModel.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerModel.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerModel.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerModel.forward.encoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerModel.forward.encoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_hidden_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerModel.forward.encoder_attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerModel.forward.encoder_attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerModel.forward.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerModel.forward.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code> of length <code>config.n_layers</code> with each tuple having 4 tensors of shape <code>(batch_size, num_heads, sequence_length - 1, embed_size_per_head)</code>) &#x2014; Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerModel.forward.use_cache" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerModel.forward.use_cache"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).<!-- HTML_TAG_END --> 
</span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.RoFormerModel.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPastAndCrossAttentions" >transformers.modeling_outputs.BaseModelOutputWithPastAndCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPastAndCrossAttentions" >transformers.modeling_outputs.BaseModelOutputWithPastAndCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/roformer#transformers.RoFormerConfig" >RoFormerConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) — Sequence of hidden-states at the output of the last layer of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) — Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and optionally if <code>config.is_encoder_decoder=True</code> 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if <code>config.is_encoder_decoder=True</code> in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when 
<code>output_attentions=True</code> and <code>config.add_cross_attention=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder’s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/roformer#transformers.RoFormerModel">RoFormerModel</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RoFormerTokenizer, RoFormerModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RoFormerTokenizer.from_pretrained(<span class="hljs-string">&quot;junnyu/roformer_chinese_base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = RoFormerModel.from_pretrained(<span class="hljs-string">&quot;junnyu/roformer_chinese_base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.RoFormerForCausalLM" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute 
with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerForCausalLM"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>RoFormerForCausalLM </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.RoFormerForCausalLM"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">RoFormerForCausalLM</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.RoFormerForCausalLM" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.RoFormerForCausalLM"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roformer/modeling_roformer.py#L1062" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> 
<span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerForCausalLM.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerForCausalLM.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/roformer#transformers.RoFormerConfig">RoFormerConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>RoFormer Model with a <code>language modeling</code> head on top for CLM fine-tuning. This model is a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> sub-class. 
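</p> <p>The <code>forward()</code> arguments documented below include <code>past_key_values</code> and <code>use_cache</code>, which let the model reuse cached key/value states so that each decoding step only has to process the newly generated token. The snippet below is a minimal sketch of that pattern; it is not part of the original API reference and simply reuses the <code>junnyu/roformer_chinese_base</code> checkpoint from the example further down on this page:</p> <pre>&gt;&gt;&gt; # Minimal sketch (not from the official docs): greedy decoding with past_key_values
&gt;&gt;&gt; import torch
&gt;&gt;&gt; from transformers import RoFormerTokenizer, RoFormerForCausalLM, RoFormerConfig

&gt;&gt;&gt; tokenizer = RoFormerTokenizer.from_pretrained("junnyu/roformer_chinese_base")
&gt;&gt;&gt; config = RoFormerConfig.from_pretrained("junnyu/roformer_chinese_base")
&gt;&gt;&gt; config.is_decoder = True  # mark the checkpoint as a decoder so causal masking and caching are enabled
&gt;&gt;&gt; model = RoFormerForCausalLM.from_pretrained("junnyu/roformer_chinese_base", config=config)

&gt;&gt;&gt; inputs = tokenizer("今天天气非常好。", return_tensors="pt")

&gt;&gt;&gt; # first pass over the full prompt, asking the model to return its key/value cache
&gt;&gt;&gt; outputs = model(**inputs, use_cache=True)
&gt;&gt;&gt; past_key_values = outputs.past_key_values
&gt;&gt;&gt; next_token = outputs.logits[:, -1, :].argmax(dim=-1, keepdim=True)

&gt;&gt;&gt; # later steps feed only the newest token plus the cache instead of the whole sequence
&gt;&gt;&gt; outputs = model(input_ids=next_token, past_key_values=past_key_values, use_cache=True)
&gt;&gt;&gt; past_key_values = outputs.past_key_values</pre> <p>In practice the higher-level <code>generate()</code> method takes care of this caching automatically; the manual calls above only illustrate what the <code>past_key_values</code> and <code>use_cache</code> arguments described below do.</p> <p>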
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.RoFormerForCausalLM.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.RoFormerForCausalLM.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.RoFormerForCausalLM.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roformer/modeling_roformer.py#L1084" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">token_type_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cross_attn_head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_cache<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.CausalLMOutputWithCrossAttentions" >transformers.modeling_outputs.CausalLMOutputWithCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerForCausalLM.forward.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerForCausalLM.forward.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 
8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/roformer#transformers.RoFormerTokenizer">RoFormerTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerForCausalLM.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerForCausalLM.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerForCausalLM.forward.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerForCausalLM.forward.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerForCausalLM.forward.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerForCausalLM.forward.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerForCausalLM.forward.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerForCausalLM.forward.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerForCausalLM.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerForCausalLM.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerForCausalLM.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerForCausalLM.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerForCausalLM.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerForCausalLM.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerForCausalLM.forward.encoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerForCausalLM.forward.encoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" 
role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_hidden_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerForCausalLM.forward.encoder_attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerForCausalLM.forward.encoder_attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerForCausalLM.forward.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerForCausalLM.forward.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code> of length <code>config.n_layers</code> with each tuple having 4 tensors of shape <code>(batch_size, num_heads, sequence_length - 1, embed_size_per_head)</code>) &#x2014; Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerForCausalLM.forward.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerForCausalLM.forward.labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the left-to-right language modeling loss (next word prediction). 
Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see the <code>input_ids</code> docstring). Tokens with indices set to <code>-100</code> are ignored (masked); the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerForCausalLM.forward.use_cache" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerForCausalLM.forward.use_cache"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.RoFormerForCausalLM.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.CausalLMOutputWithCrossAttentions" >transformers.modeling_outputs.CausalLMOutputWithCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.CausalLMOutputWithCrossAttentions" >transformers.modeling_outputs.CausalLMOutputWithCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/roformer#transformers.RoFormerConfig" >RoFormerConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) — Language modeling loss (for next-token prediction).</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) — Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when
<code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Cross attentions weights after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) — Tuple of <code>torch.FloatTensor</code> tuples of length <code>config.n_layers</code>, with each tuple containing the cached key, value states of the self-attention and the cross-attention layers if model is used in encoder-decoder setting. Only relevant if <code>config.is_decoder = True</code>.</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/roformer#transformers.RoFormerForCausalLM">RoFormerForCausalLM</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute 
pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RoFormerTokenizer, RoFormerForCausalLM, RoFormerConfig <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RoFormerTokenizer.from_pretrained(<span class="hljs-string">&quot;junnyu/roformer_chinese_base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>config = RoFormerConfig.from_pretrained(<span class="hljs-string">&quot;junnyu/roformer_chinese_base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>config.is_decoder = <span class="hljs-literal">True</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = RoFormerForCausalLM.from_pretrained(<span class="hljs-string">&quot;junnyu/roformer_chinese_base&quot;</span>, config=config) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;今天天气非常好。&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>prediction_logits = outputs.logits<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.RoFormerForMaskedLM" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerForMaskedLM"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>RoFormerForMaskedLM </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.RoFormerForMaskedLM"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 
1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">RoFormerForMaskedLM</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.RoFormerForMaskedLM" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.RoFormerForMaskedLM"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roformer/modeling_roformer.py#L961" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerForMaskedLM.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerForMaskedLM.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a 
href="/docs/transformers/pr_16143/en/model_doc/roformer#transformers.RoFormerConfig">RoFormerConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>RoFormer Model with a <code>language modeling</code> head on top. This model is a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.RoFormerForMaskedLM.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.RoFormerForMaskedLM.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.RoFormerForMaskedLM.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roformer/modeling_roformer.py#L983" 
target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.MaskedLMOutput" >transformers.modeling_outputs.MaskedLMOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerForMaskedLM.forward.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerForMaskedLM.forward.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/roformer#transformers.RoFormerTokenizer">RoFormerTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerForMaskedLM.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerForMaskedLM.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerForMaskedLM.forward.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerForMaskedLM.forward.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerForMaskedLM.forward.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerForMaskedLM.forward.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerForMaskedLM.forward.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerForMaskedLM.forward.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerForMaskedLM.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerForMaskedLM.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerForMaskedLM.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerForMaskedLM.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerForMaskedLM.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerForMaskedLM.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerForMaskedLM.forward.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerForMaskedLM.forward.labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" 
height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.RoFormerForMaskedLM.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.MaskedLMOutput" >transformers.modeling_outputs.MaskedLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.MaskedLMOutput" >transformers.modeling_outputs.MaskedLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/roformer#transformers.RoFormerConfig" >RoFormerConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) — Masked language modeling (MLM) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) — Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights 
after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/roformer#transformers.RoFormerForMaskedLM">RoFormerForMaskedLM</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RoFormerTokenizer, RoFormerForMaskedLM <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RoFormerTokenizer.from_pretrained(<span class="hljs-string">&quot;junnyu/roformer_chinese_base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = RoFormerForMaskedLM.from_pretrained(<span class="hljs-string">&quot;junnyu/roformer_chinese_base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;The capital of France is [MASK].&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = tokenizer(<span class="hljs-string">&quot;The capital of France is Paris.&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>)[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.RoFormerForSequenceClassification" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute 
with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerForSequenceClassification"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>RoFormerForSequenceClassification </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.RoFormerForSequenceClassification"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">RoFormerForSequenceClassification</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.RoFormerForSequenceClassification" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.RoFormerForSequenceClassification"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roformer/modeling_roformer.py#L1235" target="_blank"><span>&lt;</span> 
<span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerForSequenceClassification.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerForSequenceClassification.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/roformer#transformers.RoFormerConfig">RoFormerConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>RoFormer Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.</p> <p>This model is a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.RoFormerForSequenceClassification.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.RoFormerForSequenceClassification.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.RoFormerForSequenceClassification.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roformer/modeling_roformer.py#L1245" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black 
hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerForSequenceClassification.forward.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerForSequenceClassification.forward.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/roformer#transformers.RoFormerTokenizer">RoFormerTokenizer</a>. 
See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerForSequenceClassification.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerForSequenceClassification.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerForSequenceClassification.forward.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerForSequenceClassification.forward.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerForSequenceClassification.forward.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerForSequenceClassification.forward.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerForSequenceClassification.forward.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerForSequenceClassification.forward.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerForSequenceClassification.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerForSequenceClassification.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerForSequenceClassification.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerForSequenceClassification.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerForSequenceClassification.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerForSequenceClassification.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerForSequenceClassification.forward.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerForSequenceClassification.forward.labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.RoFormerForSequenceClassification.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/roformer#transformers.RoFormerConfig" >RoFormerConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) — Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) — Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape 
<code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/roformer#transformers.RoFormerForSequenceClassification">RoFormerForSequenceClassification</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example of single-label classification:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RoFormerTokenizer, RoFormerForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span>torch.manual_seed(<span class="hljs-number">0</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RoFormerTokenizer.from_pretrained(<span class="hljs-string">&quot;junnyu/roformer_chinese_base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = RoFormerForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;junnyu/roformer_chinese_base&quot;</span>, num_labels=<span class="hljs-number">2</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([<span class="hljs-number">1</span>]).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span 
class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">list</span>(logits.shape) <!-- HTML_TAG_END --></pre></div> <p>Example of multi-label classification:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RoFormerTokenizer, RoFormerForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span>torch.manual_seed(<span class="hljs-number">0</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RoFormerTokenizer.from_pretrained(<span class="hljs-string">&quot;junnyu/roformer_chinese_base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = RoFormerForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;junnyu/roformer_chinese_base&quot;</span>, problem_type=<span class="hljs-string">&quot;multi_label_classification&quot;</span>, num_labels=<span class="hljs-number">2</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([[<span class="hljs-number">1</span>, <span class="hljs-number">1</span>]], dtype=torch.<span class="hljs-built_in">float</span>) <span class="hljs-comment"># need dtype=float for BCEWithLogitsLoss</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">list</span>(logits.shape) <!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.RoFormerForMultipleChoice" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerForMultipleChoice"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 
256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>RoFormerForMultipleChoice </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.RoFormerForMultipleChoice"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">RoFormerForMultipleChoice</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.RoFormerForMultipleChoice" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.RoFormerForMultipleChoice"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roformer/modeling_roformer.py#L1327" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 
relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerForMultipleChoice.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerForMultipleChoice.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/roformer#transformers.RoFormerConfig">RoFormerConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>RoFormer Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.</p> <p>This model is a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> sub-class. 
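</p> <p>A minimal usage sketch for the multiple-choice head is given below (it assumes the same <code>junnyu/roformer_chinese_base</code> checkpoint used in the examples above; the prompt and candidate answers are illustrative). Inputs are arranged as <code>(batch_size, num_choices, sequence_length)</code>, which is why the extra dimension is added with <code>unsqueeze(0)</code>.</p> <div class="code-block relative"> <pre>&gt;&gt;&gt; import torch
&gt;&gt;&gt; from transformers import RoFormerTokenizer, RoFormerForMultipleChoice

&gt;&gt;&gt; tokenizer = RoFormerTokenizer.from_pretrained("junnyu/roformer_chinese_base")
&gt;&gt;&gt; model = RoFormerForMultipleChoice.from_pretrained("junnyu/roformer_chinese_base")

&gt;&gt;&gt; prompt = "In Italy, pizza served in formal settings is presented unsliced."
&gt;&gt;&gt; choice0 = "It is eaten with a fork and a knife."
&gt;&gt;&gt; choice1 = "It is eaten while held in the hand."

&gt;&gt;&gt; # encode each (prompt, choice) pair, then add the num_choices dimension
&gt;&gt;&gt; encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors="pt", padding=True)
&gt;&gt;&gt; inputs = {k: v.unsqueeze(0) for k, v in encoding.items()}  # batch size 1, two choices

&gt;&gt;&gt; labels = torch.tensor(0).unsqueeze(0)  # choice0 is the correct answer
&gt;&gt;&gt; outputs = model(**inputs, labels=labels)
&gt;&gt;&gt; loss = outputs.loss
&gt;&gt;&gt; logits = outputs.logits  # shape (batch_size, num_choices)</pre></div> <p>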
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.RoFormerForMultipleChoice.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.RoFormerForMultipleChoice.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.RoFormerForMultipleChoice.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roformer/modeling_roformer.py#L1338" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">token_type_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.MultipleChoiceModelOutput" >transformers.modeling_outputs.MultipleChoiceModelOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerForMultipleChoice.forward.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerForMultipleChoice.forward.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/roformer#transformers.RoFormerTokenizer">RoFormerTokenizer</a>. 
See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerForMultipleChoice.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerForMultipleChoice.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerForMultipleChoice.forward.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerForMultipleChoice.forward.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerForMultipleChoice.forward.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerForMultipleChoice.forward.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerForMultipleChoice.forward.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerForMultipleChoice.forward.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerForMultipleChoice.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerForMultipleChoice.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerForMultipleChoice.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerForMultipleChoice.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerForMultipleChoice.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerForMultipleChoice.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerForMultipleChoice.forward.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerForMultipleChoice.forward.labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" 
aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the multiple choice classification loss. Indices should be in <code>[0, ..., num_choices-1]</code> where <code>num_choices</code> is the size of the second dimension of the input tensors. (See <code>input_ids</code> above)<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.RoFormerForMultipleChoice.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.MultipleChoiceModelOutput" >transformers.modeling_outputs.MultipleChoiceModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.MultipleChoiceModelOutput" >transformers.modeling_outputs.MultipleChoiceModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/roformer#transformers.RoFormerConfig" >RoFormerConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <em>(1,)</em>, <em>optional</em>, returned when <code>labels</code> is provided) — Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices)</code>) — <em>num_choices</em> is the second dimension of the input tensors. 
(see <em>input_ids</em> above).</p> <p>Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/roformer#transformers.RoFormerForMultipleChoice">RoFormerForMultipleChoice</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RoFormerTokenizer, RoFormerForMultipleChoice <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RoFormerTokenizer.from_pretrained(<span class="hljs-string">&quot;junnyu/roformer_chinese_base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = 
RoFormerForMultipleChoice.from_pretrained(<span class="hljs-string">&quot;junnyu/roformer_chinese_base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice0 = <span class="hljs-string">&quot;It is eaten with a fork and a knife.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice1 = <span class="hljs-string">&quot;It is eaten while held in the hand.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor(<span class="hljs-number">0</span>).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># choice0 is correct (according to Wikipedia ;)), batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors=<span class="hljs-string">&quot;pt&quot;</span>, padding=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**{k: v.unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-keyword">for</span> k, v <span class="hljs-keyword">in</span> encoding.items()}, labels=labels) <span class="hljs-comment"># batch size is 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># the linear classifier still needs to be trained</span> <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.RoFormerForTokenClassification" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerForTokenClassification"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>RoFormerForTokenClassification </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.RoFormerForTokenClassification"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 
3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">RoFormerForTokenClassification</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.RoFormerForTokenClassification" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.RoFormerForTokenClassification"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roformer/modeling_roformer.py#L1419" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerForTokenClassification.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerForTokenClassification.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START 
--><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/roformer#transformers.RoFormerConfig">RoFormerConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>RoFormer Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.</p> <p>This model is a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.RoFormerForTokenClassification.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.RoFormerForTokenClassification.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.RoFormerForTokenClassification.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 
!no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roformer/modeling_roformer.py#L1431" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerForTokenClassification.forward.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerForTokenClassification.forward.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 
0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/roformer#transformers.RoFormerTokenizer">RoFormerTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerForTokenClassification.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerForTokenClassification.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerForTokenClassification.forward.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerForTokenClassification.forward.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerForTokenClassification.forward.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerForTokenClassification.forward.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerForTokenClassification.forward.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerForTokenClassification.forward.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerForTokenClassification.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerForTokenClassification.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerForTokenClassification.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerForTokenClassification.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerForTokenClassification.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerForTokenClassification.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerForTokenClassification.forward.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerForTokenClassification.forward.labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the token classification loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.RoFormerForTokenClassification.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/roformer#transformers.RoFormerConfig" >RoFormerConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) — Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.num_labels)</code>) — Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a 
href="/docs/transformers/pr_16143/en/model_doc/roformer#transformers.RoFormerForTokenClassification">RoFormerForTokenClassification</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RoFormerTokenizer, RoFormerForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RoFormerTokenizer.from_pretrained(<span class="hljs-string">&quot;junnyu/roformer_chinese_base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = RoFormerForTokenClassification.from_pretrained(<span class="hljs-string">&quot;junnyu/roformer_chinese_base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([<span class="hljs-number">1</span>] * inputs[<span class="hljs-string">&quot;input_ids&quot;</span>].size(<span class="hljs-number">1</span>)).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.RoFormerForQuestionAnswering" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full" href="#transformers.RoFormerForQuestionAnswering"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>RoFormerForQuestionAnswering </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.RoFormerForQuestionAnswering"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">RoFormerForQuestionAnswering</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.RoFormerForQuestionAnswering" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.RoFormerForQuestionAnswering"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roformer/modeling_roformer.py#L1496" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p 
class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerForQuestionAnswering.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerForQuestionAnswering.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/roformer#transformers.RoFormerConfig">RoFormerConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>RoFormer Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute <code>span start logits</code> and <code>span end logits</code>).</p> <p>This model is a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and behavior.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.RoFormerForQuestionAnswering.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.RoFormerForQuestionAnswering.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.RoFormerForQuestionAnswering.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roformer/modeling_roformer.py#L1509" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white 
dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">start_positions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">end_positions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.QuestionAnsweringModelOutput" >transformers.modeling_outputs.QuestionAnsweringModelOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerForQuestionAnswering.forward.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerForQuestionAnswering.forward.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a 
href="/docs/transformers/pr_16143/en/model_doc/roformer#transformers.RoFormerTokenizer">RoFormerTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerForQuestionAnswering.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerForQuestionAnswering.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerForQuestionAnswering.forward.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerForQuestionAnswering.forward.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerForQuestionAnswering.forward.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerForQuestionAnswering.forward.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerForQuestionAnswering.forward.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerForQuestionAnswering.forward.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerForQuestionAnswering.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerForQuestionAnswering.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerForQuestionAnswering.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerForQuestionAnswering.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerForQuestionAnswering.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerForQuestionAnswering.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerForQuestionAnswering.forward.start_positions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerForQuestionAnswering.forward.start_positions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>start_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). Position outside of the sequence are not taken into account for computing the loss.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.RoFormerForQuestionAnswering.forward.end_positions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RoFormerForQuestionAnswering.forward.end_positions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>end_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). 
Positions outside of the sequence are not taken into account for computing the loss.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.RoFormerForQuestionAnswering.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.QuestionAnsweringModelOutput" >transformers.modeling_outputs.QuestionAnsweringModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.QuestionAnsweringModelOutput" >transformers.modeling_outputs.QuestionAnsweringModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/roformer#transformers.RoFormerConfig" >RoFormerConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) — Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.</p> </li> <li> <p><strong>start_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) — Span-start scores (before SoftMax).</p> </li> <li> <p><strong>end_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) — Span-end scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/roformer#transformers.RoFormerForQuestionAnswering">RoFormerForQuestionAnswering</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while 
the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RoFormerTokenizer, RoFormerForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>torch.manual_seed(<span class="hljs-number">0</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RoFormerTokenizer.from_pretrained(<span class="hljs-string">&quot;junnyu/roformer_chinese_base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = RoFormerForQuestionAnswering.from_pretrained(<span class="hljs-string">&quot;junnyu/roformer_chinese_base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>question, text = <span class="hljs-string">&quot;Who was Jim Henson?&quot;</span>, <span class="hljs-string">&quot;Jim Henson was a nice puppet&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(question, text, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>start_positions = torch.tensor([<span class="hljs-number">1</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>end_positions = torch.tensor([<span class="hljs-number">3</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, start_positions=start_positions, end_positions=end_positions) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">round</span>(loss.item(), <span class="hljs-number">2</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>start_scores = outputs.start_logits <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">list</span>(start_scores.shape) <span class="hljs-meta">&gt;&gt;&gt; </span>end_scores = outputs.end_logits <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">list</span>(end_scores.shape) <!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.TFRoFormerModel" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.TFRoFormerModel"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TFRoFormerModel </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFRoFormerModel"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TFRoFormerModel</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TFRoFormerModel" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFRoFormerModel"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roformer/modeling_tf_roformer.py#L808" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma 
cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRoFormerModel.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRoFormerModel.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/roformer#transformers.RoFormerConfig">RoFormerConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>The bare RoFormer Model transformer outputing raw hidden-states without any specific head on top.</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)</p> <p>This model is also a <a href="https://www.tensorflow.org/api_docs/python/tf/keras/Model" rel="nofollow">tf.keras.Model</a> subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and behavior.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>TF 2.0 models accept two formats as inputs:</p> <ul><li>having all inputs as keyword arguments (like PyTorch models), or</li> <li>having all inputs as a list, tuple or dict in the first positional argument.</li></ul> <p>This second option is useful when using the <code>tf.keras.Model.fit</code> method, which currently requires having all the tensors in the first argument of the model call function: <code>model(inputs)</code>.</p> <p>If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument:</p> <ul><li>a single Tensor with <code>input_ids</code> only and nothing else: <code>model(input_ids)</code></li> <li>a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: <code>model([input_ids, attention_mask])</code> or <code>model([input_ids, attention_mask, token_type_ids])</code></li> <li>a dictionary with one or several input Tensors associated with the input names given in the docstring: <code>model({&quot;input_ids&quot;: input_ids, &quot;token_type_ids&quot;: token_type_ids})</code></li></ul></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFRoFormerModel.call"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>call</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFRoFormerModel.call" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFRoFormerModel.call"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 
0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roformer/modeling_tf_roformer.py#L814" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">training<span class="opacity-60">: typing.Optional[bool] = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black 
hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFBaseModelOutputWithPooling" >transformers.modeling_tf_outputs.TFBaseModelOutputWithPooling</a> or <code>tuple(tf.Tensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRoFormerModel.call.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRoFormerModel.call.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>np.ndarray</code>, <code>tf.Tensor</code>, <code>List[tf.Tensor]</code> `<code>Dict[str, tf.Tensor]</code> or <code>Dict[str, np.ndarray]</code> and each example must have the shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/roformer#transformers.RoFormerTokenizer">RoFormerTokenizer</a>. 
See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRoFormerModel.call.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRoFormerModel.call.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRoFormerModel.call.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRoFormerModel.call.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRoFormerModel.call.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRoFormerModel.call.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRoFormerModel.call.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRoFormerModel.call.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRoFormerModel.call.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRoFormerModel.call.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRoFormerModel.call.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRoFormerModel.call.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRoFormerModel.call.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRoFormerModel.call.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRoFormerModel.call.training" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRoFormerModel.call.training"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.TFRoFormerModel.call.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFBaseModelOutputWithPooling" >transformers.modeling_tf_outputs.TFBaseModelOutputWithPooling</a> or 
<code>tuple(tf.Tensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFBaseModelOutputWithPooling" >transformers.modeling_tf_outputs.TFBaseModelOutputWithPooling</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/roformer#transformers.RoFormerConfig" >RoFormerConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) — Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>pooler_output</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, hidden_size)</code>) — Last layer hidden-state of the first token of the sequence (classification token) further processed by a Linear layer and a Tanh activation function. The Linear layer weights are trained from the next sentence prediction (classification) objective during pretraining.</p> <p>This output is usually <em>not</em> a good summary of the semantic content of the input, you’re often better with averaging or pooling the sequence of hidden-states for the whole input sequence.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/roformer#transformers.TFRoFormerModel">TFRoFormerModel</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" 
xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RoFormerTokenizer, TFRoFormerModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RoFormerTokenizer.from_pretrained(<span class="hljs-string">&quot;junnyu/roformer_chinese_base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFRoFormerModel.from_pretrained(<span class="hljs-string">&quot;junnyu/roformer_chinese_base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.TFRoFormerForMaskedLM" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRoFormerForMaskedLM"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TFRoFormerForMaskedLM </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFRoFormerForMaskedLM"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TFRoFormerForMaskedLM</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TFRoFormerForMaskedLM" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFRoFormerForMaskedLM"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roformer/modeling_tf_roformer.py#L870" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRoFormerForMaskedLM.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRoFormerForMaskedLM.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 
84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/roformer#transformers.RoFormerConfig">RoFormerConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>RoFormer Model with a <code>language modeling</code> head on top.</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads, etc.)</p> <p>This model is also a <a href="https://www.tensorflow.org/api_docs/python/tf/keras/Model" rel="nofollow">tf.keras.Model</a> subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and behavior.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>TF 2.0 models accept two formats as inputs:</p> <ul><li>having all inputs as keyword arguments (like PyTorch models), or</li> <li>having all inputs as a list, tuple or dict in the first positional argument.</li></ul> <p>This second option is useful when using the <code>tf.keras.Model.fit</code> method, which currently requires having all the tensors in the first argument of the model call function: <code>model(inputs)</code>.</p> <p>If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument:</p> <ul><li>a single Tensor with <code>input_ids</code> only and nothing else: <code>model(input_ids)</code></li> <li>a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: <code>model([input_ids, attention_mask])</code> or <code>model([input_ids, attention_mask, token_type_ids])</code></li> <li>a dictionary with one or several input Tensors associated with the input names given in the docstring: <code>model({&quot;input_ids&quot;: input_ids, &quot;token_type_ids&quot;: token_type_ids})</code></li></ul></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFRoFormerForMaskedLM.call"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" 
xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>call</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFRoFormerForMaskedLM.call" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFRoFormerForMaskedLM.call"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roformer/modeling_tf_roformer.py#L886" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60">: typing.Union[numpy.ndarray, 
tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">training<span class="opacity-60">: typing.Optional[bool] = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFMaskedLMOutput" >transformers.modeling_tf_outputs.TFMaskedLMOutput</a> or <code>tuple(tf.Tensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRoFormerForMaskedLM.call.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRoFormerForMaskedLM.call.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 
11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>np.ndarray</code>, <code>tf.Tensor</code>, <code>List[tf.Tensor]</code>, <code>Dict[str, tf.Tensor]</code> or <code>Dict[str, np.ndarray]</code> and each example must have the shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/roformer#transformers.RoFormerTokenizer">RoFormerTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRoFormerForMaskedLM.call.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRoFormerForMaskedLM.call.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRoFormerForMaskedLM.call.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRoFormerForMaskedLM.call.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRoFormerForMaskedLM.call.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRoFormerForMaskedLM.call.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRoFormerForMaskedLM.call.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRoFormerForMaskedLM.call.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRoFormerForMaskedLM.call.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRoFormerForMaskedLM.call.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRoFormerForMaskedLM.call.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRoFormerForMaskedLM.call.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRoFormerForMaskedLM.call.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRoFormerForMaskedLM.call.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. 
This argument can be used in eager mode, in graph mode the value will always be set to True.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRoFormerForMaskedLM.call.training" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRoFormerForMaskedLM.call.training"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRoFormerForMaskedLM.call.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRoFormerForMaskedLM.call.labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>tf.Tensor</code> or <code>np.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. 
Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code><!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.TFRoFormerForMaskedLM.call.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFMaskedLMOutput" >transformers.modeling_tf_outputs.TFMaskedLMOutput</a> or <code>tuple(tf.Tensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFMaskedLMOutput" >transformers.modeling_tf_outputs.TFMaskedLMOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/roformer#transformers.RoFormerConfig" >RoFormerConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(n,)</code>, <em>optional</em>, where n is the number of non-masked labels, returned when <code>labels</code> is provided) — Masked language modeling (MLM) loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) — Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/roformer#transformers.TFRoFormerForMaskedLM">TFRoFormerForMaskedLM</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores 
them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RoFormerTokenizer, TFRoFormerForMaskedLM <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RoFormerTokenizer.from_pretrained(<span class="hljs-string">&quot;junnyu/roformer_chinese_base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFRoFormerForMaskedLM.from_pretrained(<span class="hljs-string">&quot;junnyu/roformer_chinese_base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;The capital of France is [MASK].&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs[<span class="hljs-string">&quot;labels&quot;</span>] = tokenizer(<span class="hljs-string">&quot;The capital of France is Paris.&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>)[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.TFRoFormerForCausalLM" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRoFormerForCausalLM"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 
56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TFRoFormerForCausalLM </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFRoFormerForCausalLM"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TFRoFormerForCausalLM</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TFRoFormerForCausalLM" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFRoFormerForCausalLM"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roformer/modeling_tf_roformer.py#L968" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span 
class="group flex space-x-1.5 items-start"><a id="transformers.TFRoFormerForCausalLM.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRoFormerForCausalLM.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/roformer#transformers.RoFormerConfig">RoFormerConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>RoFormer Model with a <code>language modeling</code> head on top for CLM fine-tuning.</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)</p> <p>This model is also a <a href="https://www.tensorflow.org/api_docs/python/tf/keras/Model" rel="nofollow">tf.keras.Model</a> subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and behavior.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>TF 2.0 models accept two formats as inputs:</p> <ul><li>having all inputs as keyword arguments (like PyTorch models), or</li> <li>having all inputs as a list, tuple or dict in the first positional argument.</li></ul> <p>This second option is useful when using the <code>tf.keras.Model.fit</code> method, which currently requires having all the tensors in the first argument of the model call function: <code>model(inputs)</code>.</p> <p>If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument:</p> <ul><li>a single Tensor with <code>input_ids</code> only and nothing else: <code>model(input_ids)</code></li> <li>a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: <code>model([input_ids, attention_mask])</code> or <code>model([input_ids, attention_mask, token_type_ids])</code></li> <li>a dictionary with one or several input Tensors associated with the input names given in the docstring: <code>model({&quot;input_ids&quot;: input_ids, &quot;token_type_ids&quot;: token_type_ids})</code></li></ul></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFRoFormerForCausalLM.call"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>call</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFRoFormerForCausalLM.call" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFRoFormerForCausalLM.call"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 
8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roformer/modeling_tf_roformer.py#L981" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span 
class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">training<span class="opacity-60">: typing.Optional[bool] = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFCausalLMOutput" >transformers.modeling_tf_outputs.TFCausalLMOutput</a> or <code>tuple(tf.Tensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.TFRoFormerForCausalLM.call.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFCausalLMOutput" >transformers.modeling_tf_outputs.TFCausalLMOutput</a> or <code>tuple(tf.Tensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFCausalLMOutput" >transformers.modeling_tf_outputs.TFCausalLMOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/roformer#transformers.RoFormerConfig" >RoFormerConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(n,)</code>, <em>optional</em>, where n is the number of non-masked labels, returned when <code>labels</code> is provided) — Language modeling loss (for next-token prediction).</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) — Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>labels (<code>tf.Tensor</code> or <code>np.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>): Labels for computing the cross entropy classification 
loss. Indices should be in <code>[0, ..., config.vocab_size - 1]</code>.</p> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RoFormerTokenizer, TFRoFormerForCausalLM <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RoFormerTokenizer.from_pretrained(<span class="hljs-string">&quot;junnyu/roformer_chinese_base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFRoFormerForCausalLM.from_pretrained(<span class="hljs-string">&quot;junnyu/roformer_chinese_base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.TFRoFormerForSequenceClassification" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRoFormerForSequenceClassification"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TFRoFormerForSequenceClassification </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r 
rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFRoFormerForSequenceClassification"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TFRoFormerForSequenceClassification</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TFRoFormerForSequenceClassification" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFRoFormerForSequenceClassification"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roformer/modeling_tf_roformer.py#L1096" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRoFormerForSequenceClassification.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 
with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRoFormerForSequenceClassification.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/roformer#transformers.RoFormerConfig">RoFormerConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>RoFormer Model transformer with a sequence classification/regression head on top e.g., for GLUE tasks.</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)</p> <p>This model is also a <a href="https://www.tensorflow.org/api_docs/python/tf/keras/Model" rel="nofollow">tf.keras.Model</a> subclass. 
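</p> <p>The kind of head is controlled by <code>num_labels</code>: with <code>config.num_labels == 1</code> the <code>labels</code> are treated as regression targets (Mean-Square loss), otherwise as class indices (Cross-Entropy), as described for the <code>labels</code> argument below. A minimal sketch of classification inference (the <code>num_labels</code> value and the example sentences are purely illustrative):</p> <div class="code-block relative"><pre>&gt;&gt;&gt; import tensorflow as tf
&gt;&gt;&gt; from transformers import RoFormerTokenizer, TFRoFormerForSequenceClassification

&gt;&gt;&gt; tokenizer = RoFormerTokenizer.from_pretrained("junnyu/roformer_chinese_base")
&gt;&gt;&gt; # a freshly initialized 2-way classification head on top of the pretrained encoder
&gt;&gt;&gt; model = TFRoFormerForSequenceClassification.from_pretrained("junnyu/roformer_chinese_base", num_labels=2)

&gt;&gt;&gt; inputs = tokenizer(["Hello, my dog is cute", "Hello, my cat is cute"], padding=True, return_tensors="tf")
&gt;&gt;&gt; logits = model(inputs).logits  # shape (2, num_labels)
&gt;&gt;&gt; predicted_class_ids = tf.math.argmax(logits, axis=-1)</pre></div> <p>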
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>TF 2.0 models accepts two formats as inputs:</p> <ul><li>having all inputs as keyword arguments (like PyTorch models), or</li> <li>having all inputs as a list, tuple or dict in the first positional arguments.</li></ul> <p>This second option is useful when using <code>tf.keras.Model.fit</code> method which currently requires having all the tensors in the first argument of the model call function: <code>model(inputs)</code>.</p> <p>If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :</p> <ul><li>a single Tensor with <code>input_ids</code> only and nothing else: <code>model(inputs_ids)</code></li> <li>a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: <code>model([input_ids, attention_mask])</code> or <code>model([input_ids, attention_mask, token_type_ids])</code></li> <li>a dictionary with one or several input Tensors associated to the input names given in the docstring: <code>model({&quot;input_ids&quot;: input_ids, &quot;token_type_ids&quot;: token_type_ids})</code></li></ul></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFRoFormerForSequenceClassification.call"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>call</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFRoFormerForSequenceClassification.call" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFRoFormerForSequenceClassification.call"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 
0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roformer/modeling_tf_roformer.py#L1105" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, 
NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">training<span class="opacity-60">: typing.Optional[bool] = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFSequenceClassifierOutput" >transformers.modeling_tf_outputs.TFSequenceClassifierOutput</a> or <code>tuple(tf.Tensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRoFormerForSequenceClassification.call.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRoFormerForSequenceClassification.call.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>np.ndarray</code>, <code>tf.Tensor</code>, <code>List[tf.Tensor]</code> `<code>Dict[str, tf.Tensor]</code> or <code>Dict[str, np.ndarray]</code> and each example must have the shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/roformer#transformers.RoFormerTokenizer">RoFormerTokenizer</a>. 
See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRoFormerForSequenceClassification.call.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRoFormerForSequenceClassification.call.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRoFormerForSequenceClassification.call.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRoFormerForSequenceClassification.call.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRoFormerForSequenceClassification.call.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRoFormerForSequenceClassification.call.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRoFormerForSequenceClassification.call.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRoFormerForSequenceClassification.call.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRoFormerForSequenceClassification.call.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRoFormerForSequenceClassification.call.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRoFormerForSequenceClassification.call.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRoFormerForSequenceClassification.call.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRoFormerForSequenceClassification.call.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRoFormerForSequenceClassification.call.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. 
This argument can be used in eager mode, in graph mode the value will always be set to True.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRoFormerForSequenceClassification.call.training" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRoFormerForSequenceClassification.call.training"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRoFormerForSequenceClassification.call.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRoFormerForSequenceClassification.call.labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>tf.Tensor</code> or <code>np.ndarray</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. 
If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.TFRoFormerForSequenceClassification.call.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFSequenceClassifierOutput" >transformers.modeling_tf_outputs.TFSequenceClassifierOutput</a> or <code>tuple(tf.Tensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFSequenceClassifierOutput" >transformers.modeling_tf_outputs.TFSequenceClassifierOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/roformer#transformers.RoFormerConfig" >RoFormerConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, )</code>, <em>optional</em>, returned when <code>labels</code> is provided) — Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, config.num_labels)</code>) — Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/roformer#transformers.TFRoFormerForSequenceClassification">TFRoFormerForSequenceClassification</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block 
relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RoFormerTokenizer, TFRoFormerForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RoFormerTokenizer.from_pretrained(<span class="hljs-string">&quot;junnyu/roformer_chinese_base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFRoFormerForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;junnyu/roformer_chinese_base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs[<span class="hljs-string">&quot;labels&quot;</span>] = tf.reshape(tf.constant(<span class="hljs-number">1</span>), (-<span class="hljs-number">1</span>, <span class="hljs-number">1</span>)) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.TFRoFormerForMultipleChoice" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRoFormerForMultipleChoice"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 
11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TFRoFormerForMultipleChoice </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFRoFormerForMultipleChoice"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TFRoFormerForMultipleChoice</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TFRoFormerForMultipleChoice" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFRoFormerForMultipleChoice"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roformer/modeling_tf_roformer.py#L1187" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span 
class="group flex space-x-1.5 items-start"><a id="transformers.TFRoFormerForMultipleChoice.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRoFormerForMultipleChoice.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/roformer#transformers.RoFormerConfig">RoFormerConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>RoFormer Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)</p> <p>This model is also a <a href="https://www.tensorflow.org/api_docs/python/tf/keras/Model" rel="nofollow">tf.keras.Model</a> subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>TF 2.0 models accepts two formats as inputs:</p> <ul><li>having all inputs as keyword arguments (like PyTorch models), or</li> <li>having all inputs as a list, tuple or dict in the first positional arguments.</li></ul> <p>This second option is useful when using <code>tf.keras.Model.fit</code> method which currently requires having all the tensors in the first argument of the model call function: <code>model(inputs)</code>.</p> <p>If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :</p> <ul><li>a single Tensor with <code>input_ids</code> only and nothing else: <code>model(inputs_ids)</code></li> <li>a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: <code>model([input_ids, attention_mask])</code> or <code>model([input_ids, attention_mask, token_type_ids])</code></li> <li>a dictionary with one or several input Tensors associated to the input names given in the docstring: <code>model({&quot;input_ids&quot;: input_ids, &quot;token_type_ids&quot;: token_type_ids})</code></li></ul></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFRoFormerForMultipleChoice.call"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>call</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFRoFormerForMultipleChoice.call" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFRoFormerForMultipleChoice.call"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 
1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roformer/modeling_tf_roformer.py#L1208" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] 
= None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">training<span class="opacity-60">: typing.Optional[bool] = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput" >transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput</a> or <code>tuple(tf.Tensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRoFormerForMultipleChoice.call.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRoFormerForMultipleChoice.call.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>np.ndarray</code>, <code>tf.Tensor</code>, <code>List[tf.Tensor]</code> `<code>Dict[str, tf.Tensor]</code> or <code>Dict[str, np.ndarray]</code> and each example must have the shape <code>(batch_size, num_choices, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/roformer#transformers.RoFormerTokenizer">RoFormerTokenizer</a>. 
See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRoFormerForMultipleChoice.call.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRoFormerForMultipleChoice.call.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRoFormerForMultipleChoice.call.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRoFormerForMultipleChoice.call.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRoFormerForMultipleChoice.call.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRoFormerForMultipleChoice.call.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRoFormerForMultipleChoice.call.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRoFormerForMultipleChoice.call.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, num_choices, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRoFormerForMultipleChoice.call.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRoFormerForMultipleChoice.call.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRoFormerForMultipleChoice.call.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRoFormerForMultipleChoice.call.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRoFormerForMultipleChoice.call.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRoFormerForMultipleChoice.call.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. 
This argument can be used in eager mode, in graph mode the value will always be set to True.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRoFormerForMultipleChoice.call.training" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRoFormerForMultipleChoice.call.training"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRoFormerForMultipleChoice.call.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRoFormerForMultipleChoice.call.labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>tf.Tensor</code> or <code>np.ndarray</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the multiple choice classification loss. Indices should be in <code>[0, ..., num_choices]</code> where <code>num_choices</code> is the size of the second dimension of the input tensors. 
(See <code>input_ids</code> above)<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.TFRoFormerForMultipleChoice.call.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput" >transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput</a> or <code>tuple(tf.Tensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput" >transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/roformer#transformers.RoFormerConfig" >RoFormerConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <em>(batch_size, )</em>, <em>optional</em>, returned when <code>labels</code> is provided) — Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, num_choices)</code>) — <em>num_choices</em> is the second dimension of the input tensors. (see <em>input_ids</em> above).</p> <p>Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/roformer#transformers.TFRoFormerForMultipleChoice">TFRoFormerForMultipleChoice</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer 
focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RoFormerTokenizer, TFRoFormerForMultipleChoice <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RoFormerTokenizer.from_pretrained(<span class="hljs-string">&quot;junnyu/roformer_chinese_base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFRoFormerForMultipleChoice.from_pretrained(<span class="hljs-string">&quot;junnyu/roformer_chinese_base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice0 = <span class="hljs-string">&quot;It is eaten with a fork and a knife.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice1 = <span class="hljs-string">&quot;It is eaten while held in the hand.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors=<span class="hljs-string">&quot;tf&quot;</span>, padding=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = {k: tf.expand_dims(v, <span class="hljs-number">0</span>) <span class="hljs-keyword">for</span> k, v <span class="hljs-keyword">in</span> encoding.items()} <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-comment"># batch size is 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># the linear classifier still needs to be trained</span> <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.TFRoFormerForTokenClassification" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRoFormerForTokenClassification"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 
1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TFRoFormerForTokenClassification </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFRoFormerForTokenClassification"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TFRoFormerForTokenClassification</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TFRoFormerForTokenClassification" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFRoFormerForTokenClassification"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roformer/modeling_tf_roformer.py#L1335" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black 
hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRoFormerForTokenClassification.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRoFormerForTokenClassification.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/roformer#transformers.RoFormerConfig">RoFormerConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>RoFormer Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)</p> <p>This model is also a <a href="https://www.tensorflow.org/api_docs/python/tf/keras/Model" rel="nofollow">tf.keras.Model</a> subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>TF 2.0 models accepts two formats as inputs:</p> <ul><li>having all inputs as keyword arguments (like PyTorch models), or</li> <li>having all inputs as a list, tuple or dict in the first positional arguments.</li></ul> <p>This second option is useful when using <code>tf.keras.Model.fit</code> method which currently requires having all the tensors in the first argument of the model call function: <code>model(inputs)</code>.</p> <p>If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :</p> <ul><li>a single Tensor with <code>input_ids</code> only and nothing else: <code>model(inputs_ids)</code></li> <li>a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: <code>model([input_ids, attention_mask])</code> or <code>model([input_ids, attention_mask, token_type_ids])</code></li> <li>a dictionary with one or several input Tensors associated to the input names given in the docstring: <code>model({&quot;input_ids&quot;: input_ids, &quot;token_type_ids&quot;: token_type_ids})</code></li></ul></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFRoFormerForTokenClassification.call"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>call</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFRoFormerForTokenClassification.call" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFRoFormerForTokenClassification.call"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 
1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roformer/modeling_tf_roformer.py#L1347" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] 
= None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">training<span class="opacity-60">: typing.Optional[bool] = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFTokenClassifierOutput" >transformers.modeling_tf_outputs.TFTokenClassifierOutput</a> or <code>tuple(tf.Tensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRoFormerForTokenClassification.call.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRoFormerForTokenClassification.call.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>np.ndarray</code>, <code>tf.Tensor</code>, <code>List[tf.Tensor]</code> `<code>Dict[str, tf.Tensor]</code> or <code>Dict[str, np.ndarray]</code> and each example must have the shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/roformer#transformers.RoFormerTokenizer">RoFormerTokenizer</a>. 
See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRoFormerForTokenClassification.call.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRoFormerForTokenClassification.call.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRoFormerForTokenClassification.call.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRoFormerForTokenClassification.call.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRoFormerForTokenClassification.call.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRoFormerForTokenClassification.call.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRoFormerForTokenClassification.call.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRoFormerForTokenClassification.call.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRoFormerForTokenClassification.call.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRoFormerForTokenClassification.call.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRoFormerForTokenClassification.call.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRoFormerForTokenClassification.call.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRoFormerForTokenClassification.call.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRoFormerForTokenClassification.call.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. 
This argument can be used in eager mode, in graph mode the value will always be set to True.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRoFormerForTokenClassification.call.training" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRoFormerForTokenClassification.call.training"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRoFormerForTokenClassification.call.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRoFormerForTokenClassification.call.labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>tf.Tensor</code> or <code>np.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the token classification loss. 
Indices should be in <code>[0, ..., config.num_labels - 1]</code>.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.TFRoFormerForTokenClassification.call.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFTokenClassifierOutput" >transformers.modeling_tf_outputs.TFTokenClassifierOutput</a> or <code>tuple(tf.Tensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFTokenClassifierOutput" >transformers.modeling_tf_outputs.TFTokenClassifierOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/roformer#transformers.RoFormerConfig" >RoFormerConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(n,)</code>, <em>optional</em>, where n is the number of unmasked labels, returned when <code>labels</code> is provided) — Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.num_labels)</code>) — Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/roformer#transformers.TFRoFormerForTokenClassification">TFRoFormerForTokenClassification</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition 
duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RoFormerTokenizer, TFRoFormerForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RoFormerTokenizer.from_pretrained(<span class="hljs-string">&quot;junnyu/roformer_chinese_base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFRoFormerForTokenClassification.from_pretrained(<span class="hljs-string">&quot;junnyu/roformer_chinese_base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = inputs[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>inputs[<span class="hljs-string">&quot;labels&quot;</span>] = tf.reshape( <span class="hljs-meta">... 
</span> tf.constant([<span class="hljs-number">1</span>] * tf.size(input_ids).numpy()), (-<span class="hljs-number">1</span>, tf.size(input_ids)) <span class="hljs-meta">&gt;&gt;&gt; </span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.TFRoFormerForQuestionAnswering" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRoFormerForQuestionAnswering"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TFRoFormerForQuestionAnswering </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFRoFormerForQuestionAnswering"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TFRoFormerForQuestionAnswering</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TFRoFormerForQuestionAnswering" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFRoFormerForQuestionAnswering"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 
28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roformer/modeling_tf_roformer.py#L1428" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRoFormerForQuestionAnswering.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRoFormerForQuestionAnswering.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/roformer#transformers.RoFormerConfig">RoFormerConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>RoFormer Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layer on top of the hidden-states output to compute <code>span start logits</code> and <code>span end logits</code>).</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>. 
Check the superclass documentation for the generic methods the library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads, etc.)</p> <p>This model is also a <a href="https://www.tensorflow.org/api_docs/python/tf/keras/Model" rel="nofollow">tf.keras.Model</a> subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and behavior.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>TF 2.0 models accept two formats as inputs:</p> <ul><li>having all inputs as keyword arguments (like PyTorch models), or</li> <li>having all inputs as a list, tuple or dict in the first positional argument.</li></ul> <p>This second option is useful when using the <code>tf.keras.Model.fit</code> method, which currently requires having all the tensors in the first argument of the model call function: <code>model(inputs)</code>.</p> <p>If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument:</p> <ul><li>a single Tensor with <code>input_ids</code> only and nothing else: <code>model(input_ids)</code></li> <li>a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: <code>model([input_ids, attention_mask])</code> or <code>model([input_ids, attention_mask, token_type_ids])</code></li> <li>a dictionary with one or several input Tensors associated with the input names given in the docstring: <code>model({&quot;input_ids&quot;: input_ids, &quot;token_type_ids&quot;: token_type_ids})</code></li></ul></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFRoFormerForQuestionAnswering.call"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>call</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFRoFormerForQuestionAnswering.call" class="header-link invisible with-hover:group-hover:visible pr-2"
href="#transformers.TFRoFormerForQuestionAnswering.call"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roformer/modeling_tf_roformer.py#L1439" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">return_dict<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">start_positions<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">end_positions<span class="opacity-60">: typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">training<span class="opacity-60">: typing.Optional[bool] = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput" >transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput</a> or <code>tuple(tf.Tensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRoFormerForQuestionAnswering.call.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRoFormerForQuestionAnswering.call.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>np.ndarray</code>, <code>tf.Tensor</code>, <code>List[tf.Tensor]</code> `<code>Dict[str, tf.Tensor]</code> or <code>Dict[str, np.ndarray]</code> and each example must have the shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/roformer#transformers.RoFormerTokenizer">RoFormerTokenizer</a>. 
See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRoFormerForQuestionAnswering.call.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRoFormerForQuestionAnswering.call.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRoFormerForQuestionAnswering.call.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRoFormerForQuestionAnswering.call.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRoFormerForQuestionAnswering.call.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRoFormerForQuestionAnswering.call.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRoFormerForQuestionAnswering.call.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRoFormerForQuestionAnswering.call.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRoFormerForQuestionAnswering.call.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRoFormerForQuestionAnswering.call.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRoFormerForQuestionAnswering.call.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRoFormerForQuestionAnswering.call.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRoFormerForQuestionAnswering.call.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRoFormerForQuestionAnswering.call.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. 
This argument can be used in eager mode, in graph mode the value will always be set to True.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRoFormerForQuestionAnswering.call.training" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRoFormerForQuestionAnswering.call.training"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to `False&#x201C;) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRoFormerForQuestionAnswering.call.start_positions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRoFormerForQuestionAnswering.call.start_positions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>start_positions</strong> (<code>tf.Tensor</code> or <code>np.ndarray</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). 
Positions outside of the sequence are not taken into account for computing the loss.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRoFormerForQuestionAnswering.call.end_positions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRoFormerForQuestionAnswering.call.end_positions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>end_positions</strong> (<code>tf.Tensor</code> or <code>np.ndarray</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). Positions outside of the sequence are not taken into account for computing the loss.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.TFRoFormerForQuestionAnswering.call.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput" >transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput</a> or <code>tuple(tf.Tensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput" >transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/roformer#transformers.RoFormerConfig" >RoFormerConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, )</code>, <em>optional</em>, returned when <code>start_positions</code> and <code>end_positions</code> are provided) — Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.</p> </li> <li> <p><strong>start_logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) — Span-start scores (before SoftMax).</p> </li> <li> <p><strong>end_logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) — Span-end scores (before SoftMax).</p> </li> <li>
<p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/roformer#transformers.TFRoFormerForQuestionAnswering">TFRoFormerForQuestionAnswering</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RoFormerTokenizer, TFRoFormerForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RoFormerTokenizer.from_pretrained(<span class="hljs-string">&quot;junnyu/roformer_chinese_base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFRoFormerForQuestionAnswering.from_pretrained(<span 
class="hljs-string">&quot;junnyu/roformer_chinese_base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>question, text = <span class="hljs-string">&quot;Who was Jim Henson?&quot;</span>, <span class="hljs-string">&quot;Jim Henson was a nice puppet&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>input_dict = tokenizer(question, text, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_dict) <span class="hljs-meta">&gt;&gt;&gt; </span>start_logits = outputs.start_logits <span class="hljs-meta">&gt;&gt;&gt; </span>end_logits = outputs.end_logits <span class="hljs-meta">&gt;&gt;&gt; </span>all_tokens = tokenizer.convert_ids_to_tokens(input_dict[<span class="hljs-string">&quot;input_ids&quot;</span>].numpy()[<span class="hljs-number">0</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>answer = <span class="hljs-string">&quot; &quot;</span>.join(all_tokens[tf.math.argmax(start_logits, <span class="hljs-number">1</span>)[<span class="hljs-number">0</span>] : tf.math.argmax(end_logits, <span class="hljs-number">1</span>)[<span class="hljs-number">0</span>] + <span class="hljs-number">1</span>])<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.FlaxRoFormerModel" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRoFormerModel"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>FlaxRoFormerModel </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxRoFormerModel"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span 
class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">FlaxRoFormerModel</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.FlaxRoFormerModel" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxRoFormerModel"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roformer/modeling_flax_roformer.py#L737" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60">: RoFormerConfig</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_shape<span class="opacity-60">: typing.Tuple = (1, 1)</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">seed<span class="opacity-60">: int = 0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dtype<span class="opacity-60">: dtype = &lt;class &#39;jax._src.numpy.lax_numpy.float32&#39;&gt;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxRoFormerModel.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRoFormerModel.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 
0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/roformer#transformers.RoFormerConfig">RoFormerConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxRoFormerModel.dtype" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRoFormerModel.dtype"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>dtype</strong> (<code>jax.numpy.dtype</code>, <em>optional</em>, defaults to <code>jax.numpy.float32</code>) &#x2014; The data type of the computation. Can be one of <code>jax.numpy.float32</code>, <code>jax.numpy.float16</code> (on GPUs) and <code>jax.numpy.bfloat16</code> (on TPUs).</p> <p>This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If specified all the computation will be performed with the given <code>dtype</code>.</p> <p><strong>Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.</strong></p> <p>If you wish to change the dtype of the model parameters, see <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.to_fp16">to_fp16()</a> and <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.to_bf16">to_bf16()</a>.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>The bare RoFormer Model transformer outputting raw hidden-states without any specific head on top.</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel">FlaxPreTrainedModel</a>. 
Check the superclass documentation for the generic methods the library implements for all its models (such as downloading, saving and converting weights from PyTorch models)</p> <p>This model is also a Flax Linen <a href="https://flax.readthedocs.io/en/latest/flax.linen.html#module" rel="nofollow">flax.linen.Module</a> subclass. Use it as a regular Flax Linen Module and refer to the Flax documentation for all matters related to general usage and behavior.</p> <p>Finally, this model supports inherent JAX features such as:</p> <ul><li><a href="https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit" rel="nofollow">Just-In-Time (JIT) compilation</a></li> <li><a href="https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation" rel="nofollow">Automatic Differentiation</a></li> <li><a href="https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap" rel="nofollow">Vectorization</a></li> <li><a href="https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap" rel="nofollow">Parallelization</a></li></ul> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxRoFormerPreTrainedModel.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.FlaxRoFormerPreTrainedModel.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxRoFormerPreTrainedModel.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z"
fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roformer/modeling_flax_roformer.py#L643" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">params<span class="opacity-60">: dict = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dropout_rng<span class="opacity-60">: PRNGKey = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">train<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutput" >transformers.modeling_flax_outputs.FlaxBaseModelOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxRoFormerPreTrainedModel.__call__.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRoFormerPreTrainedModel.__call__.input_ids"><span><svg class="text-smd" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/roformer#transformers.RoFormerTokenizer">RoFormerTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxRoFormerPreTrainedModel.__call__.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRoFormerPreTrainedModel.__call__.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxRoFormerPreTrainedModel.__call__.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRoFormerPreTrainedModel.__call__.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxRoFormerPreTrainedModel.__call__.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRoFormerPreTrainedModel.__call__.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxRoFormerPreTrainedModel.__call__.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRoFormerPreTrainedModel.__call__.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxRoFormerPreTrainedModel.__call__.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRoFormerPreTrainedModel.__call__.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.FlaxRoFormerPreTrainedModel.__call__.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutput" 
>transformers.modeling_flax_outputs.FlaxBaseModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutput" >transformers.modeling_flax_outputs.FlaxBaseModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/roformer#transformers.RoFormerConfig" >RoFormerConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) — Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attention weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <code>FlaxRoFormerPreTrainedModel</code> forward method overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for the forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform 
-translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RoFormerTokenizer, FlaxRoFormerModel <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RoFormerTokenizer.from_pretrained(<span class="hljs-string">&quot;junnyu/roformer_chinese_base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxRoFormerModel.from_pretrained(<span class="hljs-string">&quot;junnyu/roformer_chinese_base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;jax&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.FlaxRoFormerForMaskedLM" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRoFormerForMaskedLM"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>FlaxRoFormerForMaskedLM </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxRoFormerForMaskedLM"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> 
<span class="font-medium">transformers.</span><span class="font-semibold">FlaxRoFormerForMaskedLM</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.FlaxRoFormerForMaskedLM" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxRoFormerForMaskedLM"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roformer/modeling_flax_roformer.py#L797" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60">: RoFormerConfig</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_shape<span class="opacity-60">: typing.Tuple = (1, 1)</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">seed<span class="opacity-60">: int = 0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dtype<span class="opacity-60">: dtype = &lt;class &#39;jax._src.numpy.lax_numpy.float32&#39;&gt;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxRoFormerForMaskedLM.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRoFormerForMaskedLM.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 
0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/roformer#transformers.RoFormerConfig">RoFormerConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxRoFormerForMaskedLM.dtype" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRoFormerForMaskedLM.dtype"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>dtype</strong> (<code>jax.numpy.dtype</code>, <em>optional</em>, defaults to <code>jax.numpy.float32</code>) &#x2014; The data type of the computation. Can be one of <code>jax.numpy.float32</code>, <code>jax.numpy.float16</code> (on GPUs) and <code>jax.numpy.bfloat16</code> (on TPUs).</p> <p>This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If specified all the computation will be performed with the given <code>dtype</code>.</p> <p><strong>Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.</strong></p> <p>If you wish to change the dtype of the model parameters, see <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.to_fp16">to_fp16()</a> and <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.to_bf16">to_bf16()</a>.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>RoFormer Model with a <code>language modeling</code> head on top.</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel">FlaxPreTrainedModel</a>. 
Check the superclass documentation for the generic methods the library implements for all its models (such as downloading, saving and converting weights from PyTorch models).</p> <p>This model is also a Flax Linen <a href="https://flax.readthedocs.io/en/latest/flax.linen.html#module" rel="nofollow">flax.linen.Module</a> subclass. Use it as a regular Flax linen Module and refer to the Flax documentation for all matters related to general usage and behavior.</p> <p>Finally, this model supports inherent JAX features such as:</p> <ul><li><a href="https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit" rel="nofollow">Just-In-Time (JIT) compilation</a></li> <li><a href="https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation" rel="nofollow">Automatic Differentiation</a></li> <li><a href="https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap" rel="nofollow">Vectorization</a></li> <li><a href="https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap" rel="nofollow">Parallelization</a></li></ul> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxRoFormerPreTrainedModel.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.FlaxRoFormerPreTrainedModel.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxRoFormerPreTrainedModel.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roformer/modeling_flax_roformer.py#L643" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">params<span class="opacity-60">: dict = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dropout_rng<span class="opacity-60">: PRNGKey = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">train<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxMaskedLMOutput" >transformers.modeling_flax_outputs.FlaxMaskedLMOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxRoFormerPreTrainedModel.__call__.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRoFormerPreTrainedModel.__call__.input_ids"><span><svg class="text-smd" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/roformer#transformers.RoFormerTokenizer">RoFormerTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxRoFormerPreTrainedModel.__call__.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRoFormerPreTrainedModel.__call__.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxRoFormerPreTrainedModel.__call__.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRoFormerPreTrainedModel.__call__.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxRoFormerPreTrainedModel.__call__.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRoFormerPreTrainedModel.__call__.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxRoFormerPreTrainedModel.__call__.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRoFormerPreTrainedModel.__call__.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxRoFormerPreTrainedModel.__call__.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRoFormerPreTrainedModel.__call__.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.FlaxRoFormerPreTrainedModel.__call__.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxMaskedLMOutput" 
>transformers.modeling_flax_outputs.FlaxMaskedLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxMaskedLMOutput" >transformers.modeling_flax_outputs.FlaxMaskedLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/roformer#transformers.RoFormerConfig" >RoFormerConfig</a>) and inputs.</p> <ul> <li> <p><strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) — Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attention weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <code>FlaxRoFormerPreTrainedModel</code> forward method overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for the forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 
top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RoFormerTokenizer, FlaxRoFormerForMaskedLM <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RoFormerTokenizer.from_pretrained(<span class="hljs-string">&quot;junnyu/roformer_chinese_base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxRoFormerForMaskedLM.from_pretrained(<span class="hljs-string">&quot;junnyu/roformer_chinese_base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;The capital of France is [MASK].&quot;</span>, return_tensors=<span class="hljs-string">&quot;jax&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.FlaxRoFormerForSequenceClassification" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRoFormerForSequenceClassification"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>FlaxRoFormerForSequenceClassification </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxRoFormerForSequenceClassification"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" 
fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">FlaxRoFormerForSequenceClassification</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.FlaxRoFormerForSequenceClassification" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxRoFormerForSequenceClassification"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roformer/modeling_flax_roformer.py#L862" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60">: RoFormerConfig</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_shape<span class="opacity-60">: typing.Tuple = (1, 1)</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">seed<span class="opacity-60">: int = 0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dtype<span class="opacity-60">: dtype = &lt;class &#39;jax._src.numpy.lax_numpy.float32&#39;&gt;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxRoFormerForSequenceClassification.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRoFormerForSequenceClassification.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 
1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/roformer#transformers.RoFormerConfig">RoFormerConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxRoFormerForSequenceClassification.dtype" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRoFormerForSequenceClassification.dtype"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>dtype</strong> (<code>jax.numpy.dtype</code>, <em>optional</em>, defaults to <code>jax.numpy.float32</code>) &#x2014; The data type of the computation. Can be one of <code>jax.numpy.float32</code>, <code>jax.numpy.float16</code> (on GPUs) and <code>jax.numpy.bfloat16</code> (on TPUs).</p> <p>This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If specified all the computation will be performed with the given <code>dtype</code>.</p> <p><strong>Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.</strong></p> <p>If you wish to change the dtype of the model parameters, see <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.to_fp16">to_fp16()</a> and <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.to_bf16">to_bf16()</a>.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>RoFormer Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel">FlaxPreTrainedModel</a>. 
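</p> <p>As a hedged illustration only (the classification head below is newly initialized on top of the <code>junnyu/roformer_chinese_base</code> checkpoint used elsewhere on this page, and <code>num_labels=2</code> is an arbitrary choice for the sketch), a classification forward pass might look like:</p> <div class="code-block relative"><pre>&gt;&gt;&gt; import jax.numpy as jnp
&gt;&gt;&gt; from transformers import RoFormerTokenizer, FlaxRoFormerForSequenceClassification

&gt;&gt;&gt; tokenizer = RoFormerTokenizer.from_pretrained(&quot;junnyu/roformer_chinese_base&quot;)
&gt;&gt;&gt; # num_labels=2 is illustrative; the head is randomly initialized and would need fine-tuning
&gt;&gt;&gt; model = FlaxRoFormerForSequenceClassification.from_pretrained(&quot;junnyu/roformer_chinese_base&quot;, num_labels=2)

&gt;&gt;&gt; inputs = tokenizer(&quot;Hello, my dog is cute&quot;, return_tensors=&quot;jax&quot;)
&gt;&gt;&gt; logits = model(**inputs).logits
&gt;&gt;&gt; predicted_class_id = int(jnp.argmax(logits, axis=-1)[0])</pre></div>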
Check the superclass documentation for the generic methods the library implements for all its models (such as downloading, saving and converting weights from PyTorch models).</p> <p>This model is also a Flax Linen <a href="https://flax.readthedocs.io/en/latest/flax.linen.html#module" rel="nofollow">flax.linen.Module</a> subclass. Use it as a regular Flax linen Module and refer to the Flax documentation for all matters related to general usage and behavior.</p> <p>Finally, this model supports inherent JAX features such as:</p> <ul><li><a href="https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit" rel="nofollow">Just-In-Time (JIT) compilation</a></li> <li><a href="https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation" rel="nofollow">Automatic Differentiation</a></li> <li><a href="https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap" rel="nofollow">Vectorization</a></li> <li><a href="https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap" rel="nofollow">Parallelization</a></li></ul> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxRoFormerPreTrainedModel.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.FlaxRoFormerPreTrainedModel.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxRoFormerPreTrainedModel.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roformer/modeling_flax_roformer.py#L643" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">params<span class="opacity-60">: dict = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dropout_rng<span class="opacity-60">: PRNGKey = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">train<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSequenceClassifierOutput" >transformers.modeling_flax_outputs.FlaxSequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxRoFormerPreTrainedModel.__call__.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRoFormerPreTrainedModel.__call__.input_ids"><span><svg 
class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/roformer#transformers.RoFormerTokenizer">RoFormerTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxRoFormerPreTrainedModel.__call__.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRoFormerPreTrainedModel.__call__.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxRoFormerPreTrainedModel.__call__.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRoFormerPreTrainedModel.__call__.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxRoFormerPreTrainedModel.__call__.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRoFormerPreTrainedModel.__call__.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxRoFormerPreTrainedModel.__call__.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRoFormerPreTrainedModel.__call__.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxRoFormerPreTrainedModel.__call__.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRoFormerPreTrainedModel.__call__.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.FlaxRoFormerPreTrainedModel.__call__.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSequenceClassifierOutput" 
>transformers.modeling_flax_outputs.FlaxSequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSequenceClassifierOutput" >transformers.modeling_flax_outputs.FlaxSequenceClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/roformer#transformers.RoFormerConfig" >RoFormerConfig</a>) and inputs.</p> <ul> <li> <p><strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, config.num_labels)</code>) — Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attention weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <code>FlaxRoFormerPreTrainedModel</code> forward method overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for the forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this, since the former takes care of running the pre- and post-processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full 
transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RoFormerTokenizer, FlaxRoFormerForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RoFormerTokenizer.from_pretrained(<span class="hljs-string">&quot;junnyu/roformer_chinese_base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxRoFormerForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;junnyu/roformer_chinese_base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;jax&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.FlaxRoFormerForMultipleChoice" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRoFormerForMultipleChoice"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>FlaxRoFormerForMultipleChoice </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxRoFormerForMultipleChoice"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" 
fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">FlaxRoFormerForMultipleChoice</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.FlaxRoFormerForMultipleChoice" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxRoFormerForMultipleChoice"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roformer/modeling_flax_roformer.py#L938" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60">: RoFormerConfig</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_shape<span class="opacity-60">: typing.Tuple = (1, 1)</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">seed<span class="opacity-60">: int = 0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dtype<span class="opacity-60">: dtype = &lt;class &#39;jax._src.numpy.lax_numpy.float32&#39;&gt;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxRoFormerForMultipleChoice.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRoFormerForMultipleChoice.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 
1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/roformer#transformers.RoFormerConfig">RoFormerConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxRoFormerForMultipleChoice.dtype" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRoFormerForMultipleChoice.dtype"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>dtype</strong> (<code>jax.numpy.dtype</code>, <em>optional</em>, defaults to <code>jax.numpy.float32</code>) &#x2014; The data type of the computation. Can be one of <code>jax.numpy.float32</code>, <code>jax.numpy.float16</code> (on GPUs) and <code>jax.numpy.bfloat16</code> (on TPUs).</p> <p>This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If specified all the computation will be performed with the given <code>dtype</code>.</p> <p><strong>Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.</strong></p> <p>If you wish to change the dtype of the model parameters, see <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.to_fp16">to_fp16()</a> and <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.to_bf16">to_bf16()</a>.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>RoFormer Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel">FlaxPreTrainedModel</a>. 
Check the superclass documentation for the generic methods the library implements for all its models (such as downloading, saving and converting weights from PyTorch models).</p> <p>This model is also a Flax Linen <a href="https://flax.readthedocs.io/en/latest/flax.linen.html#module" rel="nofollow">flax.linen.Module</a> subclass. Use it as a regular Flax linen Module and refer to the Flax documentation for all matters related to general usage and behavior.</p> <p>Finally, this model supports inherent JAX features such as:</p> <ul><li><a href="https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit" rel="nofollow">Just-In-Time (JIT) compilation</a></li> <li><a href="https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation" rel="nofollow">Automatic Differentiation</a></li> <li><a href="https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap" rel="nofollow">Vectorization</a></li> <li><a href="https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap" rel="nofollow">Parallelization</a></li></ul> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxRoFormerPreTrainedModel.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.FlaxRoFormerPreTrainedModel.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxRoFormerPreTrainedModel.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roformer/modeling_flax_roformer.py#L643" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">params<span class="opacity-60">: dict = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dropout_rng<span class="opacity-60">: PRNGKey = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">train<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxMultipleChoiceModelOutput" >transformers.modeling_flax_outputs.FlaxMultipleChoiceModelOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxRoFormerPreTrainedModel.__call__.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRoFormerPreTrainedModel.__call__.input_ids"><span><svg 
class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, num_choices, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/roformer#transformers.RoFormerTokenizer">RoFormerTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxRoFormerPreTrainedModel.__call__.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRoFormerPreTrainedModel.__call__.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxRoFormerPreTrainedModel.__call__.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRoFormerPreTrainedModel.__call__.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxRoFormerPreTrainedModel.__call__.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRoFormerPreTrainedModel.__call__.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxRoFormerPreTrainedModel.__call__.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRoFormerPreTrainedModel.__call__.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxRoFormerPreTrainedModel.__call__.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRoFormerPreTrainedModel.__call__.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.FlaxRoFormerPreTrainedModel.__call__.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxMultipleChoiceModelOutput" 
>transformers.modeling_flax_outputs.FlaxMultipleChoiceModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxMultipleChoiceModelOutput" >transformers.modeling_flax_outputs.FlaxMultipleChoiceModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/roformer#transformers.RoFormerConfig" >RoFormerConfig</a>) and inputs.</p> <ul> <li> <p><strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, num_choices)</code>) — <em>num_choices</em> is the second dimension of the input tensors (see <em>input_ids</em> above).</p> <p>Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attention weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <code>FlaxRoFormerPreTrainedModel</code> forward method overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for the forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this, since the former takes care of running the pre- and post-processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 
px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RoFormerTokenizer, FlaxRoFormerForMultipleChoice <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RoFormerTokenizer.from_pretrained(<span class="hljs-string">&quot;junnyu/roformer_chinese_base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxRoFormerForMultipleChoice.from_pretrained(<span class="hljs-string">&quot;junnyu/roformer_chinese_base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice0 = <span class="hljs-string">&quot;It is eaten with a fork and a knife.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice1 = <span class="hljs-string">&quot;It is eaten while held in the hand.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors=<span class="hljs-string">&quot;jax&quot;</span>, padding=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**{k: v[<span class="hljs-literal">None</span>, :] <span class="hljs-keyword">for</span> k, v <span class="hljs-keyword">in</span> encoding.items()}) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.FlaxRoFormerForTokenClassification" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRoFormerForTokenClassification"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>FlaxRoFormerForTokenClassification </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxRoFormerForTokenClassification"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">FlaxRoFormerForTokenClassification</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.FlaxRoFormerForTokenClassification" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxRoFormerForTokenClassification"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roformer/modeling_flax_roformer.py#L1007" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60">: RoFormerConfig</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_shape<span class="opacity-60">: typing.Tuple = (1, 1)</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">seed<span class="opacity-60">: int = 0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dtype<span class="opacity-60">: dtype = &lt;class &#39;jax._src.numpy.lax_numpy.float32&#39;&gt;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li 
class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxRoFormerForTokenClassification.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRoFormerForTokenClassification.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/roformer#transformers.RoFormerConfig">RoFormerConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxRoFormerForTokenClassification.dtype" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRoFormerForTokenClassification.dtype"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>dtype</strong> (<code>jax.numpy.dtype</code>, <em>optional</em>, defaults to <code>jax.numpy.float32</code>) &#x2014; The data type of the computation. Can be one of <code>jax.numpy.float32</code>, <code>jax.numpy.float16</code> (on GPUs) and <code>jax.numpy.bfloat16</code> (on TPUs).</p> <p>This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. 
If specified, all the computation will be performed with the given <code>dtype</code>.</p> <p><strong>Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.</strong></p> <p>If you wish to change the dtype of the model parameters, see <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.to_fp16">to_fp16()</a> and <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.to_bf16">to_bf16()</a>.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>RoFormer Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel">FlaxPreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its models (such as downloading, saving and converting weights from PyTorch models).</p> <p>This model is also a Flax Linen <a href="https://flax.readthedocs.io/en/latest/flax.linen.html#module" rel="nofollow">flax.linen.Module</a> subclass. Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior.</p> <p>Finally, this model supports inherent JAX features such as:</p> <ul><li><a href="https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit" rel="nofollow">Just-In-Time (JIT) compilation</a></li> <li><a href="https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation" rel="nofollow">Automatic Differentiation</a></li> <li><a href="https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap" rel="nofollow">Vectorization</a></li> <li><a href="https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap" rel="nofollow">Parallelization</a></li></ul> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxRoFormerPreTrainedModel.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a 
id="transformers.FlaxRoFormerPreTrainedModel.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxRoFormerPreTrainedModel.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roformer/modeling_flax_roformer.py#L643" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">params<span class="opacity-60">: dict = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dropout_rng<span class="opacity-60">: PRNGKey = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">train<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a 
href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxTokenClassifierOutput" >transformers.modeling_flax_outputs.FlaxTokenClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxRoFormerPreTrainedModel.__call__.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRoFormerPreTrainedModel.__call__.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/roformer#transformers.RoFormerTokenizer">RoFormerTokenizer</a>. 
See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxRoFormerPreTrainedModel.__call__.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRoFormerPreTrainedModel.__call__.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxRoFormerPreTrainedModel.__call__.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRoFormerPreTrainedModel.__call__.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxRoFormerPreTrainedModel.__call__.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRoFormerPreTrainedModel.__call__.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxRoFormerPreTrainedModel.__call__.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRoFormerPreTrainedModel.__call__.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <code>optional) -- Mask to nullify selected heads of the attention modules. 
Mask values selected in </code>[0, 1]`:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxRoFormerPreTrainedModel.__call__.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRoFormerPreTrainedModel.__call__.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.FlaxRoFormerPreTrainedModel.__call__.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxTokenClassifierOutput" >transformers.modeling_flax_outputs.FlaxTokenClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxTokenClassifierOutput" >transformers.modeling_flax_outputs.FlaxTokenClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/roformer#transformers.RoFormerConfig" >RoFormerConfig</a>) and inputs.</p> <ul> <li> <p><strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, config.num_labels)</code>) — Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> 
(<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <code>FlaxRoFormerPreTrainedModel</code>forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RoFormerTokenizer, FlaxRoFormerForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RoFormerTokenizer.from_pretrained(<span class="hljs-string">&quot;junnyu/roformer_chinese_base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxRoFormerForTokenClassification.from_pretrained(<span class="hljs-string">&quot;junnyu/roformer_chinese_base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;jax&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.FlaxRoFormerForQuestionAnswering" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRoFormerForQuestionAnswering"><span><svg 
class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>FlaxRoFormerForQuestionAnswering </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxRoFormerForQuestionAnswering"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">FlaxRoFormerForQuestionAnswering</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.FlaxRoFormerForQuestionAnswering" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxRoFormerForQuestionAnswering"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roformer/modeling_flax_roformer.py#L1076" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed 
!my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60">: RoFormerConfig</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_shape<span class="opacity-60">: typing.Tuple = (1, 1)</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">seed<span class="opacity-60">: int = 0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dtype<span class="opacity-60">: dtype = &lt;class &#39;jax._src.numpy.lax_numpy.float32&#39;&gt;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxRoFormerForQuestionAnswering.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRoFormerForQuestionAnswering.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/roformer#transformers.RoFormerConfig">RoFormerConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxRoFormerForQuestionAnswering.dtype" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRoFormerForQuestionAnswering.dtype"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>dtype</strong> (<code>jax.numpy.dtype</code>, <em>optional</em>, defaults to <code>jax.numpy.float32</code>) &#x2014; The data type of the computation. Can be one of <code>jax.numpy.float32</code>, <code>jax.numpy.float16</code> (on GPUs) and <code>jax.numpy.bfloat16</code> (on TPUs).</p> <p>This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If specified, all the computation will be performed with the given <code>dtype</code>.</p> <p><strong>Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.</strong></p> <p>If you wish to change the dtype of the model parameters, see <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.to_fp16">to_fp16()</a> and <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.to_bf16">to_bf16()</a>. A short half-precision sketch is included at the end of this page.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>RoFormer Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layer on top of the hidden-states output to compute <code>span start logits</code> and <code>span end logits</code>).</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel">FlaxPreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its models (such as downloading, saving and converting weights from PyTorch models).</p> <p>This model is also a Flax Linen <a href="https://flax.readthedocs.io/en/latest/flax.linen.html#module" rel="nofollow">flax.linen.Module</a> subclass. 
Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior.</p> <p>Finally, this model supports inherent JAX features such as:</p> <ul><li><a href="https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit" rel="nofollow">Just-In-Time (JIT) compilation</a></li> <li><a href="https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation" rel="nofollow">Automatic Differentiation</a></li> <li><a href="https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap" rel="nofollow">Vectorization</a></li> <li><a href="https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap" rel="nofollow">Parallelization</a></li></ul> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxRoFormerPreTrainedModel.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.FlaxRoFormerPreTrainedModel.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxRoFormerPreTrainedModel.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roformer/modeling_flax_roformer.py#L643" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> 
<span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">params<span class="opacity-60">: dict = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dropout_rng<span class="opacity-60">: PRNGKey = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">train<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxQuestionAnsweringModelOutput" >transformers.modeling_flax_outputs.FlaxQuestionAnsweringModelOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxRoFormerPreTrainedModel.__call__.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRoFormerPreTrainedModel.__call__.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 
0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/roformer#transformers.RoFormerTokenizer">RoFormerTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxRoFormerPreTrainedModel.__call__.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRoFormerPreTrainedModel.__call__.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxRoFormerPreTrainedModel.__call__.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRoFormerPreTrainedModel.__call__.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxRoFormerPreTrainedModel.__call__.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRoFormerPreTrainedModel.__call__.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxRoFormerPreTrainedModel.__call__.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRoFormerPreTrainedModel.__call__.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <code>optional) -- Mask to nullify selected heads of the attention modules. Mask values selected in </code>[0, 1]`:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxRoFormerPreTrainedModel.__call__.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxRoFormerPreTrainedModel.__call__.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.FlaxRoFormerPreTrainedModel.__call__.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxQuestionAnsweringModelOutput" 
>transformers.modeling_flax_outputs.FlaxQuestionAnsweringModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxQuestionAnsweringModelOutput" >transformers.modeling_flax_outputs.FlaxQuestionAnsweringModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/roformer#transformers.RoFormerConfig" >RoFormerConfig</a>) and inputs.</p> <ul> <li> <p><strong>start_logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) — Span-start scores (before SoftMax).</p> </li> <li> <p><strong>end_logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) — Span-end scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <code>FlaxRoFormerPreTrainedModel</code>forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div 
class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RoFormerTokenizer, FlaxRoFormerForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RoFormerTokenizer.from_pretrained(<span class="hljs-string">&quot;junnyu/roformer_chinese_base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxRoFormerForQuestionAnswering.from_pretrained(<span class="hljs-string">&quot;junnyu/roformer_chinese_base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>question, text = <span class="hljs-string">&quot;Who was Jim Henson?&quot;</span>, <span class="hljs-string">&quot;Jim Henson was a nice puppet&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(question, text, return_tensors=<span class="hljs-string">&quot;jax&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>start_scores = outputs.start_logits <span class="hljs-meta">&gt;&gt;&gt; </span>end_scores = outputs.end_logits<!-- HTML_TAG_END --></pre></div></div></div> <script type="module" data-hydrate="jw0k14"> import { start } from "/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"; start({ target: document.querySelector('[data-hydrate="jw0k14"]').parentNode, paths: {"base":"/docs/transformers/pr_16143/en","assets":"/docs/transformers/pr_16143/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"), import("/docs/transformers/pr_16143/en/_app/pages/model_doc/roformer.mdx-0f80020b.js") ], params: {} } }); </script>
222
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en
hf_public_repos/doc-build-dev/transformers/pr_16143/en/model_doc/prophetnet.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;prophetnet&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;overview&quot;,&quot;title&quot;:&quot;Overview&quot;},{&quot;local&quot;:&quot;transformers.ProphetNetConfig&quot;,&quot;title&quot;:&quot;ProphetNetConfig&quot;},{&quot;local&quot;:&quot;transformers.ProphetNetTokenizer&quot;,&quot;title&quot;:&quot;ProphetNetTokenizer&quot;},{&quot;local&quot;:&quot;transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqLMOutput&quot;,&quot;title&quot;:&quot;ProphetNet specific outputs&quot;},{&quot;local&quot;:&quot;transformers.ProphetNetModel&quot;,&quot;title&quot;:&quot;ProphetNetModel&quot;},{&quot;local&quot;:&quot;transformers.ProphetNetEncoder&quot;,&quot;title&quot;:&quot;ProphetNetEncoder&quot;},{&quot;local&quot;:&quot;transformers.ProphetNetDecoder&quot;,&quot;title&quot;:&quot;ProphetNetDecoder&quot;},{&quot;local&quot;:&quot;transformers.ProphetNetForConditionalGeneration&quot;,&quot;title&quot;:&quot;ProphetNetForConditionalGeneration&quot;},{&quot;local&quot;:&quot;transformers.ProphetNetForCausalLM&quot;,&quot;title&quot;:&quot;ProphetNetForCausalLM&quot;}],&quot;title&quot;:&quot;ProphetNet&quot;}" data-svelte="svelte-1phssyn"> <link rel="stylesheet" href="/docs/transformers/pr_16143/en/_app/assets/pages/__layout.svelte-a5c8879b.css"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/vendor-4833417e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/paths-4b3c6e7e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/model_doc/prophetnet.mdx-8afd09ba.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Tip-fffd6df1.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Docstring-4f315ed9.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/IconCopyLink-4b81c553.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CodeBlock-6a3d1b46.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CopyButton-dacfbfaf.js"> <h1 class="relative group"><a id="prophetnet" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#prophetnet"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>ProphetNet </span></h1> <p><strong>DISCLAIMER:</strong> If you see something strange, file a <a 
href="https://github.com/huggingface/transformers/issues/new?assignees=&labels=&template=bug-report.md&title" rel="nofollow">Github Issue</a> and assign @patrickvonplaten</p> <h2 class="relative group"><a id="overview" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#overview"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Overview </span></h2> <p>The ProphetNet model was proposed in <a href="https://arxiv.org/abs/2001.04063" rel="nofollow">ProphetNet: Predicting Future N-gram for Sequence-to-Sequence Pre-training,</a> by Yu Yan, Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang, Ming Zhou on 13 Jan, 2020.</p> <p>ProphetNet is an encoder-decoder model and can predict n-future tokens for “ngram” language modeling instead of just the next token.</p> <p>The abstract from the paper is the following:</p> <p><em>In this paper, we present a new sequence-to-sequence pretraining model called ProphetNet, which introduces a novel self-supervised objective named future n-gram prediction and the proposed n-stream self-attention mechanism. Instead of the optimization of one-step ahead prediction in traditional sequence-to-sequence model, the ProphetNet is optimized by n-step ahead prediction which predicts the next n tokens simultaneously based on previous context tokens at each time step. The future n-gram prediction explicitly encourages the model to plan for the future tokens and prevent overfitting on strong local correlations. We pre-train ProphetNet using a base scale dataset (16GB) and a large scale dataset (160GB) respectively. Then we conduct experiments on CNN/DailyMail, Gigaword, and SQuAD 1.1 benchmarks for abstractive summarization and question generation tasks. 
Experimental results show that ProphetNet achieves new state-of-the-art results on all these datasets compared to the models using the same scale pretraining corpus.</em></p> <p>The Authors’ code can be found <a href="https://github.com/microsoft/ProphetNet" rel="nofollow">here</a>.</p> <h2 class="relative group"><a id="transformers.ProphetNetConfig" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetConfig"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>ProphetNetConfig </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ProphetNetConfig"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">ProphetNetConfig</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.ProphetNetConfig" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ProphetNetConfig"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 
11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/prophetnet/configuration_prophetnet.py#L29" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">activation_dropout<span class="opacity-60"> = 0.1</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">activation_function<span class="opacity-60"> = &#39;gelu&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">vocab_size<span class="opacity-60"> = 30522</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_size<span class="opacity-60"> = 1024</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_ffn_dim<span class="opacity-60"> = 4096</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_encoder_layers<span class="opacity-60"> = 12</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_encoder_attention_heads<span class="opacity-60"> = 16</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_ffn_dim<span class="opacity-60"> = 4096</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_decoder_layers<span class="opacity-60"> = 12</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_decoder_attention_heads<span class="opacity-60"> = 16</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_dropout<span class="opacity-60"> = 0.1</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dropout<span class="opacity-60"> = 0.1</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_position_embeddings<span class="opacity-60"> = 512</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">init_std<span class="opacity-60"> = 0.02</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">is_encoder_decoder<span class="opacity-60"> = True</span></span> </span><span 
class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">add_cross_attention<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_start_token_id<span class="opacity-60"> = 0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">ngram<span class="opacity-60"> = 2</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_buckets<span class="opacity-60"> = 32</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">relative_max_distance<span class="opacity-60"> = 128</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">disable_ngram_loss<span class="opacity-60"> = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eps<span class="opacity-60"> = 0.0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_cache<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_token_id<span class="opacity-60"> = 0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">bos_token_id<span class="opacity-60"> = 1</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eos_token_id<span class="opacity-60"> = 2</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetConfig.activation_dropout" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetConfig.activation_dropout"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 
1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>activation_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout ratio for activations inside the fully connected layer.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetConfig.activation_function" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetConfig.activation_function"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>activation_function</strong> (<code>str</code> or <code>function</code>, <em>optional</em>, defaults to <code>&quot;gelu&quot;</code>) &#x2014; The non-linear activation function (function or string) in the encoder and pooler. If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;silu&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetConfig.vocab_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetConfig.vocab_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 30522) &#x2014; Vocabulary size of the ProphetNET model. 
Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed when calling <a href="/docs/transformers/pr_16143/en/model_doc/prophetnet#transformers.ProphetNetModel">ProphetNetModel</a>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetConfig.hidden_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetConfig.hidden_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_size</strong> (<code>int</code>, <em>optional</em>, defaults to 1024) &#x2014; Dimensionality of the layers and the pooler layer.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetConfig.encoder_ffn_dim" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetConfig.encoder_ffn_dim"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_ffn_dim</strong> (<code>int</code>, <em>optional</em>, defaults to 4096) &#x2014; Dimensionality of the &#x201C;intermediate&#x201D; (often named feed-forward) layer in decoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetConfig.num_encoder_layers" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetConfig.num_encoder_layers"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_encoder_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of encoder layers.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetConfig.num_encoder_attention_heads" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetConfig.num_encoder_attention_heads"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_encoder_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 16) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetConfig.decoder_ffn_dim" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetConfig.decoder_ffn_dim"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_ffn_dim</strong> (<code>int</code>, <em>optional</em>, defaults to 4096) &#x2014; Dimensionality of the <code>intermediate</code> (often named 
feed-forward) layer in decoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetConfig.num_decoder_layers" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetConfig.num_decoder_layers"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_decoder_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of decoder layers.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetConfig.num_decoder_attention_heads" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetConfig.num_decoder_attention_heads"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_decoder_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 16) &#x2014; Number of attention heads for each attention layer in the Transformer decoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetConfig.attention_dropout" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetConfig.attention_dropout"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 
0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout ratio for the attention probabilities.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetConfig.dropout" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetConfig.dropout"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetConfig.max_position_embeddings" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetConfig.max_position_embeddings"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_position_embeddings</strong> (<code>int</code>, <em>optional</em>, defaults to 512) &#x2014; The maximum sequence length that this model might ever be used with. 
Typically set this to something large just in case (e.g., 512 or 1024 or 2048).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetConfig.init_std" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetConfig.init_std"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>init_std</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetConfig.add_cross_attention" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetConfig.add_cross_attention"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>add_cross_attention</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether cross-attention layers should be added to the model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetConfig.is_encoder_decoder" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetConfig.is_encoder_decoder"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 
8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>is_encoder_decoder</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether this is an encoder/decoder model.<!-- HTML_TAG_END --> </span></span> </li>
<li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetConfig.pad_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetConfig.pad_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pad_token_id</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; Padding token id.<!-- HTML_TAG_END --> </span></span> </li>
<li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetConfig.bos_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetConfig.bos_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>bos_token_id</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; Beginning of stream token id.<!-- HTML_TAG_END --> </span></span> </li>
<li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetConfig.eos_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden 
with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetConfig.eos_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>eos_token_id</strong> (<code>int</code>, <em>optional</em>, defaults to 2) &#x2014; End of stream token id.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetConfig.ngram" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetConfig.ngram"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>ngram</strong> (<code>int</code>, <em>optional</em>, defaults to 2) &#x2014; Number of future tokens to predict. 
Set to 1 to behave the same as a traditional language model and predict only the next token.<!-- HTML_TAG_END --> </span></span> </li>
<li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetConfig.num_buckets" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetConfig.num_buckets"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_buckets</strong> (<code>int</code>, <em>optional</em>, defaults to 32) &#x2014; The number of buckets to use for each attention layer. This is for relative position calculation. See the <a href="https://arxiv.org/abs/1910.10683" rel="nofollow">T5 paper</a> for more details.<!-- HTML_TAG_END --> </span></span> </li>
<li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetConfig.relative_max_distance" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetConfig.relative_max_distance"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>relative_max_distance</strong> (<code>int</code>, <em>optional</em>, defaults to 128) &#x2014; Relative distances greater than this number are all put into the same (last) bucket. This is for relative position calculation. See the <a href="https://arxiv.org/abs/1910.10683" rel="nofollow">T5 paper</a> for more details.<!-- HTML_TAG_END --> </span></span> </li>
<li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetConfig.disable_ngram_loss" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetConfig.disable_ngram_loss"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>disable_ngram_loss</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to train the model to predict only the next token (i.e. not the additional future n-gram tokens).<!-- HTML_TAG_END --> </span></span> </li>
<li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetConfig.eps" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetConfig.eps"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>eps</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; Controls the <code>epsilon</code> parameter value for label smoothing in the loss calculation. 
If set to 0, no label smoothing is performed.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetConfig.use_cache" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetConfig.use_cache"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not the model should return the last key/values attentions (not used by all models).<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>This is the configuration class to store the configuration of a <a href="/docs/transformers/pr_16143/en/model_doc/prophetnet#transformers.ProphetNetModel">ProphetNetModel</a>. It is used to instantiate a ProphetNet model according to the specified arguments, defining the model architecture.</p> <p>Configuration objects inherit from <a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a> and can be used to control the model outputs. 
Read the documentation from <a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a> for more information.</p></div> <h2 class="relative group"><a id="transformers.ProphetNetTokenizer" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetTokenizer"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>ProphetNetTokenizer </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ProphetNetTokenizer"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">ProphetNetTokenizer</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.ProphetNetTokenizer" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ProphetNetTokenizer"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/prophetnet/tokenization_prophetnet.py#L55" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">vocab_file<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">do_lower_case<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">do_basic_tokenize<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">never_split<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">unk_token<span class="opacity-60"> = &#39;[UNK]&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">sep_token<span class="opacity-60"> = &#39;[SEP]&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">x_sep_token<span class="opacity-60"> = &#39;[X_SEP]&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_token<span class="opacity-60"> = &#39;[PAD]&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">mask_token<span class="opacity-60"> = &#39;[MASK]&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tokenize_chinese_chars<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">strip_accents<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetTokenizer.vocab_file" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetTokenizer.vocab_file"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>vocab_file</strong> (<code>str</code>) &#x2014; File containing the vocabulary.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetTokenizer.do_lower_case" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetTokenizer.do_lower_case"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>do_lower_case</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to lowercase the input when tokenizing.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetTokenizer.do_basic_tokenize" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetTokenizer.do_basic_tokenize"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>do_basic_tokenize</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to 
do basic tokenization before WordPiece.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetTokenizer.never_split" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetTokenizer.never_split"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>never_split</strong> (<code>Iterable</code>, <em>optional</em>) &#x2014; Collection of tokens which will never be split during tokenization. Only has an effect when <code>do_basic_tokenize=True</code><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetTokenizer.unk_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetTokenizer.unk_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[UNK]&quot;</code>) &#x2014; The unknown token. 
A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetTokenizer.sep_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetTokenizer.sep_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>sep_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[SEP]&quot;</code>) &#x2014; The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetTokenizer.x_sep_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetTokenizer.x_sep_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>x_sep_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[X_SEP]&quot;</code>) &#x2014; Special second separator token, which can be generated by <a href="/docs/transformers/pr_16143/en/model_doc/prophetnet#transformers.ProphetNetForConditionalGeneration">ProphetNetForConditionalGeneration</a>. 
It is used, for example, to separate bullet-point-like sentences in summarization.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetTokenizer.pad_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetTokenizer.pad_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pad_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[PAD]&quot;</code>) &#x2014; The token used for padding, for example when batching sequences of different lengths.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetTokenizer.cls_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetTokenizer.cls_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cls_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[CLS]&quot;</code>) &#x2014; The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification).
It is the first token of the sequence when built with special tokens.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetTokenizer.mask_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetTokenizer.mask_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>mask_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[MASK]&quot;</code>) &#x2014; The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetTokenizer.tokenize_chinese_chars" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetTokenizer.tokenize_chinese_chars"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tokenize_chinese_chars</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to tokenize Chinese characters.</p> <p>This should likely be deactivated for Japanese (see this <a href="https://github.com/huggingface/transformers/issues/328" rel="nofollow">issue</a>).</p> <p><strong>strip_accents</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for <code>lowercase</code> (as in the original BERT).<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Construct a ProphetNetTokenizer.
Based on WordPiece.</p> <p>This tokenizer inherits from <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a> which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ProphetNetTokenizer.build_inputs_with_special_tokens"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>build_inputs_with_special_tokens</span></h4><!-- HTML_TAG_END --> <a id="transformers.ProphetNetTokenizer.build_inputs_with_special_tokens" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ProphetNetTokenizer.build_inputs_with_special_tokens"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/prophetnet/tokenization_prophetnet.py#L266" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_0<span class="opacity-60">: typing.List[int]</span></span> </span><span class="comma 
cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_1<span class="opacity-60">: typing.Optional[typing.List[int]] = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>List[int]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetTokenizer.build_inputs_with_special_tokens.token_ids_0" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetTokenizer.build_inputs_with_special_tokens.token_ids_0"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs to which the special tokens will be added.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetTokenizer.build_inputs_with_special_tokens.token_ids_1" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetTokenizer.build_inputs_with_special_tokens.token_ids_1"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" 
id="transformers.ProphetNetTokenizer.build_inputs_with_special_tokens.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>List[int]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>List of <a href="../glossary#input-ids">input IDs</a> with the appropriate special tokens.</p> <!-- HTML_TAG_END --></p></div></div> <p>Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A BERT sequence has the following format:</p> <ul><li>single sequence: <code>[CLS] X [SEP]</code></li> <li>pair of sequences: <code>[CLS] A [SEP] B [SEP]</code></li></ul></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ProphetNetTokenizer.convert_tokens_to_string"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>convert_tokens_to_string</span></h4><!-- HTML_TAG_END --> <a id="transformers.ProphetNetTokenizer.convert_tokens_to_string" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ProphetNetTokenizer.convert_tokens_to_string"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/prophetnet/tokenization_prophetnet.py#L186" 
target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tokens<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Converts a sequence of tokens (string) in a single string.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ProphetNetTokenizer.create_token_type_ids_from_sequences"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>create_token_type_ids_from_sequences</span></h4><!-- HTML_TAG_END --> <a id="transformers.ProphetNetTokenizer.create_token_type_ids_from_sequences" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ProphetNetTokenizer.create_token_type_ids_from_sequences"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/prophetnet/tokenization_prophetnet.py#L218" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed 
!my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_0<span class="opacity-60">: typing.List[int]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_1<span class="opacity-60">: typing.Optional[typing.List[int]] = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>List[int]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetTokenizer.create_token_type_ids_from_sequences.token_ids_0" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetTokenizer.create_token_type_ids_from_sequences.token_ids_0"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetTokenizer.create_token_type_ids_from_sequences.token_ids_1" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetTokenizer.create_token_type_ids_from_sequences.token_ids_1"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_1</strong> (<code>List[int]</code>, 
<em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.ProphetNetTokenizer.create_token_type_ids_from_sequences.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>List[int]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>List of <a href="../glossary#token-type-ids">token type IDs</a> according to the given sequence(s).</p> <!-- HTML_TAG_END --></p></div></div> <p>Create a mask from the two sequences passed to be used in a sequence-pair classification task. A ProphetNet sequence pair mask has the following format:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 1 </span>1<span class="hljs-number"> 1 </span>1<span class="hljs-number"> 1 </span>1<span class="hljs-number"> 1 </span>1 1 | first sequence | second sequence |<!-- HTML_TAG_END --></pre></div> <p>If <code>token_ids_1</code> is <code>None</code>, this method only returns the first portion of the mask (0s).</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ProphetNetTokenizer.get_special_tokens_mask"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 
12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>get_special_tokens_mask</span></h4><!-- HTML_TAG_END --> <a id="transformers.ProphetNetTokenizer.get_special_tokens_mask" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ProphetNetTokenizer.get_special_tokens_mask"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/prophetnet/tokenization_prophetnet.py#L191" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_0<span class="opacity-60">: typing.List[int]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_1<span class="opacity-60">: typing.Optional[typing.List[int]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">already_has_special_tokens<span class="opacity-60">: bool = False</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>List[int]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetTokenizer.get_special_tokens_mask.token_ids_0" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetTokenizer.get_special_tokens_mask.token_ids_0"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetTokenizer.get_special_tokens_mask.token_ids_1" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetTokenizer.get_special_tokens_mask.token_ids_1"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetTokenizer.get_special_tokens_mask.already_has_special_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetTokenizer.get_special_tokens_mask.already_has_special_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>already_has_special_tokens</strong> (<code>bool</code>, 
<em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the token list is already formatted with special tokens for the model.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.ProphetNetTokenizer.get_special_tokens_mask.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>List[int]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.</p> <!-- HTML_TAG_END --></p></div></div> <p>Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer <code>prepare_for_model</code> method.</p></div></div> <h2 class="relative group"><a id="transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqLMOutput" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqLMOutput"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>ProphetNet specific outputs </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqLMOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.models.prophetnet.modeling_prophetnet.</span><span 
class="font-semibold">ProphetNetSeq2SeqLMOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqLMOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqLMOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/prophetnet/modeling_prophetnet.py#L252" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">loss<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logits<span class="opacity-60">: FloatTensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logits_ngram<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_ngram_hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_ngram_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">cross_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_last_hidden_state<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqLMOutput.loss" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqLMOutput.loss"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) &#x2014; Language modeling loss.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqLMOutput.logits" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqLMOutput.logits"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 
8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, decoder_sequence_length, config.vocab_size)</code>) &#x2014; Prediction scores of the main stream language modeling head (scores for each vocabulary token before SoftMax).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqLMOutput.logits_ngram" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqLMOutput.logits_ngram"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logits_ngram</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, ngram * decoder_sequence_length, config.vocab_size)</code>) &#x2014; Prediction scores of the predict stream language modeling head (scores for each vocabulary token before SoftMax).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqLMOutput.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqLMOutput.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>List[torch.FloatTensor]</code>, <em>optional</em>, returned when 
<code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; List of <code>torch.FloatTensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_attn_heads, decoder_sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqLMOutput.decoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqLMOutput.decoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, decoder_sequence_length, hidden_size)</code>.</p> <p>Hidden-states of main stream of the decoder at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqLMOutput.decoder_ngram_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqLMOutput.decoder_ngram_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 
56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_ngram_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, ngram * decoder_sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the predict stream of the decoder at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqLMOutput.decoder_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqLMOutput.decoder_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_attn_heads, decoder_sequence_length, decoder_sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqLMOutput.decoder_ngram_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqLMOutput.decoder_ngram_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 
0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_ngram_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_attn_heads, decoder_sequence_length, decoder_sequence_length)</code>.</p> <p>Attention weights of the predict stream of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqLMOutput.cross_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqLMOutput.cross_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_attn_heads, encoder_sequence_length, decoder_sequence_length)</code>.</p> <p>Attention weights of the cross-attention layer of the decoder, after the attention softmax, used to compute the weighted average in the cross-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqLMOutput.encoder_last_hidden_state" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqLMOutput.encoder_last_hidden_state"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 
0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, encoder_sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder of the model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqLMOutput.encoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqLMOutput.encoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, encoder_sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqLMOutput.encoder_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqLMOutput.encoder_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 
8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_attn_heads, encoder_sequence_length, encoder_sequence_length)</code>. Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for sequence-to-sequence language models outputs.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqModelOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.models.prophetnet.modeling_prophetnet.</span><span class="font-semibold">ProphetNetSeq2SeqModelOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqModelOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqModelOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/prophetnet/modeling_prophetnet.py#L336" 
target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">last_hidden_state<span class="opacity-60">: FloatTensor</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">last_hidden_state_ngram<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_ngram_hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_ngram_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cross_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_last_hidden_state<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqModelOutput.last_hidden_state" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full" href="#transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqModelOutput.last_hidden_state"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, decoder_sequence_length, hidden_size)</code>) &#x2014; Sequence of main stream hidden-states at the output of the last layer of the decoder of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqModelOutput.last_hidden_state_ngram" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqModelOutput.last_hidden_state_ngram"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>last_hidden_state_ngram</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size,ngram * decoder_sequence_length, config.vocab_size)</code>) &#x2014; Sequence of predict stream hidden-states at the output of the last layer of the decoder of the model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqModelOutput.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqModelOutput.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" 
aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>List[torch.FloatTensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; List of <code>torch.FloatTensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_attn_heads, decoder_sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqModelOutput.decoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqModelOutput.decoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, decoder_sequence_length, hidden_size)</code>.</p> <p>Hidden-states of main stream of the decoder at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqModelOutput.decoder_ngram_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqModelOutput.decoder_ngram_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_ngram_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, ngram * decoder_sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the predict stream of the decoder at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqModelOutput.decoder_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqModelOutput.decoder_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_attn_heads, decoder_sequence_length, decoder_sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqModelOutput.decoder_ngram_attentions" class="header-link 
block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqModelOutput.decoder_ngram_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_ngram_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_attn_heads, decoder_sequence_length, decoder_sequence_length)</code>.</p> <p>Attention weights of the predict stream of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqModelOutput.cross_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqModelOutput.cross_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_attn_heads, encoder_sequence_length, decoder_sequence_length)</code>.</p> <p>Attention weights of the cross-attention layer of the decoder, after the attention softmax, used to compute the weighted average in the cross-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a 
id="transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqModelOutput.encoder_last_hidden_state" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqModelOutput.encoder_last_hidden_state"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, encoder_sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder of the model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqModelOutput.encoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqModelOutput.encoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, encoder_sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqModelOutput.encoder_attentions" class="header-link block 
pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqModelOutput.encoder_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_attn_heads, encoder_sequence_length, encoder_sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for model encoder’s outputs that also contains : pre-computed hidden states that can speed up sequential decoding.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderModelOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.models.prophetnet.modeling_prophetnet.</span><span class="font-semibold">ProphetNetDecoderModelOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderModelOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderModelOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/prophetnet/modeling_prophetnet.py#L421" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">last_hidden_state<span class="opacity-60">: FloatTensor</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">last_hidden_state_ngram<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states_ngram<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">ngram_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cross_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderModelOutput.last_hidden_state" class="header-link block pr-0.5 text-lg 
no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderModelOutput.last_hidden_state"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, decoder_sequence_length, hidden_size)</code>) &#x2014; Sequence of main stream hidden-states at the output of the last layer of the decoder of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderModelOutput.last_hidden_state_ngram" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderModelOutput.last_hidden_state_ngram"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>last_hidden_state_ngram</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, ngram * decoder_sequence_length, config.vocab_size)</code>) &#x2014; Sequence of predict stream hidden-states at the output of the last layer of the decoder of the model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderModelOutput.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderModelOutput.past_key_values"><span><svg 
class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>List[torch.FloatTensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; List of <code>torch.FloatTensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_attn_heads, decoder_sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderModelOutput.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderModelOutput.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, decoder_sequence_length, hidden_size)</code>.</p> <p>Hidden-states of main stream of the decoder at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderModelOutput.ngram_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 
with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderModelOutput.ngram_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>ngram_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, ngram * decoder_sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the predict stream of the decoder at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderModelOutput.attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderModelOutput.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_attn_heads, decoder_sequence_length, decoder_sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderModelOutput.ngram_attentions" 
class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderModelOutput.ngram_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>ngram_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_attn_heads, decoder_sequence_length, decoder_sequence_length)</code>.</p> <p>Attention weights of the predict stream of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderModelOutput.cross_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderModelOutput.cross_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_attn_heads, encoder_sequence_length, decoder_sequence_length)</code>.</p> <p>Attention weights of the cross-attention layer of the decoder, after the attention softmax, used to compute the weighted average in the cross-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for model’s outputs that may also contain past 
key/values (to speed up sequential decoding).</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderLMOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.models.prophetnet.modeling_prophetnet.</span><span class="font-semibold">ProphetNetDecoderLMOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderLMOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderLMOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/prophetnet/modeling_prophetnet.py#L481" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">loss<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logits<span class="opacity-60">: FloatTensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logits_ngram<span class="opacity-60">: 
typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states_ngram<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">ngram_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cross_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderLMOutput.loss" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderLMOutput.loss"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) &#x2014; Language modeling loss.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderLMOutput.logits" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 
with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderLMOutput.logits"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, decoder_sequence_length, config.vocab_size)</code>) &#x2014; Prediction scores of the main stream language modeling head (scores for each vocabulary token before SoftMax).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderLMOutput.logits_ngram" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderLMOutput.logits_ngram"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logits_ngram</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, ngram * decoder_sequence_length, config.vocab_size)</code>) &#x2014; Prediction scores of the predict stream language modeling head (scores for each vocabulary token before SoftMax).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderLMOutput.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderLMOutput.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 
0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>List[torch.FloatTensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; List of <code>torch.FloatTensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_attn_heads, decoder_sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderLMOutput.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderLMOutput.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, decoder_sequence_length, hidden_size)</code>.</p> <p>Hidden-states of main stream of the decoder at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderLMOutput.ngram_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderLMOutput.ngram_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>ngram_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, ngram * decoder_sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the predict stream of the decoder at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderLMOutput.attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderLMOutput.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_attn_heads, decoder_sequence_length, decoder_sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderLMOutput.ngram_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderLMOutput.ngram_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>ngram_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_attn_heads, decoder_sequence_length, decoder_sequence_length)</code>.</p> <p>Attentions weights of the predict stream of the decoder, after the attention softmax, used to compute the weighted average in the<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderLMOutput.cross_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderLMOutput.cross_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_attn_heads, encoder_sequence_length, decoder_sequence_length)</code>.</p> <p>Attentions weights of the cross-attention layer of the decoder, after the attention softmax, used to compute the weighted average in the<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for model’s outputs that may also contain a past key/values (to speed up sequential decoding).</p></div> <h2 class="relative group"><a id="transformers.ProphetNetModel" class="header-link block pr-1.5 text-lg no-hover:hidden 
with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetModel"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>ProphetNetModel </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ProphetNetModel"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">ProphetNetModel</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.ProphetNetModel" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ProphetNetModel"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/prophetnet/modeling_prophetnet.py#L1751" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> 
<span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetModel.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetModel.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/prophetnet#transformers.ProphetNetConfig">ProphetNetConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>The bare ProphetNet Model outputting raw hidden-states without any specific head on top. This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads etc.)</p> <p>Original ProphetNet code can be found <a href="https://github.com/microsoft/ProphetNet" rel="nofollow">here</a>. Checkpoints were converted from original Fairseq checkpoints. For more information on the checkpoint conversion, please take a look at the file <code>convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py</code>.</p> <p>This model is a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> sub-class.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and behavior.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ProphetNetModel.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.ProphetNetModel.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ProphetNetModel.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/prophetnet/modeling_prophetnet.py#L1783" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">decoder_input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cross_attn_head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_outputs<span class="opacity-60">: typing.Optional[typing.Tuple] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_cache<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/model_doc/prophetnet#transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqModelOutput" >transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqModelOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetModel.forward.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.ProphetNetModel.forward.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/prophetnet#transformers.ProphetNetTokenizer">ProphetNetTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetModel.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetModel.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetModel.forward.decoder_input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetModel.forward.decoder_input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/prophetnet#transformers.ProphetNetTokenizer">ProphetNetTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>ProphetNet uses the <code>eos_token_id</code> as the starting token for <code>decoder_input_ids</code> generation. 
If <code>past_key_values</code> is used, optionally only the last <code>decoder_input_ids</code> have to be input (see <code>past_key_values</code>).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetModel.forward.decoder_attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetModel.forward.decoder_attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_attention_mask</strong> (<code>torch.BoolTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetModel.forward.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetModel.forward.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>torch.Tensor</code> of shape <code>(encoder_layers, encoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the encoder. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetModel.forward.decoder_head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetModel.forward.decoder_head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetModel.forward.cross_attn_head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetModel.forward.cross_attn_head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cross_attn_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the cross-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetModel.forward.encoder_outputs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetModel.forward.encoder_outputs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_outputs</strong> (<code>tuple(tuple(torch.FloatTensor)</code>, <em>optional</em>) &#x2014; Tuple consists of (<code>last_hidden_state</code>, <em>optional</em>: <code>hidden_states</code>, <em>optional</em>: <code>attentions</code>) <code>last_hidden_state</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetModel.forward.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetModel.forward.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code> of length <code>config.n_layers</code> with each tuple having 4 tensors of shape <code>(batch_size, num_heads, sequence_length - 1, embed_size_per_head)</code>) &#x2014; Contains precomputed key and value hidden-states of the attention blocks. 
Can be used to speed up decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetModel.forward.use_cache" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetModel.forward.use_cache"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetModel.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetModel.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetModel.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetModel.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetModel.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetModel.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.ProphetNetModel.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/model_doc/prophetnet#transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqModelOutput" >transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 
border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/model_doc/prophetnet#transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqModelOutput" >transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<code>ProphenetConfig</code>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, decoder_sequence_length, hidden_size)</code>) — Sequence of main stream hidden-states at the output of the last layer of the decoder of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.</p> </li> <li> <p><strong>last_hidden_state_ngram</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size,ngram * decoder_sequence_length, config.vocab_size)</code>) — Sequence of predict stream hidden-states at the output of the last layer of the decoder of the model.</p> </li> <li> <p><strong>past_key_values</strong> (<code>List[torch.FloatTensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) — List of <code>torch.FloatTensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_attn_heads, decoder_sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, decoder_sequence_length, hidden_size)</code>.</p> <p>Hidden-states of main stream of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_ngram_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, ngram * decoder_sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the predict stream of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_attn_heads, decoder_sequence_length, decoder_sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>decoder_ngram_attentions</strong> 
(<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_attn_heads, decoder_sequence_length, decoder_sequence_length)</code>.</p> <p>Attentions weights of the predict stream of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_attn_heads, encoder_sequence_length, decoder_sequence_length)</code>.</p> <p>Attentions weights of the cross-attention layer of the decoder, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, encoder_sequence_length, hidden_size)</code>, <em>optional</em>) — Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, encoder_sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_attn_heads, encoder_sequence_length, encoder_sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/prophetnet#transformers.ProphetNetModel">ProphetNetModel</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ProphetNetTokenizer, ProphetNetModel <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = ProphetNetTokenizer.from_pretrained(<span class="hljs-string">&quot;microsoft/prophetnet-large-uncased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = ProphetNetModel.from_pretrained(<span class="hljs-string">&quot;microsoft/prophetnet-large-uncased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = tokenizer( <span class="hljs-meta">... </span> <span class="hljs-string">&quot;Studies have been shown that owning a dog is good for you&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>).input_ids <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>decoder_input_ids = tokenizer(<span class="hljs-string">&quot;Studies show that&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state <span class="hljs-comment"># main stream hidden states</span> <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states_ngram = outputs.last_hidden_state_ngram <span class="hljs-comment"># predict hidden states</span><!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.ProphetNetEncoder" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetEncoder"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>ProphetNetEncoder </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center 
text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ProphetNetEncoder"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">ProphetNetEncoder</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.ProphetNetEncoder" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ProphetNetEncoder"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/prophetnet/modeling_prophetnet.py#L1244" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60">: ProphetNetConfig</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">word_embeddings<span class="opacity-60">: Embedding = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetEncoder.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 
with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetEncoder.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/prophetnet#transformers.ProphetNetConfig">ProphetNetConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>The standalone encoder part of the ProphetNetModel. This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads etc.)</p> <p>Original ProphetNet code can be found <a href="https://github.com/microsoft/ProphetNet" rel="nofollow">here</a>. Checkpoints were converted from original Fairseq checkpoints. For more information on the checkpoint conversion, please take a look at the file <code>convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py</code>.</p> <p>This model is a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and behavior.</p> <p>word_embeddings (<code>torch.nn.Embedding</code> of shape <code>(config.vocab_size, config.hidden_size)</code>, <em>optional</em>): The word embedding parameters. 
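For illustration, a pre-built embedding table of that shape could be passed through this argument; the following is a minimal, hypothetical sketch (not taken from the original documentation) based on the constructor signature shown above:</p> <pre># Hypothetical sketch: hand an externally created embedding table to ProphetNetEncoder.
# Assumes the same checkpoint name used in the examples on this page.
import torch
from transformers import ProphetNetConfig, ProphetNetEncoder

config = ProphetNetConfig.from_pretrained("microsoft/prophetnet-large-uncased")
# embedding table of shape (config.vocab_size, config.hidden_size)
shared_embeddings = torch.nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
encoder = ProphetNetEncoder(config, word_embeddings=shared_embeddings)</pre> <p>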
This can be used to initialize <a href="/docs/transformers/pr_16143/en/model_doc/prophetnet#transformers.ProphetNetEncoder">ProphetNetEncoder</a> with pre-defined word embeddings instead of randomly initialized word embeddings.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ProphetNetEncoder.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.ProphetNetEncoder.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ProphetNetEncoder.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/prophetnet/modeling_prophetnet.py#L1274" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma 
cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.BaseModelOutput" >transformers.modeling_outputs.BaseModelOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetEncoder.forward.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetEncoder.forward.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/prophetnet#transformers.ProphetNetTokenizer">ProphetNetTokenizer</a>. 
See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetEncoder.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetEncoder.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetEncoder.forward.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetEncoder.forward.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>torch.Tensor</code> of shape <code>(encoder_layers, encoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the encoder. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetEncoder.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetEncoder.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetEncoder.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetEncoder.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetEncoder.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetEncoder.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.ProphetNetEncoder.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.BaseModelOutput" >transformers.modeling_outputs.BaseModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.BaseModelOutput" >transformers.modeling_outputs.BaseModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<code>ProphetNetConfig</code>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) — Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of 
shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/prophetnet#transformers.ProphetNetEncoder">ProphetNetEncoder</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ProphetNetTokenizer, ProphetNetEncoder <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = ProphetNetTokenizer.from_pretrained(<span class="hljs-string">&quot;microsoft/prophetnet-large-uncased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = ProphetNetEncoder.from_pretrained(<span class="hljs-string">&quot;patrickvonplaten/prophetnet-large-uncased-standalone&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.ProphetNetDecoder" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetDecoder"><span><svg class="" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>ProphetNetDecoder </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ProphetNetDecoder"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">ProphetNetDecoder</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.ProphetNetDecoder" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ProphetNetDecoder"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/prophetnet/modeling_prophetnet.py#L1384" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">config<span class="opacity-60">: ProphetNetConfig</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">word_embeddings<span class="opacity-60">: Embedding = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetDecoder.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetDecoder.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/prophetnet#transformers.ProphetNetConfig">ProphetNetConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>The standalone decoder part of the ProphetNetModel. This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)</p> <p>Original ProphetNet code can be found <a href="https://github.com/microsoft/ProphetNet" rel="nofollow">here</a>. Checkpoints were converted from original Fairseq checkpoints. For more information on the checkpoint conversion, please take a look at the file <code>convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py</code>.</p> <p>This model is a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and behavior.</p> <p>word_embeddings (<code>torch.nn.Embeddings</code> of shape <code>(config.vocab_size, config.hidden_size)</code>, <em>optional</em>): The word embedding parameters. 
This can be used to initialize <a href="/docs/transformers/pr_16143/en/model_doc/prophetnet#transformers.ProphetNetDecoder">ProphetNetDecoder</a> with pre-defined word embeddings instead of randomly initialized word embeddings.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ProphetNetDecoder.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.ProphetNetDecoder.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ProphetNetDecoder.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/prophetnet/modeling_prophetnet.py#L1421" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma 
cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cross_attn_head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_cache<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/model_doc/prophetnet#transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderModelOutput" >transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderModelOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetDecoder.forward.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetDecoder.forward.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 
0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/prophetnet#transformers.ProphetNetTokenizer">ProphetNetTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetDecoder.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetDecoder.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetDecoder.forward.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetDecoder.forward.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetDecoder.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetDecoder.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetDecoder.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetDecoder.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetDecoder.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetDecoder.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetDecoder.forward.encoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetDecoder.forward.encoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" 
width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_hidden_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetDecoder.forward.encoder_attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetDecoder.forward.encoder_attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. 
Mask values selected in <code>[0, 1]</code>:<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetDecoder.forward.cross_attn_head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetDecoder.forward.cross_attn_head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cross_attn_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the cross-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetDecoder.forward.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetDecoder.forward.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code> of length <code>config.n_layers</code> with each tuple having 4 tensors of shape <code>(batch_size, num_heads, sequence_length - 1, embed_size_per_head)</code>) &#x2014; Contains precomputed key and value hidden-states of the attention blocks. 
Can be used to speed up decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetDecoder.forward.use_cache" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetDecoder.forward.use_cache"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.ProphetNetDecoder.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/model_doc/prophetnet#transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderModelOutput" >transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/model_doc/prophetnet#transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderModelOutput" >transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<code>ProphetNetConfig</code>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, decoder_sequence_length, hidden_size)</code>) — Sequence of main stream hidden-states at the output of the last layer of the decoder of the model.</p> <p>If <code>past_key_values</code> is used, only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.</p> </li> <li> 
<p><strong>last_hidden_state_ngram</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, ngram * decoder_sequence_length, hidden_size)</code>) — Sequence of predict stream hidden-states at the output of the last layer of the decoder of the model.</p> </li> <li> <p><strong>past_key_values</strong> (<code>List[torch.FloatTensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) — List of <code>torch.FloatTensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_attn_heads, decoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, decoder_sequence_length, hidden_size)</code>.</p> <p>Hidden-states of main stream of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>ngram_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, ngram * decoder_sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the predict stream of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_attn_heads, decoder_sequence_length, decoder_sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>ngram_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_attn_heads, decoder_sequence_length, decoder_sequence_length)</code>.</p> <p>Attentions weights of the predict stream of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_attn_heads, encoder_sequence_length, decoder_sequence_length)</code>.</p> <p>Attentions weights of the cross-attention layer of the decoder, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> </ul> <!-- 
HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/prophetnet#transformers.ProphetNetDecoder">ProphetNetDecoder</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ProphetNetTokenizer, ProphetNetDecoder <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = ProphetNetTokenizer.from_pretrained(<span class="hljs-string">&quot;microsoft/prophetnet-large-uncased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = ProphetNetDecoder.from_pretrained(<span class="hljs-string">&quot;microsoft/prophetnet-large-uncased&quot;</span>, add_cross_attention=<span class="hljs-literal">False</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.ProphetNetForConditionalGeneration" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetForConditionalGeneration"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path 
d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>ProphetNetForConditionalGeneration </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ProphetNetForConditionalGeneration"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">ProphetNetForConditionalGeneration</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.ProphetNetForConditionalGeneration" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ProphetNetForConditionalGeneration"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/prophetnet/modeling_prophetnet.py#L1878" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60">: 
ProphetNetConfig</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetForConditionalGeneration.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetForConditionalGeneration.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/prophetnet#transformers.ProphetNetConfig">ProphetNetConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>The ProphetNet Model with a language modeling head. Can be used for sequence generation tasks. This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)</p> <p>Original ProphetNet code can be found <a href="https://github.com/microsoft/ProphetNet" rel="nofollow">here</a>. Checkpoints were converted from original Fairseq checkpoints. For more information on the checkpoint conversion, please take a look at the file <code>convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py</code>.</p> <p>This model is a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and behavior.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ProphetNetForConditionalGeneration.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.ProphetNetForConditionalGeneration.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ProphetNetForConditionalGeneration.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/prophetnet/modeling_prophetnet.py#L1899" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black 
hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cross_attn_head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_outputs<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_cache<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/model_doc/prophetnet#transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqLMOutput" >transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqLMOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a 
id="transformers.ProphetNetForConditionalGeneration.forward.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetForConditionalGeneration.forward.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/prophetnet#transformers.ProphetNetTokenizer">ProphetNetTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetForConditionalGeneration.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetForConditionalGeneration.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetForConditionalGeneration.forward.decoder_input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetForConditionalGeneration.forward.decoder_input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/prophetnet#transformers.ProphetNetTokenizer">ProphetNetTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>ProphetNet uses the <code>eos_token_id</code> as the starting token for <code>decoder_input_ids</code> generation. 
If <code>past_key_values</code> is used, optionally only the last <code>decoder_input_ids</code> have to be input (see <code>past_key_values</code>).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetForConditionalGeneration.forward.decoder_attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetForConditionalGeneration.forward.decoder_attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_attention_mask</strong> (<code>torch.BoolTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetForConditionalGeneration.forward.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetForConditionalGeneration.forward.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>torch.Tensor</code> of shape <code>(encoder_layers, encoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the encoder. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetForConditionalGeneration.forward.decoder_head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetForConditionalGeneration.forward.decoder_head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetForConditionalGeneration.forward.cross_attn_head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetForConditionalGeneration.forward.cross_attn_head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cross_attn_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the cross-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetForConditionalGeneration.forward.encoder_outputs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetForConditionalGeneration.forward.encoder_outputs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_outputs</strong> (<code>tuple(tuple(torch.FloatTensor)</code>, <em>optional</em>) &#x2014; Tuple consists of (<code>last_hidden_state</code>, <em>optional</em>: <code>hidden_states</code>, <em>optional</em>: <code>attentions</code>) <code>last_hidden_state</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetForConditionalGeneration.forward.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetForConditionalGeneration.forward.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code> of length <code>config.n_layers</code> with each tuple having 4 tensors of shape <code>(batch_size, num_heads, sequence_length - 1, embed_size_per_head)</code>) &#x2014; Contains precomputed key and value hidden-states of the attention blocks. 
Can be used to speed up decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetForConditionalGeneration.forward.use_cache" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetForConditionalGeneration.forward.use_cache"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetForConditionalGeneration.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetForConditionalGeneration.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetForConditionalGeneration.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetForConditionalGeneration.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetForConditionalGeneration.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetForConditionalGeneration.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetForConditionalGeneration.forward.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetForConditionalGeneration.forward.labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the language modeling loss. Indices should be in <code>[-100, 0, ..., config.vocab_size - 1]</code>. All labels set to <code>-100</code> are ignored (masked), the loss is only computed for labels in <code>[0, ..., config.vocab_size]</code>.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.ProphetNetForConditionalGeneration.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/model_doc/prophetnet#transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqLMOutput" >transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/model_doc/prophetnet#transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqLMOutput" >transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<code>ProphetNetConfig</code>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) — Language modeling loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, decoder_sequence_length, config.vocab_size)</code>) — Prediction scores of the main stream language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>logits_ngram</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, ngram * decoder_sequence_length, config.vocab_size)</code>) — Prediction scores of the predict stream language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>List[torch.FloatTensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) — List of <code>torch.FloatTensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_attn_heads, decoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be used 
(see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, decoder_sequence_length, hidden_size)</code>.</p> <p>Hidden-states of main stream of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_ngram_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, ngram * decoder_sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the predict stream of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_attn_heads, decoder_sequence_length, decoder_sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>decoder_ngram_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_attn_heads, decoder_sequence_length, decoder_sequence_length)</code>.</p> <p>Attentions weights of the predict stream of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_attn_heads, encoder_sequence_length, decoder_sequence_length)</code>.</p> <p>Attentions weights of the cross-attention layer of the decoder, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, encoder_sequence_length, hidden_size)</code>, <em>optional</em>) — Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, encoder_sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus 
the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_attn_heads, encoder_sequence_length, encoder_sequence_length)</code>. Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/prophetnet#transformers.ProphetNetForConditionalGeneration">ProphetNetForConditionalGeneration</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ProphetNetTokenizer, ProphetNetForConditionalGeneration <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = ProphetNetTokenizer.from_pretrained(<span class="hljs-string">&quot;microsoft/prophetnet-large-uncased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = ProphetNetForConditionalGeneration.from_pretrained(<span class="hljs-string">&quot;microsoft/prophetnet-large-uncased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = tokenizer( <span class="hljs-meta">... 
</span> <span class="hljs-string">&quot;Studies have been shown that owning a dog is good for you&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>).input_ids <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>decoder_input_ids = tokenizer(<span class="hljs-string">&quot;Studies show that&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) <span class="hljs-meta">&gt;&gt;&gt; </span>logits_next_token = outputs.logits <span class="hljs-comment"># logits to predict next token as usual</span> <span class="hljs-meta">&gt;&gt;&gt; </span>logits_ngram_next_tokens = outputs.logits_ngram <span class="hljs-comment"># logits to predict 2nd, 3rd, ... next tokens</span><!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.ProphetNetForCausalLM" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetForCausalLM"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>ProphetNetForCausalLM </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ProphetNetForCausalLM"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">ProphetNetForCausalLM</span></span></h3><!-- HTML_TAG_END --> <a 
id="transformers.ProphetNetForCausalLM" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ProphetNetForCausalLM"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/prophetnet/modeling_prophetnet.py#L2087" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetForCausalLM.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetForCausalLM.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/prophetnet#transformers.ProphetNetConfig">ProphetNetConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>The standalone decoder part of the ProphetNetModel with a lm head on top. 
The model can be used for causal language modeling. This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)</p> <p>Original ProphetNet code can be found <a href="https://github.com/microsoft/ProphetNet" rel="nofollow">here</a>. Checkpoints were converted from original Fairseq checkpoints. For more information on the checkpoint conversion, please take a look at the file <code>convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py</code>.</p> <p>This model is a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and behavior.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ProphetNetForCausalLM.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.ProphetNetForCausalLM.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ProphetNetForCausalLM.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex 
items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/prophetnet/modeling_prophetnet.py#L2122" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cross_attn_head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_cache<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/model_doc/prophetnet#transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderLMOutput" >transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderLMOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 
border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetForCausalLM.forward.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetForCausalLM.forward.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/prophetnet#transformers.ProphetNetTokenizer">ProphetNetTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetForCausalLM.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetForCausalLM.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetForCausalLM.forward.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetForCausalLM.forward.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>torch.Tensor</code> of shape <code>(encoder_layers, encoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetForCausalLM.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetForCausalLM.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetForCausalLM.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetForCausalLM.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetForCausalLM.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetForCausalLM.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetForCausalLM.forward.encoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetForCausalLM.forward.encoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" 
aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_hidden_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetForCausalLM.forward.encoder_attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetForCausalLM.forward.encoder_attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. 
Mask values selected in <code>[0, 1]</code>:<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetForCausalLM.forward.cross_attn_head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetForCausalLM.forward.cross_attn_head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cross_attn_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the cross-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetForCausalLM.forward.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetForCausalLM.forward.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code> of length <code>config.n_layers</code> with each tuple having 4 tensors of shape <code>(batch_size, num_heads, sequence_length - 1, embed_size_per_head)</code>) &#x2014; Contains precomputed key and value hidden-states of the attention blocks. 
Can be used to speed up decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetForCausalLM.forward.use_cache" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetForCausalLM.forward.use_cache"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProphetNetForCausalLM.forward.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProphetNetForCausalLM.forward.labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the left-to-right language modeling loss (next word prediction). 
Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels n <code>[0, ..., config.vocab_size]</code><!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.ProphetNetForCausalLM.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/model_doc/prophetnet#transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderLMOutput" >transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/model_doc/prophetnet#transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderLMOutput" >transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<code>ProphenetConfig</code>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) — Language modeling loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, decoder_sequence_length, config.vocab_size)</code>) — Prediction scores of the main stream language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>logits_ngram</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, ngram * decoder_sequence_length, config.vocab_size)</code>) — Prediction scores of the predict stream language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>List[torch.FloatTensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) — List of <code>torch.FloatTensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_attn_heads, decoder_sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, decoder_sequence_length, hidden_size)</code>.</p> <p>Hidden-states of main stream of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>ngram_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one 
for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, ngram * decoder_sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the predict stream of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_attn_heads, decoder_sequence_length, decoder_sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>ngram_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_attn_heads, decoder_sequence_length, decoder_sequence_length)</code>.</p> <p>Attentions weights of the predict stream of the decoder, after the attention softmax, used to compute the weighted average in the</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_attn_heads, encoder_sequence_length, decoder_sequence_length)</code>.</p> <p>Attentions weights of the cross-attention layer of the decoder, after the attention softmax, used to compute the weighted average in the</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/prophetnet#transformers.ProphetNetForCausalLM">ProphetNetForCausalLM</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform 
-translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ProphetNetTokenizer, ProphetNetForCausalLM <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = ProphetNetTokenizer.from_pretrained(<span class="hljs-string">&quot;microsoft/prophetnet-large-uncased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = ProphetNetForCausalLM.from_pretrained(<span class="hljs-string">&quot;microsoft/prophetnet-large-uncased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">assert</span> model.config.is_decoder, <span class="hljs-string">f&quot;<span class="hljs-subst">{model.__class__}</span> has to be configured as a decoder.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Model can also be used with EncoderDecoder framework</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BertTokenizer, EncoderDecoderModel, ProphetNetTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer_enc = BertTokenizer.from_pretrained(<span class="hljs-string">&quot;bert-large-uncased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer_dec = ProphetNetTokenizer.from_pretrained(<span class="hljs-string">&quot;microsoft/prophetnet-large-uncased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = EncoderDecoderModel.from_encoder_decoder_pretrained( <span class="hljs-meta">... </span> <span class="hljs-string">&quot;bert-large-uncased&quot;</span>, <span class="hljs-string">&quot;microsoft/prophetnet-large-uncased&quot;</span> <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>ARTICLE = ( <span class="hljs-meta">... </span> <span class="hljs-string">&quot;the us state department said wednesday it had received no &quot;</span> <span class="hljs-meta">... </span> <span class="hljs-string">&quot;formal word from bolivia that it was expelling the us ambassador there &quot;</span> <span class="hljs-meta">... </span> <span class="hljs-string">&quot;but said the charges made against him are `` baseless .&quot;</span> <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = tokenizer_enc(ARTICLE, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span>labels = tokenizer_dec( <span class="hljs-meta">... 
</span> <span class="hljs-string">&quot;us rejects charges against its ambassador in bolivia&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_ids=input_ids, decoder_input_ids=labels[:, :-<span class="hljs-number">1</span>], labels=labels[:, <span class="hljs-number">1</span>:]) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss<!-- HTML_TAG_END --></pre></div></div></div> <script type="module" data-hydrate="1jg0gtx"> import { start } from "/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"; start({ target: document.querySelector('[data-hydrate="1jg0gtx"]').parentNode, paths: {"base":"/docs/transformers/pr_16143/en","assets":"/docs/transformers/pr_16143/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"), import("/docs/transformers/pr_16143/en/_app/pages/model_doc/prophetnet.mdx-8afd09ba.js") ], params: {} } }); </script>
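<p>Generation example (a hedged sketch, not part of the auto-generated reference above): the <code>ProphetNetForConditionalGeneration</code> checkpoint used in the examples on this page can also be decoded with the generic <code>generate()</code> method. The beam size, maximum length, and the choice of the pretrained (non-fine-tuned) <code>microsoft/prophetnet-large-uncased</code> checkpoint are illustrative assumptions; for meaningful summaries a fine-tuned checkpoint would normally be loaded instead.</p>
<pre>
&gt;&gt;&gt; from transformers import ProphetNetTokenizer, ProphetNetForConditionalGeneration

&gt;&gt;&gt; # assumption: reusing the pretrained checkpoint from the examples above
&gt;&gt;&gt; tokenizer = ProphetNetTokenizer.from_pretrained("microsoft/prophetnet-large-uncased")
&gt;&gt;&gt; model = ProphetNetForConditionalGeneration.from_pretrained("microsoft/prophetnet-large-uncased")

&gt;&gt;&gt; input_ids = tokenizer(
...     "Studies have been shown that owning a dog is good for you", return_tensors="pt"
... ).input_ids

&gt;&gt;&gt; # beam-search decoding through the generic generate() API; hyperparameters are illustrative
&gt;&gt;&gt; generated_ids = model.generate(input_ids, num_beams=4, max_length=30, early_stopping=True)
&gt;&gt;&gt; print(tokenizer.batch_decode(generated_ids, skip_special_tokens=True))
</pre>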
hf_public_repos/doc-build-dev/transformers/pr_16143/en
hf_public_repos/doc-build-dev/transformers/pr_16143/en/model_doc/perceiver.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;perceiver&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;overview&quot;,&quot;title&quot;:&quot;Overview&quot;},{&quot;local&quot;:&quot;transformers.models.perceiver.modeling_perceiver.PerceiverModelOutput&quot;,&quot;title&quot;:&quot;Perceiver specific outputs&quot;},{&quot;local&quot;:&quot;transformers.PerceiverConfig&quot;,&quot;title&quot;:&quot;PerceiverConfig&quot;},{&quot;local&quot;:&quot;transformers.PerceiverTokenizer&quot;,&quot;title&quot;:&quot;PerceiverTokenizer&quot;},{&quot;local&quot;:&quot;transformers.PerceiverFeatureExtractor&quot;,&quot;title&quot;:&quot;PerceiverFeatureExtractor&quot;},{&quot;local&quot;:&quot;transformers.models.perceiver.modeling_perceiver.PerceiverTextPreprocessor&quot;,&quot;title&quot;:&quot;PerceiverTextPreprocessor&quot;},{&quot;local&quot;:&quot;transformers.models.perceiver.modeling_perceiver.PerceiverImagePreprocessor&quot;,&quot;title&quot;:&quot;PerceiverImagePreprocessor&quot;},{&quot;local&quot;:&quot;transformers.models.perceiver.modeling_perceiver.PerceiverOneHotPreprocessor&quot;,&quot;title&quot;:&quot;PerceiverOneHotPreprocessor&quot;},{&quot;local&quot;:&quot;transformers.models.perceiver.modeling_perceiver.PerceiverAudioPreprocessor&quot;,&quot;title&quot;:&quot;PerceiverAudioPreprocessor&quot;},{&quot;local&quot;:&quot;transformers.models.perceiver.modeling_perceiver.PerceiverMultimodalPreprocessor&quot;,&quot;title&quot;:&quot;PerceiverMultimodalPreprocessor&quot;},{&quot;local&quot;:&quot;transformers.models.perceiver.modeling_perceiver.PerceiverProjectionDecoder&quot;,&quot;title&quot;:&quot;PerceiverProjectionDecoder&quot;},{&quot;local&quot;:&quot;transformers.models.perceiver.modeling_perceiver.PerceiverBasicDecoder&quot;,&quot;title&quot;:&quot;PerceiverBasicDecoder&quot;},{&quot;local&quot;:&quot;transformers.models.perceiver.modeling_perceiver.PerceiverClassificationDecoder&quot;,&quot;title&quot;:&quot;PerceiverClassificationDecoder&quot;},{&quot;local&quot;:&quot;transformers.models.perceiver.modeling_perceiver.PerceiverOpticalFlowDecoder&quot;,&quot;title&quot;:&quot;PerceiverOpticalFlowDecoder&quot;},{&quot;local&quot;:&quot;transformers.models.perceiver.modeling_perceiver.PerceiverBasicVideoAutoencodingDecoder&quot;,&quot;title&quot;:&quot;PerceiverBasicVideoAutoencodingDecoder&quot;},{&quot;local&quot;:&quot;transformers.models.perceiver.modeling_perceiver.PerceiverMultimodalDecoder&quot;,&quot;title&quot;:&quot;PerceiverMultimodalDecoder&quot;},{&quot;local&quot;:&quot;transformers.models.perceiver.modeling_perceiver.PerceiverProjectionPostprocessor&quot;,&quot;title&quot;:&quot;PerceiverProjectionPostprocessor&quot;},{&quot;local&quot;:&quot;transformers.models.perceiver.modeling_perceiver.PerceiverAudioPostprocessor&quot;,&quot;title&quot;:&quot;PerceiverAudioPostprocessor&quot;},{&quot;local&quot;:&quot;transformers.models.perceiver.modeling_perceiver.PerceiverClassificationPostprocessor&quot;,&quot;title&quot;:&quot;PerceiverClassificationPostprocessor&quot;},{&quot;local&quot;:&quot;transformers.models.perceiver.modeling_perceiver.PerceiverMultimodalPostprocessor&quot;,&quot;title&quot;:&quot;PerceiverMultimodalPostprocessor&quot;},{&quot;local&quot;:&quot;transformers.PerceiverModel&quot;,&quot;title&quot;:&quot;PerceiverModel&quot;},{&quot;local&quot;:&quot;transformers.PerceiverForMaskedLM&quot;,&quot;title&quot;:&quot;PerceiverForMaskedLM&quot;},{&quot;local&
quot;:&quot;transformers.PerceiverForSequenceClassification&quot;,&quot;title&quot;:&quot;PerceiverForSequenceClassification&quot;},{&quot;local&quot;:&quot;transformers.PerceiverForImageClassificationLearned&quot;,&quot;title&quot;:&quot;PerceiverForImageClassificationLearned&quot;},{&quot;local&quot;:&quot;transformers.PerceiverForImageClassificationFourier&quot;,&quot;title&quot;:&quot;PerceiverForImageClassificationFourier&quot;},{&quot;local&quot;:&quot;transformers.PerceiverForImageClassificationConvProcessing&quot;,&quot;title&quot;:&quot;PerceiverForImageClassificationConvProcessing&quot;},{&quot;local&quot;:&quot;transformers.PerceiverForOpticalFlow&quot;,&quot;title&quot;:&quot;PerceiverForOpticalFlow&quot;},{&quot;local&quot;:&quot;transformers.PerceiverForMultimodalAutoencoding&quot;,&quot;title&quot;:&quot;PerceiverForMultimodalAutoencoding&quot;}],&quot;title&quot;:&quot;Perceiver&quot;}" data-svelte="svelte-1phssyn"> <link rel="stylesheet" href="/docs/transformers/pr_16143/en/_app/assets/pages/__layout.svelte-a5c8879b.css"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/vendor-4833417e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/paths-4b3c6e7e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/model_doc/perceiver.mdx-6efe9939.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Tip-fffd6df1.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Docstring-4f315ed9.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/IconCopyLink-4b81c553.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CodeBlock-6a3d1b46.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CopyButton-dacfbfaf.js"> <h1 class="relative group"><a id="perceiver" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#perceiver"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Perceiver </span></h1> <h2 class="relative group"><a id="overview" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#overview"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 
8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Overview </span></h2> <p>The Perceiver IO model was proposed in <a href="https://arxiv.org/abs/2107.14795" rel="nofollow">Perceiver IO: A General Architecture for Structured Inputs &amp; Outputs</a> by Andrew Jaegle, Sebastian Borgeaud, Jean-Baptiste Alayrac, Carl Doersch, Catalin Ionescu, David Ding, Skanda Koppula, Daniel Zoran, Andrew Brock, Evan Shelhamer, Olivier Hénaff, Matthew M. Botvinick, Andrew Zisserman, Oriol Vinyals, João Carreira.</p> <p>Perceiver IO is a generalization of <a href="https://arxiv.org/abs/2103.03206" rel="nofollow">Perceiver</a> to handle arbitrary outputs in addition to arbitrary inputs. The original Perceiver only produced a single classification label. In addition to classification labels, Perceiver IO can produce (for example) language, optical flow, and multimodal videos with audio. This is done using the same building blocks as the original Perceiver. The computational complexity of Perceiver IO is linear in the input and output size and the bulk of the processing occurs in the latent space, allowing us to process inputs and outputs that are much larger than can be handled by standard Transformers. This means, for example, Perceiver IO can do BERT-style masked language modeling directly using bytes instead of tokenized inputs.</p> <p>The abstract from the paper is the following:</p> <p><em>The recently-proposed Perceiver model obtains good results on several domains (images, audio, multimodal, point clouds) while scaling linearly in compute and memory with the input size. While the Perceiver supports many kinds of inputs, it can only produce very simple outputs such as class scores. Perceiver IO overcomes this limitation without sacrificing the original’s appealing properties by learning to flexibly query the model’s latent space to produce outputs of arbitrary size and semantics. Perceiver IO still decouples model depth from data size and still scales linearly with data size, but now with respect to both input and output sizes. The full Perceiver IO model achieves strong results on tasks with highly structured output spaces, such as natural language and visual understanding, StarCraft II, and multi-task and multi-modal domains. As highlights, Perceiver IO matches a Transformer-based BERT baseline on the GLUE language benchmark without the need for input tokenization and achieves state-of-the-art performance on Sintel optical flow estimation.</em></p> <p>Here’s a TLDR explaining how Perceiver works:</p> <p>The main problem with the self-attention mechanism of the Transformer is that the time and memory requirements scale quadratically with the sequence length. Hence, models like BERT and RoBERTa are limited to a max sequence length of 512 tokens. Perceiver aims to solve this issue by, instead of performing self-attention on the inputs, perform it on a set of latent variables, and only use the inputs for cross-attention. 
In this way, the time and memory requirements don’t depend on the length of the inputs anymore, as one uses a fixed number of latent variables, like 256 or 512. These are randomly initialized, after which they are trained end-to-end using backpropagation.</p> <p>Internally, <a href="/docs/transformers/pr_16143/en/model_doc/perceiver#transformers.PerceiverModel">PerceiverModel</a> will create the latents, a tensor of shape <code>(batch_size, num_latents, d_latents)</code>. One must provide <code>inputs</code> (which could be text, images, audio, you name it!) to the model, which it will use to perform cross-attention with the latents. The output of the Perceiver encoder is a tensor of the same shape. One can then, similarly to BERT, convert the last hidden states of the latents to classification logits by averaging along the sequence dimension, and placing a linear layer on top of that to project the <code>d_latents</code> to <code>num_labels</code>.</p> <p>This was the idea of the original Perceiver paper. However, it could only output classification logits. In a follow-up work, Perceiver IO, the authors generalized it so that the model can also produce outputs of arbitrary size. How, you might ask? The idea is actually relatively simple: one defines outputs of an arbitrary size, and then applies cross-attention with the last hidden states of the latents, using the outputs as queries, and the latents as keys and values.</p> <p>So let’s say one wants to perform masked language modeling (BERT-style) with the Perceiver. As the Perceiver’s input length will not have an impact on the computation time of the self-attention layers, one can provide raw bytes as <code>inputs</code> of length 2048 to the model. If one now masks out some of these 2048 tokens, one can define the <code>outputs</code> as a tensor of shape <code>(batch_size, 2048, 768)</code>. Next, one performs cross-attention with the final hidden states of the latents to update the <code>outputs</code> tensor. After cross-attention, one still has a tensor of shape <code>(batch_size, 2048, 768)</code>. One can then place a regular language modeling head on top to project the last dimension to the vocabulary size of the model, creating logits of shape <code>(batch_size, 2048, 262)</code> (as Perceiver uses a vocabulary size of 262 byte IDs).</p> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/perceiver_architecture.jpg" alt="Perceiver IO architecture" width="600"> <small>Perceiver IO architecture. Taken from the <a href="https://arxiv.org/abs/2107.14795">original paper</a></small> <p>This model was contributed by <a href="https://huggingface.co/nielsr" rel="nofollow">nielsr</a>. The original code can be found <a href="https://github.com/deepmind/deepmind-research/tree/master/perceiver" rel="nofollow">here</a>.</p> <p>Tips:</p> <ul><li>The quickest way to get started with the Perceiver is by checking the <a href="https://github.com/NielsRogge/Transformers-Tutorials/tree/master/Perceiver" rel="nofollow">tutorial notebooks</a>.</li> <li>Refer to the <a href="https://huggingface.co/blog/perceiver" rel="nofollow">blog post</a> if you want to fully understand how the model works and is implemented in the library. Note that the models available in the library only showcase some examples of what you can do with the Perceiver. There are many more use cases, including question answering, named-entity recognition, object detection, audio classification, video classification, etc.
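<p>As a concrete starting point, here is a minimal sketch of the byte-level masked language modeling flow described above. It is an illustrative example rather than the official one from the API reference below: it assumes the <code>deepmind/language-perceiver</code> checkpoint and only inspects the shape of the logits; in a real use case one would also replace some byte IDs with the mask token before the forward pass.</p> <pre><code class="language-python">from transformers import PerceiverTokenizer, PerceiverForMaskedLM

# Byte-level tokenizer: text is mapped to UTF-8 byte IDs (vocabulary size 262), no subword units
tokenizer = PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")
model = PerceiverForMaskedLM.from_pretrained("deepmind/language-perceiver")

text = "This is an incomplete sentence where some words are missing."
# Pad to the maximum length of 2048 byte IDs, as described above
encoding = tokenizer(text, padding="max_length", return_tensors="pt")

# In practice, replace some positions in encoding.input_ids with tokenizer.mask_token_id first
outputs = model(inputs=encoding.input_ids, attention_mask=encoding.attention_mask)
print(outputs.logits.shape)  # torch.Size([1, 2048, 262]) - one score per byte ID at every position</code></pre>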
</li></ul> <p><strong>Note</strong>:</p> <ul><li>Perceiver does <strong>not</strong> work with <code>torch.nn.DataParallel</code> due to a bug in PyTorch, see <a href="https://github.com/pytorch/pytorch/issues/36035" rel="nofollow">issue #36035</a></li></ul> <h2 class="relative group"><a id="transformers.models.perceiver.modeling_perceiver.PerceiverModelOutput" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.perceiver.modeling_perceiver.PerceiverModelOutput"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Perceiver specific outputs </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.models.perceiver.modeling_perceiver.PerceiverModelOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.models.perceiver.modeling_perceiver.</span><span class="font-semibold">PerceiverModelOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.models.perceiver.modeling_perceiver.PerceiverModelOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.models.perceiver.modeling_perceiver.PerceiverModelOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 
28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/perceiver/modeling_perceiver.py#L66" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logits<span class="opacity-60">: FloatTensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">last_hidden_state<span class="opacity-60">: FloatTensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cross_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.perceiver.modeling_perceiver.PerceiverModelOutput.logits" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.perceiver.modeling_perceiver.PerceiverModelOutput.logits"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_labels)</code>) &#x2014; Classification (or regression if config.num_labels==1) scores 
(before SoftMax).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.perceiver.modeling_perceiver.PerceiverModelOutput.last_hidden_state" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.perceiver.modeling_perceiver.PerceiverModelOutput.last_hidden_state"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) &#x2014; Sequence of hidden-states at the output of the last layer of the model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.perceiver.modeling_perceiver.PerceiverModelOutput.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.perceiver.modeling_perceiver.PerceiverModelOutput.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>. 
Hidden-states of the model at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.perceiver.modeling_perceiver.PerceiverModelOutput.attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.perceiver.modeling_perceiver.PerceiverModelOutput.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.perceiver.modeling_perceiver.PerceiverModelOutput.cross_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.perceiver.modeling_perceiver.PerceiverModelOutput.cross_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. 
Attentions weights of the decoder&#x2019;s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for Perceiver base model’s outputs, with potential hidden states, attentions and cross-attentions.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.models.perceiver.modeling_perceiver.PerceiverDecoderOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.models.perceiver.modeling_perceiver.</span><span class="font-semibold">PerceiverDecoderOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.models.perceiver.modeling_perceiver.PerceiverDecoderOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.models.perceiver.modeling_perceiver.PerceiverDecoderOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/perceiver/modeling_perceiver.py#L97" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logits<span class="opacity-60">: FloatTensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">cross_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.perceiver.modeling_perceiver.PerceiverDecoderOutput.logits" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.perceiver.modeling_perceiver.PerceiverDecoderOutput.logits"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_labels)</code>) &#x2014; Output of the basic decoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.perceiver.modeling_perceiver.PerceiverDecoderOutput.cross_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.perceiver.modeling_perceiver.PerceiverDecoderOutput.cross_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. 
Attentions weights of the decoder&#x2019;s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for Perceiver decoder outputs, with potential cross-attentions.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.models.perceiver.modeling_perceiver.PerceiverMaskedLMOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.models.perceiver.modeling_perceiver.</span><span class="font-semibold">PerceiverMaskedLMOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.models.perceiver.modeling_perceiver.PerceiverMaskedLMOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.models.perceiver.modeling_perceiver.PerceiverMaskedLMOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/perceiver/modeling_perceiver.py#L115" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">loss<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logits<span 
class="opacity-60">: FloatTensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cross_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.perceiver.modeling_perceiver.PerceiverMaskedLMOutput.loss" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.perceiver.modeling_perceiver.PerceiverMaskedLMOutput.loss"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) &#x2014; Masked language modeling (MLM) loss.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.perceiver.modeling_perceiver.PerceiverMaskedLMOutput.logits" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.perceiver.modeling_perceiver.PerceiverMaskedLMOutput.logits"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 
11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) &#x2014; Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.perceiver.modeling_perceiver.PerceiverMaskedLMOutput.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.perceiver.modeling_perceiver.PerceiverMaskedLMOutput.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>. 
Hidden-states of the model at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.perceiver.modeling_perceiver.PerceiverMaskedLMOutput.attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.perceiver.modeling_perceiver.PerceiverMaskedLMOutput.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, num_latents, num_latents)</code>. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.perceiver.modeling_perceiver.PerceiverMaskedLMOutput.cross_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.perceiver.modeling_perceiver.PerceiverMaskedLMOutput.cross_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. 
Attentions weights of the decoder&#x2019;s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for Perceiver’s masked language model outputs.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.models.perceiver.modeling_perceiver.PerceiverClassifierOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.models.perceiver.modeling_perceiver.</span><span class="font-semibold">PerceiverClassifierOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.models.perceiver.modeling_perceiver.PerceiverClassifierOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.models.perceiver.modeling_perceiver.PerceiverClassifierOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/perceiver/modeling_perceiver.py#L146" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">loss<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logits<span class="opacity-60">: 
FloatTensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cross_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.perceiver.modeling_perceiver.PerceiverClassifierOutput.loss" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.perceiver.modeling_perceiver.PerceiverClassifierOutput.loss"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) &#x2014; Classification (or regression if config.num_labels==1) loss.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.perceiver.modeling_perceiver.PerceiverClassifierOutput.logits" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.perceiver.modeling_perceiver.PerceiverClassifierOutput.logits"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 
0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) &#x2014; Classification (or regression if config.num_labels==1) scores (before SoftMax).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.perceiver.modeling_perceiver.PerceiverClassifierOutput.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.perceiver.modeling_perceiver.PerceiverClassifierOutput.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>. 
Hidden-states of the model at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.perceiver.modeling_perceiver.PerceiverClassifierOutput.attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.perceiver.modeling_perceiver.PerceiverClassifierOutput.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.perceiver.modeling_perceiver.PerceiverClassifierOutput.cross_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.perceiver.modeling_perceiver.PerceiverClassifierOutput.cross_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. 
Attentions weights of the decoder&#x2019;s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for Perceiver’s outputs of sequence/image classification models, optical flow and multimodal autoencoding.</p></div> <h2 class="relative group"><a id="transformers.PerceiverConfig" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverConfig"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>PerceiverConfig </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PerceiverConfig"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">PerceiverConfig</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.PerceiverConfig" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PerceiverConfig"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 
11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/perceiver/configuration_perceiver.py#L29" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_latents<span class="opacity-60"> = 256</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">d_latents<span class="opacity-60"> = 1280</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">d_model<span class="opacity-60"> = 768</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_blocks<span class="opacity-60"> = 1</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_self_attends_per_block<span class="opacity-60"> = 26</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_self_attention_heads<span class="opacity-60"> = 8</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_cross_attention_heads<span class="opacity-60"> = 8</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">qk_channels<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">v_channels<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cross_attention_shape_for_attention<span class="opacity-60"> = &#39;kv&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">self_attention_widening_factor<span class="opacity-60"> = 1</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cross_attention_widening_factor<span class="opacity-60"> = 1</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_act<span class="opacity-60"> = &#39;gelu&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_probs_dropout_prob<span class="opacity-60"> = 0.1</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">position_embedding_init_scale<span class="opacity-60"> = 0.02</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">initializer_range<span class="opacity-60"> = 0.02</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">layer_norm_eps<span class="opacity-60"> = 1e-12</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">is_encoder_decoder<span class="opacity-60"> = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_query_residual<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">vocab_size<span class="opacity-60"> = 262</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_position_embeddings<span class="opacity-60"> = 2048</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">image_size<span class="opacity-60"> = 56</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">train_size<span class="opacity-60"> = [368, 496]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_frames<span class="opacity-60"> = 16</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">audio_samples_per_frame<span class="opacity-60"> = 1920</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">samples_per_patch<span class="opacity-60"> = 16</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_shape<span class="opacity-60"> = [1, 16, 224, 224]</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverConfig.num_latents" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverConfig.num_latents"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 
88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_latents</strong> (<code>int</code>, <em>optional</em>, defaults to 256) &#x2014; The number of latents.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverConfig.d_latents" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverConfig.d_latents"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>d_latents</strong> (<code>int</code>, <em>optional</em>, defaults to 1280) &#x2014; Dimension of the latent embeddings.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverConfig.d_model" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverConfig.d_model"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>d_model</strong> (<code>int</code>, <em>optional</em>, defaults to 768) &#x2014; Dimension of the inputs. 
Should only be provided in case [<em>PerceiverTextPreprocessor</em>] is used or no preprocessor is provided.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverConfig.num_blocks" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverConfig.num_blocks"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_blocks</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; Number of blocks in the Transformer encoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverConfig.num_self_attends_per_block" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverConfig.num_self_attends_per_block"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_self_attends_per_block</strong> (<code>int</code>, <em>optional</em>, defaults to 26) &#x2014; The number of self-attention layers per block.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverConfig.num_self_attention_heads" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverConfig.num_self_attention_heads"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 
1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_self_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; Number of attention heads for each self-attention layer in the Transformer encoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverConfig.num_cross_attention_heads" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverConfig.num_cross_attention_heads"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_cross_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; Number of attention heads for each cross-attention layer in the Transformer encoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverConfig.qk_channels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverConfig.qk_channels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>qk_channels</strong> (<code>int</code>, <em>optional</em>) &#x2014; Dimension to project the queries + keys before applying attention in the cross-attention and self-attention layers of the encoder. 
Will default to preserving the dimension of the queries if not specified.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverConfig.v_channels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverConfig.v_channels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>v_channels</strong> (<code>int</code>, <em>optional</em>) &#x2014; Dimension to project the values before applying attention in the cross-attention and self-attention layers of the encoder. Will default to preserving the dimension of the queries if not specified.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverConfig.cross_attention_shape_for_attention" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverConfig.cross_attention_shape_for_attention"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cross_attention_shape_for_attention</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&apos;kv&apos;</code>) &#x2014; Dimension to use when downsampling the queries and keys in the cross-attention layer of the encoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverConfig.self_attention_widening_factor" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverConfig.self_attention_widening_factor"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>self_attention_widening_factor</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; Dimension of the feed-forward layer in the cross-attention layer of the Transformer encoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverConfig.cross_attention_widening_factor" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverConfig.cross_attention_widening_factor"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cross_attention_widening_factor</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; Dimension of the feed-forward layer in the self-attention layers of the Transformer encoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverConfig.hidden_act" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverConfig.hidden_act"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> 
<span><!-- HTML_TAG_START --><strong>hidden_act</strong> (<code>str</code> or <code>function</code>, <em>optional</em>, defaults to <code>&quot;gelu&quot;</code>) &#x2014; The non-linear activation function (function or string) in the encoder and pooler. If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;selu&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverConfig.attention_probs_dropout_prob" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverConfig.attention_probs_dropout_prob"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_probs_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout ratio for the attention probabilities.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverConfig.initializer_range" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverConfig.initializer_range"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>initializer_range</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverConfig.layer_norm_eps" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full" href="#transformers.PerceiverConfig.layer_norm_eps"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>layer_norm_eps</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-12) &#x2014; The epsilon used by the layer normalization layers.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverConfig.use_query_residual" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverConfig.use_query_residual"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_query_residual</strong> (<code>float</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether to add a query residual in the cross-attention layer of the encoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverConfig.vocab_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverConfig.vocab_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 
0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 262) &#x2014; Vocabulary size for the masked language modeling model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverConfig.max_position_embeddings" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverConfig.max_position_embeddings"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_position_embeddings</strong> (<code>int</code>, <em>optional</em>, defaults to 2048) &#x2014; The maximum sequence length that the masked language modeling model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverConfig.image_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverConfig.image_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>image_size</strong> (<code>int</code>, <em>optional</em>, defaults to 56) &#x2014; Size of the images after preprocessing, for <a href="/docs/transformers/pr_16143/en/model_doc/perceiver#transformers.PerceiverForImageClassificationLearned">PerceiverForImageClassificationLearned</a>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverConfig.train_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full" href="#transformers.PerceiverConfig.train_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>train_size</strong> (<code>List[int]</code>, <em>optional</em>, defaults to [368, 496]) &#x2014; Training size of the images for the optical flow model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverConfig.num_frames" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverConfig.num_frames"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_frames</strong> (<code>int</code>, <em>optional</em>, defaults to 16) &#x2014; Number of video frames used for the multimodal autoencoding model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverConfig.audio_samples_per_frame" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverConfig.audio_samples_per_frame"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>audio_samples_per_frame</strong> (<code>int</code>, <em>optional</em>, defaults to 1920) &#x2014; Number of audio samples per frame for the multimodal autoencoding model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverConfig.samples_per_patch" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverConfig.samples_per_patch"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>samples_per_patch</strong> (<code>int</code>, <em>optional</em>, defaults to 16) &#x2014; Number of audio samples per patch when preprocessing the audio for the multimodal autoencoding model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverConfig.output_shape" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverConfig.output_shape"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_shape</strong> (<code>List[int]</code>, <em>optional</em>, defaults to <code>[1, 16, 224, 224]</code>) &#x2014; Shape of the output (batch_size, num_frames, height, width) for the video decoder queries of the multimodal autoencoding model. This excludes the channel dimension.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>This is the configuration class to store the configuration of a <a href="/docs/transformers/pr_16143/en/model_doc/perceiver#transformers.PerceiverModel">PerceiverModel</a>. It is used to instantiate an Perceiver model according to the specified arguments, defining the model architecture. 
Instantiating a configuration with the defaults will yield a similar configuration to that of the Perceiver <a href="https://huggingface.co/deepmind/language-perceiver" rel="nofollow">deepmind/language-perceiver</a> architecture.</p> <p>Configuration objects inherit from <a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a> and can be used to control the model outputs. Read the documentation from <a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a> for more information.</p> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> PerceiverModel, PerceiverConfig <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a Perceiver deepmind/language-perceiver style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = PerceiverConfig() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a model from the deepmind/language-perceiver style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = PerceiverModel(configuration) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = model.config<!-- HTML_TAG_END --></pre></div></div> <h2 class="relative group"><a id="transformers.PerceiverTokenizer" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverTokenizer"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 
11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>PerceiverTokenizer </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PerceiverTokenizer"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">PerceiverTokenizer</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.PerceiverTokenizer" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PerceiverTokenizer"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/perceiver/tokenization_perceiver.py#L27" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_token<span class="opacity-60"> = &#39;[PAD]&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">bos_token<span class="opacity-60"> = &#39;[BOS]&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eos_token<span class="opacity-60"> = &#39;[EOS]&#39;</span></span> </span><span class="comma 
cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">mask_token<span class="opacity-60"> = &#39;[MASK]&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cls_token<span class="opacity-60"> = &#39;[CLS]&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">sep_token<span class="opacity-60"> = &#39;[SEP]&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">model_max_length<span class="opacity-60"> = 2048</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverTokenizer.pad_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverTokenizer.pad_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pad_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[PAD]&quot;</code>) &#x2014; The token used for padding, for example when batching sequences of different lengths.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverTokenizer.bos_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverTokenizer.bos_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 
0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>bos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[BOS]&quot;</code>) &#x2014; The BOS token (reserved in the vocab, but not actually used).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverTokenizer.eos_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverTokenizer.eos_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>eos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[EOS]&quot;</code>) &#x2014; The end of sequence token (reserved in the vocab, but not actually used).</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>When building a sequence using special tokens, this is not the token that is used for the end of sequence. 
The token used is the <code>sep_token</code>.</p> </div><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverTokenizer.mask_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverTokenizer.mask_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>mask_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[MASK]&quot;</code>) &#x2014; The MASK token, useful for masked language modeling.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverTokenizer.cls_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverTokenizer.cls_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cls_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[CLS]&quot;</code>) &#x2014; The CLS token (reserved in the vocab, but not actually used).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverTokenizer.sep_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverTokenizer.sep_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 
11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>sep_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[SEP]&quot;</code>) &#x2014; The separator token, which is used when building a sequence from two sequences.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Construct a Perceiver tokenizer. The Perceiver simply uses raw bytes utf-8 encoding.</p> <p>This tokenizer inherits from <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a> which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizerBase.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedTokenizerBase.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedTokenizerBase.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 
0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils_base.py#L2379" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">text<span class="opacity-60">: typing.Union[str, typing.List[str], typing.List[typing.List[str]]]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">text_pair<span class="opacity-60">: typing.Union[str, typing.List[str], typing.List[typing.List[str]], NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">add_special_tokens<span class="opacity-60">: bool = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">padding<span class="opacity-60">: typing.Union[bool, str, transformers.file_utils.PaddingStrategy] = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">truncation<span class="opacity-60">: typing.Union[bool, str, transformers.tokenization_utils_base.TruncationStrategy] = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_length<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">stride<span class="opacity-60">: int = 0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">is_split_into_words<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_to_multiple_of<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_tensors<span class="opacity-60">: typing.Union[str, transformers.file_utils.TensorType, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_token_type_ids<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_attention_mask<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_overflowing_tokens<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span 
class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_special_tokens_mask<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_offsets_mapping<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_length<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">verbose<span class="opacity-60">: bool = True</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.__call__.text" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.__call__.text"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>text</strong> (<code>str</code>, <code>List[str]</code>, <code>List[List[str]]</code>) &#x2014; The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). 
If the sequences are provided as list of strings (pretokenized), you must set <code>is_split_into_words=True</code> (to lift the ambiguity with a batch of sequences).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.__call__.text_pair" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.__call__.text_pair"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>text_pair</strong> (<code>str</code>, <code>List[str]</code>, <code>List[List[str]]</code>) &#x2014; The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set <code>is_split_into_words=True</code> (to lift the ambiguity with a batch of sequences).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.__call__.add_special_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.__call__.add_special_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>add_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to encode the sequences with the special tokens relative to their model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.__call__.padding" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 
with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.__call__.padding"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>padding</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/file_utils#transformers.file_utils.PaddingStrategy">PaddingStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls padding. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest&apos;</code>: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided).</li> <li><code>&apos;max_length&apos;</code>: Pad to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided.</li> <li><code>False</code> or <code>&apos;do_not_pad&apos;</code> (default): No padding (i.e., can output a batch with sequences of different lengths).</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.__call__.truncation" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.__call__.truncation"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>truncation</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.tokenization_utils_base.TruncationStrategy">TruncationStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls truncation. 
Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_second&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>False</code> or <code>&apos;do_not_truncate&apos;</code> (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size).</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.__call__.max_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.__call__.max_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; Controls the maximum length to use by one of the truncation/padding parameters.</p> <p>If left unset or set to <code>None</code>, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. 
If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.__call__.stride" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.__call__.stride"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>stride</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If set to a number along with <code>max_length</code>, the overflowing tokens returned when <code>return_overflowing_tokens=True</code> will contain some tokens from the end of the truncated sequence returned to provide some overlap between truncated and overflowing sequences. The value of this argument defines the number of overlapping tokens.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.__call__.is_split_into_words" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.__call__.is_split_into_words"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>is_split_into_words</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the input is already pre-tokenized (e.g., split into words). If set to <code>True</code>, the tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace) which it will tokenize. 
This is useful for NER or token classification.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.__call__.pad_to_multiple_of" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.__call__.pad_to_multiple_of"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pad_to_multiple_of</strong> (<code>int</code>, <em>optional</em>) &#x2014; If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability &gt;= 7.5 (Volta).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.__call__.return_tensors" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.__call__.return_tensors"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_tensors</strong> (<code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>) &#x2014; If set, will return tensors instead of list of python integers. 
Acceptable values are:</p> <ul> <li><code>&apos;tf&apos;</code>: Return TensorFlow <code>tf.constant</code> objects.</li> <li><code>&apos;pt&apos;</code>: Return PyTorch <code>torch.Tensor</code> objects.</li> <li><code>&apos;np&apos;</code>: Return Numpy <code>np.ndarray</code> objects.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.__call__.return_token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.__call__.return_token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_token_type_ids</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to return token type IDs. If left to the default, will return the token type IDs according to the specific tokenizer&#x2019;s default, defined by the <code>return_outputs</code> attribute.</p> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.__call__.return_attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.__call__.return_attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_attention_mask</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to return the attention mask. 
If left to the default, will return the attention mask according to the specific tokenizer&#x2019;s default, defined by the <code>return_outputs</code> attribute.</p> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.__call__.return_overflowing_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.__call__.return_overflowing_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_overflowing_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return overflowing token sequences. If a pair of sequences of input ids (or a batch of pairs) is provided with <code>truncation_strategy = longest_first</code> or <code>True</code>, an error is raised instead of returning overflowing tokens.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.__call__.return_special_tokens_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.__call__.return_special_tokens_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_special_tokens_mask</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return special tokens mask information.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.__call__.return_offsets_mapping" class="header-link block 
pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.__call__.return_offsets_mapping"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_offsets_mapping</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return <code>(char_start, char_end)</code> for each token.</p> <p>This is only available on fast tokenizers inheriting from <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast">PreTrainedTokenizerFast</a>, if using Python&#x2019;s tokenizer, this method will raise <code>NotImplementedError</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.__call__.return_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.__call__.return_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_length</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the lengths of the encoded inputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.__call__.verbose" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.__call__.verbose"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 
256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>verbose</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to print more information and warnings. **kwargs &#x2014; passed to the <code>self.tokenize()</code> method<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.PreTrainedTokenizerBase.__call__.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a> with the following fields:</p> <ul> <li> <p><strong>input_ids</strong> — List of token ids to be fed to a model.</p> <p><a href="../glossary#input-ids">What are input IDs?</a></p> </li> <li> <p><strong>token_type_ids</strong> — List of token type ids to be fed to a model (when <code>return_token_type_ids=True</code> or if <em>“token_type_ids”</em> is in <code>self.model_input_names</code>).</p> <p><a href="../glossary#token-type-ids">What are token type IDs?</a></p> </li> <li> <p><strong>attention_mask</strong> — List of indices specifying which tokens should be attended to by the model (when <code>return_attention_mask=True</code> or if <em>“attention_mask”</em> is in <code>self.model_input_names</code>).</p> <p><a href="../glossary#attention-mask">What are attention masks?</a></p> </li> <li> <p><strong>overflowing_tokens</strong> — List of overflowing tokens sequences (when a <code>max_length</code> is specified and <code>return_overflowing_tokens=True</code>).</p> </li> <li> <p><strong>num_truncated_tokens</strong> — Number of tokens truncated (when a <code>max_length</code> is specified and <code>return_overflowing_tokens=True</code>).</p> </li> <li> <p><strong>special_tokens_mask</strong> — List of 0s and 1s, with 1 specifying added special tokens and 0 specifying regular sequence tokens (when <code>add_special_tokens=True</code> and <code>return_special_tokens_mask=True</code>).</p> </li> <li> <p><strong>length</strong> — The length of the inputs (when <code>return_length=True</code>)</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of sequences.</p></div></div> <h2 class="relative group"><a id="transformers.PerceiverFeatureExtractor" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverFeatureExtractor"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" 
aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>PerceiverFeatureExtractor </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PerceiverFeatureExtractor"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">PerceiverFeatureExtractor</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.PerceiverFeatureExtractor" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PerceiverFeatureExtractor"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/perceiver/feature_extraction_perceiver.py#L37" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white 
dark:hover:bg-white dark:hover:text-black">do_center_crop<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">crop_size<span class="opacity-60"> = 256</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">do_resize<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">size<span class="opacity-60"> = 224</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">resample<span class="opacity-60"> = 3</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">do_normalize<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">image_mean<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">image_std<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverFeatureExtractor.do_center_crop" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverFeatureExtractor.do_center_crop"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>do_center_crop</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether to crop the input at the center. 
If the input size is smaller than <code>crop_size</code> along any edge, the image is padded with 0&#x2019;s and then center cropped.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverFeatureExtractor.crop_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverFeatureExtractor.crop_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>crop_size</strong> (<code>int</code>, <em>optional</em>, defaults to 256) &#x2014; Desired output size when applying center-cropping. Only has an effect if <code>do_center_crop</code> is set to <code>True</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverFeatureExtractor.do_resize" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverFeatureExtractor.do_resize"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>do_resize</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether to resize the input to a certain <code>size</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverFeatureExtractor.size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverFeatureExtractor.size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" 
viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>size</strong> (<code>int</code> or <code>Tuple(int)</code>, <em>optional</em>, defaults to 224) &#x2014; Resize the input to the given size. If a tuple is provided, it should be (width, height). If only an integer is provided, then the input will be resized to (size, size). Only has an effect if <code>do_resize</code> is set to <code>True</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverFeatureExtractor.resample" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverFeatureExtractor.resample"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>resample</strong> (<code>int</code>, <em>optional</em>, defaults to <code>PIL.Image.BICUBIC</code>) &#x2014; An optional resampling filter. This can be one of <code>PIL.Image.NEAREST</code>, <code>PIL.Image.BOX</code>, <code>PIL.Image.BILINEAR</code>, <code>PIL.Image.HAMMING</code>, <code>PIL.Image.BICUBIC</code> or <code>PIL.Image.LANCZOS</code>. 
Only has an effect if <code>do_resize</code> is set to <code>True</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverFeatureExtractor.do_normalize" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverFeatureExtractor.do_normalize"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>do_normalize</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to normalize the input with <code>image_mean</code> and <code>image_std</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverFeatureExtractor.image_mean" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverFeatureExtractor.image_mean"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>image_mean</strong> (<code>List[int]</code>, defaults to <code>[0.485, 0.456, 0.406]</code>) &#x2014; The sequence of means for each channel, to be used when normalizing images.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverFeatureExtractor.image_std" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverFeatureExtractor.image_std"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 
88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>image_std</strong> (<code>List[int]</code>, defaults to <code>[0.229, 0.224, 0.225]</code>) &#x2014; The sequence of standard deviations for each channel, to be used when normalizing images.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Constructs a Perceiver feature extractor.</p> <p>This feature extractor inherits from <a href="/docs/transformers/pr_16143/en/main_classes/feature_extractor#transformers.ImageFeatureExtractionMixin">ImageFeatureExtractionMixin</a> which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PerceiverFeatureExtractor.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.PerceiverFeatureExtractor.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PerceiverFeatureExtractor.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 
11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/perceiver/feature_extraction_perceiver.py#L121" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">images<span class="opacity-60">: typing.Union[PIL.Image.Image, numpy.ndarray, ForwardRef(&#39;torch.Tensor&#39;), typing.List[PIL.Image.Image], typing.List[numpy.ndarray], typing.List[ForwardRef(&#39;torch.Tensor&#39;)]]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_tensors<span class="opacity-60">: typing.Union[str, transformers.file_utils.TensorType, NoneType] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/feature_extractor#transformers.BatchFeature" >BatchFeature</a></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverFeatureExtractor.__call__.images" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverFeatureExtractor.__call__.images"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>images</strong> (<code>PIL.Image.Image</code>, <code>np.ndarray</code>, <code>torch.Tensor</code>, <code>List[PIL.Image.Image]</code>, <code>List[np.ndarray]</code>, <code>List[torch.Tensor]</code>) &#x2014; The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch tensor. 
In case of a NumPy array/PyTorch tensor, each image should be of shape (C, H, W), where C is a number of channels, H and W are image height and width.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverFeatureExtractor.__call__.return_tensors" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverFeatureExtractor.__call__.return_tensors"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_tensors</strong> (<code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>, defaults to <code>&apos;np&apos;</code>) &#x2014; If set, will return tensors of a particular framework. Acceptable values are:</p> <ul> <li><code>&apos;tf&apos;</code>: Return TensorFlow <code>tf.constant</code> objects.</li> <li><code>&apos;pt&apos;</code>: Return PyTorch <code>torch.Tensor</code> objects.</li> <li><code>&apos;np&apos;</code>: Return NumPy <code>np.ndarray</code> objects.</li> <li><code>&apos;jax&apos;</code>: Return JAX <code>jnp.ndarray</code> objects.</li> </ul><!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.PerceiverFeatureExtractor.__call__.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/feature_extractor#transformers.BatchFeature" >BatchFeature</a></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/feature_extractor#transformers.BatchFeature" >BatchFeature</a> with the following fields:</p> <ul> <li><strong>pixel_values</strong> — Pixel values to be fed to a model, of shape (batch_size, num_channels, height, width).</li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>Main method to prepare for the model one or several image(s).</p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"><p>NumPy arrays and PyTorch tensors are converted to PIL images when resizing, so the most efficient is to pass PIL images.</p></div></div></div> <h2 class="relative group"><a id="transformers.models.perceiver.modeling_perceiver.PerceiverTextPreprocessor" class="header-link block pr-1.5 text-lg no-hover:hidden 
with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.perceiver.modeling_perceiver.PerceiverTextPreprocessor"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>PerceiverTextPreprocessor </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.models.perceiver.modeling_perceiver.PerceiverTextPreprocessor"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.models.perceiver.modeling_perceiver.</span><span class="font-semibold">PerceiverTextPreprocessor</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.models.perceiver.modeling_perceiver.PerceiverTextPreprocessor" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.models.perceiver.modeling_perceiver.PerceiverTextPreprocessor"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" 
href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/perceiver/modeling_perceiver.py#L2767" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.perceiver.modeling_perceiver.PerceiverTextPreprocessor.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.perceiver.modeling_perceiver.PerceiverTextPreprocessor.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/perceiver#transformers.PerceiverConfig">PerceiverConfig</a>) &#x2014; Model configuration.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Text preprocessing for Perceiver Encoder. 
Can be used to embed <code>inputs</code> and add positional encodings.</p> <p>The dimensionality of the embeddings is determined by the <code>d_model</code> attribute of the configuration.</p></div> <h2 class="relative group"><a id="transformers.models.perceiver.modeling_perceiver.PerceiverImagePreprocessor" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.perceiver.modeling_perceiver.PerceiverImagePreprocessor"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>PerceiverImagePreprocessor </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.models.perceiver.modeling_perceiver.PerceiverImagePreprocessor"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.models.perceiver.modeling_perceiver.</span><span class="font-semibold">PerceiverImagePreprocessor</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.models.perceiver.modeling_perceiver.PerceiverImagePreprocessor" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.models.perceiver.modeling_perceiver.PerceiverImagePreprocessor"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 
79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/perceiver/modeling_perceiver.py#L2924" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">prep_type<span class="opacity-60"> = &#39;conv&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">spatial_downsample<span class="opacity-60">: int = 4</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">temporal_downsample<span class="opacity-60">: int = 1</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_encoding_type<span class="opacity-60">: str = &#39;fourier&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">in_channels<span class="opacity-60">: int = 3</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">out_channels<span class="opacity-60">: int = 64</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">conv_after_patching<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">conv_after_patching_in_channels<span class="opacity-60">: int = 54</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">conv2d_use_batchnorm<span class="opacity-60">: bool = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">concat_or_add_pos<span class="opacity-60">: str = &#39;concat&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">project_pos_dim<span class="opacity-60">: int = -1</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**position_encoding_kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span 
class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.perceiver.modeling_perceiver.PerceiverImagePreprocessor.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.perceiver.modeling_perceiver.PerceiverImagePreprocessor.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> ([<em>PerceiverConfig</em>]) &#x2014; Model configuration.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.perceiver.modeling_perceiver.PerceiverImagePreprocessor.prep_type" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.perceiver.modeling_perceiver.PerceiverImagePreprocessor.prep_type"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>prep_type</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;conv&quot;</code>) &#x2014; Preprocessing type. 
Can be &#x201C;conv1x1&#x201D;, &#x201C;conv&#x201D;, &#x201C;patches&#x201D;, &#x201C;pixels&#x201D;.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.perceiver.modeling_perceiver.PerceiverImagePreprocessor.spatial_downsample" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.perceiver.modeling_perceiver.PerceiverImagePreprocessor.spatial_downsample"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>spatial_downsample</strong> (<code>int</code>, <em>optional</em>, defaults to 4) &#x2014; Spatial downsampling factor.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.perceiver.modeling_perceiver.PerceiverImagePreprocessor.temporal_downsample" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.perceiver.modeling_perceiver.PerceiverImagePreprocessor.temporal_downsample"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>temporal_downsample</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; Temporal downsampling factor (only relevant in case a time dimension is present).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.perceiver.modeling_perceiver.PerceiverImagePreprocessor.position_encoding_type" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.perceiver.modeling_perceiver.PerceiverImagePreprocessor.position_encoding_type"><span><svg 
class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_encoding_type</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;fourier&quot;</code>) &#x2014; Position encoding type. Can be &#x201C;fourier&#x201D; or &#x201C;trainable&#x201D;.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.perceiver.modeling_perceiver.PerceiverImagePreprocessor.in_channels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.perceiver.modeling_perceiver.PerceiverImagePreprocessor.in_channels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>in_channels</strong> (<code>int</code>, <em>optional</em>, defaults to 3) &#x2014; Number of channels in the input.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.perceiver.modeling_perceiver.PerceiverImagePreprocessor.out_channels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.perceiver.modeling_perceiver.PerceiverImagePreprocessor.out_channels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 
56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>out_channels</strong> (<code>int</code>, <em>optional</em>, defaults to 64) &#x2014; Number of channels in the output.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.perceiver.modeling_perceiver.PerceiverImagePreprocessor.conv_after_patching" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.perceiver.modeling_perceiver.PerceiverImagePreprocessor.conv_after_patching"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>conv_after_patching</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to apply a convolutional layer after patching.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.perceiver.modeling_perceiver.PerceiverImagePreprocessor.conv_after_patching_in_channels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.perceiver.modeling_perceiver.PerceiverImagePreprocessor.conv_after_patching_in_channels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>conv_after_patching_in_channels</strong> (<code>int</code>, <em>optional</em>, defaults to 54) &#x2014; Number of channels in the input of the convolutional layer after patching.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.perceiver.modeling_perceiver.PerceiverImagePreprocessor.conv2d_use_batchnorm" class="header-link block 
pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.perceiver.modeling_perceiver.PerceiverImagePreprocessor.conv2d_use_batchnorm"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>conv2d_use_batchnorm</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether to use batch normalization in the convolutional layer.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.perceiver.modeling_perceiver.PerceiverImagePreprocessor.concat_or_add_pos" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.perceiver.modeling_perceiver.PerceiverImagePreprocessor.concat_or_add_pos"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>concat_or_add_pos</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;concat&quot;</code>) &#x2014; How to concatenate the position encoding to the input. 
Can be &#x201C;concat&#x201D; or &#x201C;add&#x201D;.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.perceiver.modeling_perceiver.PerceiverImagePreprocessor.project_pos_dim" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.perceiver.modeling_perceiver.PerceiverImagePreprocessor.project_pos_dim"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>project_pos_dim</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Dimension of the position encoding to project to. If -1, no projection is applied.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.perceiver.modeling_perceiver.PerceiverImagePreprocessor.*position_encoding_kwargs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.perceiver.modeling_perceiver.PerceiverImagePreprocessor.*position_encoding_kwargs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START -->*<strong>*position_encoding_kwargs</strong> (<code>Dict</code>, <em>optional</em>) &#x2014; Keyword arguments for the position encoding.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Image preprocessing for Perceiver Encoder.</p> <p>Note: the <em>out_channels</em> argument refers to the output channels of a convolutional layer, if <em>prep_type</em> is set to “conv1x1” or “conv”. 
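</p> <p>As a rough construction sketch (the specific argument values, the 224x224 input, and the <code>fourier_position_encoding_kwargs</code> keyword below are illustrative assumptions rather than settings documented on this page):</p> <pre><code class="language-python">import torch

from transformers import PerceiverConfig
from transformers.models.perceiver.modeling_perceiver import PerceiverImagePreprocessor

config = PerceiverConfig()

# "conv1x1" preprocessing: a 1x1 convolution maps the 3 input channels to out_channels, and a
# Fourier position encoding is concatenated to the result. The position encoding settings are
# passed on via **position_encoding_kwargs (the keyword name below is an assumption).
preprocessor = PerceiverImagePreprocessor(
    config,
    prep_type="conv1x1",
    spatial_downsample=1,
    out_channels=256,
    position_encoding_type="fourier",
    fourier_position_encoding_kwargs=dict(
        concat_pos=True, max_resolution=(224, 224), num_bands=64, sine_only=False
    ),
)

# Dummy batch of images of shape (batch_size, num_channels, height, width).
pixel_values = torch.randn(1, 3, 224, 224)
outputs = preprocessor(pixel_values)
</code></pre> <p>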
If one adds absolute position embeddings, one must make sure the <em>num_channels</em> of the position encoding kwargs are set equal to the <em>out_channels</em>.</p></div> <h2 class="relative group"><a id="transformers.models.perceiver.modeling_perceiver.PerceiverOneHotPreprocessor" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.perceiver.modeling_perceiver.PerceiverOneHotPreprocessor"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>PerceiverOneHotPreprocessor </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.models.perceiver.modeling_perceiver.PerceiverOneHotPreprocessor"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.models.perceiver.modeling_perceiver.</span><span class="font-semibold">PerceiverOneHotPreprocessor</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.models.perceiver.modeling_perceiver.PerceiverOneHotPreprocessor" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.models.perceiver.modeling_perceiver.PerceiverOneHotPreprocessor"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 
79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/perceiver/modeling_perceiver.py#L3153" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.perceiver.modeling_perceiver.PerceiverOneHotPreprocessor.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.perceiver.modeling_perceiver.PerceiverOneHotPreprocessor.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/perceiver#transformers.PerceiverConfig">PerceiverConfig</a>) &#x2014; Model configuration.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>One-hot preprocessor for Perceiver Encoder. 
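</p> <p>As a rough sketch (the number of labels, the placement of the dummy axis and the return format below are illustrative assumptions):</p> <pre><code class="language-python">import torch

from transformers import PerceiverConfig
from transformers.models.perceiver.modeling_perceiver import PerceiverOneHotPreprocessor

config = PerceiverConfig(num_labels=10)
preprocessor = PerceiverOneHotPreprocessor(config)

# One-hot labels of shape (batch_size, num_labels).
one_hot = torch.nn.functional.one_hot(torch.tensor([3]), num_classes=10).float()

# Adds a dummy index dimension, e.g. (1, 10) is expected to become (1, 1, 10).
# Depending on the library version, auxiliary values may be returned alongside the tensor.
outputs = preprocessor(one_hot)
</code></pre> <p>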
Can be used to add a dummy index dimension to the input.</p></div> <h2 class="relative group"><a id="transformers.models.perceiver.modeling_perceiver.PerceiverAudioPreprocessor" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.perceiver.modeling_perceiver.PerceiverAudioPreprocessor"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>PerceiverAudioPreprocessor </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.models.perceiver.modeling_perceiver.PerceiverAudioPreprocessor"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.models.perceiver.modeling_perceiver.</span><span class="font-semibold">PerceiverAudioPreprocessor</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.models.perceiver.modeling_perceiver.PerceiverAudioPreprocessor" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.models.perceiver.modeling_perceiver.PerceiverAudioPreprocessor"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 
11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/perceiver/modeling_perceiver.py#L3179" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">prep_type<span class="opacity-60">: str = &#39;patches&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">samples_per_patch<span class="opacity-60">: int = 96</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_encoding_type<span class="opacity-60">: str = &#39;fourier&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">concat_or_add_pos<span class="opacity-60">: str = &#39;concat&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">out_channels<span class="opacity-60"> = 64</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">project_pos_dim<span class="opacity-60"> = -1</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**position_encoding_kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.perceiver.modeling_perceiver.PerceiverAudioPreprocessor.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.perceiver.modeling_perceiver.PerceiverAudioPreprocessor.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 
56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> ([<em>PerceiverConfig</em>]) &#x2014; Model configuration.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.perceiver.modeling_perceiver.PerceiverAudioPreprocessor.prep_type" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.perceiver.modeling_perceiver.PerceiverAudioPreprocessor.prep_type"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>prep_type</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;patches&quot;</code>) &#x2014; Preprocessor type to use. Only &#x201C;patches&#x201D; is supported.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.perceiver.modeling_perceiver.PerceiverAudioPreprocessor.samples_per_patch" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.perceiver.modeling_perceiver.PerceiverAudioPreprocessor.samples_per_patch"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>samples_per_patch</strong> (<code>int</code>, <em>optional</em>, defaults to 96) &#x2014; Number of samples per patch.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.perceiver.modeling_perceiver.PerceiverAudioPreprocessor.position_encoding_type" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full" href="#transformers.models.perceiver.modeling_perceiver.PerceiverAudioPreprocessor.position_encoding_type"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_encoding_type</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;fourier&quot;</code>) &#x2014; Type of position encoding to use. Can be &#x201C;trainable&#x201D; or &#x201C;fourier&#x201D;.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.perceiver.modeling_perceiver.PerceiverAudioPreprocessor.concat_or_add_pos" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.perceiver.modeling_perceiver.PerceiverAudioPreprocessor.concat_or_add_pos"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>concat_or_add_pos</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;concat&quot;</code>) &#x2014; How to concatenate the position encoding to the input. 
Can be &#x201C;concat&#x201D; or &#x201C;add&#x201D;.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.perceiver.modeling_perceiver.PerceiverAudioPreprocessor.out_channels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.perceiver.modeling_perceiver.PerceiverAudioPreprocessor.out_channels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>out_channels</strong> (<code>int</code>, <em>optional</em>, defaults to 64) &#x2014; Number of channels in the output.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.perceiver.modeling_perceiver.PerceiverAudioPreprocessor.project_pos_dim" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.perceiver.modeling_perceiver.PerceiverAudioPreprocessor.project_pos_dim"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>project_pos_dim</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Dimension of the position encoding to project to. 
If -1, no projection is applied.
- **position_encoding_kwargs** (`Dict`, *optional*) -- Keyword arguments for the position encoding.

Audio preprocessing for Perceiver Encoder.

## PerceiverMultimodalPreprocessor

`class transformers.models.perceiver.modeling_perceiver.PerceiverMultimodalPreprocessor`
[< source >](https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/perceiver/modeling_perceiver.py#L3276)

`( modalities: typing.Mapping[str, typing.Callable[..., typing.Tuple[torch.Tensor, typing.Optional[torch.Tensor], torch.Tensor]]], mask_probs: typing.Union[typing.Mapping[str, float], NoneType] = None, min_padding_size: int = 2 )`

Parameters

- **modalities** (`Dict[str, PreprocessorType]`) -- Dict mapping modality name to preprocessor.
- **mask_probs** (`Dict[str, float]`) -- Dict mapping modality name to masking probability of that modality.
- **min_padding_size** (`int`, *optional*, defaults to 2) -- The minimum padding size for all modalities. The final output will have `num_channels` equal to the maximum number of channels across all modalities plus `min_padding_size`.

Multimodal preprocessing for Perceiver Encoder.

Inputs for each modality are preprocessed, then padded with trainable position embeddings to have the same number of channels.
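As a rough, illustrative sketch (not taken from this page): `image_preprocessor` and `audio_preprocessor` below stand in for per-modality preprocessors assumed to be constructed elsewhere, and the modality names, mask probabilities, and padding size are arbitrary example values.

```python
from transformers.models.perceiver.modeling_perceiver import PerceiverMultimodalPreprocessor

# `image_preprocessor` and `audio_preprocessor` are assumed to be already-built
# Perceiver preprocessors (e.g. an image preprocessor and an audio preprocessor).
multimodal_preprocessor = PerceiverMultimodalPreprocessor(
    modalities={"image": image_preprocessor, "audio": audio_preprocessor},
    mask_probs={"image": 0.0, "audio": 0.0},  # no masking in this sketch
    min_padding_size=4,
)
```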
href="#transformers.models.perceiver.modeling_perceiver.PerceiverProjectionDecoder"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/perceiver/modeling_perceiver.py#L2011" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.perceiver.modeling_perceiver.PerceiverProjectionDecoder.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.perceiver.modeling_perceiver.PerceiverProjectionDecoder.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/perceiver#transformers.PerceiverConfig">PerceiverConfig</a>) &#x2014; Model configuration.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Baseline projection decoder (no cross-attention).</p></div> <h2 class="relative group"><a id="transformers.models.perceiver.modeling_perceiver.PerceiverBasicDecoder" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.models.perceiver.modeling_perceiver.PerceiverBasicDecoder"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>PerceiverBasicDecoder </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.models.perceiver.modeling_perceiver.PerceiverBasicDecoder"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.models.perceiver.modeling_perceiver.</span><span class="font-semibold">PerceiverBasicDecoder</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.models.perceiver.modeling_perceiver.PerceiverBasicDecoder" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.models.perceiver.modeling_perceiver.PerceiverBasicDecoder"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/perceiver/modeling_perceiver.py#L2035" 
target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_num_channels<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_encoding_type<span class="opacity-60"> = &#39;trainable&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_index_dims<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_channels<span class="opacity-60"> = 128</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">subsampled_index_dims<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">qk_channels<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">v_channels<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_heads<span class="opacity-60"> = 1</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">widening_factor<span class="opacity-60"> = 1</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_query_residual<span class="opacity-60"> = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">concat_preprocessed_input<span class="opacity-60"> = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">final_project<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_encoding_only<span class="opacity-60"> = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**position_encoding_kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 
items-start"><a id="transformers.models.perceiver.modeling_perceiver.PerceiverBasicDecoder.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.perceiver.modeling_perceiver.PerceiverBasicDecoder.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> ([<em>PerceiverConfig</em>]) &#x2014; Model configuration.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.perceiver.modeling_perceiver.PerceiverBasicDecoder.output_num_channels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.perceiver.modeling_perceiver.PerceiverBasicDecoder.output_num_channels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_num_channels</strong> (<code>int</code>, <em>optional</em>) &#x2014; The number of channels in the output. 
Will only be used in case <em>final_project</em> is set to <code>True</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.perceiver.modeling_perceiver.PerceiverBasicDecoder.position_encoding_type" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.perceiver.modeling_perceiver.PerceiverBasicDecoder.position_encoding_type"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_encoding_type</strong> (<code>str</code>, <em>optional</em>, defaults to &#x201C;trainable&#x201D;) &#x2014; The type of position encoding to use. Can be either &#x201C;trainable&#x201D;, &#x201C;fourier&#x201D;, or &#x201C;none&#x201D;.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.perceiver.modeling_perceiver.PerceiverBasicDecoder.output_index_dims" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.perceiver.modeling_perceiver.PerceiverBasicDecoder.output_index_dims"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_index_dims</strong> (<code>int</code>, <em>optional</em>) &#x2014; The number of dimensions of the output queries. 
Ignored if &#x2018;position_encoding_type&#x2019; == &#x2018;none&#x2019;.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.perceiver.modeling_perceiver.PerceiverBasicDecoder.num_channels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.perceiver.modeling_perceiver.PerceiverBasicDecoder.num_channels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_channels</strong> (<code>int</code>, <em>optional</em>) &#x2014; The number of channels of the decoder queries. Ignored if &#x2018;position_encoding_type&#x2019; == &#x2018;none&#x2019;.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.perceiver.modeling_perceiver.PerceiverBasicDecoder.qk_channels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.perceiver.modeling_perceiver.PerceiverBasicDecoder.qk_channels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>qk_channels</strong> (<code>int</code>, <em>optional</em>) &#x2014; The number of channels of the queries and keys in the cross-attention layer.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.perceiver.modeling_perceiver.PerceiverBasicDecoder.v_channels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.perceiver.modeling_perceiver.PerceiverBasicDecoder.v_channels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>v_channels</strong> (<code>int</code>, <em>optional</em>, defaults to 128) &#x2014; The number of channels of the values in the cross-attention layer.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.perceiver.modeling_perceiver.PerceiverBasicDecoder.num_heads" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.perceiver.modeling_perceiver.PerceiverBasicDecoder.num_heads"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; The number of attention heads in the cross-attention layer.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.perceiver.modeling_perceiver.PerceiverBasicDecoder.widening_factor" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.perceiver.modeling_perceiver.PerceiverBasicDecoder.widening_factor"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>widening_factor</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; The widening factor of the cross-attention layer.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.perceiver.modeling_perceiver.PerceiverBasicDecoder.use_query_residual" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.perceiver.modeling_perceiver.PerceiverBasicDecoder.use_query_residual"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_query_residual</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to use a residual connection between the query and the output of the cross-attention layer.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.perceiver.modeling_perceiver.PerceiverBasicDecoder.concat_preprocessed_input" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.perceiver.modeling_perceiver.PerceiverBasicDecoder.concat_preprocessed_input"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>concat_preprocessed_input</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to concatenate the preprocessed input to the query.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.perceiver.modeling_perceiver.PerceiverBasicDecoder.final_project" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 
with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.perceiver.modeling_perceiver.PerceiverBasicDecoder.final_project"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>final_project</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether to project the output of the cross-attention layer to a target dimension.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.perceiver.modeling_perceiver.PerceiverBasicDecoder.position_encoding_only" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.perceiver.modeling_perceiver.PerceiverBasicDecoder.position_encoding_only"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_encoding_only</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to only use this class to define output queries.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Cross-attention-based decoder. 
This class can be used to decode the final hidden states of the latents using a cross-attention operation, in which the latents produce keys and values.</p> <p>The shape of the output of this class depends on how one defines the output queries (also called decoder queries).</p></div> <h2 class="relative group"><a id="transformers.models.perceiver.modeling_perceiver.PerceiverClassificationDecoder" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.perceiver.modeling_perceiver.PerceiverClassificationDecoder"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>PerceiverClassificationDecoder </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.models.perceiver.modeling_perceiver.PerceiverClassificationDecoder"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.models.perceiver.modeling_perceiver.</span><span class="font-semibold">PerceiverClassificationDecoder</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.models.perceiver.modeling_perceiver.PerceiverClassificationDecoder" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.models.perceiver.modeling_perceiver.PerceiverClassificationDecoder"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 
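As a minimal sketch of how this could look in practice, the decoder below is set up to produce a single trainable decoder query per example with `d_latents` channels; the `trainable_position_encoding_kwargs` argument is forwarded to the position-encoding builder, and its exact contents here are assumptions of this example rather than part of the signature documented above.

```python
from transformers import PerceiverConfig
from transformers.models.perceiver.modeling_perceiver import PerceiverBasicDecoder

config = PerceiverConfig()

# One trainable decoder query per example; since `final_project` defaults to True,
# the cross-attention output is projected to `output_num_channels` channels.
decoder = PerceiverBasicDecoder(
    config,
    num_channels=config.d_latents,
    output_num_channels=config.d_latents,
    output_index_dims=1,
    num_heads=1,
    use_query_residual=True,
    trainable_position_encoding_kwargs=dict(num_channels=config.d_latents, index_dims=1),
)
```

In the library, a decoder constructed this way is typically passed to `PerceiverModel` via its `decoder` argument, together with an input preprocessor.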
## PerceiverClassificationDecoder

`class transformers.models.perceiver.modeling_perceiver.PerceiverClassificationDecoder`
[< source >](https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/perceiver/modeling_perceiver.py#L2215)

`( config, **decoder_kwargs )`

Parameters

- **config** ([PerceiverConfig](/docs/transformers/pr_16143/en/model_doc/perceiver#transformers.PerceiverConfig)) -- Model configuration.

Cross-attention based classification decoder. Lightweight wrapper of `PerceiverBasicDecoder` for logit output. Turns the output of the Perceiver encoder, of shape (batch_size, num_latents, d_latents), into a tensor of shape (batch_size, num_labels). The queries are of shape (batch_size, 1, num_labels).
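For illustration, here is a sketch of constructing such a decoder. The extra keyword arguments are forwarded to the underlying `PerceiverBasicDecoder`; the position-encoding settings and the number of labels shown are assumptions of this example.

```python
from transformers import PerceiverConfig
from transformers.models.perceiver.modeling_perceiver import PerceiverClassificationDecoder

config = PerceiverConfig(num_labels=10)

# The decoder cross-attends to the latents with a single trainable query and
# returns logits of shape (batch_size, num_labels).
decoder = PerceiverClassificationDecoder(
    config,
    num_channels=config.d_latents,
    trainable_position_encoding_kwargs=dict(num_channels=config.d_latents, index_dims=1),
    use_query_residual=True,
)
```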
## PerceiverOpticalFlowDecoder

`class transformers.models.perceiver.modeling_perceiver.PerceiverOpticalFlowDecoder`
[< source >](https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/perceiver/modeling_perceiver.py#L2255)

`( config, output_image_shape, output_num_channels = 2, rescale_factor = 100.0, **decoder_kwargs )`

Cross-attention based optical flow decoder.

## PerceiverBasicVideoAutoencodingDecoder
role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.models.perceiver.modeling_perceiver.</span><span class="font-semibold">PerceiverBasicVideoAutoencodingDecoder</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.models.perceiver.modeling_perceiver.PerceiverBasicVideoAutoencodingDecoder" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.models.perceiver.modeling_perceiver.PerceiverBasicVideoAutoencodingDecoder"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/perceiver/modeling_perceiver.py#L2284" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_shape<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_encoding_type<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**decoder_kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.perceiver.modeling_perceiver.PerceiverBasicVideoAutoencodingDecoder.config" class="header-link block pr-0.5 text-lg no-hover:hidden 
with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.perceiver.modeling_perceiver.PerceiverBasicVideoAutoencodingDecoder.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> ([<em>PerceiverConfig</em>]) &#x2014; Model configuration.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.perceiver.modeling_perceiver.PerceiverBasicVideoAutoencodingDecoder.output_shape" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.perceiver.modeling_perceiver.PerceiverBasicVideoAutoencodingDecoder.output_shape"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_shape</strong> (<code>List[int]</code>) &#x2014; Shape of the output as (batch_size, num_frames, height, width), excluding the channel dimension.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.perceiver.modeling_perceiver.PerceiverBasicVideoAutoencodingDecoder.position_encoding_type" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.perceiver.modeling_perceiver.PerceiverBasicVideoAutoencodingDecoder.position_encoding_type"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 
0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_encoding_type</strong> (<code>str</code>) &#x2014; The type of position encoding to use. Can be either &#x201C;trainable&#x201D;, &#x201C;fourier&#x201D;, or &#x201C;none&#x201D;.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Cross-attention based video-autoencoding decoder. Light-weight wrapper of [<em>PerceiverBasicDecoder</em>] with video reshaping logic.</p></div> <h2 class="relative group"><a id="transformers.models.perceiver.modeling_perceiver.PerceiverMultimodalDecoder" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.perceiver.modeling_perceiver.PerceiverMultimodalDecoder"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>PerceiverMultimodalDecoder </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.models.perceiver.modeling_perceiver.PerceiverMultimodalDecoder"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.models.perceiver.modeling_perceiver.</span><span class="font-semibold">PerceiverMultimodalDecoder</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.models.perceiver.modeling_perceiver.PerceiverMultimodalDecoder" class="header-link invisible 
with-hover:group-hover:visible pr-2" href="#transformers.models.perceiver.modeling_perceiver.PerceiverMultimodalDecoder"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/perceiver/modeling_perceiver.py#L2357" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">modalities<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_outputs<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_num_channels<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">min_padding_size<span class="opacity-60"> = 2</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">subsampled_index_dims<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**decoder_kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.perceiver.modeling_perceiver.PerceiverMultimodalDecoder.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.perceiver.modeling_perceiver.PerceiverMultimodalDecoder.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 
256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> ([<em>PerceiverConfig</em>]) &#x2014; Model configuration.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.perceiver.modeling_perceiver.PerceiverMultimodalDecoder.modalities" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.perceiver.modeling_perceiver.PerceiverMultimodalDecoder.modalities"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>modalities</strong> (<code>Dict[str, PerceiverAbstractDecoder]</code>) &#x2014; Dictionary mapping modality name to the decoder of that modality.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.perceiver.modeling_perceiver.PerceiverMultimodalDecoder.num_outputs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.perceiver.modeling_perceiver.PerceiverMultimodalDecoder.num_outputs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_outputs</strong> (<code>int</code>) &#x2014; The number of outputs of the decoder.<!-- HTML_TAG_END --> </span></span> </li><li 
class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.perceiver.modeling_perceiver.PerceiverMultimodalDecoder.output_num_channels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.perceiver.modeling_perceiver.PerceiverMultimodalDecoder.output_num_channels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_num_channels</strong> (<code>int</code>) &#x2014; The number of channels in the output.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.perceiver.modeling_perceiver.PerceiverMultimodalDecoder.min_padding_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.perceiver.modeling_perceiver.PerceiverMultimodalDecoder.min_padding_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>min_padding_size</strong> (<code>int</code>, <em>optional</em>, defaults to 2) &#x2014; The minimum padding size for all modalities. 
The final output will have num_channels equal to the maximum channels across all modalities plus min_padding_size.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.perceiver.modeling_perceiver.PerceiverMultimodalDecoder.subsampled_index_dims" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.perceiver.modeling_perceiver.PerceiverMultimodalDecoder.subsampled_index_dims"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>subsampled_index_dims</strong> (<code>Dict[str, PerceiverAbstractDecoder]</code>, <em>optional</em>) &#x2014; Dictionary mapping modality name to the subsampled index dimensions to use for the decoder query of that modality.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Multimodal decoding by composing uni-modal decoders. The <em>modalities</em> argument of the constructor is a dictionary mapping modality name to the decoder of that modality. That decoder will be used to construct queries for that modality. 
Modality-specific queries are padded with trainable modality-specific parameters, after which they are concatenated along the time dimension.</p> <p>Next, there is a shared cross attention operation across all modalities.</p></div> <h2 class="relative group"><a id="transformers.models.perceiver.modeling_perceiver.PerceiverProjectionPostprocessor" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.perceiver.modeling_perceiver.PerceiverProjectionPostprocessor"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>PerceiverProjectionPostprocessor </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.models.perceiver.modeling_perceiver.PerceiverProjectionPostprocessor"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.models.perceiver.modeling_perceiver.</span><span class="font-semibold">PerceiverProjectionPostprocessor</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.models.perceiver.modeling_perceiver.PerceiverProjectionPostprocessor" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.models.perceiver.modeling_perceiver.PerceiverProjectionPostprocessor"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 
1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/perceiver/modeling_perceiver.py#L2903" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">in_channels<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">out_channels<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.perceiver.modeling_perceiver.PerceiverProjectionPostprocessor.in_channels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.perceiver.modeling_perceiver.PerceiverProjectionPostprocessor.in_channels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>in_channels</strong> (<code>int</code>) &#x2014; Number of channels in the input.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.perceiver.modeling_perceiver.PerceiverProjectionPostprocessor.out_channels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.perceiver.modeling_perceiver.PerceiverProjectionPostprocessor.out_channels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 
11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>out_channels</strong> (<code>int</code>) &#x2014; Number of channels in the output.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Projection postprocessing for Perceiver. Can be used to project the channels of the decoder output to a lower dimension.</p></div> <h2 class="relative group"><a id="transformers.models.perceiver.modeling_perceiver.PerceiverAudioPostprocessor" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.perceiver.modeling_perceiver.PerceiverAudioPostprocessor"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>PerceiverAudioPostprocessor </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.models.perceiver.modeling_perceiver.PerceiverAudioPostprocessor"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.models.perceiver.modeling_perceiver.</span><span class="font-semibold">PerceiverAudioPostprocessor</span></span></h3><!-- HTML_TAG_END --> <a 
id="transformers.models.perceiver.modeling_perceiver.PerceiverAudioPostprocessor" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.models.perceiver.modeling_perceiver.PerceiverAudioPostprocessor"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/perceiver/modeling_perceiver.py#L2875" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">in_channels<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">postproc_type<span class="opacity-60">: str = &#39;patches&#39;</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.perceiver.modeling_perceiver.PerceiverAudioPostprocessor.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.perceiver.modeling_perceiver.PerceiverAudioPostprocessor.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> ([<em>PerceiverConfig</em>]) &#x2014; Model 
configuration.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.perceiver.modeling_perceiver.PerceiverAudioPostprocessor.in_channels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.perceiver.modeling_perceiver.PerceiverAudioPostprocessor.in_channels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>in_channels</strong> (<code>int</code>) &#x2014; Number of channels in the input.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.perceiver.modeling_perceiver.PerceiverAudioPostprocessor.postproc_type" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.perceiver.modeling_perceiver.PerceiverAudioPostprocessor.postproc_type"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>postproc_type</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;patches&quot;</code>) &#x2014; Postprocessor type to use. Currently, only &#x201C;patches&#x201D; is supported.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Audio postprocessing for Perceiver. 
Can be used to convert the decoder output to audio features.</p></div> <h2 class="relative group"><a id="transformers.models.perceiver.modeling_perceiver.PerceiverClassificationPostprocessor" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.perceiver.modeling_perceiver.PerceiverClassificationPostprocessor"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>PerceiverClassificationPostprocessor </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.models.perceiver.modeling_perceiver.PerceiverClassificationPostprocessor"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.models.perceiver.modeling_perceiver.</span><span class="font-semibold">PerceiverClassificationPostprocessor</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.models.perceiver.modeling_perceiver.PerceiverClassificationPostprocessor" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.models.perceiver.modeling_perceiver.PerceiverClassificationPostprocessor"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 
43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/perceiver/modeling_perceiver.py#L2855" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">in_channels<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.perceiver.modeling_perceiver.PerceiverClassificationPostprocessor.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.perceiver.modeling_perceiver.PerceiverClassificationPostprocessor.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> ([<em>PerceiverConfig</em>]) &#x2014; Model configuration.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.perceiver.modeling_perceiver.PerceiverClassificationPostprocessor.in_channels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.perceiver.modeling_perceiver.PerceiverClassificationPostprocessor.in_channels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 
0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>in_channels</strong> (<code>int</code>) &#x2014; Number of channels in the input.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Classification postprocessing for Perceiver. Can be used to convert the decoder output to classification logits.</p></div> <h2 class="relative group"><a id="transformers.models.perceiver.modeling_perceiver.PerceiverMultimodalPostprocessor" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.perceiver.modeling_perceiver.PerceiverMultimodalPostprocessor"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>PerceiverMultimodalPostprocessor </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.models.perceiver.modeling_perceiver.PerceiverMultimodalPostprocessor"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.models.perceiver.modeling_perceiver.</span><span class="font-semibold">PerceiverMultimodalPostprocessor</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.models.perceiver.modeling_perceiver.PerceiverMultimodalPostprocessor" class="header-link invisible with-hover:group-hover:visible pr-2" 
href="#transformers.models.perceiver.modeling_perceiver.PerceiverMultimodalPostprocessor"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/perceiver/modeling_perceiver.py#L2821" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">modalities<span class="opacity-60">: typing.Mapping[str, typing.Callable[..., typing.Any]]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_is_dict<span class="opacity-60">: bool = False</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.models.perceiver.modeling_perceiver.PerceiverMultimodalPostprocessor.modalities" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.perceiver.modeling_perceiver.PerceiverMultimodalPostprocessor.modalities"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>modalities</strong> (<code>Dict[str, PostprocessorType]</code>) &#x2014; Dictionary mapping modality name to postprocessor class for that modality.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a 
id="transformers.models.perceiver.modeling_perceiver.PerceiverMultimodalPostprocessor.input_is_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.models.perceiver.modeling_perceiver.PerceiverMultimodalPostprocessor.input_is_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_is_dict</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; If True, input is assumed to be dictionary structured, and outputs keep the same dictionary shape. If False, input is a tensor which is sliced up during postprocessing by <em>modality_sizes</em>.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Multimodal postprocessing for Perceiver. Can be used to combine modality-specific postprocessors into a single postprocessor.</p></div> <h2 class="relative group"><a id="transformers.PerceiverModel" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverModel"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>PerceiverModel </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PerceiverModel"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 
3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">PerceiverModel</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.PerceiverModel" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PerceiverModel"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/perceiver/modeling_perceiver.py#L721" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_preprocessor<span class="opacity-60">: typing.Callable[..., typing.Tuple[torch.Tensor, typing.Optional[torch.Tensor], torch.Tensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_postprocessor<span class="opacity-60">: typing.Callable[..., typing.Any] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverModel.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverModel.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" 
height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/perceiver#transformers.PerceiverConfig">PerceiverConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverModel.decoder" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverModel.decoder"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder</strong> (<em>DecoderType</em>, <em>optional</em>) &#x2014; Optional decoder to use to decode the latent representation of the encoder. 
Examples include <em>transformers.models.perceiver.modeling_perceiver.PerceiverBasicDecoder</em>, <em>transformers.models.perceiver.modeling_perceiver.PerceiverClassificationDecoder</em>, <em>transformers.models.perceiver.modeling_perceiver.PerceiverMultimodalDecoder</em>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverModel.input_preprocessor" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverModel.input_preprocessor"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_preprocessor</strong> (<em>PreprocessorType</em>, <em>optional</em>) &#x2014; Optional input preprocessor to use. Examples include <em>transformers.models.perceiver.modeling_perceiver.PerceiverImagePreprocessor</em>, <em>transformers.models.perceiver.modeling_perceiver.PerceiverAudioPreprocessor</em>, <em>transformers.models.perceiver.modeling_perceiver.PerceiverTextPreprocessor</em>, <em>transformers.models.perceiver.modeling_perceiver.PerceiverMultimodalPreprocessor</em>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverModel.output_postprocessor" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverModel.output_postprocessor"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_postprocessor</strong> (<em>PostprocessorType</em>, <em>optional</em>) &#x2014; Optional output postprocessor to use. 
Examples include <em>transformers.models.perceiver.modeling_perceiver.PerceiverImagePostprocessor</em>, <em>transformers.models.perceiver.modeling_perceiver.PerceiverAudioPostprocessor</em>, <em>transformers.models.perceiver.modeling_perceiver.PerceiverClassificationPostprocessor</em>, <em>transformers.models.perceiver.modeling_perceiver.PerceiverProjectionPostprocessor</em>, <em>transformers.models.perceiver.modeling_perceiver.PerceiverMultimodalPostprocessor</em>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverModel.Note" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverModel.Note"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>Note</strong> that you can define your own decoders, preprocessors and/or postprocessors to fit your use-case. &#x2014;<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>The Perceiver: a scalable, fully attentional architecture. This model is a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and behavior.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PerceiverModel.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.PerceiverModel.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PerceiverModel.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/perceiver/modeling_perceiver.py#L757" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">subsampled_output_points<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/model_doc/perceiver#transformers.models.perceiver.modeling_perceiver.PerceiverModelOutput" >transformers.models.perceiver.modeling_perceiver.PerceiverModelOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverModel.forward.inputs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverModel.forward.inputs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs</strong> (<code>torch.FloatTensor</code>) &#x2014; Inputs to the perceiver. 
Can be anything: images, text, audio, video, etc.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverModel.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverModel.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverModel.forward.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverModel.forward.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverModel.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverModel.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverModel.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverModel.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverModel.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverModel.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.PerceiverModel.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/model_doc/perceiver#transformers.models.perceiver.modeling_perceiver.PerceiverModelOutput" >transformers.models.perceiver.modeling_perceiver.PerceiverModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/model_doc/perceiver#transformers.models.perceiver.modeling_perceiver.PerceiverModelOutput" >transformers.models.perceiver.modeling_perceiver.PerceiverModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/perceiver#transformers.PerceiverConfig" >PerceiverConfig</a>) and inputs.</p> <ul> <li><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_labels)</code>) — Classification (or regression if config.num_labels==1) scores (before SoftMax).</li> <li><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) — Sequence of hidden-states at the output of the last layer of the model.</li> <li><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>. 
Hidden-states of the model at the output of each layer plus the initial embedding outputs.</li> <li><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</li> <li><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights of the decoder’s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/perceiver#transformers.PerceiverModel">PerceiverModel</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> PerceiverConfig, PerceiverTokenizer, PerceiverFeatureExtractor, PerceiverModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers.models.perceiver.modeling_perceiver <span class="hljs-keyword">import</span> ( <span class="hljs-meta">... </span> PerceiverTextPreprocessor, <span class="hljs-meta">... 
</span> PerceiverImagePreprocessor, <span class="hljs-meta">... </span> PerceiverClassificationDecoder, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> requests <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># EXAMPLE 1: using the Perceiver to classify texts</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># - we define a TextPreprocessor, which can be used to embed tokens</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># - we define a ClassificationDecoder, which can be used to decode the</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># final hidden states of the latents to classification logits</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># using trainable position embeddings</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = PerceiverConfig() <span class="hljs-meta">&gt;&gt;&gt; </span>preprocessor = PerceiverTextPreprocessor(config) <span class="hljs-meta">&gt;&gt;&gt; </span>decoder = PerceiverClassificationDecoder( <span class="hljs-meta">... </span> config, <span class="hljs-meta">... </span> num_channels=config.d_latents, <span class="hljs-meta">... </span> trainable_position_encoding_kwargs=<span class="hljs-built_in">dict</span>(num_channels=config.d_latents, index_dims=<span class="hljs-number">1</span>), <span class="hljs-meta">... </span> use_query_residual=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = PerceiverModel(config, input_preprocessor=preprocessor, decoder=decoder) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># you can then do a forward pass as follows:</span> <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = PerceiverTokenizer() <span class="hljs-meta">&gt;&gt;&gt; </span>text = <span class="hljs-string">&quot;hello world&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(text, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">with</span> torch.no_grad(): <span class="hljs-meta">... </span> outputs = model(inputs=inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># to train, one can train the model using standard cross-entropy:</span> <span class="hljs-meta">&gt;&gt;&gt; </span>criterion = torch.nn.CrossEntropyLoss() <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([<span class="hljs-number">1</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = criterion(logits, labels) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># EXAMPLE 2: using the Perceiver to classify images</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># - we define an ImagePreprocessor, which can be used to embed images</span> <span class="hljs-meta">&gt;&gt;&gt; </span>preprocessor = PerceiverImagePreprocessor( <span class="hljs-meta">... </span> config, <span class="hljs-meta">... 
</span> prep_type=<span class="hljs-string">&quot;conv1x1&quot;</span>, <span class="hljs-meta">... </span> spatial_downsample=<span class="hljs-number">1</span>, <span class="hljs-meta">... </span> out_channels=<span class="hljs-number">256</span>, <span class="hljs-meta">... </span> position_encoding_type=<span class="hljs-string">&quot;trainable&quot;</span>, <span class="hljs-meta">... </span> concat_or_add_pos=<span class="hljs-string">&quot;concat&quot;</span>, <span class="hljs-meta">... </span> project_pos_dim=<span class="hljs-number">256</span>, <span class="hljs-meta">... </span> trainable_position_encoding_kwargs=<span class="hljs-built_in">dict</span>( <span class="hljs-meta">... </span> num_channels=<span class="hljs-number">256</span>, <span class="hljs-meta">... </span> index_dims=config.image_size**<span class="hljs-number">2</span>, <span class="hljs-meta">... </span> ), <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = PerceiverModel( <span class="hljs-meta">... </span> config, <span class="hljs-meta">... </span> input_preprocessor=preprocessor, <span class="hljs-meta">... </span> decoder=PerceiverClassificationDecoder( <span class="hljs-meta">... </span> config, <span class="hljs-meta">... </span> num_channels=config.d_latents, <span class="hljs-meta">... </span> trainable_position_encoding_kwargs=<span class="hljs-built_in">dict</span>(num_channels=config.d_latents, index_dims=<span class="hljs-number">1</span>), <span class="hljs-meta">... </span> use_query_residual=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> ), <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># you can then do a forward pass as follows:</span> <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = PerceiverFeatureExtractor() <span class="hljs-meta">&gt;&gt;&gt; </span>url = <span class="hljs-string">&quot;http://images.cocodataset.org/val2017/000000039769.jpg&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>image = Image.<span class="hljs-built_in">open</span>(requests.get(url, stream=<span class="hljs-literal">True</span>).raw) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = feature_extractor(image, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).pixel_values <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">with</span> torch.no_grad(): <span class="hljs-meta">... 
</span> outputs = model(inputs=inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># to train, one can train the model using standard cross-entropy:</span> <span class="hljs-meta">&gt;&gt;&gt; </span>criterion = torch.nn.CrossEntropyLoss() <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([<span class="hljs-number">1</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = criterion(logits, labels)<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.PerceiverForMaskedLM" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverForMaskedLM"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>PerceiverForMaskedLM </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PerceiverForMaskedLM"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">PerceiverForMaskedLM</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.PerceiverForMaskedLM" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PerceiverForMaskedLM"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 
28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/perceiver/modeling_perceiver.py#L954" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverForMaskedLM.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverForMaskedLM.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/perceiver#transformers.PerceiverConfig">PerceiverConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Example use of Perceiver for masked language modeling. This model is a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and behavior.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PerceiverForMaskedLM.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.PerceiverForMaskedLM.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PerceiverForMaskedLM.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/perceiver/modeling_perceiver.py#L985" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"> = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/model_doc/perceiver#transformers.models.perceiver.modeling_perceiver.PerceiverMaskedLMOutput" >transformers.models.perceiver.modeling_perceiver.PerceiverMaskedLMOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverForMaskedLM.forward.inputs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverForMaskedLM.forward.inputs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs</strong> (<code>torch.FloatTensor</code>) &#x2014; Inputs to the perceiver. 
Can be anything: images, text, audio, video, etc.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverForMaskedLM.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverForMaskedLM.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverForMaskedLM.forward.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverForMaskedLM.forward.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverForMaskedLM.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverForMaskedLM.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverForMaskedLM.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverForMaskedLM.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverForMaskedLM.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverForMaskedLM.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverForMaskedLM.forward.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverForMaskedLM.forward.labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. 
Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring). Tokens with indices set to <code>-100</code> are ignored (masked); the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.PerceiverForMaskedLM.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/model_doc/perceiver#transformers.models.perceiver.modeling_perceiver.PerceiverMaskedLMOutput" >transformers.models.perceiver.modeling_perceiver.PerceiverMaskedLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/model_doc/perceiver#transformers.models.perceiver.modeling_perceiver.PerceiverMaskedLMOutput" >transformers.models.perceiver.modeling_perceiver.PerceiverMaskedLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/perceiver#transformers.PerceiverConfig" >PerceiverConfig</a>) and inputs.</p> <ul> <li><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) — Masked language modeling (MLM) loss.</li> <li><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) — Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</li> <li><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>. Hidden-states of the model at the output of each layer plus the initial embedding outputs.</li> <li><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, num_latents, num_latents)</code>. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</li> <li><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. 
Attentions weights of the decoder’s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/perceiver#transformers.PerceiverForMaskedLM">PerceiverForMaskedLM</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> PerceiverTokenizer, PerceiverForMaskedLM <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = PerceiverTokenizer.from_pretrained(<span class="hljs-string">&quot;deepmind/language-perceiver&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = PerceiverForMaskedLM.from_pretrained(<span class="hljs-string">&quot;deepmind/language-perceiver&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># training</span> <span class="hljs-meta">&gt;&gt;&gt; </span>text = <span class="hljs-string">&quot;This is an incomplete sentence where some words are missing.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(text, padding=<span class="hljs-string">&quot;max_length&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># mask &quot; missing.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs[<span class="hljs-string">&quot;input_ids&quot;</span>][<span class="hljs-number">0</span>, <span class="hljs-number">52</span>:<span class="hljs-number">61</span>] = tokenizer.mask_token_id <span 
class="hljs-meta">&gt;&gt;&gt; </span>labels = tokenizer(text, padding=<span class="hljs-string">&quot;max_length&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># inference</span> <span class="hljs-meta">&gt;&gt;&gt; </span>text = <span class="hljs-string">&quot;This is an incomplete sentence where some words are missing.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer(text, padding=<span class="hljs-string">&quot;max_length&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># mask bytes corresponding to &quot; missing.&quot;. Note that the model performs much better if the masked span starts with a space.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoding[<span class="hljs-string">&quot;input_ids&quot;</span>][<span class="hljs-number">0</span>, <span class="hljs-number">52</span>:<span class="hljs-number">61</span>] = tokenizer.mask_token_id <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># forward pass</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">with</span> torch.no_grad(): <span class="hljs-meta">... </span> outputs = model(**encoding) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits <span class="hljs-meta">&gt;&gt;&gt; </span>masked_tokens_predictions = logits[<span class="hljs-number">0</span>, <span class="hljs-number">52</span>:<span class="hljs-number">61</span>].argmax(dim=-<span class="hljs-number">1</span>).tolist() <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.decode(masked_tokens_predictions) <span class="hljs-string">&#x27; missing.&#x27;</span><!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.PerceiverForSequenceClassification" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverForSequenceClassification"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>PerceiverForSequenceClassification </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PerceiverForSequenceClassification"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 
to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">PerceiverForSequenceClassification</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.PerceiverForSequenceClassification" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PerceiverForSequenceClassification"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/perceiver/modeling_perceiver.py#L1081" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverForSequenceClassification.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverForSequenceClassification.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 
1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/perceiver#transformers.PerceiverConfig">PerceiverConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Example use of Perceiver for text classification. This model is a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PerceiverForSequenceClassification.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.PerceiverForSequenceClassification.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PerceiverForSequenceClassification.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 
1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/perceiver/modeling_perceiver.py#L1102" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"> = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/model_doc/perceiver#transformers.models.perceiver.modeling_perceiver.PerceiverClassifierOutput" >transformers.models.perceiver.modeling_perceiver.PerceiverClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverForSequenceClassification.forward.inputs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverForSequenceClassification.forward.inputs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" 
role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs</strong> (<code>torch.FloatTensor</code>) &#x2014; Inputs to the perceiver. Can be anything: images, text, audio, video, etc.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverForSequenceClassification.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverForSequenceClassification.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>batch_size, sequence_length</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverForSequenceClassification.forward.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverForSequenceClassification.forward.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverForSequenceClassification.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverForSequenceClassification.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverForSequenceClassification.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverForSequenceClassification.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverForSequenceClassification.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverForSequenceClassification.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverForSequenceClassification.forward.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverForSequenceClassification.forward.labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. If <code>config.num_labels == 1</code>, a regression loss is computed (Mean-Square loss); if <code>config.num_labels &gt; 1</code>, a classification loss is computed (Cross-Entropy).<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.PerceiverForSequenceClassification.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/model_doc/perceiver#transformers.models.perceiver.modeling_perceiver.PerceiverClassifierOutput" >transformers.models.perceiver.modeling_perceiver.PerceiverClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/model_doc/perceiver#transformers.models.perceiver.modeling_perceiver.PerceiverClassifierOutput" >transformers.models.perceiver.modeling_perceiver.PerceiverClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/perceiver#transformers.PerceiverConfig" >PerceiverConfig</a>) and inputs.</p> <ul> <li><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) — Classification (or regression if config.num_labels==1) loss.</li> <li><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) — Classification (or regression if config.num_labels==1) scores (before SoftMax).</li> <li><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>. 
Hidden-states of the model at the output of each layer plus the initial embedding outputs.</li> <li><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</li> <li><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights of the decoder’s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/perceiver#transformers.PerceiverForSequenceClassification">PerceiverForSequenceClassification</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> PerceiverTokenizer, PerceiverForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = PerceiverTokenizer.from_pretrained(<span class="hljs-string">&quot;deepmind/language-perceiver&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = PerceiverForSequenceClassification.from_pretrained(<span 
class="hljs-string">&quot;deepmind/language-perceiver&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>text = <span class="hljs-string">&quot;hello world&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(text, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs=inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.PerceiverForImageClassificationLearned" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverForImageClassificationLearned"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>PerceiverForImageClassificationLearned </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PerceiverForImageClassificationLearned"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">PerceiverForImageClassificationLearned</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.PerceiverForImageClassificationLearned" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PerceiverForImageClassificationLearned"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 
1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/perceiver/modeling_perceiver.py#L1204" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverForImageClassificationLearned.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverForImageClassificationLearned.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/perceiver#transformers.PerceiverConfig">PerceiverConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Example use of Perceiver for image classification, for tasks such as ImageNet.</p> <p>This model uses learned position embeddings. In other words, this model is not given any privileged information about the structure of images. 
As shown in the paper, this model can achieve a top-1 accuracy of 72.7 on ImageNet.</p> <p><a href="/docs/transformers/pr_16143/en/model_doc/perceiver#transformers.PerceiverForImageClassificationLearned">PerceiverForImageClassificationLearned</a> uses <a href="/docs/transformers/pr_16143/en/model_doc/perceiver#transformers.models.perceiver.modeling_perceiver.PerceiverImagePreprocessor">PerceiverImagePreprocessor</a> (with <code>prep_type=&quot;conv1x1&quot;</code>) to preprocess the input images, and <a href="/docs/transformers/pr_16143/en/model_doc/perceiver#transformers.models.perceiver.modeling_perceiver.PerceiverClassificationDecoder">PerceiverClassificationDecoder</a> to decode the latent representation of <a href="/docs/transformers/pr_16143/en/model_doc/perceiver#transformers.PerceiverModel">PerceiverModel</a> into classification logits.</p> <p>This model is a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PerceiverForImageClassificationLearned.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.PerceiverForImageClassificationLearned.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PerceiverForImageClassificationLearned.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 
56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/perceiver/modeling_perceiver.py#L1235" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pixel_values<span class="opacity-60"> = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/model_doc/perceiver#transformers.models.perceiver.modeling_perceiver.PerceiverClassifierOutput" >transformers.models.perceiver.modeling_perceiver.PerceiverClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverForImageClassificationLearned.forward.inputs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverForImageClassificationLearned.forward.inputs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 
84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs</strong> (<code>torch.FloatTensor</code>) &#x2014; Inputs to the perceiver. Can be anything: images, text, audio, video, etc.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverForImageClassificationLearned.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverForImageClassificationLearned.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverForImageClassificationLearned.forward.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverForImageClassificationLearned.forward.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverForImageClassificationLearned.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverForImageClassificationLearned.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverForImageClassificationLearned.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverForImageClassificationLearned.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverForImageClassificationLearned.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverForImageClassificationLearned.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverForImageClassificationLearned.forward.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverForImageClassificationLearned.forward.labels"><span><svg class="text-smd" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the image classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.PerceiverForImageClassificationLearned.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/model_doc/perceiver#transformers.models.perceiver.modeling_perceiver.PerceiverClassifierOutput" >transformers.models.perceiver.modeling_perceiver.PerceiverClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/model_doc/perceiver#transformers.models.perceiver.modeling_perceiver.PerceiverClassifierOutput" >transformers.models.perceiver.modeling_perceiver.PerceiverClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/perceiver#transformers.PerceiverConfig" >PerceiverConfig</a>) and inputs.</p> <ul> <li><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) — Classification (or regression if config.num_labels==1) loss.</li> <li><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) — Classification (or regression if config.num_labels==1) scores (before SoftMax).</li> <li><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>. 
Hidden-states of the model at the output of each layer plus the initial embedding outputs.</li> <li><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</li> <li><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights of the decoder’s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/perceiver#transformers.PerceiverForImageClassificationLearned">PerceiverForImageClassificationLearned</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> PerceiverFeatureExtractor, PerceiverForImageClassificationLearned <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> requests <span class="hljs-meta">&gt;&gt;&gt; 
</span>url = <span class="hljs-string">&quot;http://images.cocodataset.org/val2017/000000039769.jpg&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>image = Image.<span class="hljs-built_in">open</span>(requests.get(url, stream=<span class="hljs-literal">True</span>).raw) <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = PerceiverFeatureExtractor.from_pretrained(<span class="hljs-string">&quot;deepmind/vision-perceiver-learned&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = PerceiverForImageClassificationLearned.from_pretrained(<span class="hljs-string">&quot;deepmind/vision-perceiver-learned&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = feature_extractor(images=image, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).pixel_values <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs=inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># model predicts one of the 1000 ImageNet classes</span> <span class="hljs-meta">&gt;&gt;&gt; </span>predicted_class_idx = logits.argmax(-<span class="hljs-number">1</span>).item() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(<span class="hljs-string">&quot;Predicted class:&quot;</span>, model.config.id2label[predicted_class_idx])<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.PerceiverForImageClassificationFourier" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverForImageClassificationFourier"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>PerceiverForImageClassificationFourier </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PerceiverForImageClassificationFourier"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 
2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">PerceiverForImageClassificationFourier</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.PerceiverForImageClassificationFourier" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PerceiverForImageClassificationFourier"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/perceiver/modeling_perceiver.py#L1343" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverForImageClassificationFourier.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverForImageClassificationFourier.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a 
href="/docs/transformers/pr_16143/en/model_doc/perceiver#transformers.PerceiverConfig">PerceiverConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Example use of Perceiver for image classification, for tasks such as ImageNet.</p> <p>This model uses fixed 2D Fourier position embeddings. As shown in the paper, this model can achieve a top-1 accuracy of 79.0 on ImageNet, and 84.5 when pre-trained on a large-scale dataset (i.e. JFT).</p> <p><a href="/docs/transformers/pr_16143/en/model_doc/perceiver#transformers.PerceiverForImageClassificationLearned">PerceiverForImageClassificationLearned</a> uses <a href="/docs/transformers/pr_16143/en/model_doc/perceiver#transformers.models.perceiver.modeling_perceiver.PerceiverImagePreprocessor">PerceiverImagePreprocessor</a> (with <code>prep_type=&quot;pixels&quot;</code>) to preprocess the input images, and <a href="/docs/transformers/pr_16143/en/model_doc/perceiver#transformers.models.perceiver.modeling_perceiver.PerceiverClassificationDecoder">PerceiverClassificationDecoder</a> to decode the latent representation of <a href="/docs/transformers/pr_16143/en/model_doc/perceiver#transformers.PerceiverModel">PerceiverModel</a> into classification logits.</p> <p>This model is a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PerceiverForImageClassificationFourier.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.PerceiverForImageClassificationFourier.forward" class="header-link invisible 
with-hover:group-hover:visible pr-2" href="#transformers.PerceiverForImageClassificationFourier.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/perceiver/modeling_perceiver.py#L1372" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pixel_values<span class="opacity-60"> = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/model_doc/perceiver#transformers.models.perceiver.modeling_perceiver.PerceiverClassifierOutput" >transformers.models.perceiver.modeling_perceiver.PerceiverClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span 
class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverForImageClassificationFourier.forward.inputs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverForImageClassificationFourier.forward.inputs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs</strong> (<code>torch.FloatTensor</code>) &#x2014; Inputs to the perceiver. Can be anything: images, text, audio, video, etc.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverForImageClassificationFourier.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverForImageClassificationFourier.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>batch_size, sequence_length</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverForImageClassificationFourier.forward.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverForImageClassificationFourier.forward.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverForImageClassificationFourier.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverForImageClassificationFourier.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverForImageClassificationFourier.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverForImageClassificationFourier.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverForImageClassificationFourier.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverForImageClassificationFourier.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverForImageClassificationFourier.forward.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverForImageClassificationFourier.forward.labels"><span><svg class="text-smd" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the image classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.PerceiverForImageClassificationFourier.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/model_doc/perceiver#transformers.models.perceiver.modeling_perceiver.PerceiverClassifierOutput" >transformers.models.perceiver.modeling_perceiver.PerceiverClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/model_doc/perceiver#transformers.models.perceiver.modeling_perceiver.PerceiverClassifierOutput" >transformers.models.perceiver.modeling_perceiver.PerceiverClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/perceiver#transformers.PerceiverConfig" >PerceiverConfig</a>) and inputs.</p> <ul> <li><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) — Classification (or regression if config.num_labels==1) loss.</li> <li><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) — Classification (or regression if config.num_labels==1) scores (before SoftMax).</li> <li><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>. 
Hidden-states of the model at the output of each layer plus the initial embedding outputs.</li> <li><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</li> <li><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights of the decoder’s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/perceiver#transformers.PerceiverForImageClassificationFourier">PerceiverForImageClassificationFourier</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> PerceiverFeatureExtractor, PerceiverForImageClassificationFourier <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> requests <span class="hljs-meta">&gt;&gt;&gt; 
</span>url = <span class="hljs-string">&quot;http://images.cocodataset.org/val2017/000000039769.jpg&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>image = Image.<span class="hljs-built_in">open</span>(requests.get(url, stream=<span class="hljs-literal">True</span>).raw) <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = PerceiverFeatureExtractor.from_pretrained(<span class="hljs-string">&quot;deepmind/vision-perceiver-fourier&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = PerceiverForImageClassificationFourier.from_pretrained(<span class="hljs-string">&quot;deepmind/vision-perceiver-fourier&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = feature_extractor(images=image, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).pixel_values <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs=inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># model predicts one of the 1000 ImageNet classes</span> <span class="hljs-meta">&gt;&gt;&gt; </span>predicted_class_idx = logits.argmax(-<span class="hljs-number">1</span>).item() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(<span class="hljs-string">&quot;Predicted class:&quot;</span>, model.config.id2label[predicted_class_idx])<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.PerceiverForImageClassificationConvProcessing" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverForImageClassificationConvProcessing"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>PerceiverForImageClassificationConvProcessing </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PerceiverForImageClassificationConvProcessing"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 
0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">PerceiverForImageClassificationConvProcessing</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.PerceiverForImageClassificationConvProcessing" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PerceiverForImageClassificationConvProcessing"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/perceiver/modeling_perceiver.py#L1479" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverForImageClassificationConvProcessing.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverForImageClassificationConvProcessing.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a 
href="/docs/transformers/pr_16143/en/model_doc/perceiver#transformers.PerceiverConfig">PerceiverConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Example use of Perceiver for image classification, for tasks such as ImageNet.</p> <p>This model uses a 2D conv+maxpool preprocessing network. As shown in the paper, this model can achieve a top-1 accuracy of 82.1 on ImageNet.</p> <p><a href="/docs/transformers/pr_16143/en/model_doc/perceiver#transformers.PerceiverForImageClassificationLearned">PerceiverForImageClassificationLearned</a> uses <a href="/docs/transformers/pr_16143/en/model_doc/perceiver#transformers.models.perceiver.modeling_perceiver.PerceiverImagePreprocessor">PerceiverImagePreprocessor</a> (with <code>prep_type=&quot;conv&quot;</code>) to preprocess the input images, and <a href="/docs/transformers/pr_16143/en/model_doc/perceiver#transformers.models.perceiver.modeling_perceiver.PerceiverClassificationDecoder">PerceiverClassificationDecoder</a> to decode the latent representation of <a href="/docs/transformers/pr_16143/en/model_doc/perceiver#transformers.PerceiverModel">PerceiverModel</a> into classification logits.</p> <p>This model is a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PerceiverForImageClassificationConvProcessing.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.PerceiverForImageClassificationConvProcessing.forward" class="header-link invisible with-hover:group-hover:visible pr-2" 
href="#transformers.PerceiverForImageClassificationConvProcessing.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/perceiver/modeling_perceiver.py#L1509" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pixel_values<span class="opacity-60"> = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/model_doc/perceiver#transformers.models.perceiver.modeling_perceiver.PerceiverClassifierOutput" >transformers.models.perceiver.modeling_perceiver.PerceiverClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 
items-start"><a id="transformers.PerceiverForImageClassificationConvProcessing.forward.inputs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverForImageClassificationConvProcessing.forward.inputs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs</strong> (<code>torch.FloatTensor</code>) &#x2014; Inputs to the perceiver. Can be anything: images, text, audio, video, etc.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverForImageClassificationConvProcessing.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverForImageClassificationConvProcessing.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>batch_size, sequence_length</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverForImageClassificationConvProcessing.forward.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverForImageClassificationConvProcessing.forward.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverForImageClassificationConvProcessing.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverForImageClassificationConvProcessing.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverForImageClassificationConvProcessing.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverForImageClassificationConvProcessing.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverForImageClassificationConvProcessing.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverForImageClassificationConvProcessing.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverForImageClassificationConvProcessing.forward.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.PerceiverForImageClassificationConvProcessing.forward.labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the image classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.PerceiverForImageClassificationConvProcessing.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/model_doc/perceiver#transformers.models.perceiver.modeling_perceiver.PerceiverClassifierOutput" >transformers.models.perceiver.modeling_perceiver.PerceiverClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/model_doc/perceiver#transformers.models.perceiver.modeling_perceiver.PerceiverClassifierOutput" >transformers.models.perceiver.modeling_perceiver.PerceiverClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/perceiver#transformers.PerceiverConfig" >PerceiverConfig</a>) and inputs.</p> <ul> <li><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) — Classification (or regression if config.num_labels==1) loss.</li> <li><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) — Classification (or regression if config.num_labels==1) scores (before SoftMax).</li> <li><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>. 
Hidden-states of the model at the output of each layer plus the initial embedding outputs.</li> <li><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</li> <li><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights of the decoder’s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/perceiver#transformers.PerceiverForImageClassificationConvProcessing">PerceiverForImageClassificationConvProcessing</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> PerceiverFeatureExtractor, PerceiverForImageClassificationConvProcessing <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> requests <span 
class="hljs-meta">&gt;&gt;&gt; </span>url = <span class="hljs-string">&quot;http://images.cocodataset.org/val2017/000000039769.jpg&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>image = Image.<span class="hljs-built_in">open</span>(requests.get(url, stream=<span class="hljs-literal">True</span>).raw) <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = PerceiverFeatureExtractor.from_pretrained(<span class="hljs-string">&quot;deepmind/vision-perceiver-conv&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = PerceiverForImageClassificationConvProcessing.from_pretrained(<span class="hljs-string">&quot;deepmind/vision-perceiver-conv&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = feature_extractor(images=image, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).pixel_values <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs=inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># model predicts one of the 1000 ImageNet classes</span> <span class="hljs-meta">&gt;&gt;&gt; </span>predicted_class_idx = logits.argmax(-<span class="hljs-number">1</span>).item() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(<span class="hljs-string">&quot;Predicted class:&quot;</span>, model.config.id2label[predicted_class_idx])<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.PerceiverForOpticalFlow" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverForOpticalFlow"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>PerceiverForOpticalFlow </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PerceiverForOpticalFlow"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 
2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">PerceiverForOpticalFlow</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.PerceiverForOpticalFlow" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PerceiverForOpticalFlow"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/perceiver/modeling_perceiver.py#L1616" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverForOpticalFlow.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverForOpticalFlow.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/perceiver#transformers.PerceiverConfig">PerceiverConfig</a>) &#x2014; Model configuration class with all the parameters of the model. 
Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Example use of Perceiver for optical flow, for tasks such as Sintel and KITTI. <a href="/docs/transformers/pr_16143/en/model_doc/perceiver#transformers.PerceiverForOpticalFlow">PerceiverForOpticalFlow</a> uses <a href="/docs/transformers/pr_16143/en/model_doc/perceiver#transformers.models.perceiver.modeling_perceiver.PerceiverImagePreprocessor">PerceiverImagePreprocessor</a> (with <em>prep_type=“patches”</em>) to preprocess the input images, and <a href="/docs/transformers/pr_16143/en/model_doc/perceiver#transformers.models.perceiver.modeling_perceiver.PerceiverOpticalFlowDecoder">PerceiverOpticalFlowDecoder</a> to decode the latent representation of <a href="/docs/transformers/pr_16143/en/model_doc/perceiver#transformers.PerceiverModel">PerceiverModel</a>.</p> <p>As input, one concatenates 2 subsequent frames along the channel dimension and extracts a 3 x 3 patch around each pixel (leading to 3 x 3 x 3 x 2 = 54 values for each pixel). Fixed Fourier position encodings are used to encode the position of each pixel in the patch. Next, one applies the Perceiver encoder. To decode, one queries the latent representation using the same encoding used for the input.</p> <p>This model is a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and behavior.</p>
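<p>To make the patch layout above concrete, here is a rough, hypothetical sketch (not part of the original documentation; the kernel size, padding and frame shapes are assumptions chosen to line up with the pretrained checkpoint used in the example further below) of how such per-pixel neighborhoods could be built with <code>torch.nn.functional.unfold</code>:</p> <div class="code-block relative"><pre>&gt;&gt;&gt; # hypothetical preprocessing sketch: 27 values per pixel per frame, two frames stacked
&gt;&gt;&gt; import torch
&gt;&gt;&gt; import torch.nn.functional as F

&gt;&gt;&gt; height, width = 368, 496
&gt;&gt;&gt; frame1 = torch.randn(1, 3, height, width)  # first RGB frame
&gt;&gt;&gt; frame2 = torch.randn(1, 3, height, width)  # second RGB frame

&gt;&gt;&gt; def to_patches(frame):
...     # a 3 x 3 neighborhood around every pixel: 3 channels * 3 * 3 = 27 values per pixel
...     patches = F.unfold(frame, kernel_size=3, padding=1)  # (1, 27, height * width)
...     return patches.reshape(1, 27, height, width)

&gt;&gt;&gt; # stacking both frames gives the 2 * 27 = 54 values per pixel mentioned above
&gt;&gt;&gt; patches = torch.stack([to_patches(frame1), to_patches(frame2)], dim=1)
&gt;&gt;&gt; patches.shape
torch.Size([1, 2, 27, 368, 496])</pre></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PerceiverForOpticalFlow.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.PerceiverForOpticalFlow.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PerceiverForOpticalFlow.forward"><svg 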
class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/perceiver/modeling_perceiver.py#L1663" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/model_doc/perceiver#transformers.models.perceiver.modeling_perceiver.PerceiverClassifierOutput" >transformers.models.perceiver.modeling_perceiver.PerceiverClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverForOpticalFlow.forward.inputs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.PerceiverForOpticalFlow.forward.inputs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs</strong> (<code>torch.FloatTensor</code>) &#x2014; Inputs to the perceiver. Can be anything: images, text, audio, video, etc.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverForOpticalFlow.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverForOpticalFlow.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>batch_size, sequence_length</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverForOpticalFlow.forward.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverForOpticalFlow.forward.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverForOpticalFlow.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverForOpticalFlow.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverForOpticalFlow.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverForOpticalFlow.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverForOpticalFlow.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverForOpticalFlow.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverForOpticalFlow.forward.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverForOpticalFlow.forward.labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" 
width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the optical flow loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.PerceiverForOpticalFlow.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/model_doc/perceiver#transformers.models.perceiver.modeling_perceiver.PerceiverClassifierOutput" >transformers.models.perceiver.modeling_perceiver.PerceiverClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/model_doc/perceiver#transformers.models.perceiver.modeling_perceiver.PerceiverClassifierOutput" >transformers.models.perceiver.modeling_perceiver.PerceiverClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/perceiver#transformers.PerceiverConfig" >PerceiverConfig</a>) and inputs.</p> <ul> <li><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) — Classification (or regression if config.num_labels==1) loss.</li> <li><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) — Classification (or regression if config.num_labels==1) scores (before SoftMax).</li> <li><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>. Hidden-states of the model at the output of each layer plus the initial embedding outputs.</li> <li><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. 
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</li> <li><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights of the decoder’s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/perceiver#transformers.PerceiverForOpticalFlow">PerceiverForOpticalFlow</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> PerceiverForOpticalFlow <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>model = PerceiverForOpticalFlow.from_pretrained(<span class="hljs-string">&quot;deepmind/optical-flow-perceiver&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># in the Perceiver IO paper, the authors extract a 3 x 3 patch around each pixel,</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># leading to 3 x 3 x 3 = 27 values for each pixel (as each pixel also has 3 color channels)</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># patches have shape (batch_size, num_frames, num_channels, height, width)</span> <span class="hljs-meta">&gt;&gt;&gt; 
</span><span class="hljs-comment"># the authors train on resolutions of 368 x 496</span> <span class="hljs-meta">&gt;&gt;&gt; </span>patches = torch.randn(<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">27</span>, <span class="hljs-number">368</span>, <span class="hljs-number">496</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs=patches) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.PerceiverForMultimodalAutoencoding" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverForMultimodalAutoencoding"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>PerceiverForMultimodalAutoencoding </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PerceiverForMultimodalAutoencoding"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">PerceiverForMultimodalAutoencoding</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.PerceiverForMultimodalAutoencoding" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PerceiverForMultimodalAutoencoding"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 
1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/perceiver/modeling_perceiver.py#L1756" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverForMultimodalAutoencoding.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverForMultimodalAutoencoding.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/perceiver#transformers.PerceiverConfig">PerceiverConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Example use of Perceiver for multimodal (video) autoencoding, for tasks such as Kinetics-700.</p> <p><a href="/docs/transformers/pr_16143/en/model_doc/perceiver#transformers.PerceiverForMultimodalAutoencoding">PerceiverForMultimodalAutoencoding</a> uses <a href="/docs/transformers/pr_16143/en/model_doc/perceiver#transformers.models.perceiver.modeling_perceiver.PerceiverMultimodalPreprocessor">PerceiverMultimodalPreprocessor</a> to preprocess the 3 modalities: images, audio and class labels. This preprocessor uses modality-specific preprocessors to preprocess every modality separately, after which they are concatenated. Trainable position embeddings are used to pad each modality to the same number of channels to make concatenation along the time dimension possible. Next, one applies the Perceiver encoder.</p> <p><a href="/docs/transformers/pr_16143/en/model_doc/perceiver#transformers.models.perceiver.modeling_perceiver.PerceiverMultimodalDecoder">PerceiverMultimodalDecoder</a> is used to decode the latent representation of <a href="/docs/transformers/pr_16143/en/model_doc/perceiver#transformers.PerceiverModel">PerceiverModel</a>. This decoder uses each modality-specific decoder to construct queries. The decoder queries are created based on the inputs after preprocessing. However, autoencoding an entire video in a single forward pass is computationally infeasible, hence one only uses parts of the decoder queries to do cross-attention with the latent representation. This is determined by the subsampled indices for each modality, which can be provided as additional input to the forward pass of <a href="/docs/transformers/pr_16143/en/model_doc/perceiver#transformers.PerceiverForMultimodalAutoencoding">PerceiverForMultimodalAutoencoding</a>.</p> <p><a href="/docs/transformers/pr_16143/en/model_doc/perceiver#transformers.models.perceiver.modeling_perceiver.PerceiverMultimodalDecoder">PerceiverMultimodalDecoder</a> also pads the decoder queries of the different modalities to the same number of channels, in order to concatenate them along the time dimension. Next, cross-attention is performed with the latent representation of <a href="/docs/transformers/pr_16143/en/model_doc/perceiver#transformers.PerceiverModel">PerceiverModel</a>.</p> <p>Finally, <code>PerceiverMultiModalPostprocessor</code> is used to turn this tensor into an actual video. It first splits up the output into the different modalities, and then applies the respective postprocessor for each modality.</p> <p>Note that, by masking the classification label during evaluation (i.e. simply providing a tensor of zeros for the “label” modality), this auto-encoding model becomes a Kinetics 700 video classifier.</p> <p>This model is a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and behavior.</p>
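<p>A minimal, hedged usage sketch (tensor shapes, the chunking scheme, the <code>deepmind/multimodal-perceiver</code> checkpoint name and the <code>samples_per_patch</code> configuration attribute are assumptions based on the Kinetics-700 setup described above, not statements taken from this page) showing how subsampled decoder queries can be passed via the <code>subsampled_output_points</code> argument documented below:</p> <div class="code-block relative"><pre>&gt;&gt;&gt; # hedged sketch: autoencode one chunk of a dummy video and read off Kinetics-700 logits
&gt;&gt;&gt; from transformers import PerceiverForMultimodalAutoencoding
&gt;&gt;&gt; import torch

&gt;&gt;&gt; model = PerceiverForMultimodalAutoencoding.from_pretrained("deepmind/multimodal-perceiver")

&gt;&gt;&gt; # dummy inputs: 16 video frames, raw audio, and an all-zero ("masked") label modality
&gt;&gt;&gt; video = torch.randn(1, 16, 3, 224, 224)
&gt;&gt;&gt; audio = torch.randn(1, 30720, 1)
&gt;&gt;&gt; inputs = {"image": video, "audio": audio, "label": torch.zeros(1, 700)}

&gt;&gt;&gt; # decoding the whole video at once is infeasible, so only the first of 128 chunks is queried here
&gt;&gt;&gt; nchunks = 128
&gt;&gt;&gt; image_chunk_size = (16 * 224 * 224) // nchunks
&gt;&gt;&gt; audio_chunk_size = audio.shape[1] // model.config.samples_per_patch // nchunks
&gt;&gt;&gt; subsampling = {
...     "image": torch.arange(0, image_chunk_size),
...     "audio": torch.arange(0, audio_chunk_size),
...     "label": None,
... }

&gt;&gt;&gt; outputs = model(inputs=inputs, subsampled_output_points=subsampling)
&gt;&gt;&gt; # with the label modality masked, the "label" logits act as video classification scores
&gt;&gt;&gt; logits = outputs.logits["label"]</pre></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PerceiverForMultimodalAutoencoding.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.PerceiverForMultimodalAutoencoding.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PerceiverForMultimodalAutoencoding.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/perceiver/modeling_perceiver.py#L1872" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black 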
hover:text-white dark:hover:bg-white dark:hover:text-black">subsampled_output_points<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/model_doc/perceiver#transformers.models.perceiver.modeling_perceiver.PerceiverClassifierOutput" >transformers.models.perceiver.modeling_perceiver.PerceiverClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverForMultimodalAutoencoding.forward.inputs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverForMultimodalAutoencoding.forward.inputs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs</strong> (<code>torch.FloatTensor</code>) &#x2014; Inputs to the perceiver. 
Can be anything: images, text, audio, video, etc.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverForMultimodalAutoencoding.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverForMultimodalAutoencoding.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>batch_size, sequence_length</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverForMultimodalAutoencoding.forward.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverForMultimodalAutoencoding.forward.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverForMultimodalAutoencoding.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverForMultimodalAutoencoding.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverForMultimodalAutoencoding.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverForMultimodalAutoencoding.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverForMultimodalAutoencoding.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverForMultimodalAutoencoding.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PerceiverForMultimodalAutoencoding.forward.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PerceiverForMultimodalAutoencoding.forward.labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the image classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. 
If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.PerceiverForMultimodalAutoencoding.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/model_doc/perceiver#transformers.models.perceiver.modeling_perceiver.PerceiverClassifierOutput" >transformers.models.perceiver.modeling_perceiver.PerceiverClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/model_doc/perceiver#transformers.models.perceiver.modeling_perceiver.PerceiverClassifierOutput" >transformers.models.perceiver.modeling_perceiver.PerceiverClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/perceiver#transformers.PerceiverConfig" >PerceiverConfig</a>) and inputs.</p> <ul> <li><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) — Classification (or regression if config.num_labels==1) loss.</li> <li><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) — Classification (or regression if config.num_labels==1) scores (before SoftMax).</li> <li><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>. Hidden-states of the model at the output of each layer plus the initial embedding outputs.</li> <li><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</li> <li><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. 
Attentions weights of the decoder’s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/perceiver#transformers.PerceiverForMultimodalAutoencoding">PerceiverForMultimodalAutoencoding</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> PerceiverForMultimodalAutoencoding <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> numpy <span class="hljs-keyword">as</span> np <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># create multimodal inputs</span> <span class="hljs-meta">&gt;&gt;&gt; </span>images = torch.randn((<span class="hljs-number">1</span>, <span class="hljs-number">16</span>, <span class="hljs-number">3</span>, <span class="hljs-number">224</span>, <span class="hljs-number">224</span>)) <span class="hljs-meta">&gt;&gt;&gt; </span>audio = torch.randn((<span class="hljs-number">1</span>, <span class="hljs-number">30720</span>, <span class="hljs-number">1</span>)) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = <span class="hljs-built_in">dict</span>(image=images, audio=audio, label=torch.zeros((images.shape[<span class="hljs-number">0</span>], <span class="hljs-number">700</span>))) <span class="hljs-meta">&gt;&gt;&gt; </span>model = PerceiverForMultimodalAutoencoding.from_pretrained(<span class="hljs-string">&quot;deepmind/multimodal-perceiver&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span 
class="hljs-comment"># in the Perceiver IO paper, videos are auto-encoded in chunks</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># each chunk subsamples different index dimensions of the image and audio modality decoder queries</span> <span class="hljs-meta">&gt;&gt;&gt; </span>nchunks = <span class="hljs-number">128</span> <span class="hljs-meta">&gt;&gt;&gt; </span>image_chunk_size = np.prod((<span class="hljs-number">16</span>, <span class="hljs-number">224</span>, <span class="hljs-number">224</span>)) // nchunks <span class="hljs-meta">&gt;&gt;&gt; </span>audio_chunk_size = audio.shape[<span class="hljs-number">1</span>] // model.config.samples_per_patch // nchunks <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># process the first chunk</span> <span class="hljs-meta">&gt;&gt;&gt; </span>chunk_idx = <span class="hljs-number">0</span> <span class="hljs-meta">&gt;&gt;&gt; </span>subsampling = { <span class="hljs-meta">... </span> <span class="hljs-string">&quot;image&quot;</span>: torch.arange(image_chunk_size * chunk_idx, image_chunk_size * (chunk_idx + <span class="hljs-number">1</span>)), <span class="hljs-meta">... </span> <span class="hljs-string">&quot;audio&quot;</span>: torch.arange(audio_chunk_size * chunk_idx, audio_chunk_size * (chunk_idx + <span class="hljs-number">1</span>)), <span class="hljs-meta">... </span> <span class="hljs-string">&quot;label&quot;</span>: <span class="hljs-literal">None</span>, <span class="hljs-meta">... </span>} <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs=inputs, subsampled_output_points=subsampling) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits<!-- HTML_TAG_END --></pre></div></div></div> <script type="module" data-hydrate="11qdmyf"> import { start } from "/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"; start({ target: document.querySelector('[data-hydrate="11qdmyf"]').parentNode, paths: {"base":"/docs/transformers/pr_16143/en","assets":"/docs/transformers/pr_16143/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"), import("/docs/transformers/pr_16143/en/_app/pages/model_doc/perceiver.mdx-6efe9939.js") ], params: {} } }); </script>
224
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en
hf_public_repos/doc-build-dev/transformers/pr_16143/en/model_doc/bert-generation.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;bertgeneration&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;overview&quot;,&quot;title&quot;:&quot;Overview&quot;},{&quot;local&quot;:&quot;transformers.BertGenerationConfig&quot;,&quot;title&quot;:&quot;BertGenerationConfig&quot;},{&quot;local&quot;:&quot;transformers.BertGenerationTokenizer&quot;,&quot;title&quot;:&quot;BertGenerationTokenizer&quot;},{&quot;local&quot;:&quot;transformers.BertGenerationEncoder&quot;,&quot;title&quot;:&quot;BertGenerationEncoder&quot;},{&quot;local&quot;:&quot;transformers.BertGenerationDecoder&quot;,&quot;title&quot;:&quot;BertGenerationDecoder&quot;}],&quot;title&quot;:&quot;BertGeneration&quot;}" data-svelte="svelte-1phssyn"> <link rel="stylesheet" href="/docs/transformers/pr_16143/en/_app/assets/pages/__layout.svelte-a5c8879b.css"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/vendor-4833417e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/paths-4b3c6e7e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/model_doc/bert-generation.mdx-eb6fdeef.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Tip-fffd6df1.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Docstring-4f315ed9.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/IconCopyLink-4b81c553.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CodeBlock-6a3d1b46.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CopyButton-dacfbfaf.js"> <h1 class="relative group"><a id="bertgeneration" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#bertgeneration"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>BertGeneration </span></h1> <h2 class="relative group"><a id="overview" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#overview"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 
84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Overview </span></h2> <p>The BertGeneration model is a BERT model that can be leveraged for sequence-to-sequence tasks using <a href="/docs/transformers/pr_16143/en/model_doc/encoder-decoder#transformers.EncoderDecoderModel">EncoderDecoderModel</a> as proposed in <a href="https://arxiv.org/abs/1907.12461" rel="nofollow">Leveraging Pre-trained Checkpoints for Sequence Generation Tasks</a> by Sascha Rothe, Shashi Narayan, Aliaksei Severyn.</p> <p>The abstract from the paper is the following:</p> <p><em>Unsupervised pretraining of large neural models has recently revolutionized Natural Language Processing. By warm-starting from the publicly released checkpoints, NLP practitioners have pushed the state-of-the-art on multiple benchmarks while saving significant amounts of compute time. So far the focus has been mainly on the Natural Language Understanding tasks. In this paper, we demonstrate the efficacy of pre-trained checkpoints for Sequence Generation. We developed a Transformer-based sequence-to-sequence model that is compatible with publicly available pre-trained BERT, GPT-2 and RoBERTa checkpoints and conducted an extensive empirical study on the utility of initializing our model, both encoder and decoder, with these checkpoints. Our models result in new state-of-the-art results on Machine Translation, Text Summarization, Sentence Splitting, and Sentence Fusion.</em></p> <p>Usage:</p> <ul><li>The model can be used in combination with the <a href="/docs/transformers/pr_16143/en/model_doc/encoder-decoder#transformers.EncoderDecoderModel">EncoderDecoderModel</a> to leverage two pretrained BERT checkpoints for subsequent fine-tuning.</li></ul> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># leverage checkpoints for Bert2Bert model...</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># use BERT&#x27;s cls token as BOS token and sep token as EOS 
token</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoder = BertGenerationEncoder.from_pretrained(<span class="hljs-string">&quot;bert-large-uncased&quot;</span>, bos_token_id=<span class="hljs-number">101</span>, eos_token_id=<span class="hljs-number">102</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># add cross attention layers and use BERT&#x27;s cls token as BOS token and sep token as EOS token</span> <span class="hljs-meta">&gt;&gt;&gt; </span>decoder = BertGenerationDecoder.from_pretrained( <span class="hljs-meta">... </span> <span class="hljs-string">&quot;bert-large-uncased&quot;</span>, add_cross_attention=<span class="hljs-literal">True</span>, is_decoder=<span class="hljs-literal">True</span>, bos_token_id=<span class="hljs-number">101</span>, eos_token_id=<span class="hljs-number">102</span> <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>bert2bert = EncoderDecoderModel(encoder=encoder, decoder=decoder) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># create tokenizer...</span> <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BertTokenizer.from_pretrained(<span class="hljs-string">&quot;bert-large-uncased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = tokenizer( <span class="hljs-meta">... </span> <span class="hljs-string">&quot;This is a long article to summarize&quot;</span>, add_special_tokens=<span class="hljs-literal">False</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span>labels = tokenizer(<span class="hljs-string">&quot;This is a short summary&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># train...</span> <span class="hljs-meta">&gt;&gt;&gt; </span>loss = bert2bert(input_ids=input_ids, decoder_input_ids=labels, labels=labels).loss <span class="hljs-meta">&gt;&gt;&gt; </span>loss.backward()<!-- HTML_TAG_END --></pre></div> <ul><li>Pretrained <a href="/docs/transformers/pr_16143/en/model_doc/encoder-decoder#transformers.EncoderDecoderModel">EncoderDecoderModel</a> are also directly available in the model hub, e.g.,</li></ul> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span 
class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># instantiate sentence fusion model</span> <span class="hljs-meta">&gt;&gt;&gt; </span>sentence_fuser = EncoderDecoderModel.from_pretrained(<span class="hljs-string">&quot;google/roberta2roberta_L-24_discofuse&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;google/roberta2roberta_L-24_discofuse&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = tokenizer( <span class="hljs-meta">... </span> <span class="hljs-string">&quot;This is the first sentence. This is the second sentence.&quot;</span>, add_special_tokens=<span class="hljs-literal">False</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = sentence_fuser.generate(input_ids) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(tokenizer.decode(outputs[<span class="hljs-number">0</span>]))<!-- HTML_TAG_END --></pre></div> <p>Tips:</p> <ul><li><a href="/docs/transformers/pr_16143/en/model_doc/bert-generation#transformers.BertGenerationEncoder">BertGenerationEncoder</a> and <a href="/docs/transformers/pr_16143/en/model_doc/bert-generation#transformers.BertGenerationDecoder">BertGenerationDecoder</a> should be used in combination with <code>EncoderDecoder</code>.</li> <li>For summarization, sentence splitting, sentence fusion and translation, no special tokens are required for the input. Therefore, no EOS token should be added to the end of the input.</li></ul> <p>This model was contributed by <a href="https://huggingface.co/patrickvonplaten" rel="nofollow">patrickvonplaten</a>. 
The original code can be found <a href="https://tfhub.dev/s?module-type=text-generation&subtype=module,placeholder" rel="nofollow">here</a>.</p> <h2 class="relative group"><a id="transformers.BertGenerationConfig" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BertGenerationConfig"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>BertGenerationConfig </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.BertGenerationConfig"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">BertGenerationConfig</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.BertGenerationConfig" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.BertGenerationConfig"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto 
!text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/bert_generation/configuration_bert_generation.py#L20" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">vocab_size<span class="opacity-60"> = 50358</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_size<span class="opacity-60"> = 1024</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_hidden_layers<span class="opacity-60"> = 24</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_attention_heads<span class="opacity-60"> = 16</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">intermediate_size<span class="opacity-60"> = 4096</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_act<span class="opacity-60"> = &#39;gelu&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_dropout_prob<span class="opacity-60"> = 0.1</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_probs_dropout_prob<span class="opacity-60"> = 0.1</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_position_embeddings<span class="opacity-60"> = 512</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">initializer_range<span class="opacity-60"> = 0.02</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">layer_norm_eps<span class="opacity-60"> = 1e-12</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_token_id<span class="opacity-60"> = 0</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">bos_token_id<span class="opacity-60"> = 2</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eos_token_id<span class="opacity-60"> = 1</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_embedding_type<span class="opacity-60"> = &#39;absolute&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">use_cache<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BertGenerationConfig.vocab_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BertGenerationConfig.vocab_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 50358) &#x2014; Vocabulary size of the BERT model. 
Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed when calling <code>BertGeneration</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BertGenerationConfig.hidden_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BertGenerationConfig.hidden_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_size</strong> (<code>int</code>, <em>optional</em>, defaults to 1024) &#x2014; Dimensionality of the encoder layers and the pooler layer.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BertGenerationConfig.num_hidden_layers" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BertGenerationConfig.num_hidden_layers"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_hidden_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 24) &#x2014; Number of hidden layers in the Transformer encoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BertGenerationConfig.num_attention_heads" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BertGenerationConfig.num_attention_heads"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 
0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 16) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BertGenerationConfig.intermediate_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BertGenerationConfig.intermediate_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>intermediate_size</strong> (<code>int</code>, <em>optional</em>, defaults to 4096) &#x2014; Dimensionality of the &#x201C;intermediate&#x201D; (often called feed-forward) layer in the Transformer encoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BertGenerationConfig.hidden_act" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BertGenerationConfig.hidden_act"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_act</strong> (<code>str</code> or <code>function</code>, <em>optional</em>, defaults to <code>&quot;gelu&quot;</code>) &#x2014; The non-linear activation function (function or 
string) in the encoder and pooler. If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;silu&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BertGenerationConfig.hidden_dropout_prob" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BertGenerationConfig.hidden_dropout_prob"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BertGenerationConfig.attention_probs_dropout_prob" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BertGenerationConfig.attention_probs_dropout_prob"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_probs_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout ratio for the attention probabilities.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BertGenerationConfig.max_position_embeddings" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BertGenerationConfig.max_position_embeddings"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" 
aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_position_embeddings</strong> (<code>int</code>, <em>optional</em>, defaults to 512) &#x2014; The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BertGenerationConfig.initializer_range" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BertGenerationConfig.initializer_range"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>initializer_range</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BertGenerationConfig.layer_norm_eps" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BertGenerationConfig.layer_norm_eps"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>layer_norm_eps</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-12) &#x2014; The epsilon used by the layer normalization layers.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BertGenerationConfig.position_embedding_type" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BertGenerationConfig.position_embedding_type"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_embedding_type</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;absolute&quot;</code>) &#x2014; Type of position embedding. Choose one of <code>&quot;absolute&quot;</code>, <code>&quot;relative_key&quot;</code>, <code>&quot;relative_key_query&quot;</code>. For positional embeddings use <code>&quot;absolute&quot;</code>. For more information on <code>&quot;relative_key&quot;</code>, please refer to <a href="https://arxiv.org/abs/1803.02155" rel="nofollow">Self-Attention with Relative Position Representations (Shaw et al.)</a>. 
For more information on <code>&quot;relative_key_query&quot;</code>, please refer to <em>Method 4</em> in <a href="https://arxiv.org/abs/2009.13658" rel="nofollow">Improve Transformer Models with Better Relative Position Embeddings (Huang et al.)</a>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BertGenerationConfig.use_cache" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BertGenerationConfig.use_cache"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not the model should return the last key/values attentions (not used by all models). Only relevant if <code>config.is_decoder=True</code>.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>This is the configuration class to store the configuration of a <code>BertGenerationPreTrainedModel</code>. It is used to instantiate a BertGeneration model according to the specified arguments, defining the model architecture.</p> <p>Configuration objects inherit from <a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a> and can be used to control the model outputs. 
Read the documentation from <a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a> for more information.</p> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BertGenerationConfig, BertGenerationEncoder <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a BertGeneration config</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = BertGenerationConfig() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a model from the config</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = BertGenerationEncoder(configuration) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = model.config<!-- HTML_TAG_END --></pre></div></div> <h2 class="relative group"><a id="transformers.BertGenerationTokenizer" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BertGenerationTokenizer"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>BertGenerationTokenizer </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.BertGenerationTokenizer"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 
break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">BertGenerationTokenizer</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.BertGenerationTokenizer" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.BertGenerationTokenizer"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/bert_generation/tokenization_bert_generation.py#L41" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">vocab_file<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">bos_token<span class="opacity-60"> = &#39;&lt;s&gt;&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eos_token<span class="opacity-60"> = &#39;&lt;/s&gt;&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">unk_token<span class="opacity-60"> = &#39;&lt;unk&gt;&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_token<span class="opacity-60"> = &#39;&lt;pad&gt;&#39;</span></span> </span><span class="comma cursor-default"><span 
class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">sep_token<span class="opacity-60"> = &#39;&lt;::::&gt;&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">sp_model_kwargs<span class="opacity-60">: typing.Union[typing.Dict[str, typing.Any], NoneType] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BertGenerationTokenizer.vocab_file" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BertGenerationTokenizer.vocab_file"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>vocab_file</strong> (<code>str</code>) &#x2014; <a href="https://github.com/google/sentencepiece" rel="nofollow">SentencePiece</a> file (generally has a <em>.spm</em> extension) that contains the vocabulary necessary to instantiate a tokenizer.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BertGenerationTokenizer.eos_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BertGenerationTokenizer.eos_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>eos_token</strong> (<code>str</code>, 
<em>optional</em>, defaults to <code>&quot;&lt;/s&gt;&quot;</code>) &#x2014; The end of sequence token.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BertGenerationTokenizer.bos_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BertGenerationTokenizer.bos_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>bos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;s&gt;&quot;</code>) &#x2014; The begin of sequence token.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BertGenerationTokenizer.unk_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BertGenerationTokenizer.unk_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;unk&gt;&quot;</code>) &#x2014; The unknown token. 
A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BertGenerationTokenizer.pad_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BertGenerationTokenizer.pad_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pad_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;pad&gt;&quot;</code>) &#x2014; The token used for padding, for example when batching sequences of different lengths.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BertGenerationTokenizer.sp_model_kwargs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BertGenerationTokenizer.sp_model_kwargs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>sp_model_kwargs</strong> (<code>dict</code>, <em>optional</em>) &#x2014; Will be passed to the <code>SentencePieceProcessor.__init__()</code> method. The <a href="https://github.com/google/sentencepiece/tree/master/python" rel="nofollow">Python wrapper for SentencePiece</a> can be used, among other things, to set:</p> <ul> <li> <p><code>enable_sampling</code>: Enable subword regularization.</p> </li> <li> <p><code>nbest_size</code>: Sampling parameters for unigram. 
Invalid for BPE-Dropout.</p> <ul> <li><code>nbest_size = {0,1}</code>: No sampling is performed.</li> <li><code>nbest_size &gt; 1</code>: samples from the nbest_size results.</li> <li><code>nbest_size &lt; 0</code>: assuming that nbest_size is infinite and samples from the all hypothesis (lattice) using forward-filtering-and-backward-sampling algorithm.</li> </ul> </li> <li> <p><code>alpha</code>: Smoothing parameter for unigram sampling, and dropout probability of merge operations for BPE-dropout.</p> </li> </ul><!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Construct a BertGeneration tokenizer. Based on <a href="https://github.com/google/sentencepiece" rel="nofollow">SentencePiece</a>.</p> <p>This tokenizer inherits from <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a> which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.BertGenerationTokenizer.save_vocabulary"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>save_vocabulary</span></h4><!-- HTML_TAG_END --> <a id="transformers.BertGenerationTokenizer.save_vocabulary" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.BertGenerationTokenizer.save_vocabulary"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a 
class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/bert_generation/tokenization_bert_generation.py#L155" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">save_directory<span class="opacity-60">: str</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">filename_prefix<span class="opacity-60">: typing.Optional[str] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div></div></div> <h2 class="relative group"><a id="transformers.BertGenerationEncoder" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BertGenerationEncoder"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>BertGenerationEncoder </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.BertGenerationEncoder"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">BertGenerationEncoder</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.BertGenerationEncoder" class="header-link invisible with-hover:group-hover:visible pr-2" 
href="#transformers.BertGenerationEncoder"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/bert_generation/modeling_bert_generation.py#L253" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BertGenerationEncoder.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BertGenerationEncoder.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/bert-generation#transformers.BertGenerationConfig">BertGenerationConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>The bare BertGeneration model transformer outputting raw hidden-states without any specific head on top.</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads etc.)</p> <p>This model is also a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and behavior.</p> <p>The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of cross-attention is added between the self-attention layers, following the architecture described in <a href="https://arxiv.org/abs/1706.03762" rel="nofollow">Attention is all you need</a> by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.</p> <p>This model should be used when leveraging Bert or Roberta checkpoints for the <a href="/docs/transformers/pr_16143/en/model_doc/encoder-decoder#transformers.EncoderDecoderModel">EncoderDecoderModel</a> class as described in <a href="https://arxiv.org/abs/1907.12461" rel="nofollow">Leveraging Pre-trained Checkpoints for Sequence Generation Tasks</a> by Sascha Rothe, Shashi Narayan, and Aliaksei Severyn.</p> <p>To behave as a decoder the model needs to be initialized with the <code>is_decoder</code> argument of the configuration set to <code>True</code>.
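</p> <p>As a purely illustrative sketch (not drawn from the upstream API reference), the decoder behaviour can be enabled by setting <code>is_decoder</code> on the configuration before loading the weights; the checkpoint name below is the one used in the example further down this page:</p> <div class="code-block relative"> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BertGenerationConfig, BertGenerationEncoder <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Illustrative sketch: load the configuration, mark it as a decoder, then reload the weights with it</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = BertGenerationConfig.from_pretrained(<span class="hljs-string">&quot;google/bert_for_seq_generation_L-24_bbc_encoder&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>config.is_decoder = <span class="hljs-literal">True</span> <span class="hljs-meta">&gt;&gt;&gt; </span>decoder = BertGenerationEncoder.from_pretrained(<span class="hljs-string">&quot;google/bert_for_seq_generation_L-24_bbc_encoder&quot;</span>, config=config)<!-- HTML_TAG_END --></pre></div> <p>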
To be used in a Seq2Seq model, the model needs to initialized with both <code>is_decoder</code> argument and <code>add_cross_attention</code> set to <code>True</code>; an <code>encoder_hidden_states</code> is then expected as an input to the forward pass.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.BertGenerationEncoder.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.BertGenerationEncoder.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.BertGenerationEncoder.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/bert_generation/modeling_bert_generation.py#L294" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = 
None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_cache<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPastAndCrossAttentions" >transformers.modeling_outputs.BaseModelOutputWithPastAndCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BertGenerationEncoder.forward.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BertGenerationEncoder.forward.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 
79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/bert-generation#transformers.BertGenerationTokenizer">BertGenerationTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BertGenerationEncoder.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BertGenerationEncoder.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BertGenerationEncoder.forward.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BertGenerationEncoder.forward.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BertGenerationEncoder.forward.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BertGenerationEncoder.forward.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BertGenerationEncoder.forward.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BertGenerationEncoder.forward.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BertGenerationEncoder.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BertGenerationEncoder.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BertGenerationEncoder.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BertGenerationEncoder.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BertGenerationEncoder.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BertGenerationEncoder.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BertGenerationEncoder.forward.encoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BertGenerationEncoder.forward.encoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" 
aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_hidden_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BertGenerationEncoder.forward.encoder_attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BertGenerationEncoder.forward.encoder_attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. 
Mask values selected in <code>[0, 1]</code>: <code>1</code> for tokens that are NOT MASKED, <code>0</code> for MASKED tokens.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BertGenerationEncoder.forward.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BertGenerationEncoder.forward.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code> of length <code>config.n_layers</code> with each tuple having 4 tensors of shape <code>(batch_size, num_heads, sequence_length - 1, embed_size_per_head)</code>) &#x2014; Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BertGenerationEncoder.forward.use_cache" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BertGenerationEncoder.forward.use_cache"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).<!-- HTML_TAG_END --> </span></span> 
</li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.BertGenerationEncoder.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPastAndCrossAttentions" >transformers.modeling_outputs.BaseModelOutputWithPastAndCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPastAndCrossAttentions" >transformers.modeling_outputs.BaseModelOutputWithPastAndCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/bert-generation#transformers.BertGenerationConfig" >BertGenerationConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) — Sequence of hidden-states at the output of the last layer of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) — Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and optionally if <code>config.is_encoder_decoder=True</code> 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if <code>config.is_encoder_decoder=True</code> in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when 
<code>output_attentions=True</code> and <code>config.add_cross_attention=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder’s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/bert-generation#transformers.BertGenerationEncoder">BertGenerationEncoder</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BertGenerationTokenizer, BertGenerationEncoder <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BertGenerationTokenizer.from_pretrained(<span class="hljs-string">&quot;google/bert_for_seq_generation_L-24_bbc_encoder&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = BertGenerationEncoder.from_pretrained(<span class="hljs-string">&quot;google/bert_for_seq_generation_L-24_bbc_encoder&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a 
id="transformers.BertGenerationDecoder" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BertGenerationDecoder"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>BertGenerationDecoder </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.BertGenerationDecoder"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">BertGenerationDecoder</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.BertGenerationDecoder" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.BertGenerationDecoder"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" 
href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/bert_generation/modeling_bert_generation.py#L442" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BertGenerationDecoder.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BertGenerationDecoder.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/bert-generation#transformers.BertGenerationConfig">BertGenerationConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>BertGeneration Model with a <code>language modeling</code> head on top for CLM fine-tuning.</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)</p> <p>This model is also a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.BertGenerationDecoder.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.BertGenerationDecoder.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.BertGenerationDecoder.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/bert_generation/modeling_bert_generation.py#L461" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">position_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_cache<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.CausalLMOutputWithCrossAttentions" >transformers.modeling_outputs.CausalLMOutputWithCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BertGenerationDecoder.forward.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BertGenerationDecoder.forward.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 
28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/bert-generation#transformers.BertGenerationTokenizer">BertGenerationTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BertGenerationDecoder.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BertGenerationDecoder.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BertGenerationDecoder.forward.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BertGenerationDecoder.forward.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BertGenerationDecoder.forward.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BertGenerationDecoder.forward.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BertGenerationDecoder.forward.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BertGenerationDecoder.forward.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BertGenerationDecoder.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BertGenerationDecoder.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BertGenerationDecoder.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BertGenerationDecoder.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BertGenerationDecoder.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BertGenerationDecoder.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BertGenerationDecoder.forward.encoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BertGenerationDecoder.forward.encoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" 
aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_hidden_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BertGenerationDecoder.forward.encoder_attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BertGenerationDecoder.forward.encoder_attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BertGenerationDecoder.forward.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BertGenerationDecoder.forward.labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BertGenerationDecoder.forward.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BertGenerationDecoder.forward.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code> of length <code>config.n_layers</code> with each tuple having 4 tensors of shape <code>(batch_size, num_heads, sequence_length - 1, embed_size_per_head)</code>) &#x2014; Contains precomputed key and value hidden states of the attention blocks. 
Can be used to speed up decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BertGenerationDecoder.forward.use_cache" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BertGenerationDecoder.forward.use_cache"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.BertGenerationDecoder.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.CausalLMOutputWithCrossAttentions" >transformers.modeling_outputs.CausalLMOutputWithCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.CausalLMOutputWithCrossAttentions" >transformers.modeling_outputs.CausalLMOutputWithCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/bert-generation#transformers.BertGenerationConfig" >BertGenerationConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) — Language modeling loss (for next-token prediction).</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) — Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> 
(<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Cross attentions weights after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) — Tuple of <code>torch.FloatTensor</code> tuples of length <code>config.n_layers</code>, with each tuple containing the cached key, value states of the self-attention and the cross-attention layers if model is used in encoder-decoder setting. 
Only relevant if <code>config.is_decoder = True</code>.</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/bert-generation#transformers.BertGenerationDecoder">BertGenerationDecoder</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BertGenerationTokenizer, BertGenerationDecoder, BertGenerationConfig <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BertGenerationTokenizer.from_pretrained(<span class="hljs-string">&quot;google/bert_for_seq_generation_L-24_bbc_encoder&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>config = BertGenerationConfig.from_pretrained(<span class="hljs-string">&quot;google/bert_for_seq_generation_L-24_bbc_encoder&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>config.is_decoder = <span class="hljs-literal">True</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = BertGenerationDecoder.from_pretrained( <span class="hljs-meta">... </span> <span class="hljs-string">&quot;google/bert_for_seq_generation_L-24_bbc_encoder&quot;</span>, config=config <span class="hljs-meta">... 
</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_token_type_ids=<span class="hljs-literal">False</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>prediction_logits = outputs.logits<!-- HTML_TAG_END --></pre></div></div></div> <script type="module" data-hydrate="11tiecp"> import { start } from "/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"; start({ target: document.querySelector('[data-hydrate="11tiecp"]').parentNode, paths: {"base":"/docs/transformers/pr_16143/en","assets":"/docs/transformers/pr_16143/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"), import("/docs/transformers/pr_16143/en/_app/pages/model_doc/bert-generation.mdx-eb6fdeef.js") ], params: {} } }); </script>
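<p>The <code>past_key_values</code> and <code>use_cache</code> arguments documented above can be used to avoid recomputing key and value states during autoregressive decoding. The snippet below is a minimal, illustrative sketch of that pattern; the greedy next-token selection and the manual second forward pass are assumptions added for illustration only (in practice the <code>generate()</code> method takes care of this loop):</p> <pre>
>>> from transformers import BertGenerationTokenizer, BertGenerationDecoder, BertGenerationConfig
>>> import torch

>>> tokenizer = BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
>>> config = BertGenerationConfig.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
>>> config.is_decoder = True
>>> model = BertGenerationDecoder.from_pretrained(
...     "google/bert_for_seq_generation_L-24_bbc_encoder", config=config
... )

>>> inputs = tokenizer("Hello, my dog is cute", return_token_type_ids=False, return_tensors="pt")

>>> # First pass over the full prompt; use_cache=True makes the model return past_key_values
>>> outputs = model(**inputs, use_cache=True)
>>> past_key_values = outputs.past_key_values

>>> # Greedily pick the next token from the logits of the last position (illustration only)
>>> next_token = outputs.logits[:, -1:].argmax(dim=-1)

>>> # Feed only the newly generated token back in, together with the cached key/value states;
>>> # for batched or padded inputs the attention mask would also need to be extended accordingly
>>> outputs = model(input_ids=next_token, past_key_values=past_key_values, use_cache=True)
>>> next_logits = outputs.logits[:, -1, :]
</pre>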
hf_public_repos/doc-build-dev/transformers/pr_16143/en/model_doc/xlm-roberta-xl.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;xlmrobertaxl&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;overview&quot;,&quot;title&quot;:&quot;Overview&quot;},{&quot;local&quot;:&quot;transformers.XLMRobertaXLConfig&quot;,&quot;title&quot;:&quot;XLMRobertaXLConfig&quot;},{&quot;local&quot;:&quot;transformers.XLMRobertaXLModel&quot;,&quot;title&quot;:&quot;XLMRobertaXLModel&quot;},{&quot;local&quot;:&quot;transformers.XLMRobertaXLForCausalLM&quot;,&quot;title&quot;:&quot;XLMRobertaXLForCausalLM&quot;},{&quot;local&quot;:&quot;transformers.XLMRobertaXLForMaskedLM&quot;,&quot;title&quot;:&quot;XLMRobertaXLForMaskedLM&quot;},{&quot;local&quot;:&quot;transformers.XLMRobertaXLForSequenceClassification&quot;,&quot;title&quot;:&quot;XLMRobertaXLForSequenceClassification&quot;},{&quot;local&quot;:&quot;transformers.XLMRobertaXLForMultipleChoice&quot;,&quot;title&quot;:&quot;XLMRobertaXLForMultipleChoice&quot;},{&quot;local&quot;:&quot;transformers.XLMRobertaXLForTokenClassification&quot;,&quot;title&quot;:&quot;XLMRobertaXLForTokenClassification&quot;},{&quot;local&quot;:&quot;transformers.XLMRobertaXLForQuestionAnswering&quot;,&quot;title&quot;:&quot;XLMRobertaXLForQuestionAnswering&quot;}],&quot;title&quot;:&quot;XLM-RoBERTa-XL&quot;}" data-svelte="svelte-1phssyn"> <link rel="stylesheet" href="/docs/transformers/pr_16143/en/_app/assets/pages/__layout.svelte-a5c8879b.css"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/vendor-4833417e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/paths-4b3c6e7e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/model_doc/xlm-roberta-xl.mdx-480f1600.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Tip-fffd6df1.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Docstring-4f315ed9.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/IconCopyLink-4b81c553.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CodeBlock-6a3d1b46.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CopyButton-dacfbfaf.js"> <h1 class="relative group"><a id="xlmrobertaxl" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#xlmrobertaxl"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>XLM-RoBERTa-XL </span></h1> <h2 class="relative 
group"><a id="overview" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#overview"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Overview </span></h2> <p>The XLM-RoBERTa-XL model was proposed in <a href="https://arxiv.org/abs/2105.00572" rel="nofollow">Larger-Scale Transformers for Multilingual Masked Language Modeling</a> by Naman Goyal, Jingfei Du, Myle Ott, Giri Anantharaman, Alexis Conneau. </p> <p>The abstract from the paper is the following:</p> <p><em>Recent work has demonstrated the effectiveness of cross-lingual language model pretraining for cross-lingual understanding. In this study, we present the results of two larger multilingual masked language models, with 3.5B and 10.7B parameters. Our two new models dubbed XLM-R XL and XLM-R XXL outperform XLM-R by 1.8% and 2.4% average accuracy on XNLI. Our model also outperforms the RoBERTa-Large model on several English tasks of the GLUE benchmark by 0.3% on average while handling 99 more languages. This suggests pretrained models with larger capacity may obtain both strong performance on high-resource languages while greatly improving low-resource languages. We make our code and models publicly available.</em></p> <p>Tips:</p> <ul><li>XLM-RoBERTa-XL is a multilingual model trained on 100 different languages. Unlike some XLM multilingual models, it does not require <code>lang</code> tensors to understand which language is used, and should be able to determine the correct language from the input ids.</li></ul> <p>This model was contributed by <a href="https://github.com/Soonhwan-Kwon" rel="nofollow">Soonhwan-Kwon</a> and <a href="https://huggingface.co/stefan-it" rel="nofollow">stefan-it</a>. 
The original code can be found <a href="https://github.com/pytorch/fairseq/tree/master/examples/xlmr" rel="nofollow">here</a>.</p> <h2 class="relative group"><a id="transformers.XLMRobertaXLConfig" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLConfig"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>XLMRobertaXLConfig </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.XLMRobertaXLConfig"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">XLMRobertaXLConfig</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.XLMRobertaXLConfig" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.XLMRobertaXLConfig"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline 
text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/xlm_roberta_xl/configuration_xlm_roberta_xl.py#L34" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">vocab_size<span class="opacity-60"> = 250880</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_size<span class="opacity-60"> = 2560</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_hidden_layers<span class="opacity-60"> = 36</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_attention_heads<span class="opacity-60"> = 32</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">intermediate_size<span class="opacity-60"> = 10240</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_act<span class="opacity-60"> = &#39;gelu&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_dropout_prob<span class="opacity-60"> = 0.1</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_probs_dropout_prob<span class="opacity-60"> = 0.1</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_position_embeddings<span class="opacity-60"> = 514</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">type_vocab_size<span class="opacity-60"> = 1</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">initializer_range<span class="opacity-60"> = 0.02</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">layer_norm_eps<span class="opacity-60"> = 1e-05</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_token_id<span class="opacity-60"> = 1</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">bos_token_id<span class="opacity-60"> = 0</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eos_token_id<span class="opacity-60"> = 2</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_embedding_type<span 
class="opacity-60"> = &#39;absolute&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_cache<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">classifier_dropout<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLConfig.vocab_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLConfig.vocab_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 250880) &#x2014; Vocabulary size of the XLM_ROBERTA_XL model. 
Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed when calling <a href="/docs/transformers/pr_16143/en/model_doc/xlm-roberta-xl#transformers.XLMRobertaXLModel">XLMRobertaXLModel</a>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLConfig.hidden_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLConfig.hidden_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_size</strong> (<code>int</code>, <em>optional</em>, defaults to 2560) &#x2014; Dimensionality of the encoder layers and the pooler layer.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLConfig.num_hidden_layers" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLConfig.num_hidden_layers"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_hidden_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 36) &#x2014; Number of hidden layers in the Transformer encoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLConfig.num_attention_heads" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLConfig.num_attention_heads"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 32) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLConfig.intermediate_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLConfig.intermediate_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>intermediate_size</strong> (<code>int</code>, <em>optional</em>, defaults to 10240) &#x2014; Dimensionality of the &#x201C;intermediate&#x201D; (often named feed-forward) layer in the Transformer encoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLConfig.hidden_act" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLConfig.hidden_act"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_act</strong> (<code>str</code> or <code>Callable</code>, <em>optional</em>, defaults 
to <code>&quot;gelu&quot;</code>) &#x2014; The non-linear activation function (function or string) in the encoder and pooler. If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;silu&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLConfig.hidden_dropout_prob" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLConfig.hidden_dropout_prob"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLConfig.attention_probs_dropout_prob" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLConfig.attention_probs_dropout_prob"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_probs_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout ratio for the attention probabilities.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLConfig.max_position_embeddings" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLConfig.max_position_embeddings"><span><svg class="text-smd" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_position_embeddings</strong> (<code>int</code>, <em>optional</em>, defaults to 514) &#x2014; The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLConfig.type_vocab_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLConfig.type_vocab_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>type_vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; The vocabulary size of the <code>token_type_ids</code> passed when calling <a href="/docs/transformers/pr_16143/en/model_doc/xlm-roberta-xl#transformers.XLMRobertaXLModel">XLMRobertaXLModel</a> or <code>TFXLMRobertaXLModel</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLConfig.initializer_range" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLConfig.initializer_range"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 
0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>initializer_range</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLConfig.layer_norm_eps" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLConfig.layer_norm_eps"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>layer_norm_eps</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-5) &#x2014; The epsilon used by the layer normalization layers.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLConfig.position_embedding_type" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLConfig.position_embedding_type"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_embedding_type</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;absolute&quot;</code>) &#x2014; Type of position embedding. Choose one of <code>&quot;absolute&quot;</code>, <code>&quot;relative_key&quot;</code>, <code>&quot;relative_key_query&quot;</code>. For positional embeddings use <code>&quot;absolute&quot;</code>. 
For more information on <code>&quot;relative_key&quot;</code>, please refer to <a href="https://arxiv.org/abs/1803.02155" rel="nofollow">Self-Attention with Relative Position Representations (Shaw et al.)</a>. For more information on <code>&quot;relative_key_query&quot;</code>, please refer to <em>Method 4</em> in <a href="https://arxiv.org/abs/2009.13658" rel="nofollow">Improve Transformer Models with Better Relative Position Embeddings (Huang et al.)</a>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLConfig.use_cache" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLConfig.use_cache"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not the model should return the last key/values attentions (not used by all models). Only relevant if <code>config.is_decoder=True</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLConfig.classifier_dropout" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLConfig.classifier_dropout"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>classifier_dropout</strong> (<code>float</code>, <em>optional</em>) &#x2014; The dropout ratio for the classification head.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>This is the configuration class to store the configuration of a <a href="/docs/transformers/pr_16143/en/model_doc/xlm-roberta-xl#transformers.XLMRobertaXLModel">XLMRobertaXLModel</a> or a <code>TFXLMRobertaXLModel</code>. 
It is used to instantiate an XLM_ROBERTA_XL model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the XLM_ROBERTA_XL <a href="https://huggingface.co/facebook/xlm-roberta-xl" rel="nofollow">facebook/xlm-roberta-xl</a> architecture.</p> <p>Configuration objects inherit from <a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a> and can be used to control the model outputs. Read the documentation from <a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a> for more information.</p> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> XLMRobertaXLModel, XLMRobertaXLConfig

<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a XLM_ROBERTA_XL facebook/xlm-roberta-xl style configuration</span>
<span class="hljs-meta">&gt;&gt;&gt; </span>configuration = XLMRobertaXLConfig()

<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a model from the facebook/xlm-roberta-xl style configuration</span>
<span class="hljs-meta">&gt;&gt;&gt; </span>model = XLMRobertaXLModel(configuration)

<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span>
<span class="hljs-meta">&gt;&gt;&gt; </span>configuration = model.config<!-- HTML_TAG_END --></pre></div></div> <h2 class="relative group"><a id="transformers.XLMRobertaXLModel" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLModel"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196
79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>XLMRobertaXLModel </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.XLMRobertaXLModel"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">XLMRobertaXLModel</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.XLMRobertaXLModel" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.XLMRobertaXLModel"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/xlm_roberta_xl/modeling_xlm_roberta_xl.py#L672" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">add_pooling_layer<span class="opacity-60"> = True</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 
text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLModel.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLModel.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/xlm-roberta-xl#transformers.XLMRobertaXLConfig">XLMRobertaXLConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>The bare XLM-RoBERTa-xlarge Model transformer outputting raw hidden-states without any specific head on top. This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.</p> <p>The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of cross-attention is added between the self-attention layers, following the architecture described in <em>Attention is all you need</em><em>by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. To behave as an decoder the model needs to be initialized with the <code>is_decoder</code> argument of the configuration set to <code>True</code>. To be used in a Seq2Seq model, the model needs to initialized with both <code>is_decoder</code> argument and <code>add_cross_attention</code> set to <code>True</code>; an <code>encoder_hidden_states</code> is then expected as an input to the forward pass. .. 
<div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.XLMRobertaXLModel.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.XLMRobertaXLModel.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.XLMRobertaXLModel.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/xlm_roberta_xl/modeling_xlm_roberta_xl.py#L712" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span
class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_hidden_states<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_attention_mask<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60">: typing.Optional[typing.List[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_cache<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions" >transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLModel.forward.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLModel.forward.input_ids"><span><svg class="text-smd" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaTokenizer">RobertaTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details. <a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLModel.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLModel.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>. 
<a href="../glossary#attention-mask">What are attention masks?</a></li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLModel.forward.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLModel.forward.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token. <a href="../glossary#token-type-ids">What are token type IDs?</a></li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLModel.forward.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLModel.forward.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>. 
<a href="../glossary#position-ids">What are position IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLModel.forward.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLModel.forward.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLModel.forward.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLModel.forward.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLModel.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLModel.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLModel.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLModel.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLModel.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLModel.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLModel.forward.encoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLModel.forward.encoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_hidden_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder. 
Used in the cross-attention if the model is configured as a decoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLModel.forward.encoder_attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLModel.forward.encoder_attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLModel.forward.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLModel.forward.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code> of length <code>config.n_layers</code> with each tuple having 4 tensors of shape <code>(batch_size, num_heads, sequence_length - 1, embed_size_per_head)</code>) &#x2014; Contains precomputed key and value hidden states of the attention blocks. 
Can be used to speed up decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLModel.forward.use_cache" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLModel.forward.use_cache"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.XLMRobertaXLModel.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions" >transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions" >transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/xlm-roberta-xl#transformers.XLMRobertaXLConfig" >XLMRobertaXLConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) — Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>pooler_output</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, hidden_size)</code>) — Last layer hidden-state of the first token of the sequence (classification token) after further processing through the layers used for the auxiliary 
pretraining task. E.g. for BERT-family of models, this returns the classification token after processing through a linear layer and a tanh activation function. The linear layer weights are trained from the next sentence prediction (classification) objective during pretraining.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> and <code>config.add_cross_attention=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder’s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) — Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and optionally if <code>config.is_encoder_decoder=True</code> 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if <code>config.is_encoder_decoder=True</code> in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/xlm-roberta-xl#transformers.XLMRobertaXLModel">XLMRobertaXLModel</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute 
top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer, XLMRobertaXLModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&quot;xlm-roberta-xlarge&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = XLMRobertaXLModel.from_pretrained(<span class="hljs-string">&quot;xlm-roberta-xlarge&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.XLMRobertaXLForCausalLM" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLForCausalLM"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>XLMRobertaXLForCausalLM </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.XLMRobertaXLForCausalLM"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white 
dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">XLMRobertaXLForCausalLM</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.XLMRobertaXLForCausalLM" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.XLMRobertaXLForCausalLM"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/xlm_roberta_xl/modeling_xlm_roberta_xl.py#L854" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLForCausalLM.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLForCausalLM.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 
84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/xlm-roberta-xl#transformers.XLMRobertaXLConfig">XLMRobertaXLConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>XLM-RoBERTa-xlarge Model with a <code>language modeling</code> head on top for CLM fine-tuning. This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.XLMRobertaXLForCausalLM.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.XLMRobertaXLForCausalLM.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.XLMRobertaXLForCausalLM.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" 
aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/xlm_roberta_xl/modeling_xlm_roberta_xl.py#L879" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_cache<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white 
dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.CausalLMOutputWithCrossAttentions" >transformers.modeling_outputs.CausalLMOutputWithCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLForCausalLM.forward.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLForCausalLM.forward.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaTokenizer">RobertaTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details. 
<a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLForCausalLM.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLForCausalLM.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>. <a href="../glossary#attention-mask">What are attention masks?</a></li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLForCausalLM.forward.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLForCausalLM.forward.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token. 
<a href="../glossary#token-type-ids">What are token type IDs?</a></li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLForCausalLM.forward.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLForCausalLM.forward.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>. <a href="../glossary#position-ids">What are position IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLForCausalLM.forward.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLForCausalLM.forward.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLForCausalLM.forward.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLForCausalLM.forward.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLForCausalLM.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLForCausalLM.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
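As an illustrative sketch only (not part of the original parameter description; the checkpoint name is a placeholder, and the base model is used here purely for brevity, since the flag behaves the same way across the XLM-RoBERTa-XL heads): <pre>from transformers import AutoTokenizer, XLMRobertaXLModel

tokenizer = AutoTokenizer.from_pretrained("facebook/xlm-roberta-xl")  # placeholder checkpoint
model = XLMRobertaXLModel.from_pretrained("facebook/xlm-roberta-xl")

# request the per-layer attention maps for a single forward pass
outputs = model(**tokenizer("Hello", return_tensors="pt"), output_attentions=True)
print(len(outputs.attentions), outputs.attentions[0].shape)  # one (batch, heads, seq, seq) tensor per layer</pre>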
See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLForCausalLM.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLForCausalLM.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLForCausalLM.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLForCausalLM.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLForCausalLM.forward.encoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLForCausalLM.forward.encoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" 
aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_hidden_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLForCausalLM.forward.encoder_attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLForCausalLM.forward.encoder_attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLForCausalLM.forward.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLForCausalLM.forward.labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLForCausalLM.forward.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLForCausalLM.forward.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code> of length <code>config.n_layers</code> with each tuple having 4 tensors of shape <code>(batch_size, num_heads, sequence_length - 1, embed_size_per_head)</code>) &#x2014; Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. 
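As a minimal, non-authoritative sketch of reusing the cache across generation steps (the checkpoint name is a placeholder; variable names are illustrative): <pre>from transformers import AutoConfig, AutoTokenizer, XLMRobertaXLForCausalLM

tokenizer = AutoTokenizer.from_pretrained("facebook/xlm-roberta-xl")  # placeholder checkpoint
config = AutoConfig.from_pretrained("facebook/xlm-roberta-xl")
config.is_decoder = True
model = XLMRobertaXLForCausalLM.from_pretrained("facebook/xlm-roberta-xl", config=config)

# first step: run the full prompt and keep the returned cache
out = model(**tokenizer("Hello", return_tensors="pt"), use_cache=True)
past = out.past_key_values

# later steps: feed only the newly generated token together with the cached states
next_token = out.logits[:, -1:].argmax(-1)  # shape (batch_size, 1)
out = model(input_ids=next_token, past_key_values=past, use_cache=True)</pre>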
If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLForCausalLM.forward.use_cache" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLForCausalLM.forward.use_cache"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.XLMRobertaXLForCausalLM.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.CausalLMOutputWithCrossAttentions" >transformers.modeling_outputs.CausalLMOutputWithCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.CausalLMOutputWithCrossAttentions" >transformers.modeling_outputs.CausalLMOutputWithCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/xlm-roberta-xl#transformers.XLMRobertaXLConfig" >XLMRobertaXLConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) — Language modeling loss (for next-token prediction).</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) — Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, 
returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Cross attentions weights after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) — Tuple of <code>torch.FloatTensor</code> tuples of length <code>config.n_layers</code>, with each tuple containing the cached key, value states of the self-attention and the cross-attention layers if model is used in encoder-decoder setting. 
Only relevant if <code>config.is_decoder = True</code>.</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/xlm-roberta-xl#transformers.XLMRobertaXLForCausalLM">XLMRobertaXLForCausalLM</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer, RobertaForCausalLM, RobertaConfig <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&quot;roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>config = RobertaConfig.from_pretrained(<span class="hljs-string">&quot;roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>config.is_decoder = <span class="hljs-literal">True</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = RobertaForCausalLM.from_pretrained(<span class="hljs-string">&quot;roberta-base&quot;</span>, config=config) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>prediction_logits = outputs.logits<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.XLMRobertaXLForMaskedLM" class="header-link block pr-1.5 
text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLForMaskedLM"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>XLMRobertaXLForMaskedLM </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.XLMRobertaXLForMaskedLM"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">XLMRobertaXLForMaskedLM</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.XLMRobertaXLForMaskedLM" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.XLMRobertaXLForMaskedLM"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/xlm_roberta_xl/modeling_xlm_roberta_xl.py#L1004" target="_blank"><span>&lt;</span> <span 
class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLForMaskedLM.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLForMaskedLM.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/xlm-roberta-xl#transformers.XLMRobertaXLConfig">XLMRobertaXLConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>XLM-RoBERTa-xlarge Model with a <code>language modeling</code> head on top. This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.XLMRobertaXLForMaskedLM.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.XLMRobertaXLForMaskedLM.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.XLMRobertaXLForMaskedLM.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/xlm_roberta_xl/modeling_xlm_roberta_xl.py#L1032" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white 
dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.MaskedLMOutput" >transformers.modeling_outputs.MaskedLMOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLForMaskedLM.forward.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLForMaskedLM.forward.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 
8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaTokenizer">RobertaTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details. <a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLForMaskedLM.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLForMaskedLM.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>. 
<a href="../glossary#attention-mask">What are attention masks?</a></li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLForMaskedLM.forward.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLForMaskedLM.forward.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token. <a href="../glossary#token-type-ids">What are token type IDs?</a></li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLForMaskedLM.forward.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLForMaskedLM.forward.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>. 
<a href="../glossary#position-ids">What are position IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLForMaskedLM.forward.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLForMaskedLM.forward.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLForMaskedLM.forward.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLForMaskedLM.forward.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLForMaskedLM.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLForMaskedLM.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLForMaskedLM.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLForMaskedLM.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLForMaskedLM.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLForMaskedLM.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLForMaskedLM.forward.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLForMaskedLM.forward.labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. 
Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLForMaskedLM.forward.kwargs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLForMaskedLM.forward.kwargs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>kwargs</strong> (<code>Dict[str, any]</code>, optional, defaults to <em>{}</em>) &#x2014; Used to hide legacy arguments that have been deprecated.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.XLMRobertaXLForMaskedLM.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.MaskedLMOutput" >transformers.modeling_outputs.MaskedLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.MaskedLMOutput" >transformers.modeling_outputs.MaskedLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/xlm-roberta-xl#transformers.XLMRobertaXLConfig" >XLMRobertaXLConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) — Masked language modeling (MLM) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) — Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape 
<code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/xlm-roberta-xl#transformers.XLMRobertaXLForMaskedLM">XLMRobertaXLForMaskedLM</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer, XLMRobertaXLForMaskedLM <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&quot;xlm-roberta-xlarge&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = XLMRobertaXLForMaskedLM.from_pretrained(<span class="hljs-string">&quot;xlm-roberta-xlarge&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;The capital of France is &lt;mask&gt;.&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = tokenizer(<span class="hljs-string">&quot;The capital of France is 
Paris.&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>)[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.XLMRobertaXLForSequenceClassification" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLForSequenceClassification"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>XLMRobertaXLForSequenceClassification </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.XLMRobertaXLForSequenceClassification"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">XLMRobertaXLForSequenceClassification</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.XLMRobertaXLForSequenceClassification" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.XLMRobertaXLForSequenceClassification"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 
1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/xlm_roberta_xl/modeling_xlm_roberta_xl.py#L1132" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLForSequenceClassification.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLForSequenceClassification.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/xlm-roberta-xl#transformers.XLMRobertaXLConfig">XLMRobertaXLConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>XLM-RoBERTa-xlarge Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads etc.). 
This model is also a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.XLMRobertaXLForSequenceClassification.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.XLMRobertaXLForSequenceClassification.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.XLMRobertaXLForSequenceClassification.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/xlm_roberta_xl/modeling_xlm_roberta_xl.py#L1145" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLForSequenceClassification.forward.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLForSequenceClassification.forward.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> 
(<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaTokenizer">RobertaTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details. <a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLForSequenceClassification.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLForSequenceClassification.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>. 
<a href="../glossary#attention-mask">What are attention masks?</a></li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLForSequenceClassification.forward.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLForSequenceClassification.forward.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token. <a href="../glossary#token-type-ids">What are token type IDs?</a></li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLForSequenceClassification.forward.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLForSequenceClassification.forward.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>. 
<a href="../glossary#position-ids">What are position IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLForSequenceClassification.forward.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLForSequenceClassification.forward.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLForSequenceClassification.forward.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLForSequenceClassification.forward.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLForSequenceClassification.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLForSequenceClassification.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLForSequenceClassification.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLForSequenceClassification.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLForSequenceClassification.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLForSequenceClassification.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLForSequenceClassification.forward.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLForSequenceClassification.forward.labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. 
If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.XLMRobertaXLForSequenceClassification.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/xlm-roberta-xl#transformers.XLMRobertaXLConfig" >XLMRobertaXLConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) — Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) — Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/xlm-roberta-xl#transformers.XLMRobertaXLForSequenceClassification">XLMRobertaXLForSequenceClassification</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores 
them.</p></div> <p>Example of single-label classification:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer, XLMRobertaXLForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span>torch.manual_seed(<span class="hljs-number">0</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&quot;xlm-roberta-xlarge&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = XLMRobertaXLForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;xlm-roberta-xlarge&quot;</span>, num_labels=<span class="hljs-number">2</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([<span class="hljs-number">1</span>]).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">list</span>(logits.shape) <!-- HTML_TAG_END --></pre></div> <p>Example of multi-label classification:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> 
<div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer, XLMRobertaXLForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span>torch.manual_seed(<span class="hljs-number">0</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&quot;xlm-roberta-xlarge&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = XLMRobertaXLForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;xlm-roberta-xlarge&quot;</span>, problem_type=<span class="hljs-string">&quot;multi_label_classification&quot;</span>, num_labels=<span class="hljs-number">2</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([[<span class="hljs-number">1</span>, <span class="hljs-number">1</span>]], dtype=torch.<span class="hljs-built_in">float</span>) <span class="hljs-comment"># need dtype=float for BCEWithLogitsLoss</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">list</span>(logits.shape) <!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.XLMRobertaXLForMultipleChoice" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLForMultipleChoice"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>XLMRobertaXLForMultipleChoice </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.XLMRobertaXLForMultipleChoice"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 
dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">XLMRobertaXLForMultipleChoice</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.XLMRobertaXLForMultipleChoice" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.XLMRobertaXLForMultipleChoice"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/xlm_roberta_xl/modeling_xlm_roberta_xl.py#L1229" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLForMultipleChoice.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLForMultipleChoice.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 
28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/xlm-roberta-xl#transformers.XLMRobertaXLConfig">XLMRobertaXLConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>XLM-Roberta-xlarge Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.XLMRobertaXLForMultipleChoice.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.XLMRobertaXLForMultipleChoice.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.XLMRobertaXLForMultipleChoice.forward"><svg class="" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/xlm_roberta_xl/modeling_xlm_roberta_xl.py#L1241" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.MultipleChoiceModelOutput" >transformers.modeling_outputs.MultipleChoiceModelOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 
relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLForMultipleChoice.forward.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLForMultipleChoice.forward.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaTokenizer">RobertaTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details. 
<a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLForMultipleChoice.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLForMultipleChoice.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>. <a href="../glossary#attention-mask">What are attention masks?</a></li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLForMultipleChoice.forward.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLForMultipleChoice.forward.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token. 
<a href="../glossary#token-type-ids">What are token type IDs?</a></li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLForMultipleChoice.forward.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLForMultipleChoice.forward.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>. <a href="../glossary#position-ids">What are position IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLForMultipleChoice.forward.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLForMultipleChoice.forward.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLForMultipleChoice.forward.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLForMultipleChoice.forward.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLForMultipleChoice.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLForMultipleChoice.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLForMultipleChoice.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLForMultipleChoice.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLForMultipleChoice.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLForMultipleChoice.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLForMultipleChoice.forward.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLForMultipleChoice.forward.labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the multiple choice classification loss. Indices should be in <code>[0, ..., num_choices-1]</code> where <code>num_choices</code> is the size of the second dimension of the input tensors. (See <code>input_ids</code> above)<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.XLMRobertaXLForMultipleChoice.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.MultipleChoiceModelOutput" >transformers.modeling_outputs.MultipleChoiceModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.MultipleChoiceModelOutput" >transformers.modeling_outputs.MultipleChoiceModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/xlm-roberta-xl#transformers.XLMRobertaXLConfig" >XLMRobertaXLConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <em>(1,)</em>, <em>optional</em>, returned when <code>labels</code> is provided) — Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices)</code>) — <em>num_choices</em> is the second dimension of the input tensors. 
(see <em>input_ids</em> above).</p> <p>Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/xlm-roberta-xl#transformers.XLMRobertaXLForMultipleChoice">XLMRobertaXLForMultipleChoice</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer, XLMRobertaXLForMultipleChoice <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&quot;xlm-roberta-xlarge&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = 
XLMRobertaXLForMultipleChoice.from_pretrained(<span class="hljs-string">&quot;xlm-roberta-xlarge&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice0 = <span class="hljs-string">&quot;It is eaten with a fork and a knife.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice1 = <span class="hljs-string">&quot;It is eaten while held in the hand.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor(<span class="hljs-number">0</span>).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># choice0 is correct (according to Wikipedia ;)), batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors=<span class="hljs-string">&quot;pt&quot;</span>, padding=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**{k: v.unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-keyword">for</span> k, v <span class="hljs-keyword">in</span> encoding.items()}, labels=labels) <span class="hljs-comment"># batch size is 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># the linear classifier still needs to be trained</span> <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.XLMRobertaXLForTokenClassification" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLForTokenClassification"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>XLMRobertaXLForTokenClassification </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.XLMRobertaXLForTokenClassification"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 
0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">XLMRobertaXLForTokenClassification</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.XLMRobertaXLForTokenClassification" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.XLMRobertaXLForTokenClassification"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/xlm_roberta_xl/modeling_xlm_roberta_xl.py#L1323" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLForTokenClassification.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLForTokenClassification.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/xlm-roberta-xl#transformers.XLMRobertaXLConfig">XLMRobertaXLConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>XLM-Roberta-xlarge Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.XLMRobertaXLForTokenClassification.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.XLMRobertaXLForTokenClassification.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.XLMRobertaXLForTokenClassification.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 
28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/xlm_roberta_xl/modeling_xlm_roberta_xl.py#L1340" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">labels<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a 
id="transformers.XLMRobertaXLForTokenClassification.forward.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLForTokenClassification.forward.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaTokenizer">RobertaTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details. <a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLForTokenClassification.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLForTokenClassification.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>. 
<a href="../glossary#attention-mask">What are attention masks?</a></li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLForTokenClassification.forward.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLForTokenClassification.forward.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token. <a href="../glossary#token-type-ids">What are token type IDs?</a></li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLForTokenClassification.forward.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLForTokenClassification.forward.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>. 
<a href="../glossary#position-ids">What are position IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLForTokenClassification.forward.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLForTokenClassification.forward.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLForTokenClassification.forward.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLForTokenClassification.forward.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLForTokenClassification.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLForTokenClassification.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLForTokenClassification.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLForTokenClassification.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLForTokenClassification.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLForTokenClassification.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLForTokenClassification.forward.labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLForTokenClassification.forward.labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the token classification loss. 
Indices should be in <code>[0, ..., config.num_labels - 1]</code>.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.XLMRobertaXLForTokenClassification.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/xlm-roberta-xl#transformers.XLMRobertaXLConfig" >XLMRobertaXLConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) — Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.num_labels)</code>) — Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/xlm-roberta-xl#transformers.XLMRobertaXLForTokenClassification">XLMRobertaXLForTokenClassification</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer 
focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer, XLMRobertaXLForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&quot;xlm-roberta-xlarge&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = XLMRobertaXLForTokenClassification.from_pretrained(<span class="hljs-string">&quot;xlm-roberta-xlarge&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([<span class="hljs-number">1</span>] * inputs[<span class="hljs-string">&quot;input_ids&quot;</span>].size(<span class="hljs-number">1</span>)).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.XLMRobertaXLForQuestionAnswering" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLForQuestionAnswering"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>XLMRobertaXLForQuestionAnswering </span></h2> <div 
class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.XLMRobertaXLForQuestionAnswering"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">XLMRobertaXLForQuestionAnswering</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.XLMRobertaXLForQuestionAnswering" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.XLMRobertaXLForQuestionAnswering"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/xlm_roberta_xl/modeling_xlm_roberta_xl.py#L1438" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLForQuestionAnswering.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.XLMRobertaXLForQuestionAnswering.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/xlm-roberta-xl#transformers.XLMRobertaXLConfig">XLMRobertaXLConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>XLM-Roberta-xlarge Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute <code>span start logits</code> and <code>span end logits</code>).</p> <p>This model inherits from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a>. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow">torch.nn.Module</a> subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.XLMRobertaXLForQuestionAnswering.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.XLMRobertaXLForQuestionAnswering.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.XLMRobertaXLForQuestionAnswering.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/xlm_roberta_xl/modeling_xlm_roberta_xl.py#L1451" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black 
hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">position_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_embeds<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">start_positions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">end_positions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60"> = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.QuestionAnsweringModelOutput" >transformers.modeling_outputs.QuestionAnsweringModelOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLForQuestionAnswering.forward.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLForQuestionAnswering.forward.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START 
--><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaTokenizer">RobertaTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details. <a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLForQuestionAnswering.forward.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLForQuestionAnswering.forward.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>. 
<a href="../glossary#attention-mask">What are attention masks?</a></li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLForQuestionAnswering.forward.token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLForQuestionAnswering.forward.token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token. <a href="../glossary#token-type-ids">What are token type IDs?</a></li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLForQuestionAnswering.forward.position_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLForQuestionAnswering.forward.position_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>. 
<a href="../glossary#position-ids">What are position IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLForQuestionAnswering.forward.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLForQuestionAnswering.forward.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLForQuestionAnswering.forward.inputs_embeds" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLForQuestionAnswering.forward.inputs_embeds"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLForQuestionAnswering.forward.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLForQuestionAnswering.forward.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLForQuestionAnswering.forward.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLForQuestionAnswering.forward.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLForQuestionAnswering.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLForQuestionAnswering.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLForQuestionAnswering.forward.start_positions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLForQuestionAnswering.forward.start_positions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>start_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). 
Position outside of the sequence are not taken into account for computing the loss.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.XLMRobertaXLForQuestionAnswering.forward.end_positions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.XLMRobertaXLForQuestionAnswering.forward.end_positions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>end_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). Position outside of the sequence are not taken into account for computing the loss.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.XLMRobertaXLForQuestionAnswering.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.QuestionAnsweringModelOutput" >transformers.modeling_outputs.QuestionAnsweringModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.QuestionAnsweringModelOutput" >transformers.modeling_outputs.QuestionAnsweringModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/xlm-roberta-xl#transformers.XLMRobertaXLConfig" >XLMRobertaXLConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) — Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.</p> </li> <li> <p><strong>start_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) — Span-start scores (before SoftMax).</p> </li> <li> <p><strong>end_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) — Span-end scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> 
(<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) — Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>The <a href="/docs/transformers/pr_16143/en/model_doc/xlm-roberta-xl#transformers.XLMRobertaXLForQuestionAnswering">XLMRobertaXLForQuestionAnswering</a> forward method, overrides the <code>__call__</code> special method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Although the recipe for forward pass needs to be defined within this function, one should call the <code>Module</code> instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.</p></div> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer, XLMRobertaXLForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>torch.manual_seed(<span class="hljs-number">0</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&quot;xlm-roberta-xlarge&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = 
XLMRobertaXLForQuestionAnswering.from_pretrained(<span class="hljs-string">&quot;xlm-roberta-xlarge&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>question, text = <span class="hljs-string">&quot;Who was Jim Henson?&quot;</span>, <span class="hljs-string">&quot;Jim Henson was a nice puppet&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(question, text, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>start_positions = torch.tensor([<span class="hljs-number">1</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>end_positions = torch.tensor([<span class="hljs-number">3</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, start_positions=start_positions, end_positions=end_positions) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">round</span>(loss.item(), <span class="hljs-number">2</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>start_scores = outputs.start_logits <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">list</span>(start_scores.shape) <span class="hljs-meta">&gt;&gt;&gt; </span>end_scores = outputs.end_logits <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">list</span>(end_scores.shape) <!-- HTML_TAG_END --></pre></div></div></div> <script type="module" data-hydrate="1n3i83r"> import { start } from "/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"; start({ target: document.querySelector('[data-hydrate="1n3i83r"]').parentNode, paths: {"base":"/docs/transformers/pr_16143/en","assets":"/docs/transformers/pr_16143/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"), import("/docs/transformers/pr_16143/en/_app/pages/model_doc/xlm-roberta-xl.mdx-480f1600.js") ], params: {} } }); </script>
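<p>As a usage note for the example above, the following is a minimal sketch of how the returned <code>start_logits</code> and <code>end_logits</code> can be converted into an answer string: take the argmax of each, slice the input ids between the two positions, and decode them. It assumes the <code>tokenizer</code>, <code>model</code> and <code>inputs</code> defined in the example; with a checkpoint whose question-answering head has not been fine-tuned, the selected span is essentially arbitrary.</p> <pre>&gt;&gt;&gt; # most likely start and end token positions (batch size 1, as in the example)
&gt;&gt;&gt; start_index = int(torch.argmax(outputs.start_logits, dim=-1)[0])
&gt;&gt;&gt; end_index = int(torch.argmax(outputs.end_logits, dim=-1)[0])

&gt;&gt;&gt; # decode the tokens between the predicted start and end (inclusive);
&gt;&gt;&gt; # in practice, also guard against end_index &lt; start_index
&gt;&gt;&gt; answer_ids = inputs[&quot;input_ids&quot;][0, start_index : end_index + 1]
&gt;&gt;&gt; answer = tokenizer.decode(answer_ids, skip_special_tokens=True)</pre>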
s(d())),{c(){t&&b(t.$$.fragment),r=R()},l(o){t&&z(t.$$.fragment,o),r=R()},m(o,n){t&&D(t,o,n),I(o,r,n),_=!0},p(o,n){const c=n&8?j(l,[q(o[3]||{})]):{};if(s!==(s=o[0][2])){if(t){S();const u=t;L(u.$$.fragment,1,0,()=>{w(u,1)}),U()}s?(t=new s(d()),b(t.$$.fragment),O(t.$$.fragment,1),D(t,r.parentNode,r)):t=null}else s&&t.$set(c)},i(o){_||(t&&O(t.$$.fragment,o),_=!0)},o(o){t&&L(t.$$.fragment,o),_=!1},d(o){o&&P(r),t&&w(t,o)}}}function Ue(a){let t,r,_,l;const s=[xe,ke],d=[];function o(n,c){return n[0][2]?0:1}return t=o(a),r=d[t]=s[t](a),{c(){r.c(),_=R()},l(n){r.l(n),_=R()},m(n,c){d[t].m(n,c),I(n,_,c),l=!0},p(n,c){let u=t;t=o(n),t===u?d[t].p(n,c):(S(),L(d[u],1,1,()=>{d[u]=null}),U(),r=d[t],r?r.p(n,c):(r=d[t]=s[t](n),r.c()),O(r,1),r.m(_.parentNode,_))},i(n){l||(O(r),l=!0)},o(n){L(r),l=!1},d(n){d[t].d(n),n&&P(_)}}}function oe(a){let t,r=a[5]&&_e(a);return{c(){t=ge("div"),r&&r.c(),this.h()},l(_){t=$e(_,"DIV",{id:!0,"aria-live":!0,"aria-atomic":!0,style:!0});var l=Re(t);r&&r.l(l),l.forEach(P),this.h()},h(){G(t,"id","svelte-announcer"),G(t,"aria-live","assertive"),G(t,"aria-atomic","true"),k(t,"position","absolute"),k(t,"left","0"),k(t,"top","0"),k(t,"clip","rect(0 0 0 0)"),k(t,"clip-path","inset(50%)"),k(t,"overflow","hidden"),k(t,"white-space","nowrap"),k(t,"width","1px"),k(t,"height","1px")},m(_,l){I(_,t,l),r&&r.m(t,null)},p(_,l){_[5]?r?r.p(_,l):(r=_e(_),r.c(),r.m(t,null)):r&&(r.d(1),r=null)},d(_){_&&P(t),r&&r.d()}}}function _e(a){let t;return{c(){t=Le(a[6])},l(r){t=Oe(r,a[6])},m(r,_){I(r,t,_)},p(r,_){_&64&&Ae(t,r[6])},d(r){r&&P(t)}}}function Ne(a){let t,r,_,l,s;const d=[ye,we],o=[];function n(u,f){return u[0][1]?0:1}t=n(a),r=o[t]=d[t](a);let c=a[4]&&oe(a);return{c(){r.c(),_=Pe(),c&&c.c(),l=R()},l(u){r.l(u),_=Te(u),c&&c.l(u),l=R()},m(u,f){o[t].m(u,f),I(u,_,f),c&&c.m(u,f),I(u,l,f),s=!0},p(u,[f]){let m=t;t=n(u),t===m?o[t].p(u,f):(S(),L(o[m],1,1,()=>{o[m]=null}),U(),r=o[t],r?r.p(u,f):(r=o[t]=d[t](u),r.c()),O(r,1),r.m(_.parentNode,_)),u[4]?c?c.p(u,f):(c=oe(u),c.c(),c.m(l.parentNode,l)):c&&(c.d(1),c=null)},i(u){s||(O(r),s=!0)},o(u){L(r),s=!1},d(u){o[t].d(u),u&&P(_),c&&c.d(u),u&&P(l)}}}function ze(a,t,r){let{stores:_}=t,{page:l}=t,{components:s}=t,{props_0:d=null}=t,{props_1:o=null}=t,{props_2:n=null}=t;Ve("__svelte__",_),Ie(_.page.notify);let c=!1,u=!1,f=null;return F(()=>{const m=_.page.subscribe(()=>{c&&(r(5,u=!0),r(6,f=document.title||"untitled page"))});return r(4,c=!0),m}),a.$$set=m=>{"stores"in m&&r(7,_=m.stores),"page"in m&&r(8,l=m.page),"components"in m&&r(0,s=m.components),"props_0"in m&&r(1,d=m.props_0),"props_1"in m&&r(2,o=m.props_1),"props_2"in m&&r(3,n=m.props_2)},a.$$.update=()=>{a.$$.dirty&384&&_.page.set(l)},[s,d,o,n,c,u,f,_,l]}class je extends he{constructor(t){super();Ee(this,t,ze,Ne,ve,{stores:7,page:8,components:0,props_0:1,props_1:2,props_2:3})}}const qe="modulepreload",se={},Ce="/docs/transformers/pr_16143/en/_app/",i=function(t,r){return!r||r.length===0?t():Promise.all(r.map(_=>{if(_=`${Ce}${_}`,_ in se)return;se[_]=!0;const l=_.endsWith(".css"),s=l?'[rel="stylesheet"]':"";if(document.querySelector(`link[href="${_}"]${s}`))return;const d=document.createElement("link");if(d.rel=l?"stylesheet":qe,l||(d.as="script",d.crossOrigin=""),d.href=_,document.head.appendChild(d),l)return new 
Promise((o,n)=>{d.addEventListener("load",o),d.addEventListener("error",n)})})).then(()=>t())},e=[()=>i(()=>import("./pages/__layout.svelte-52826f4d.js"),["pages/__layout.svelte-52826f4d.js","assets/pages/__layout.svelte-a5c8879b.css","chunks/vendor-4833417e.js","chunks/paths-4b3c6e7e.js"]),()=>i(()=>import("./error.svelte-7219ae09.js"),["error.svelte-7219ae09.js","chunks/vendor-4833417e.js"]),()=>i(()=>import("./pages/index.mdx-78213d4b.js"),["pages/index.mdx-78213d4b.js","chunks/vendor-4833417e.js","chunks/IconCopyLink-4b81c553.js"]),()=>i(()=>import("./pages/converting_tensorflow_models.mdx-9367eeca.js"),["pages/converting_tensorflow_models.mdx-9367eeca.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/autoclass_tutorial.mdx-256b03d8.js"),["pages/autoclass_tutorial.mdx-256b03d8.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js","chunks/CodeBlockFw-27a176a0.js"]),()=>i(()=>import("./pages/pipeline_tutorial.mdx-8f24abfb.js"),["pages/pipeline_tutorial.mdx-8f24abfb.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/tokenizer_summary.mdx-9d9de8d8.js"),["pages/tokenizer_summary.mdx-9d9de8d8.js","chunks/vendor-4833417e.js","chunks/Youtube-27813aed.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js","chunks/DocNotebookDropdown-ecff2a90.js"]),()=>i(()=>import("./pages/add_new_pipeline.mdx-86ba54ca.js"),["pages/add_new_pipeline.mdx-86ba54ca.js","chunks/vendor-4833417e.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/custom_datasets.mdx-a1b148bf.js"),["pages/custom_datasets.mdx-a1b148bf.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js","chunks/DocNotebookDropdown-ecff2a90.js"]),()=>i(()=>import("./pages/fast_tokenizers.mdx-1a58673f.js"),["pages/fast_tokenizers.mdx-1a58673f.js","chunks/vendor-4833417e.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/troubleshooting.mdx-e0bea728.js"),["pages/troubleshooting.mdx-e0bea728.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Youtube-27813aed.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/create_a_model.mdx-44804e70.js"),["pages/create_a_model.mdx-44804e70.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js","chunks/CodeBlockFw-27a176a0.js"]),()=>i(()=>import("./pages/add_new_model.mdx-07427b7e.js"),["pages/add_new_model.mdx-07427b7e.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/custom_models.mdx-12924744.js"),["pages/custom_models.mdx-12924744.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_sharing.mdx-3b3df35f.js"),["pages/m
odel_sharing.mdx-3b3df35f.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Youtube-27813aed.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_summary.mdx-4a71079d.js"),["pages/model_summary.mdx-4a71079d.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Youtube-27813aed.js","chunks/IconCopyLink-4b81c553.js"]),()=>i(()=>import("./pages/preprocessing.mdx-daaa6c3c.js"),["pages/preprocessing.mdx-daaa6c3c.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Youtube-27813aed.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js","chunks/CodeBlockFw-27a176a0.js","chunks/DocNotebookDropdown-ecff2a90.js"]),()=>i(()=>import("./pages/serialization.mdx-809e82fb.js"),["pages/serialization.mdx-809e82fb.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js","chunks/CodeBlockFw-27a176a0.js"]),()=>i(()=>import("./pages/contributing.mdx-24741e59.js"),["pages/contributing.mdx-24741e59.js","chunks/vendor-4833417e.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/installation.mdx-ccd851fb.js"),["pages/installation.mdx-ccd851fb.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/main_classes/optimizer_schedules.mdx-d0982fc0.js"),["pages/main_classes/optimizer_schedules.mdx-d0982fc0.js","chunks/vendor-4833417e.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/main_classes/feature_extractor.mdx-045f07b4.js"),["pages/main_classes/feature_extractor.mdx-045f07b4.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/main_classes/keras_callbacks.mdx-7ee92fa9.js"),["pages/main_classes/keras_callbacks.mdx-7ee92fa9.js","chunks/vendor-4833417e.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/main_classes/text_generation.mdx-5e23a84f.js"),["pages/main_classes/text_generation.mdx-5e23a84f.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/main_classes/configuration.mdx-5be476eb.js"),["pages/main_classes/configuration.mdx-5be476eb.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/main_classes/data_collator.mdx-5221ee9b.js"),["pages/main_classes/data_collator.mdx-5221ee9b.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js"]),()=>i(()=>import("./pages/main_classes/processors.mdx-9cb76e12.js"),["pages/main_classes/processors.mdx-9cb76e12.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/C
opyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/main_classes/deepspeed.mdx-f19f464c.js"),["pages/main_classes/deepspeed.mdx-f19f464c.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/main_classes/pipelines.mdx-dd4bd714.js"),["pages/main_classes/pipelines.mdx-dd4bd714.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/main_classes/tokenizer.mdx-187685a5.js"),["pages/main_classes/tokenizer.mdx-187685a5.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/main_classes/callback.mdx-9aed4ad7.js"),["pages/main_classes/callback.mdx-9aed4ad7.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/main_classes/logging.mdx-8b2a9a6f.js"),["pages/main_classes/logging.mdx-8b2a9a6f.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/main_classes/trainer.mdx-a51a0aac.js"),["pages/main_classes/trainer.mdx-a51a0aac.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/main_classes/output.mdx-bc3fe8ad.js"),["pages/main_classes/output.mdx-bc3fe8ad.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/main_classes/model.mdx-5e2df875.js"),["pages/main_classes/model.mdx-5e2df875.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/main_classes/onnx.mdx-da087fbf.js"),["pages/main_classes/onnx.mdx-da087fbf.js","chunks/vendor-4833417e.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js"]),()=>i(()=>import("./pages/multilingual.mdx-0feae7c7.js"),["pages/multilingual.mdx-0feae7c7.js","chunks/vendor-4833417e.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js","chunks/DocNotebookDropdown-ecff2a90.js"]),()=>i(()=>import("./pages/task_summary.mdx-1d811e69.js"),["pages/task_summary.mdx-1d811e69.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js","chunks/CodeBlockFw-27a176a0.js","chunks/DocNotebookDropdown-ecff2a90.js"]),()=>i(()=>import("./pages/parallelism.mdx-f6d6de66.js"),["pages/parallelism.mdx-f6d6de66.js","chunks/vendor-4833417e.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/performance.mdx-a6515fea.js"),["pages/performance.mdx-a6515fea.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/IconCopyLink
-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js","chunks/DocNotebookDropdown-ecff2a90.js"]),()=>i(()=>import("./pages/run_scripts.mdx-dea66c26.js"),["pages/run_scripts.mdx-dea66c26.js","chunks/vendor-4833417e.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js","chunks/CodeBlockFw-27a176a0.js"]),()=>i(()=>import("./pages/accelerate.mdx-8837c56a.js"),["pages/accelerate.mdx-8837c56a.js","chunks/vendor-4833417e.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/benchmarks.mdx-680f04c0.js"),["pages/benchmarks.mdx-680f04c0.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlockFw-27a176a0.js","chunks/CopyButton-dacfbfaf.js","chunks/DocNotebookDropdown-ecff2a90.js"]),()=>i(()=>import("./pages/perplexity.mdx-36aeb100.js"),["pages/perplexity.mdx-36aeb100.js","chunks/vendor-4833417e.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js","chunks/DocNotebookDropdown-ecff2a90.js"]),()=>i(()=>import("./pages/philosophy.mdx-9ffd6814.js"),["pages/philosophy.mdx-9ffd6814.js","chunks/vendor-4833417e.js","chunks/IconCopyLink-4b81c553.js"]),()=>i(()=>import("./pages/bertology.mdx-1663513a.js"),["pages/bertology.mdx-1663513a.js","chunks/vendor-4833417e.js","chunks/IconCopyLink-4b81c553.js"]),()=>i(()=>import("./pages/community.mdx-df0c9955.js"),["pages/community.mdx-df0c9955.js","chunks/vendor-4833417e.js","chunks/IconCopyLink-4b81c553.js"]),()=>i(()=>import("./pages/debugging.mdx-9163dbf4.js"),["pages/debugging.mdx-9163dbf4.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/migration.mdx-040dfdaa.js"),["pages/migration.mdx-040dfdaa.js","chunks/vendor-4833417e.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/vision-text-dual-encoder.mdx-bcc9f3e9.js"),["pages/model_doc/vision-text-dual-encoder.mdx-bcc9f3e9.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/speech-encoder-decoder.mdx-a4b010d9.js"),["pages/model_doc/speech-encoder-decoder.mdx-a4b010d9.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/vision-encoder-decoder.mdx-b5cd5518.js"),["pages/model_doc/vision-encoder-decoder.mdx-b5cd5518.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/blenderbot-small.mdx-047cfb96.js"),["pages/model_doc/blenderbot-small.mdx-047cfb96.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/speech_to_text_2.mdx-9f3459dc.js"),["pages/model_doc/speech_to_text_2.mdx-9f3459dc.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.
js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/wav2vec2_phoneme.mdx-985dde42.js"),["pages/model_doc/wav2vec2_phoneme.mdx-985dde42.js","chunks/vendor-4833417e.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js"]),()=>i(()=>import("./pages/model_doc/bert-generation.mdx-eb6fdeef.js"),["pages/model_doc/bert-generation.mdx-eb6fdeef.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/bigbird_pegasus.mdx-ad01f788.js"),["pages/model_doc/bigbird_pegasus.mdx-ad01f788.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/encoder-decoder.mdx-c8393a8f.js"),["pages/model_doc/encoder-decoder.mdx-c8393a8f.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/speech_to_text.mdx-35ea0919.js"),["pages/model_doc/speech_to_text.mdx-35ea0919.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/xlm-prophetnet.mdx-97382af9.js"),["pages/model_doc/xlm-prophetnet.mdx-97382af9.js","chunks/vendor-4833417e.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/xlm-roberta-xl.mdx-480f1600.js"),["pages/model_doc/xlm-roberta-xl.mdx-480f1600.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/bert-japanese.mdx-b3af231a.js"),["pages/model_doc/bert-japanese.mdx-b3af231a.js","chunks/vendor-4833417e.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/megatron-bert.mdx-d23a647f.js"),["pages/model_doc/megatron-bert.mdx-d23a647f.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/megatron_gpt2.mdx-da7be0c9.js"),["pages/model_doc/megatron_gpt2.mdx-da7be0c9.js","chunks/vendor-4833417e.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/nystromformer.mdx-a520f5f1.js"),["pages/model_doc/nystromformer.mdx-a520f5f1.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/unispeech-sat.mdx-33d28e6f.js"),["pages/model_doc/unispeech-sat.mdx-33d28e6f.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/xlsr_wav2vec2.mdx-7f
80d6d2.js"),["pages/model_doc/xlsr_wav2vec2.mdx-7f80d6d2.js","chunks/vendor-4833417e.js","chunks/IconCopyLink-4b81c553.js"]),()=>i(()=>import("./pages/model_doc/squeezebert.mdx-a1f791b4.js"),["pages/model_doc/squeezebert.mdx-a1f791b4.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/visual_bert.mdx-56bcaade.js"),["pages/model_doc/visual_bert.mdx-56bcaade.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/xlm-roberta.mdx-d3abcebe.js"),["pages/model_doc/xlm-roberta.mdx-d3abcebe.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/blenderbot.mdx-2f4c855d.js"),["pages/model_doc/blenderbot.mdx-2f4c855d.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/deberta-v2.mdx-e77d92af.js"),["pages/model_doc/deberta-v2.mdx-e77d92af.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/distilbert.mdx-6a7e081a.js"),["pages/model_doc/distilbert.mdx-6a7e081a.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/layoutlmv2.mdx-fb521e03.js"),["pages/model_doc/layoutlmv2.mdx-fb521e03.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/longformer.mdx-94451db4.js"),["pages/model_doc/longformer.mdx-94451db4.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/maskformer.mdx-07585051.js"),["pages/model_doc/maskformer.mdx-07585051.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/mobilebert.mdx-194d75d4.js"),["pages/model_doc/mobilebert.mdx-194d75d4.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/openai-gpt.mdx-55c17d26.js"),["pages/model_doc/openai-gpt.mdx-55c17d26.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/poolformer.mdx-9f52b149.js"),["pages/model_doc/poolformer.mdx-9f52b149.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9
.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/prophetnet.mdx-8afd09ba.js"),["pages/model_doc/prophetnet.mdx-8afd09ba.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/transfo-xl.mdx-64ec66da.js"),["pages/model_doc/transfo-xl.mdx-64ec66da.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/camembert.mdx-90807659.js"),["pages/model_doc/camembert.mdx-90807659.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js"]),()=>i(()=>import("./pages/model_doc/layoutxlm.mdx-9404196f.js"),["pages/model_doc/layoutxlm.mdx-9404196f.js","chunks/vendor-4833417e.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/perceiver.mdx-6efe9939.js"),["pages/model_doc/perceiver.mdx-6efe9939.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/retribert.mdx-3029d10f.js"),["pages/model_doc/retribert.mdx-3029d10f.js","chunks/vendor-4833417e.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js"]),()=>i(()=>import("./pages/model_doc/segformer.mdx-50f6b8f8.js"),["pages/model_doc/segformer.mdx-50f6b8f8.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/unispeech.mdx-dcd6680a.js"),["pages/model_doc/unispeech.mdx-dcd6680a.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/bertweet.mdx-fda755db.js"),["pages/model_doc/bertweet.mdx-fda755db.js","chunks/vendor-4833417e.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/big_bird.mdx-b3c9bc9a.js"),["pages/model_doc/big_bird.mdx-b3c9bc9a.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/convbert.mdx-b107085e.js"),["pages/model_doc/convbert.mdx-b107085e.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/convnext.mdx-f632ad47.js"),["pages/model_doc/convnext.mdx-f632ad47.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/data2vec.mdx-72db0e89.js"),["pages/model_doc/data2vec.mdx-72db0e89.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6
df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/dialogpt.mdx-fc003640.js"),["pages/model_doc/dialogpt.mdx-fc003640.js","chunks/vendor-4833417e.js","chunks/IconCopyLink-4b81c553.js"]),()=>i(()=>import("./pages/model_doc/flaubert.mdx-04b8963f.js"),["pages/model_doc/flaubert.mdx-04b8963f.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/imagegpt.mdx-5a1cdb73.js"),["pages/model_doc/imagegpt.mdx-5a1cdb73.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/layoutlm.mdx-2ae0b366.js"),["pages/model_doc/layoutlm.mdx-2ae0b366.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/reformer.mdx-15f80e57.js"),["pages/model_doc/reformer.mdx-15f80e57.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/roformer.mdx-0f80020b.js"),["pages/model_doc/roformer.mdx-0f80020b.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/splinter.mdx-c24b4ae5.js"),["pages/model_doc/splinter.mdx-c24b4ae5.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/wav2vec2.mdx-30b5c1d2.js"),["pages/model_doc/wav2vec2.mdx-30b5c1d2.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/barthez.mdx-a610f6fe.js"),["pages/model_doc/barthez.mdx-a610f6fe.js","chunks/vendor-4833417e.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js"]),()=>i(()=>import("./pages/model_doc/bartpho.mdx-039ff953.js"),["pages/model_doc/bartpho.mdx-039ff953.js","chunks/vendor-4833417e.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/deberta.mdx-e5952704.js"),["pages/model_doc/deberta.mdx-e5952704.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/electra.mdx-1a5b84b2.js"),["pages/model_doc/electra.mdx-1a5b84b2.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/gpt_neo.mdx-852dcbc0.js"),["pages/model_doc/gpt_neo.mdx-852dcbc0.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4
f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/herbert.mdx-01a55eeb.js"),["pages/model_doc/herbert.mdx-01a55eeb.js","chunks/vendor-4833417e.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/m2m_100.mdx-9de67568.js"),["pages/model_doc/m2m_100.mdx-9de67568.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/pegasus.mdx-a33c23d3.js"),["pages/model_doc/pegasus.mdx-a33c23d3.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/phobert.mdx-4a24b305.js"),["pages/model_doc/phobert.mdx-4a24b305.js","chunks/vendor-4833417e.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/qdqbert.mdx-19d93d0d.js"),["pages/model_doc/qdqbert.mdx-19d93d0d.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/rembert.mdx-8524b264.js"),["pages/model_doc/rembert.mdx-8524b264.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/roberta.mdx-002140a3.js"),["pages/model_doc/roberta.mdx-002140a3.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/vit_mae.mdx-eac363aa.js"),["pages/model_doc/vit_mae.mdx-eac363aa.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/albert.mdx-077766e3.js"),["pages/model_doc/albert.mdx-077766e3.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/canine.mdx-9959cf94.js"),["pages/model_doc/canine.mdx-9959cf94.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/funnel.mdx-0bc15d28.js"),["pages/model_doc/funnel.mdx-0bc15d28.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/hubert.mdx-3dda35f6.js"),["pages/model_doc/hubert.mdx-3dda35f6.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/lxmert.mdx
-e3257043.js"),["pages/model_doc/lxmert.mdx-e3257043.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/marian.mdx-352a73d2.js"),["pages/model_doc/marian.mdx-352a73d2.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/plbart.mdx-627ee826.js"),["pages/model_doc/plbart.mdx-627ee826.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/t5v1.1.mdx-bd30f4e4.js"),["pages/model_doc/t5v1.1.mdx-bd30f4e4.js","chunks/vendor-4833417e.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/ibert.mdx-6980b055.js"),["pages/model_doc/ibert.mdx-6980b055.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/mbart.mdx-4e986a75.js"),["pages/model_doc/mbart.mdx-4e986a75.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/mluke.mdx-0a6a8f21.js"),["pages/model_doc/mluke.mdx-0a6a8f21.js","chunks/vendor-4833417e.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/mpnet.mdx-a2ac5620.js"),["pages/model_doc/mpnet.mdx-a2ac5620.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/realm.mdx-d3389e24.js"),["pages/model_doc/realm.mdx-d3389e24.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/sew-d.mdx-7aaffe1b.js"),["pages/model_doc/sew-d.mdx-7aaffe1b.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/tapas.mdx-8e33d727.js"),["pages/model_doc/tapas.mdx-8e33d727.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js","chunks/CodeBlockFw-27a176a0.js"]),()=>i(()=>import("./pages/model_doc/trocr.mdx-6100bb6b.js"),["pages/model_doc/trocr.mdx-6100bb6b.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/wavlm.mdx-fc6fcdac.js"),["pages/model_doc/wavlm.mdx-fc6fcdac.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBl
ock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/xlnet.mdx-8e96e680.js"),["pages/model_doc/xlnet.mdx-8e96e680.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/xls_r.mdx-c4ead8f3.js"),["pages/model_doc/xls_r.mdx-c4ead8f3.js","chunks/vendor-4833417e.js","chunks/IconCopyLink-4b81c553.js"]),()=>i(()=>import("./pages/model_doc/auto.mdx-47c818f3.js"),["pages/model_doc/auto.mdx-47c818f3.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/bart.mdx-0957e0a7.js"),["pages/model_doc/bart.mdx-0957e0a7.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/beit.mdx-80b0b9ff.js"),["pages/model_doc/beit.mdx-80b0b9ff.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/bert.mdx-384687c8.js"),["pages/model_doc/bert.mdx-384687c8.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/bort.mdx-af290f70.js"),["pages/model_doc/bort.mdx-af290f70.js","chunks/vendor-4833417e.js","chunks/IconCopyLink-4b81c553.js"]),()=>i(()=>import("./pages/model_doc/byt5.mdx-e9ed0683.js"),["pages/model_doc/byt5.mdx-e9ed0683.js","chunks/vendor-4833417e.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/clip.mdx-b328d557.js"),["pages/model_doc/clip.mdx-b328d557.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/ctrl.mdx-9c3e0c29.js"),["pages/model_doc/ctrl.mdx-9c3e0c29.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/deit.mdx-eff820cf.js"),["pages/model_doc/deit.mdx-eff820cf.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/detr.mdx-50053350.js"),["pages/model_doc/detr.mdx-50053350.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/fnet.mdx-45669f92.js"),["pages/model_doc/fnet.mdx-45669f92.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/fsmt.mdx-34675b34.js"),["pages/model_doc/fsmt.mdx-34675b34.js
","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/gpt2.mdx-620e835d.js"),["pages/model_doc/gpt2.mdx-620e835d.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/gptj.mdx-a69ba9a2.js"),["pages/model_doc/gptj.mdx-a69ba9a2.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/luke.mdx-33abb078.js"),["pages/model_doc/luke.mdx-33abb078.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/swin.mdx-e4cc30a1.js"),["pages/model_doc/swin.mdx-e4cc30a1.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/vilt.mdx-7817df10.js"),["pages/model_doc/vilt.mdx-7817df10.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/xglm.mdx-b5ed5977.js"),["pages/model_doc/xglm.mdx-b5ed5977.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/yoso.mdx-73a9322a.js"),["pages/model_doc/yoso.mdx-73a9322a.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/cpm.mdx-a271ebe3.js"),["pages/model_doc/cpm.mdx-a271ebe3.js","chunks/vendor-4833417e.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js"]),()=>i(()=>import("./pages/model_doc/dit.mdx-28d290cd.js"),["pages/model_doc/dit.mdx-28d290cd.js","chunks/vendor-4833417e.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/dpr.mdx-c851abb6.js"),["pages/model_doc/dpr.mdx-c851abb6.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/led.mdx-d4a9a769.js"),["pages/model_doc/led.mdx-d4a9a769.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/mt5.mdx-4373368c.js"),["pages/model_doc/mt5.mdx-4373368c.js","chunks/vendor-4833417e.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/rag.mdx-2450ac40.js"),["pages/model_doc/rag.mdx-2450ac40.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstri
ng-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/sew.mdx-a39f8b59.js"),["pages/model_doc/sew.mdx-a39f8b59.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/vit.mdx-555822b3.js"),["pages/model_doc/vit.mdx-555822b3.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/xlm.mdx-b523781d.js"),["pages/model_doc/xlm.mdx-b523781d.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/model_doc/t5.mdx-84f497d3.js"),["pages/model_doc/t5.mdx-84f497d3.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/notebooks.mdx-229c9f2b.js"),["pages/notebooks.mdx-229c9f2b.js","chunks/vendor-4833417e.js","chunks/IconCopyLink-4b81c553.js"]),()=>i(()=>import("./pages/pr_checks.mdx-69a387c3.js"),["pages/pr_checks.mdx-69a387c3.js","chunks/vendor-4833417e.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/quicktour.mdx-8a4295b9.js"),["pages/quicktour.mdx-8a4295b9.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Youtube-27813aed.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js","chunks/CodeBlockFw-27a176a0.js","chunks/DocNotebookDropdown-ecff2a90.js"]),()=>i(()=>import("./pages/sagemaker.mdx-d221e67e.js"),["pages/sagemaker.mdx-d221e67e.js","chunks/vendor-4833417e.js","chunks/IconCopyLink-4b81c553.js"]),()=>i(()=>import("./pages/glossary.mdx-04c6e6d1.js"),["pages/glossary.mdx-04c6e6d1.js","chunks/vendor-4833417e.js","chunks/Youtube-27813aed.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/internal/tokenization_utils.mdx-eeb10b9c.js"),["pages/internal/tokenization_utils.mdx-eeb10b9c.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/internal/generation_utils.mdx-8b17d2b2.js"),["pages/internal/generation_utils.mdx-8b17d2b2.js","chunks/vendor-4833417e.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/internal/pipelines_utils.mdx-885320a3.js"),["pages/internal/pipelines_utils.mdx-885320a3.js","chunks/vendor-4833417e.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js"]),()=>i(()=>import("./pages/internal/modeling_utils.mdx-1b04e493.js"),["pages/internal/modeling_utils.mdx-1b04e493.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/internal/trainer_utils.mdx-b7a1b196.js"),["pages/internal/trainer_utils.mdx-b7a1b196.j
s","chunks/vendor-4833417e.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/internal/file_utils.mdx-23310578.js"),["pages/internal/file_utils.mdx-23310578.js","chunks/vendor-4833417e.js","chunks/Docstring-4f315ed9.js","chunks/IconCopyLink-4b81c553.js"]),()=>i(()=>import("./pages/training.mdx-378a8a6f.js"),["pages/training.mdx-378a8a6f.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Youtube-27813aed.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js","chunks/DocNotebookDropdown-ecff2a90.js"]),()=>i(()=>import("./pages/testing.mdx-614baedb.js"),["pages/testing.mdx-614baedb.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/tasks/sequence_classification.mdx-d07399b9.js"),["pages/tasks/sequence_classification.mdx-d07399b9.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Youtube-27813aed.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js","chunks/CodeBlockFw-27a176a0.js"]),()=>i(()=>import("./pages/tasks/audio_classification.mdx-cab2e6ac.js"),["pages/tasks/audio_classification.mdx-cab2e6ac.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Youtube-27813aed.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/tasks/image_classification.mdx-e10d3b71.js"),["pages/tasks/image_classification.mdx-e10d3b71.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Youtube-27813aed.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"]),()=>i(()=>import("./pages/tasks/token_classification.mdx-5446e455.js"),["pages/tasks/token_classification.mdx-5446e455.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Youtube-27813aed.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js","chunks/CodeBlockFw-27a176a0.js"]),()=>i(()=>import("./pages/tasks/question_answering.mdx-8babb2cc.js"),["pages/tasks/question_answering.mdx-8babb2cc.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Youtube-27813aed.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js","chunks/CodeBlockFw-27a176a0.js"]),()=>i(()=>import("./pages/tasks/language_modeling.mdx-e8a233ab.js"),["pages/tasks/language_modeling.mdx-e8a233ab.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Youtube-27813aed.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js","chunks/CodeBlockFw-27a176a0.js"]),()=>i(()=>import("./pages/tasks/multiple_choice.mdx-7c0ab1b6.js"),["pages/tasks/multiple_choice.mdx-7c0ab1b6.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js","chunks/CodeBlockFw-27a176a0.js"]),()=>i(()=>import("./pages/tasks/summarization.mdx-34e69920.js"),["pages/tasks/summarization.mdx-34e69920.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Youtube-27813aed.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js","chunks/CodeBlockFw-27a176a0.js"]),()=>i(()=>import("./pages/tasks/translation.mdx-9674fec0.js"),["pages/t
asks/translation.mdx-9674fec0.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Youtube-27813aed.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js","chunks/CodeBlockFw-27a176a0.js"]),()=>i(()=>import("./pages/tasks/asr.mdx-b010f320.js"),["pages/tasks/asr.mdx-b010f320.js","chunks/vendor-4833417e.js","chunks/Tip-fffd6df1.js","chunks/Youtube-27813aed.js","chunks/IconCopyLink-4b81c553.js","chunks/CodeBlock-6a3d1b46.js","chunks/CopyButton-dacfbfaf.js"])],Je=[[/^\/$/,[e[0],e[2]],[e[1]]],[/^\/converting_tensorflow_models\/?$/,[e[0],e[3]],[e[1]]],[/^\/autoclass_tutorial\/?$/,[e[0],e[4]],[e[1]]],[/^\/pipeline_tutorial\/?$/,[e[0],e[5]],[e[1]]],[/^\/tokenizer_summary\/?$/,[e[0],e[6]],[e[1]]],[/^\/add_new_pipeline\/?$/,[e[0],e[7]],[e[1]]],[/^\/custom_datasets\/?$/,[e[0],e[8]],[e[1]]],[/^\/fast_tokenizers\/?$/,[e[0],e[9]],[e[1]]],[/^\/troubleshooting\/?$/,[e[0],e[10]],[e[1]]],[/^\/create_a_model\/?$/,[e[0],e[11]],[e[1]]],[/^\/add_new_model\/?$/,[e[0],e[12]],[e[1]]],[/^\/custom_models\/?$/,[e[0],e[13]],[e[1]]],[/^\/model_sharing\/?$/,[e[0],e[14]],[e[1]]],[/^\/model_summary\/?$/,[e[0],e[15]],[e[1]]],[/^\/preprocessing\/?$/,[e[0],e[16]],[e[1]]],[/^\/serialization\/?$/,[e[0],e[17]],[e[1]]],[/^\/contributing\/?$/,[e[0],e[18]],[e[1]]],[/^\/installation\/?$/,[e[0],e[19]],[e[1]]],[/^\/main_classes\/optimizer_schedules\/?$/,[e[0],e[20]],[e[1]]],[/^\/main_classes\/feature_extractor\/?$/,[e[0],e[21]],[e[1]]],[/^\/main_classes\/keras_callbacks\/?$/,[e[0],e[22]],[e[1]]],[/^\/main_classes\/text_generation\/?$/,[e[0],e[23]],[e[1]]],[/^\/main_classes\/configuration\/?$/,[e[0],e[24]],[e[1]]],[/^\/main_classes\/data_collator\/?$/,[e[0],e[25]],[e[1]]],[/^\/main_classes\/processors\/?$/,[e[0],e[26]],[e[1]]],[/^\/main_classes\/deepspeed\/?$/,[e[0],e[27]],[e[1]]],[/^\/main_classes\/pipelines\/?$/,[e[0],e[28]],[e[1]]],[/^\/main_classes\/tokenizer\/?$/,[e[0],e[29]],[e[1]]],[/^\/main_classes\/callback\/?$/,[e[0],e[30]],[e[1]]],[/^\/main_classes\/logging\/?$/,[e[0],e[31]],[e[1]]],[/^\/main_classes\/trainer\/?$/,[e[0],e[32]],[e[1]]],[/^\/main_classes\/output\/?$/,[e[0],e[33]],[e[1]]],[/^\/main_classes\/model\/?$/,[e[0],e[34]],[e[1]]],[/^\/main_classes\/onnx\/?$/,[e[0],e[35]],[e[1]]],[/^\/multilingual\/?$/,[e[0],e[36]],[e[1]]],[/^\/task_summary\/?$/,[e[0],e[37]],[e[1]]],[/^\/parallelism\/?$/,[e[0],e[38]],[e[1]]],[/^\/performance\/?$/,[e[0],e[39]],[e[1]]],[/^\/run_scripts\/?$/,[e[0],e[40]],[e[1]]],[/^\/accelerate\/?$/,[e[0],e[41]],[e[1]]],[/^\/benchmarks\/?$/,[e[0],e[42]],[e[1]]],[/^\/perplexity\/?$/,[e[0],e[43]],[e[1]]],[/^\/philosophy\/?$/,[e[0],e[44]],[e[1]]],[/^\/bertology\/?$/,[e[0],e[45]],[e[1]]],[/^\/community\/?$/,[e[0],e[46]],[e[1]]],[/^\/debugging\/?$/,[e[0],e[47]],[e[1]]],[/^\/migration\/?$/,[e[0],e[48]],[e[1]]],[/^\/model_doc\/vision-text-dual-encoder\/?$/,[e[0],e[49]],[e[1]]],[/^\/model_doc\/speech-encoder-decoder\/?$/,[e[0],e[50]],[e[1]]],[/^\/model_doc\/vision-encoder-decoder\/?$/,[e[0],e[51]],[e[1]]],[/^\/model_doc\/blenderbot-small\/?$/,[e[0],e[52]],[e[1]]],[/^\/model_doc\/speech_to_text_2\/?$/,[e[0],e[53]],[e[1]]],[/^\/model_doc\/wav2vec2_phoneme\/?$/,[e[0],e[54]],[e[1]]],[/^\/model_doc\/bert-generation\/?$/,[e[0],e[55]],[e[1]]],[/^\/model_doc\/bigbird_pegasus\/?$/,[e[0],e[56]],[e[1]]],[/^\/model_doc\/encoder-decoder\/?$/,[e[0],e[57]],[e[1]]],[/^\/model_doc\/speech_to_text\/?$/,[e[0],e[58]],[e[1]]],[/^\/model_doc\/xlm-prophetnet\/?$/,[e[0],e[59]],[e[1]]],[/^\/model_doc\/xlm-roberta-xl\/?$/,[e[0],e[60]],[e[1]]],[/^\/model_doc\/bert-japanese\/?$/,[e[0]
,e[61]],[e[1]]],[/^\/model_doc\/megatron-bert\/?$/,[e[0],e[62]],[e[1]]],[/^\/model_doc\/megatron_gpt2\/?$/,[e[0],e[63]],[e[1]]],[/^\/model_doc\/nystromformer\/?$/,[e[0],e[64]],[e[1]]],[/^\/model_doc\/unispeech-sat\/?$/,[e[0],e[65]],[e[1]]],[/^\/model_doc\/xlsr_wav2vec2\/?$/,[e[0],e[66]],[e[1]]],[/^\/model_doc\/squeezebert\/?$/,[e[0],e[67]],[e[1]]],[/^\/model_doc\/visual_bert\/?$/,[e[0],e[68]],[e[1]]],[/^\/model_doc\/xlm-roberta\/?$/,[e[0],e[69]],[e[1]]],[/^\/model_doc\/blenderbot\/?$/,[e[0],e[70]],[e[1]]],[/^\/model_doc\/deberta-v2\/?$/,[e[0],e[71]],[e[1]]],[/^\/model_doc\/distilbert\/?$/,[e[0],e[72]],[e[1]]],[/^\/model_doc\/layoutlmv2\/?$/,[e[0],e[73]],[e[1]]],[/^\/model_doc\/longformer\/?$/,[e[0],e[74]],[e[1]]],[/^\/model_doc\/maskformer\/?$/,[e[0],e[75]],[e[1]]],[/^\/model_doc\/mobilebert\/?$/,[e[0],e[76]],[e[1]]],[/^\/model_doc\/openai-gpt\/?$/,[e[0],e[77]],[e[1]]],[/^\/model_doc\/poolformer\/?$/,[e[0],e[78]],[e[1]]],[/^\/model_doc\/prophetnet\/?$/,[e[0],e[79]],[e[1]]],[/^\/model_doc\/transfo-xl\/?$/,[e[0],e[80]],[e[1]]],[/^\/model_doc\/camembert\/?$/,[e[0],e[81]],[e[1]]],[/^\/model_doc\/layoutxlm\/?$/,[e[0],e[82]],[e[1]]],[/^\/model_doc\/perceiver\/?$/,[e[0],e[83]],[e[1]]],[/^\/model_doc\/retribert\/?$/,[e[0],e[84]],[e[1]]],[/^\/model_doc\/segformer\/?$/,[e[0],e[85]],[e[1]]],[/^\/model_doc\/unispeech\/?$/,[e[0],e[86]],[e[1]]],[/^\/model_doc\/bertweet\/?$/,[e[0],e[87]],[e[1]]],[/^\/model_doc\/big_bird\/?$/,[e[0],e[88]],[e[1]]],[/^\/model_doc\/convbert\/?$/,[e[0],e[89]],[e[1]]],[/^\/model_doc\/convnext\/?$/,[e[0],e[90]],[e[1]]],[/^\/model_doc\/data2vec\/?$/,[e[0],e[91]],[e[1]]],[/^\/model_doc\/dialogpt\/?$/,[e[0],e[92]],[e[1]]],[/^\/model_doc\/flaubert\/?$/,[e[0],e[93]],[e[1]]],[/^\/model_doc\/imagegpt\/?$/,[e[0],e[94]],[e[1]]],[/^\/model_doc\/layoutlm\/?$/,[e[0],e[95]],[e[1]]],[/^\/model_doc\/reformer\/?$/,[e[0],e[96]],[e[1]]],[/^\/model_doc\/roformer\/?$/,[e[0],e[97]],[e[1]]],[/^\/model_doc\/splinter\/?$/,[e[0],e[98]],[e[1]]],[/^\/model_doc\/wav2vec2\/?$/,[e[0],e[99]],[e[1]]],[/^\/model_doc\/barthez\/?$/,[e[0],e[100]],[e[1]]],[/^\/model_doc\/bartpho\/?$/,[e[0],e[101]],[e[1]]],[/^\/model_doc\/deberta\/?$/,[e[0],e[102]],[e[1]]],[/^\/model_doc\/electra\/?$/,[e[0],e[103]],[e[1]]],[/^\/model_doc\/gpt_neo\/?$/,[e[0],e[104]],[e[1]]],[/^\/model_doc\/herbert\/?$/,[e[0],e[105]],[e[1]]],[/^\/model_doc\/m2m_100\/?$/,[e[0],e[106]],[e[1]]],[/^\/model_doc\/pegasus\/?$/,[e[0],e[107]],[e[1]]],[/^\/model_doc\/phobert\/?$/,[e[0],e[108]],[e[1]]],[/^\/model_doc\/qdqbert\/?$/,[e[0],e[109]],[e[1]]],[/^\/model_doc\/rembert\/?$/,[e[0],e[110]],[e[1]]],[/^\/model_doc\/roberta\/?$/,[e[0],e[111]],[e[1]]],[/^\/model_doc\/vit_mae\/?$/,[e[0],e[112]],[e[1]]],[/^\/model_doc\/albert\/?$/,[e[0],e[113]],[e[1]]],[/^\/model_doc\/canine\/?$/,[e[0],e[114]],[e[1]]],[/^\/model_doc\/funnel\/?$/,[e[0],e[115]],[e[1]]],[/^\/model_doc\/hubert\/?$/,[e[0],e[116]],[e[1]]],[/^\/model_doc\/lxmert\/?$/,[e[0],e[117]],[e[1]]],[/^\/model_doc\/marian\/?$/,[e[0],e[118]],[e[1]]],[/^\/model_doc\/plbart\/?$/,[e[0],e[119]],[e[1]]],[/^\/model_doc\/t5v1\.1\/?$/,[e[0],e[120]],[e[1]]],[/^\/model_doc\/ibert\/?$/,[e[0],e[121]],[e[1]]],[/^\/model_doc\/mbart\/?$/,[e[0],e[122]],[e[1]]],[/^\/model_doc\/mluke\/?$/,[e[0],e[123]],[e[1]]],[/^\/model_doc\/mpnet\/?$/,[e[0],e[124]],[e[1]]],[/^\/model_doc\/realm\/?$/,[e[0],e[125]],[e[1]]],[/^\/model_doc\/sew-d\/?$/,[e[0],e[126]],[e[1]]],[/^\/model_doc\/tapas\/?$/,[e[0],e[127]],[e[1]]],[/^\/model_doc\/trocr\/?$/,[e[0],e[128]],[e[1]]],[/^\/model_doc\/wavlm\/?$/,[e[0],e[129]],[e[1]]],[/^\/model_doc\/xlnet\/?$/,[e[0],e
[130]],[e[1]]],[/^\/model_doc\/xls_r\/?$/,[e[0],e[131]],[e[1]]],[/^\/model_doc\/auto\/?$/,[e[0],e[132]],[e[1]]],[/^\/model_doc\/bart\/?$/,[e[0],e[133]],[e[1]]],[/^\/model_doc\/beit\/?$/,[e[0],e[134]],[e[1]]],[/^\/model_doc\/bert\/?$/,[e[0],e[135]],[e[1]]],[/^\/model_doc\/bort\/?$/,[e[0],e[136]],[e[1]]],[/^\/model_doc\/byt5\/?$/,[e[0],e[137]],[e[1]]],[/^\/model_doc\/clip\/?$/,[e[0],e[138]],[e[1]]],[/^\/model_doc\/ctrl\/?$/,[e[0],e[139]],[e[1]]],[/^\/model_doc\/deit\/?$/,[e[0],e[140]],[e[1]]],[/^\/model_doc\/detr\/?$/,[e[0],e[141]],[e[1]]],[/^\/model_doc\/fnet\/?$/,[e[0],e[142]],[e[1]]],[/^\/model_doc\/fsmt\/?$/,[e[0],e[143]],[e[1]]],[/^\/model_doc\/gpt2\/?$/,[e[0],e[144]],[e[1]]],[/^\/model_doc\/gptj\/?$/,[e[0],e[145]],[e[1]]],[/^\/model_doc\/luke\/?$/,[e[0],e[146]],[e[1]]],[/^\/model_doc\/swin\/?$/,[e[0],e[147]],[e[1]]],[/^\/model_doc\/vilt\/?$/,[e[0],e[148]],[e[1]]],[/^\/model_doc\/xglm\/?$/,[e[0],e[149]],[e[1]]],[/^\/model_doc\/yoso\/?$/,[e[0],e[150]],[e[1]]],[/^\/model_doc\/cpm\/?$/,[e[0],e[151]],[e[1]]],[/^\/model_doc\/dit\/?$/,[e[0],e[152]],[e[1]]],[/^\/model_doc\/dpr\/?$/,[e[0],e[153]],[e[1]]],[/^\/model_doc\/led\/?$/,[e[0],e[154]],[e[1]]],[/^\/model_doc\/mt5\/?$/,[e[0],e[155]],[e[1]]],[/^\/model_doc\/rag\/?$/,[e[0],e[156]],[e[1]]],[/^\/model_doc\/sew\/?$/,[e[0],e[157]],[e[1]]],[/^\/model_doc\/vit\/?$/,[e[0],e[158]],[e[1]]],[/^\/model_doc\/xlm\/?$/,[e[0],e[159]],[e[1]]],[/^\/model_doc\/t5\/?$/,[e[0],e[160]],[e[1]]],[/^\/notebooks\/?$/,[e[0],e[161]],[e[1]]],[/^\/pr_checks\/?$/,[e[0],e[162]],[e[1]]],[/^\/quicktour\/?$/,[e[0],e[163]],[e[1]]],[/^\/sagemaker\/?$/,[e[0],e[164]],[e[1]]],[/^\/glossary\/?$/,[e[0],e[165]],[e[1]]],[/^\/internal\/tokenization_utils\/?$/,[e[0],e[166]],[e[1]]],[/^\/internal\/generation_utils\/?$/,[e[0],e[167]],[e[1]]],[/^\/internal\/pipelines_utils\/?$/,[e[0],e[168]],[e[1]]],[/^\/internal\/modeling_utils\/?$/,[e[0],e[169]],[e[1]]],[/^\/internal\/trainer_utils\/?$/,[e[0],e[170]],[e[1]]],[/^\/internal\/file_utils\/?$/,[e[0],e[171]],[e[1]]],[/^\/training\/?$/,[e[0],e[172]],[e[1]]],[/^\/testing\/?$/,[e[0],e[173]],[e[1]]],[/^\/tasks\/sequence_classification\/?$/,[e[0],e[174]],[e[1]]],[/^\/tasks\/audio_classification\/?$/,[e[0],e[175]],[e[1]]],[/^\/tasks\/image_classification\/?$/,[e[0],e[176]],[e[1]]],[/^\/tasks\/token_classification\/?$/,[e[0],e[177]],[e[1]]],[/^\/tasks\/question_answering\/?$/,[e[0],e[178]],[e[1]]],[/^\/tasks\/language_modeling\/?$/,[e[0],e[179]],[e[1]]],[/^\/tasks\/multiple_choice\/?$/,[e[0],e[180]],[e[1]]],[/^\/tasks\/summarization\/?$/,[e[0],e[181]],[e[1]]],[/^\/tasks\/translation\/?$/,[e[0],e[182]],[e[1]]],[/^\/tasks\/asr\/?$/,[e[0],e[183]],[e[1]]]],Ke=[e[0](),e[1]()];function Be(a){let t=a.baseURI;if(!t){const r=a.getElementsByTagName("base");t=r.length?r[0].href:a.URL}return t}function We(a,t){return a==="/"||t==="ignore"?a:t==="never"?a.endsWith("/")?a.slice(0,-1):a:t==="always"&&/\/[^./]+$/.test(a)?a+"/":a}const ue="sveltekit:scroll";let N={};try{N=JSON.parse(sessionStorage[ue])}catch{}function M(a){N[a]=X()}function X(){return{x:pageXOffset,y:pageYOffset}}function ne(a){return a.composedPath().find(r=>r instanceof Node&&r.nodeName.toUpperCase()==="A")}function ae(a){return a instanceof SVGAElement?new URL(a.href.baseVal,document.baseURI):new URL(a.href)}class Ye{constructor({base:t,routes:r,trailing_slash:_,renderer:l}){var 
d,o;this.base=t,this.routes=r,this.trailing_slash=_,this.navigating=0,this.renderer=l,l.router=this,this.enabled=!0,this.initialized=!1,document.body.setAttribute("tabindex","-1"),this.current_history_index=(o=(d=history.state)==null?void 0:d["sveltekit:index"])!=null?o:0,this.current_history_index===0&&history.replaceState(W($({},history.state),{"sveltekit:index":0}),"",location.href);const s=N[this.current_history_index];s&&scrollTo(s.x,s.y),this.hash_navigating=!1,this.callbacks={before_navigate:[],after_navigate:[]}}init_listeners(){history.scrollRestoration="manual",addEventListener("beforeunload",l=>{let s=!1;const d={from:this.renderer.current.url,to:null,cancel:()=>s=!0};this.callbacks.before_navigate.forEach(o=>o(d)),s?(l.preventDefault(),l.returnValue=""):history.scrollRestoration="auto"}),addEventListener("visibilitychange",()=>{if(document.visibilityState==="hidden"){M(this.current_history_index);try{sessionStorage[ue]=JSON.stringify(N)}catch{}}});const t=l=>{const s=ne(l);s&&s.href&&s.hasAttribute("sveltekit:prefetch")&&this.prefetch(ae(s))};let r;const _=l=>{clearTimeout(r),r=setTimeout(()=>{var s;(s=l.target)==null||s.dispatchEvent(new CustomEvent("sveltekit:trigger_prefetch",{bubbles:!0}))},20)};addEventListener("touchstart",t),addEventListener("mousemove",_),addEventListener("sveltekit:trigger_prefetch",t),addEventListener("click",l=>{if(!this.enabled||l.button||l.which!==1||l.metaKey||l.ctrlKey||l.shiftKey||l.altKey||l.defaultPrevented)return;const s=ne(l);if(!s||!s.href)return;const d=s instanceof SVGAElement,o=ae(s);if(o.toString()===location.href){location.hash||l.preventDefault();return}if(!d&&o.origin==="null")return;const c=(s.getAttribute("rel")||"").split(/\s+/);if(s.hasAttribute("download")||c&&c.includes("external")||(d?s.target.baseVal:s.target))return;const[u,f]=o.href.split("#");if(f!==void 0&&u===location.href.split("#")[0]){this.hash_navigating=!0,M(this.current_history_index),this.renderer.update_page_store(new URL(o.href));return}this._navigate({url:o,scroll:s.hasAttribute("sveltekit:noscroll")?X():null,keepfocus:!1,chain:[],details:{state:{},replaceState:!1},accepted:()=>l.preventDefault(),blocked:()=>l.preventDefault()})}),addEventListener("popstate",l=>{if(l.state&&this.enabled){if(l.state["sveltekit:index"]===this.current_history_index)return;this._navigate({url:new URL(location.href),scroll:N[l.state["sveltekit:index"]],keepfocus:!1,chain:[],details:null,accepted:()=>{this.current_history_index=l.state["sveltekit:index"]},blocked:()=>{const s=this.current_history_index-l.state["sveltekit:index"];history.go(s)}})}}),addEventListener("hashchange",()=>{this.hash_navigating&&(this.hash_navigating=!1,history.replaceState(W($({},history.state),{"sveltekit:index":++this.current_history_index}),"",location.href))}),this.initialized=!0}owns(t){return t.origin===location.origin&&t.pathname.startsWith(this.base)}parse(t){if(this.owns(t)){const r=decodeURI(t.pathname.slice(this.base.length)||"/");return{id:t.pathname+t.search,routes:this.routes.filter(([_])=>_.test(r)),url:t,path:r,initial:!this.initialized}}}async goto(t,{noscroll:r=!1,replaceState:_=!1,keepfocus:l=!1,state:s={}}={},d){const o=new URL(t,Be(document));return this.enabled?this._navigate({url:o,scroll:r?X():null,keepfocus:l,chain:d,details:{state:s,replaceState:_},accepted:()=>{},blocked:()=>{}}):(location.href=o.href,new Promise(()=>{}))}enable(){this.enabled=!0}disable(){this.enabled=!1}async prefetch(t){const r=this.parse(t);if(!r)throw new Error("Attempted to prefetch a URL that does not 
belong to this app");return this.renderer.load(r)}after_navigate(t){F(()=>(this.callbacks.after_navigate.push(t),()=>{const r=this.callbacks.after_navigate.indexOf(t);this.callbacks.after_navigate.splice(r,1)}))}before_navigate(t){F(()=>(this.callbacks.before_navigate.push(t),()=>{const r=this.callbacks.before_navigate.indexOf(t);this.callbacks.before_navigate.splice(r,1)}))}async _navigate({url:t,scroll:r,keepfocus:_,chain:l,details:s,accepted:d,blocked:o}){const n=this.renderer.current.url;let c=!1;const u={from:n,to:t,cancel:()=>c=!0};if(this.callbacks.before_navigate.forEach(h=>h(u)),c){o();return}const f=this.parse(t);if(!f)return location.href=t.href,new Promise(()=>{});M(this.current_history_index),d(),this.navigating++;const m=We(t.pathname,this.trailing_slash);f.url=new URL(t.origin+m+t.search+t.hash);const p=this.navigating_token={};if(await this.renderer.handle_navigation(f,l,!1,{scroll:r,keepfocus:_}),this.navigating--,this.navigating_token===p){if(!this.navigating){const h={from:n,to:t};this.callbacks.after_navigate.forEach(y=>y(h))}if(s){const h=s.replaceState?0:1;s.state["sveltekit:index"]=this.current_history_index+=h,history[s.replaceState?"replaceState":"pushState"](s.state,"",f.url)}}}}function le(a){return a instanceof Error||a&&a.name&&a.message?a:new Error(JSON.stringify(a))}function Ge(a){let t=5381,r=a.length;if(typeof a=="string")for(;r;)t=t*33^a.charCodeAt(--r);else for(;r;)t=t*33^a[--r];return(t>>>0).toString(36)}function de(a){const t=a.status&&a.status>=400&&a.status<=599&&!a.redirect;if(a.error||t){const r=a.status;if(!a.error&&t)return{status:r||500,error:new Error};const _=typeof a.error=="string"?new Error(a.error):a.error;return _ instanceof Error?!r||r<400||r>599?(console.warn('"error" returned from load() without a valid status code \u2014 defaulting to 500'),{status:500,error:_}):{status:r,error:_}:{status:500,error:new Error(`"error" property returned from load() must be a string or instance of Error, received type "${typeof _}"`)}}if(a.redirect){if(!a.status||Math.floor(a.status/100)!==3)return{status:500,error:new Error('"redirect" property returned from load() must be accompanied by a 3xx status code')};if(typeof a.redirect!="string")return{status:500,error:new Error('"redirect" property returned from load() must be a string')}}if(a.context)throw new Error('You are returning "context" from a load function. 
"context" was renamed to "stuff", please adjust your code accordingly.');return a}function ce(a){const t=Y(a);let r=!0;function _(){r=!0,t.update(d=>d)}function l(d){r=!1,t.set(d)}function s(d){let o;return t.subscribe(n=>{(o===void 0||r&&n!==o)&&d(o=n)})}return{notify:_,set:l,subscribe:s}}function Me(){const{set:a,subscribe:t}=Y(!1),r="1647278773957";let _;async function l(){clearTimeout(_);const d=await fetch(`${De}/_app/version.json`,{headers:{pragma:"no-cache","cache-control":"no-cache"}});if(d.ok){const{version:o}=await d.json(),n=o!==r;return n&&(a(!0),clearTimeout(_)),n}else throw new Error(`Version check failed: ${d.status}`)}return{subscribe:t,check:l}}function Fe(a,t){const r=typeof a=="string"?a:a.url;let _=`script[data-type="svelte-data"][data-url=${JSON.stringify(r)}]`;t&&typeof t.body=="string"&&(_+=`[data-body="${Ge(t.body)}"]`);const l=document.querySelector(_);if(l&&l.textContent){const s=JSON.parse(l.textContent),{body:d}=s,o=re(s,["body"]);return Promise.resolve(new Response(d,o))}return fetch(a,t)}class Xe{constructor({Root:t,fallback:r,target:_,session:l}){this.Root=t,this.fallback=r,this.router,this.target=_,this.started=!1,this.session_id=1,this.invalid=new Set,this.invalidating=null,this.autoscroll=!0,this.updating=!1,this.current={url:null,session_id:0,branch:[]},this.cache=new Map,this.loading={id:null,promise:null},this.stores={url:ce({}),page:ce({}),navigating:Y(null),session:Y(l),updated:Me()},this.$session=null,this.root=null;let s=!1;this.stores.session.subscribe(async d=>{if(this.$session=d,!s||!this.router)return;this.session_id+=1;const o=this.router.parse(new URL(location.href));o&&this.update(o,[],!0)}),s=!0}disable_scroll_handling(){(this.updating||!this.started)&&(this.autoscroll=!1)}async start({status:t,error:r,nodes:_,params:l}){const s=new URL(location.href),d=[];let o={},n,c;try{for(let u=0;u<_.length;u+=1){const f=u===_.length-1;let m;if(f){const h=document.querySelector('[data-type="svelte-props"]');h&&(m=JSON.parse(h.textContent))}const p=await this._load_node({module:await _[u],url:s,params:l,stuff:o,status:f?t:void 0,error:f?r:void 0,props:m});if(m&&(p.uses.dependencies.add(s.href),p.uses.url=!0),d.push(p),p&&p.loaded)if(p.loaded.error){if(r)throw p.loaded.error;c={status:p.loaded.status,error:p.loaded.error,url:s}}else p.loaded.stuff&&(o=$($({},o),p.loaded.stuff))}n=c?await this._load_error(c):await this._get_navigation_result_from_branch({url:s,params:l,stuff:o,branch:d,status:t,error:r})}catch(u){if(r)throw u;n=await this._load_error({status:500,error:le(u),url:s})}if(n.redirect){location.href=new URL(n.redirect,location.href).href;return}this._init(n)}async handle_navigation(t,r,_,l){this.started&&this.stores.navigating.set({from:this.current.url,to:t.url}),await this.update(t,r,_,l)}async update(t,r,_,l){var n,c,u;const s=this.token={};let d=await this._get_navigation_result(t,_);if(!d){location.href=t.url.href;return}if(s!==this.token)return;if(this.invalid.clear(),d.redirect)if(r.length>10||r.includes(t.url.pathname))d=await this._load_error({status:500,error:new Error("Redirect loop"),url:t.url});else{this.router?this.router.goto(new URL(d.redirect,t.url).href,{},[...r,t.url.pathname]):location.href=new URL(d.redirect,location.href).href;return}else if(((c=(n=d.props)==null?void 0:n.page)==null?void 0:c.status)>=400&&await 
this.stores.updated.check()){location.href=t.url.href;return}if(this.updating=!0,this.started?(this.current=d.state,this.root.$set(d.props),this.stores.navigating.set(null)):this._init(d),l){const{scroll:f,keepfocus:m}=l;if(m||((u=getSelection())==null||u.removeAllRanges(),document.body.focus()),await ie(),this.autoscroll){const p=t.url.hash&&document.getElementById(t.url.hash.slice(1));f?scrollTo(f.x,f.y):p?p.scrollIntoView():scrollTo(0,0)}}else await ie();if(this.loading.promise=null,this.loading.id=null,this.autoscroll=!0,this.updating=!1,d.props.page&&(this.page=d.props.page),!this.router)return;const o=d.state.branch[d.state.branch.length-1];o&&o.module.router===!1?this.router.disable():this.router.enable()}load(t){return this.loading.promise=this._get_navigation_result(t,!1),this.loading.id=t.id,this.loading.promise}invalidate(t){return this.invalid.add(t),this.invalidating||(this.invalidating=Promise.resolve().then(async()=>{const r=this.router&&this.router.parse(new URL(location.href));r&&await this.update(r,[],!0),this.invalidating=null})),this.invalidating}update_page_store(t){this.stores.page.set(W($({},this.page),{url:t})),this.stores.page.notify()}_init(t){this.current=t.state;const r=document.querySelector("style[data-svelte]");if(r&&r.remove(),this.page=t.props.page,this.root=new this.Root({target:this.target,props:$({stores:this.stores},t.props),hydrate:!0}),this.started=!0,this.router){const _={from:null,to:new URL(location.href)};this.router.callbacks.after_navigate.forEach(l=>l(_))}}async _get_navigation_result(t,r){if(this.loading.id===t.id&&this.loading.promise)return this.loading.promise;for(let _=0;_<t.routes.length;_+=1){const l=t.routes[_];let s=_+1;for(;s<t.routes.length;){const o=t.routes[s];if(o[0].toString()===l[0].toString())o[1].forEach(n=>n()),s+=1;else break}const d=await this._load({route:l,info:t},r);if(d)return d}if(t.initial)return await this._load_error({status:404,error:new Error(`Not found: ${t.url.pathname}`),url:t.url})}async _get_navigation_result_from_branch({url:t,params:r,stuff:_,branch:l,status:s,error:d}){const o=l.filter(Boolean),n=o.find(m=>m.loaded&&m.loaded.redirect),c={redirect:n&&n.loaded?n.loaded.redirect:void 0,state:{url:t,params:r,branch:l,session_id:this.session_id},props:{components:o.map(m=>m.module.default)}};for(let m=0;m<o.length;m+=1){const p=o[m].loaded;c.props[`props_${m}`]=p?await p.props:null}if(!this.current.url||t.href!==this.current.url.href){c.props.page={url:t,params:r,status:s,error:d,stuff:_};const m=(p,h)=>{Object.defineProperty(c.props.page,p,{get:()=>{throw new Error(`$page.${p} has been replaced by $page.url.${h}`)}})};m("origin","origin"),m("path","pathname"),m("query","searchParams")}const u=o[o.length-1],f=u.loaded&&u.loaded.maxage;if(f){const m=t.pathname+t.search;let p=!1;const h=()=>{this.cache.get(m)===c&&this.cache.delete(m),T(),clearTimeout(y)},y=setTimeout(h,f*1e3),T=this.stores.session.subscribe(()=>{p&&h()});p=!0,this.cache.set(m,c)}return c}async _load_node({status:t,error:r,module:_,url:l,params:s,stuff:d,props:o}){const n={module:_,uses:{params:new Set,url:!1,session:!1,stuff:!1,dependencies:new Set},loaded:null,stuff:d};o&&n.uses.dependencies.add(l.href);const c={};for(const f in s)Object.defineProperty(c,f,{get(){return n.uses.params.add(f),s[f]},enumerable:!0});const u=this.$session;if(_.load){const{started:f}=this,m={params:c,props:o||{},get url(){return n.uses.url=!0,l},get session(){return n.uses.session=!0,u},get stuff(){return n.uses.stuff=!0,$({},d)},fetch(h,y){const T=typeof 
h=="string"?h:h.url,{href:V}=new URL(T,l);return n.uses.dependencies.add(V),f?fetch(h,y):Fe(h,y)}};r&&(m.status=t,m.error=r);const p=await _.load.call(null,m);if(!p)throw new Error("load function must return a value");n.loaded=de(p),n.loaded.stuff&&(n.stuff=n.loaded.stuff)}else o&&(n.loaded=de({props:o}));return n}async _load({route:t,info:{url:r,path:_}},l){const s=r.pathname+r.search;if(!l){const E=this.cache.get(s);if(E)return E}const[d,o,n,c,u]=t,f=c?c(d.exec(_)):{},m=this.current.url&&{url:s!==this.current.url.pathname+this.current.url.search,params:Object.keys(f).filter(E=>this.current.params[E]!==f[E]),session:this.session_id!==this.current.session_id};let p=[],h={},y=!1,T=200,V;o.forEach(E=>E());e:for(let E=0;E<o.length;E+=1){let v;try{if(!o[E])continue;const g=await o[E](),A=this.current.branch[E];if(!A||g!==A.module||m.url&&A.uses.url||m.params.some(x=>A.uses.params.has(x))||m.session&&A.uses.session||Array.from(A.uses.dependencies).some(x=>this.invalid.has(x))||y&&A.uses.stuff){let x={};const H=u&&E===o.length-1;if(H){const K=await fetch(`${r.pathname}${r.pathname.endsWith("/")?"":"/"}__data.json${r.search}`,{headers:{"x-sveltekit-load":"true"}});if(K.ok){const Q=K.headers.get("x-sveltekit-location");if(Q)return{redirect:Q,props:{},state:this.current};x=await K.json()}else T=K.status,V=new Error("Failed to load data")}if(V||(v=await this._load_node({module:g,url:r,params:f,props:x,stuff:h})),v&&(H&&(v.uses.url=!0),v.loaded)){if(v.loaded.fallthrough)return;if(v.loaded.error&&(T=v.loaded.status,V=v.loaded.error),v.loaded.redirect)return{redirect:v.loaded.redirect,props:{},state:this.current};v.loaded.stuff&&(y=!0)}}else v=A}catch(g){T=500,V=le(g)}if(V){for(;E--;)if(n[E]){let g,A,J=E;for(;!(A=p[J]);)J-=1;try{if(g=await this._load_node({status:T,error:V,module:await n[E](),url:r,params:f,stuff:A.stuff}),g&&g.loaded&&g.loaded.error)continue;g&&g.loaded&&g.loaded.stuff&&(h=$($({},h),g.loaded.stuff)),p=p.slice(0,J+1).concat(g);break e}catch{continue}}return await this._load_error({status:T,error:V,url:r})}else v&&v.loaded&&v.loaded.stuff&&(h=$($({},h),v.loaded.stuff)),p.push(v)}return await this._get_navigation_result_from_branch({url:r,params:f,stuff:h,branch:p,status:T,error:V})}async _load_error({status:t,error:r,url:_}){var c,u;const l={},s=await this._load_node({module:await this.fallback[0],url:_,params:l,stuff:{}}),d=await this._load_node({status:t,error:r,module:await this.fallback[1],url:_,params:l,stuff:s&&s.loaded&&s.loaded.stuff||{}}),o=[s,d],n=$($({},(c=s==null?void 0:s.loaded)==null?void 0:c.stuff),(u=d==null?void 0:d.loaded)==null?void 0:u.stuff);return await this._get_navigation_result_from_branch({url:_,params:l,stuff:n,branch:o,status:t,error:r})}}async function et({paths:a,target:t,session:r,route:_,spa:l,trailing_slash:s,hydrate:d}){const o=new Xe({Root:je,fallback:Ke,target:t,session:r}),n=_?new Ye({base:a.base,routes:Je,trailing_slash:s,renderer:o}):null;be(a),d&&await o.start(d),n&&(l&&n.goto(location.href,{replaceState:!0},[]),n.init_listeners()),dispatchEvent(new CustomEvent("sveltekit:start"))}export{et as start};
229
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/manifest.json
{ ".svelte-kit/runtime/client/start.js": { "file": "start-7cd6c1d0.js", "src": ".svelte-kit/runtime/client/start.js", "isEntry": true, "imports": [ "_vendor-4833417e.js", "_paths-4b3c6e7e.js" ], "dynamicImports": [ "src/routes/__layout.svelte", ".svelte-kit/runtime/components/error.svelte", "src/routes/index.mdx", "src/routes/converting_tensorflow_models.mdx", "src/routes/autoclass_tutorial.mdx", "src/routes/pipeline_tutorial.mdx", "src/routes/tokenizer_summary.mdx", "src/routes/add_new_pipeline.mdx", "src/routes/custom_datasets.mdx", "src/routes/fast_tokenizers.mdx", "src/routes/troubleshooting.mdx", "src/routes/create_a_model.mdx", "src/routes/add_new_model.mdx", "src/routes/custom_models.mdx", "src/routes/model_sharing.mdx", "src/routes/model_summary.mdx", "src/routes/preprocessing.mdx", "src/routes/serialization.mdx", "src/routes/contributing.mdx", "src/routes/installation.mdx", "src/routes/main_classes/optimizer_schedules.mdx", "src/routes/main_classes/feature_extractor.mdx", "src/routes/main_classes/keras_callbacks.mdx", "src/routes/main_classes/text_generation.mdx", "src/routes/main_classes/configuration.mdx", "src/routes/main_classes/data_collator.mdx", "src/routes/main_classes/processors.mdx", "src/routes/main_classes/deepspeed.mdx", "src/routes/main_classes/pipelines.mdx", "src/routes/main_classes/tokenizer.mdx", "src/routes/main_classes/callback.mdx", "src/routes/main_classes/logging.mdx", "src/routes/main_classes/trainer.mdx", "src/routes/main_classes/output.mdx", "src/routes/main_classes/model.mdx", "src/routes/main_classes/onnx.mdx", "src/routes/multilingual.mdx", "src/routes/task_summary.mdx", "src/routes/parallelism.mdx", "src/routes/performance.mdx", "src/routes/run_scripts.mdx", "src/routes/accelerate.mdx", "src/routes/benchmarks.mdx", "src/routes/perplexity.mdx", "src/routes/philosophy.mdx", "src/routes/bertology.mdx", "src/routes/community.mdx", "src/routes/debugging.mdx", "src/routes/migration.mdx", "src/routes/model_doc/vision-text-dual-encoder.mdx", "src/routes/model_doc/speech-encoder-decoder.mdx", "src/routes/model_doc/vision-encoder-decoder.mdx", "src/routes/model_doc/blenderbot-small.mdx", "src/routes/model_doc/speech_to_text_2.mdx", "src/routes/model_doc/wav2vec2_phoneme.mdx", "src/routes/model_doc/bert-generation.mdx", "src/routes/model_doc/bigbird_pegasus.mdx", "src/routes/model_doc/encoder-decoder.mdx", "src/routes/model_doc/speech_to_text.mdx", "src/routes/model_doc/xlm-prophetnet.mdx", "src/routes/model_doc/xlm-roberta-xl.mdx", "src/routes/model_doc/bert-japanese.mdx", "src/routes/model_doc/megatron-bert.mdx", "src/routes/model_doc/megatron_gpt2.mdx", "src/routes/model_doc/nystromformer.mdx", "src/routes/model_doc/unispeech-sat.mdx", "src/routes/model_doc/xlsr_wav2vec2.mdx", "src/routes/model_doc/squeezebert.mdx", "src/routes/model_doc/visual_bert.mdx", "src/routes/model_doc/xlm-roberta.mdx", "src/routes/model_doc/blenderbot.mdx", "src/routes/model_doc/deberta-v2.mdx", "src/routes/model_doc/distilbert.mdx", "src/routes/model_doc/layoutlmv2.mdx", "src/routes/model_doc/longformer.mdx", "src/routes/model_doc/maskformer.mdx", "src/routes/model_doc/mobilebert.mdx", "src/routes/model_doc/openai-gpt.mdx", "src/routes/model_doc/poolformer.mdx", "src/routes/model_doc/prophetnet.mdx", "src/routes/model_doc/transfo-xl.mdx", "src/routes/model_doc/camembert.mdx", "src/routes/model_doc/layoutxlm.mdx", "src/routes/model_doc/perceiver.mdx", "src/routes/model_doc/retribert.mdx", "src/routes/model_doc/segformer.mdx", "src/routes/model_doc/unispeech.mdx", 
"src/routes/model_doc/bertweet.mdx", "src/routes/model_doc/big_bird.mdx", "src/routes/model_doc/convbert.mdx", "src/routes/model_doc/convnext.mdx", "src/routes/model_doc/data2vec.mdx", "src/routes/model_doc/dialogpt.mdx", "src/routes/model_doc/flaubert.mdx", "src/routes/model_doc/imagegpt.mdx", "src/routes/model_doc/layoutlm.mdx", "src/routes/model_doc/reformer.mdx", "src/routes/model_doc/roformer.mdx", "src/routes/model_doc/splinter.mdx", "src/routes/model_doc/wav2vec2.mdx", "src/routes/model_doc/barthez.mdx", "src/routes/model_doc/bartpho.mdx", "src/routes/model_doc/deberta.mdx", "src/routes/model_doc/electra.mdx", "src/routes/model_doc/gpt_neo.mdx", "src/routes/model_doc/herbert.mdx", "src/routes/model_doc/m2m_100.mdx", "src/routes/model_doc/pegasus.mdx", "src/routes/model_doc/phobert.mdx", "src/routes/model_doc/qdqbert.mdx", "src/routes/model_doc/rembert.mdx", "src/routes/model_doc/roberta.mdx", "src/routes/model_doc/vit_mae.mdx", "src/routes/model_doc/albert.mdx", "src/routes/model_doc/canine.mdx", "src/routes/model_doc/funnel.mdx", "src/routes/model_doc/hubert.mdx", "src/routes/model_doc/lxmert.mdx", "src/routes/model_doc/marian.mdx", "src/routes/model_doc/plbart.mdx", "src/routes/model_doc/t5v1.1.mdx", "src/routes/model_doc/ibert.mdx", "src/routes/model_doc/mbart.mdx", "src/routes/model_doc/mluke.mdx", "src/routes/model_doc/mpnet.mdx", "src/routes/model_doc/realm.mdx", "src/routes/model_doc/sew-d.mdx", "src/routes/model_doc/tapas.mdx", "src/routes/model_doc/trocr.mdx", "src/routes/model_doc/wavlm.mdx", "src/routes/model_doc/xlnet.mdx", "src/routes/model_doc/xls_r.mdx", "src/routes/model_doc/auto.mdx", "src/routes/model_doc/bart.mdx", "src/routes/model_doc/beit.mdx", "src/routes/model_doc/bert.mdx", "src/routes/model_doc/bort.mdx", "src/routes/model_doc/byt5.mdx", "src/routes/model_doc/clip.mdx", "src/routes/model_doc/ctrl.mdx", "src/routes/model_doc/deit.mdx", "src/routes/model_doc/detr.mdx", "src/routes/model_doc/fnet.mdx", "src/routes/model_doc/fsmt.mdx", "src/routes/model_doc/gpt2.mdx", "src/routes/model_doc/gptj.mdx", "src/routes/model_doc/luke.mdx", "src/routes/model_doc/swin.mdx", "src/routes/model_doc/vilt.mdx", "src/routes/model_doc/xglm.mdx", "src/routes/model_doc/yoso.mdx", "src/routes/model_doc/cpm.mdx", "src/routes/model_doc/dit.mdx", "src/routes/model_doc/dpr.mdx", "src/routes/model_doc/led.mdx", "src/routes/model_doc/mt5.mdx", "src/routes/model_doc/rag.mdx", "src/routes/model_doc/sew.mdx", "src/routes/model_doc/vit.mdx", "src/routes/model_doc/xlm.mdx", "src/routes/model_doc/t5.mdx", "src/routes/notebooks.mdx", "src/routes/pr_checks.mdx", "src/routes/quicktour.mdx", "src/routes/sagemaker.mdx", "src/routes/glossary.mdx", "src/routes/internal/tokenization_utils.mdx", "src/routes/internal/generation_utils.mdx", "src/routes/internal/pipelines_utils.mdx", "src/routes/internal/modeling_utils.mdx", "src/routes/internal/trainer_utils.mdx", "src/routes/internal/file_utils.mdx", "src/routes/training.mdx", "src/routes/testing.mdx", "src/routes/tasks/sequence_classification.mdx", "src/routes/tasks/audio_classification.mdx", "src/routes/tasks/image_classification.mdx", "src/routes/tasks/token_classification.mdx", "src/routes/tasks/question_answering.mdx", "src/routes/tasks/language_modeling.mdx", "src/routes/tasks/multiple_choice.mdx", "src/routes/tasks/summarization.mdx", "src/routes/tasks/translation.mdx", "src/routes/tasks/asr.mdx" ] }, "src/routes/__layout.svelte": { "file": "pages/__layout.svelte-52826f4d.js", "src": "src/routes/__layout.svelte", "isEntry": true, 
"isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_paths-4b3c6e7e.js" ], "css": [ "assets/pages/__layout.svelte-a5c8879b.css" ] }, ".svelte-kit/runtime/components/error.svelte": { "file": "error.svelte-7219ae09.js", "src": ".svelte-kit/runtime/components/error.svelte", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js" ] }, "src/routes/index.mdx": { "file": "pages/index.mdx-78213d4b.js", "src": "src/routes/index.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_IconCopyLink-4b81c553.js" ] }, "src/routes/converting_tensorflow_models.mdx": { "file": "pages/converting_tensorflow_models.mdx-9367eeca.js", "src": "src/routes/converting_tensorflow_models.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_IconCopyLink-4b81c553.js", "_CodeBlock-6a3d1b46.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/autoclass_tutorial.mdx": { "file": "pages/autoclass_tutorial.mdx-256b03d8.js", "src": "src/routes/autoclass_tutorial.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_IconCopyLink-4b81c553.js", "_CodeBlock-6a3d1b46.js", "_CodeBlockFw-27a176a0.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/pipeline_tutorial.mdx": { "file": "pages/pipeline_tutorial.mdx-8f24abfb.js", "src": "src/routes/pipeline_tutorial.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_IconCopyLink-4b81c553.js", "_CodeBlock-6a3d1b46.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/tokenizer_summary.mdx": { "file": "pages/tokenizer_summary.mdx-9d9de8d8.js", "src": "src/routes/tokenizer_summary.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Youtube-27813aed.js", "_IconCopyLink-4b81c553.js", "_CodeBlock-6a3d1b46.js", "_DocNotebookDropdown-ecff2a90.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/add_new_pipeline.mdx": { "file": "pages/add_new_pipeline.mdx-86ba54ca.js", "src": "src/routes/add_new_pipeline.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_IconCopyLink-4b81c553.js", "_CodeBlock-6a3d1b46.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/custom_datasets.mdx": { "file": "pages/custom_datasets.mdx-a1b148bf.js", "src": "src/routes/custom_datasets.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_IconCopyLink-4b81c553.js", "_CodeBlock-6a3d1b46.js", "_DocNotebookDropdown-ecff2a90.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/fast_tokenizers.mdx": { "file": "pages/fast_tokenizers.mdx-1a58673f.js", "src": "src/routes/fast_tokenizers.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_IconCopyLink-4b81c553.js", "_CodeBlock-6a3d1b46.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/troubleshooting.mdx": { "file": "pages/troubleshooting.mdx-e0bea728.js", "src": "src/routes/troubleshooting.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Youtube-27813aed.js", "_IconCopyLink-4b81c553.js", "_CodeBlock-6a3d1b46.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/create_a_model.mdx": { "file": "pages/create_a_model.mdx-44804e70.js", "src": "src/routes/create_a_model.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_IconCopyLink-4b81c553.js", "_CodeBlock-6a3d1b46.js", "_CodeBlockFw-27a176a0.js", "_CopyButton-dacfbfaf.js" ] }, 
"src/routes/add_new_model.mdx": { "file": "pages/add_new_model.mdx-07427b7e.js", "src": "src/routes/add_new_model.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_IconCopyLink-4b81c553.js", "_CodeBlock-6a3d1b46.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/custom_models.mdx": { "file": "pages/custom_models.mdx-12924744.js", "src": "src/routes/custom_models.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_IconCopyLink-4b81c553.js", "_CodeBlock-6a3d1b46.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_sharing.mdx": { "file": "pages/model_sharing.mdx-3b3df35f.js", "src": "src/routes/model_sharing.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Youtube-27813aed.js", "_IconCopyLink-4b81c553.js", "_CodeBlock-6a3d1b46.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_summary.mdx": { "file": "pages/model_summary.mdx-4a71079d.js", "src": "src/routes/model_summary.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Youtube-27813aed.js", "_IconCopyLink-4b81c553.js" ] }, "src/routes/preprocessing.mdx": { "file": "pages/preprocessing.mdx-daaa6c3c.js", "src": "src/routes/preprocessing.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Youtube-27813aed.js", "_IconCopyLink-4b81c553.js", "_CodeBlock-6a3d1b46.js", "_CodeBlockFw-27a176a0.js", "_DocNotebookDropdown-ecff2a90.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/serialization.mdx": { "file": "pages/serialization.mdx-809e82fb.js", "src": "src/routes/serialization.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_IconCopyLink-4b81c553.js", "_CodeBlock-6a3d1b46.js", "_CodeBlockFw-27a176a0.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/contributing.mdx": { "file": "pages/contributing.mdx-24741e59.js", "src": "src/routes/contributing.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_IconCopyLink-4b81c553.js", "_CodeBlock-6a3d1b46.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/installation.mdx": { "file": "pages/installation.mdx-ccd851fb.js", "src": "src/routes/installation.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_IconCopyLink-4b81c553.js", "_CodeBlock-6a3d1b46.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/main_classes/optimizer_schedules.mdx": { "file": "pages/main_classes/optimizer_schedules.mdx-d0982fc0.js", "src": "src/routes/main_classes/optimizer_schedules.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/main_classes/feature_extractor.mdx": { "file": "pages/main_classes/feature_extractor.mdx-045f07b4.js", "src": "src/routes/main_classes/feature_extractor.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/main_classes/keras_callbacks.mdx": { "file": "pages/main_classes/keras_callbacks.mdx-7ee92fa9.js", "src": "src/routes/main_classes/keras_callbacks.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", 
"_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/main_classes/text_generation.mdx": { "file": "pages/main_classes/text_generation.mdx-5e23a84f.js", "src": "src/routes/main_classes/text_generation.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/main_classes/configuration.mdx": { "file": "pages/main_classes/configuration.mdx-5be476eb.js", "src": "src/routes/main_classes/configuration.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/main_classes/data_collator.mdx": { "file": "pages/main_classes/data_collator.mdx-5221ee9b.js", "src": "src/routes/main_classes/data_collator.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_IconCopyLink-4b81c553.js" ] }, "src/routes/main_classes/processors.mdx": { "file": "pages/main_classes/processors.mdx-9cb76e12.js", "src": "src/routes/main_classes/processors.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/main_classes/deepspeed.mdx": { "file": "pages/main_classes/deepspeed.mdx-f19f464c.js", "src": "src/routes/main_classes/deepspeed.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/main_classes/pipelines.mdx": { "file": "pages/main_classes/pipelines.mdx-dd4bd714.js", "src": "src/routes/main_classes/pipelines.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/main_classes/tokenizer.mdx": { "file": "pages/main_classes/tokenizer.mdx-187685a5.js", "src": "src/routes/main_classes/tokenizer.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/main_classes/callback.mdx": { "file": "pages/main_classes/callback.mdx-9aed4ad7.js", "src": "src/routes/main_classes/callback.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/main_classes/logging.mdx": { "file": "pages/main_classes/logging.mdx-8b2a9a6f.js", "src": "src/routes/main_classes/logging.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/main_classes/trainer.mdx": { "file": "pages/main_classes/trainer.mdx-a51a0aac.js", "src": "src/routes/main_classes/trainer.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", 
"_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/main_classes/output.mdx": { "file": "pages/main_classes/output.mdx-bc3fe8ad.js", "src": "src/routes/main_classes/output.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/main_classes/model.mdx": { "file": "pages/main_classes/model.mdx-5e2df875.js", "src": "src/routes/main_classes/model.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/main_classes/onnx.mdx": { "file": "pages/main_classes/onnx.mdx-da087fbf.js", "src": "src/routes/main_classes/onnx.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Docstring-4f315ed9.js", "_IconCopyLink-4b81c553.js" ] }, "src/routes/multilingual.mdx": { "file": "pages/multilingual.mdx-0feae7c7.js", "src": "src/routes/multilingual.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_IconCopyLink-4b81c553.js", "_CodeBlock-6a3d1b46.js", "_DocNotebookDropdown-ecff2a90.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/task_summary.mdx": { "file": "pages/task_summary.mdx-1d811e69.js", "src": "src/routes/task_summary.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_IconCopyLink-4b81c553.js", "_CodeBlock-6a3d1b46.js", "_CodeBlockFw-27a176a0.js", "_DocNotebookDropdown-ecff2a90.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/parallelism.mdx": { "file": "pages/parallelism.mdx-f6d6de66.js", "src": "src/routes/parallelism.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_IconCopyLink-4b81c553.js", "_CodeBlock-6a3d1b46.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/performance.mdx": { "file": "pages/performance.mdx-a6515fea.js", "src": "src/routes/performance.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_IconCopyLink-4b81c553.js", "_CodeBlock-6a3d1b46.js", "_DocNotebookDropdown-ecff2a90.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/run_scripts.mdx": { "file": "pages/run_scripts.mdx-dea66c26.js", "src": "src/routes/run_scripts.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_IconCopyLink-4b81c553.js", "_CodeBlock-6a3d1b46.js", "_CodeBlockFw-27a176a0.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/accelerate.mdx": { "file": "pages/accelerate.mdx-8837c56a.js", "src": "src/routes/accelerate.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_IconCopyLink-4b81c553.js", "_CodeBlock-6a3d1b46.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/benchmarks.mdx": { "file": "pages/benchmarks.mdx-680f04c0.js", "src": "src/routes/benchmarks.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_IconCopyLink-4b81c553.js", "_CodeBlockFw-27a176a0.js", "_DocNotebookDropdown-ecff2a90.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/perplexity.mdx": { "file": "pages/perplexity.mdx-36aeb100.js", "src": "src/routes/perplexity.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_IconCopyLink-4b81c553.js", "_CodeBlock-6a3d1b46.js", "_DocNotebookDropdown-ecff2a90.js", "_CopyButton-dacfbfaf.js" ] }, 
"src/routes/philosophy.mdx": { "file": "pages/philosophy.mdx-9ffd6814.js", "src": "src/routes/philosophy.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_IconCopyLink-4b81c553.js" ] }, "src/routes/bertology.mdx": { "file": "pages/bertology.mdx-1663513a.js", "src": "src/routes/bertology.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_IconCopyLink-4b81c553.js" ] }, "src/routes/community.mdx": { "file": "pages/community.mdx-df0c9955.js", "src": "src/routes/community.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_IconCopyLink-4b81c553.js" ] }, "src/routes/debugging.mdx": { "file": "pages/debugging.mdx-9163dbf4.js", "src": "src/routes/debugging.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_IconCopyLink-4b81c553.js", "_CodeBlock-6a3d1b46.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/migration.mdx": { "file": "pages/migration.mdx-040dfdaa.js", "src": "src/routes/migration.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_IconCopyLink-4b81c553.js", "_CodeBlock-6a3d1b46.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/vision-text-dual-encoder.mdx": { "file": "pages/model_doc/vision-text-dual-encoder.mdx-bcc9f3e9.js", "src": "src/routes/model_doc/vision-text-dual-encoder.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/speech-encoder-decoder.mdx": { "file": "pages/model_doc/speech-encoder-decoder.mdx-a4b010d9.js", "src": "src/routes/model_doc/speech-encoder-decoder.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/vision-encoder-decoder.mdx": { "file": "pages/model_doc/vision-encoder-decoder.mdx-b5cd5518.js", "src": "src/routes/model_doc/vision-encoder-decoder.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/blenderbot-small.mdx": { "file": "pages/model_doc/blenderbot-small.mdx-047cfb96.js", "src": "src/routes/model_doc/blenderbot-small.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/speech_to_text_2.mdx": { "file": "pages/model_doc/speech_to_text_2.mdx-9f3459dc.js", "src": "src/routes/model_doc/speech_to_text_2.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/wav2vec2_phoneme.mdx": { "file": "pages/model_doc/wav2vec2_phoneme.mdx-985dde42.js", "src": "src/routes/model_doc/wav2vec2_phoneme.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Docstring-4f315ed9.js", "_IconCopyLink-4b81c553.js" ] }, "src/routes/model_doc/bert-generation.mdx": { "file": "pages/model_doc/bert-generation.mdx-eb6fdeef.js", "src": 
"src/routes/model_doc/bert-generation.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/bigbird_pegasus.mdx": { "file": "pages/model_doc/bigbird_pegasus.mdx-ad01f788.js", "src": "src/routes/model_doc/bigbird_pegasus.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/encoder-decoder.mdx": { "file": "pages/model_doc/encoder-decoder.mdx-c8393a8f.js", "src": "src/routes/model_doc/encoder-decoder.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/speech_to_text.mdx": { "file": "pages/model_doc/speech_to_text.mdx-35ea0919.js", "src": "src/routes/model_doc/speech_to_text.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/xlm-prophetnet.mdx": { "file": "pages/model_doc/xlm-prophetnet.mdx-97382af9.js", "src": "src/routes/model_doc/xlm-prophetnet.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/xlm-roberta-xl.mdx": { "file": "pages/model_doc/xlm-roberta-xl.mdx-480f1600.js", "src": "src/routes/model_doc/xlm-roberta-xl.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/bert-japanese.mdx": { "file": "pages/model_doc/bert-japanese.mdx-b3af231a.js", "src": "src/routes/model_doc/bert-japanese.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/megatron-bert.mdx": { "file": "pages/model_doc/megatron-bert.mdx-d23a647f.js", "src": "src/routes/model_doc/megatron-bert.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/megatron_gpt2.mdx": { "file": "pages/model_doc/megatron_gpt2.mdx-da7be0c9.js", "src": "src/routes/model_doc/megatron_gpt2.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_IconCopyLink-4b81c553.js", "_CodeBlock-6a3d1b46.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/nystromformer.mdx": { "file": "pages/model_doc/nystromformer.mdx-a520f5f1.js", "src": "src/routes/model_doc/nystromformer.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/unispeech-sat.mdx": { "file": "pages/model_doc/unispeech-sat.mdx-33d28e6f.js", "src": 
"src/routes/model_doc/unispeech-sat.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/xlsr_wav2vec2.mdx": { "file": "pages/model_doc/xlsr_wav2vec2.mdx-7f80d6d2.js", "src": "src/routes/model_doc/xlsr_wav2vec2.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_IconCopyLink-4b81c553.js" ] }, "src/routes/model_doc/squeezebert.mdx": { "file": "pages/model_doc/squeezebert.mdx-a1f791b4.js", "src": "src/routes/model_doc/squeezebert.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/visual_bert.mdx": { "file": "pages/model_doc/visual_bert.mdx-56bcaade.js", "src": "src/routes/model_doc/visual_bert.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/xlm-roberta.mdx": { "file": "pages/model_doc/xlm-roberta.mdx-d3abcebe.js", "src": "src/routes/model_doc/xlm-roberta.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/blenderbot.mdx": { "file": "pages/model_doc/blenderbot.mdx-2f4c855d.js", "src": "src/routes/model_doc/blenderbot.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/deberta-v2.mdx": { "file": "pages/model_doc/deberta-v2.mdx-e77d92af.js", "src": "src/routes/model_doc/deberta-v2.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/distilbert.mdx": { "file": "pages/model_doc/distilbert.mdx-6a7e081a.js", "src": "src/routes/model_doc/distilbert.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/layoutlmv2.mdx": { "file": "pages/model_doc/layoutlmv2.mdx-fb521e03.js", "src": "src/routes/model_doc/layoutlmv2.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/longformer.mdx": { "file": "pages/model_doc/longformer.mdx-94451db4.js", "src": "src/routes/model_doc/longformer.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/maskformer.mdx": { "file": "pages/model_doc/maskformer.mdx-07585051.js", "src": "src/routes/model_doc/maskformer.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", 
"_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/mobilebert.mdx": { "file": "pages/model_doc/mobilebert.mdx-194d75d4.js", "src": "src/routes/model_doc/mobilebert.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/openai-gpt.mdx": { "file": "pages/model_doc/openai-gpt.mdx-55c17d26.js", "src": "src/routes/model_doc/openai-gpt.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/poolformer.mdx": { "file": "pages/model_doc/poolformer.mdx-9f52b149.js", "src": "src/routes/model_doc/poolformer.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/prophetnet.mdx": { "file": "pages/model_doc/prophetnet.mdx-8afd09ba.js", "src": "src/routes/model_doc/prophetnet.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/transfo-xl.mdx": { "file": "pages/model_doc/transfo-xl.mdx-64ec66da.js", "src": "src/routes/model_doc/transfo-xl.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/camembert.mdx": { "file": "pages/model_doc/camembert.mdx-90807659.js", "src": "src/routes/model_doc/camembert.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_IconCopyLink-4b81c553.js" ] }, "src/routes/model_doc/layoutxlm.mdx": { "file": "pages/model_doc/layoutxlm.mdx-9404196f.js", "src": "src/routes/model_doc/layoutxlm.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/perceiver.mdx": { "file": "pages/model_doc/perceiver.mdx-6efe9939.js", "src": "src/routes/model_doc/perceiver.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/retribert.mdx": { "file": "pages/model_doc/retribert.mdx-3029d10f.js", "src": "src/routes/model_doc/retribert.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Docstring-4f315ed9.js", "_IconCopyLink-4b81c553.js" ] }, "src/routes/model_doc/segformer.mdx": { "file": "pages/model_doc/segformer.mdx-50f6b8f8.js", "src": "src/routes/model_doc/segformer.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/unispeech.mdx": { "file": 
"pages/model_doc/unispeech.mdx-dcd6680a.js", "src": "src/routes/model_doc/unispeech.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/bertweet.mdx": { "file": "pages/model_doc/bertweet.mdx-fda755db.js", "src": "src/routes/model_doc/bertweet.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/big_bird.mdx": { "file": "pages/model_doc/big_bird.mdx-b3c9bc9a.js", "src": "src/routes/model_doc/big_bird.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/convbert.mdx": { "file": "pages/model_doc/convbert.mdx-b107085e.js", "src": "src/routes/model_doc/convbert.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/convnext.mdx": { "file": "pages/model_doc/convnext.mdx-f632ad47.js", "src": "src/routes/model_doc/convnext.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/data2vec.mdx": { "file": "pages/model_doc/data2vec.mdx-72db0e89.js", "src": "src/routes/model_doc/data2vec.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/dialogpt.mdx": { "file": "pages/model_doc/dialogpt.mdx-fc003640.js", "src": "src/routes/model_doc/dialogpt.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_IconCopyLink-4b81c553.js" ] }, "src/routes/model_doc/flaubert.mdx": { "file": "pages/model_doc/flaubert.mdx-04b8963f.js", "src": "src/routes/model_doc/flaubert.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/imagegpt.mdx": { "file": "pages/model_doc/imagegpt.mdx-5a1cdb73.js", "src": "src/routes/model_doc/imagegpt.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/layoutlm.mdx": { "file": "pages/model_doc/layoutlm.mdx-2ae0b366.js", "src": "src/routes/model_doc/layoutlm.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/reformer.mdx": { "file": "pages/model_doc/reformer.mdx-15f80e57.js", "src": "src/routes/model_doc/reformer.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", 
"_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/roformer.mdx": { "file": "pages/model_doc/roformer.mdx-0f80020b.js", "src": "src/routes/model_doc/roformer.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/splinter.mdx": { "file": "pages/model_doc/splinter.mdx-c24b4ae5.js", "src": "src/routes/model_doc/splinter.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/wav2vec2.mdx": { "file": "pages/model_doc/wav2vec2.mdx-30b5c1d2.js", "src": "src/routes/model_doc/wav2vec2.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/barthez.mdx": { "file": "pages/model_doc/barthez.mdx-a610f6fe.js", "src": "src/routes/model_doc/barthez.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Docstring-4f315ed9.js", "_IconCopyLink-4b81c553.js" ] }, "src/routes/model_doc/bartpho.mdx": { "file": "pages/model_doc/bartpho.mdx-039ff953.js", "src": "src/routes/model_doc/bartpho.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/deberta.mdx": { "file": "pages/model_doc/deberta.mdx-e5952704.js", "src": "src/routes/model_doc/deberta.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/electra.mdx": { "file": "pages/model_doc/electra.mdx-1a5b84b2.js", "src": "src/routes/model_doc/electra.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/gpt_neo.mdx": { "file": "pages/model_doc/gpt_neo.mdx-852dcbc0.js", "src": "src/routes/model_doc/gpt_neo.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/herbert.mdx": { "file": "pages/model_doc/herbert.mdx-01a55eeb.js", "src": "src/routes/model_doc/herbert.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/m2m_100.mdx": { "file": "pages/model_doc/m2m_100.mdx-9de67568.js", "src": "src/routes/model_doc/m2m_100.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/pegasus.mdx": { "file": "pages/model_doc/pegasus.mdx-a33c23d3.js", "src": "src/routes/model_doc/pegasus.mdx", "isEntry": true, 
"isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/phobert.mdx": { "file": "pages/model_doc/phobert.mdx-4a24b305.js", "src": "src/routes/model_doc/phobert.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/qdqbert.mdx": { "file": "pages/model_doc/qdqbert.mdx-19d93d0d.js", "src": "src/routes/model_doc/qdqbert.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/rembert.mdx": { "file": "pages/model_doc/rembert.mdx-8524b264.js", "src": "src/routes/model_doc/rembert.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/roberta.mdx": { "file": "pages/model_doc/roberta.mdx-002140a3.js", "src": "src/routes/model_doc/roberta.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/vit_mae.mdx": { "file": "pages/model_doc/vit_mae.mdx-eac363aa.js", "src": "src/routes/model_doc/vit_mae.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/albert.mdx": { "file": "pages/model_doc/albert.mdx-077766e3.js", "src": "src/routes/model_doc/albert.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/canine.mdx": { "file": "pages/model_doc/canine.mdx-9959cf94.js", "src": "src/routes/model_doc/canine.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/funnel.mdx": { "file": "pages/model_doc/funnel.mdx-0bc15d28.js", "src": "src/routes/model_doc/funnel.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/hubert.mdx": { "file": "pages/model_doc/hubert.mdx-3dda35f6.js", "src": "src/routes/model_doc/hubert.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/lxmert.mdx": { "file": "pages/model_doc/lxmert.mdx-e3257043.js", "src": "src/routes/model_doc/lxmert.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", 
"_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/marian.mdx": { "file": "pages/model_doc/marian.mdx-352a73d2.js", "src": "src/routes/model_doc/marian.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/plbart.mdx": { "file": "pages/model_doc/plbart.mdx-627ee826.js", "src": "src/routes/model_doc/plbart.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/t5v1.1.mdx": { "file": "pages/model_doc/t5v1.1.mdx-bd30f4e4.js", "src": "src/routes/model_doc/t5v1.1.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_IconCopyLink-4b81c553.js", "_CodeBlock-6a3d1b46.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/ibert.mdx": { "file": "pages/model_doc/ibert.mdx-6980b055.js", "src": "src/routes/model_doc/ibert.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/mbart.mdx": { "file": "pages/model_doc/mbart.mdx-4e986a75.js", "src": "src/routes/model_doc/mbart.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/mluke.mdx": { "file": "pages/model_doc/mluke.mdx-0a6a8f21.js", "src": "src/routes/model_doc/mluke.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/mpnet.mdx": { "file": "pages/model_doc/mpnet.mdx-a2ac5620.js", "src": "src/routes/model_doc/mpnet.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/realm.mdx": { "file": "pages/model_doc/realm.mdx-d3389e24.js", "src": "src/routes/model_doc/realm.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/sew-d.mdx": { "file": "pages/model_doc/sew-d.mdx-7aaffe1b.js", "src": "src/routes/model_doc/sew-d.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/tapas.mdx": { "file": "pages/model_doc/tapas.mdx-8e33d727.js", "src": "src/routes/model_doc/tapas.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_CodeBlockFw-27a176a0.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/trocr.mdx": { "file": "pages/model_doc/trocr.mdx-6100bb6b.js", "src": "src/routes/model_doc/trocr.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", 
"_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/wavlm.mdx": { "file": "pages/model_doc/wavlm.mdx-fc6fcdac.js", "src": "src/routes/model_doc/wavlm.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/xlnet.mdx": { "file": "pages/model_doc/xlnet.mdx-8e96e680.js", "src": "src/routes/model_doc/xlnet.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/xls_r.mdx": { "file": "pages/model_doc/xls_r.mdx-c4ead8f3.js", "src": "src/routes/model_doc/xls_r.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_IconCopyLink-4b81c553.js" ] }, "src/routes/model_doc/auto.mdx": { "file": "pages/model_doc/auto.mdx-47c818f3.js", "src": "src/routes/model_doc/auto.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/bart.mdx": { "file": "pages/model_doc/bart.mdx-0957e0a7.js", "src": "src/routes/model_doc/bart.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/beit.mdx": { "file": "pages/model_doc/beit.mdx-80b0b9ff.js", "src": "src/routes/model_doc/beit.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/bert.mdx": { "file": "pages/model_doc/bert.mdx-384687c8.js", "src": "src/routes/model_doc/bert.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/bort.mdx": { "file": "pages/model_doc/bort.mdx-af290f70.js", "src": "src/routes/model_doc/bort.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_IconCopyLink-4b81c553.js" ] }, "src/routes/model_doc/byt5.mdx": { "file": "pages/model_doc/byt5.mdx-e9ed0683.js", "src": "src/routes/model_doc/byt5.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/clip.mdx": { "file": "pages/model_doc/clip.mdx-b328d557.js", "src": "src/routes/model_doc/clip.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/ctrl.mdx": { "file": "pages/model_doc/ctrl.mdx-9c3e0c29.js", "src": "src/routes/model_doc/ctrl.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", 
"_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/deit.mdx": { "file": "pages/model_doc/deit.mdx-eff820cf.js", "src": "src/routes/model_doc/deit.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/detr.mdx": { "file": "pages/model_doc/detr.mdx-50053350.js", "src": "src/routes/model_doc/detr.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/fnet.mdx": { "file": "pages/model_doc/fnet.mdx-45669f92.js", "src": "src/routes/model_doc/fnet.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/fsmt.mdx": { "file": "pages/model_doc/fsmt.mdx-34675b34.js", "src": "src/routes/model_doc/fsmt.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/gpt2.mdx": { "file": "pages/model_doc/gpt2.mdx-620e835d.js", "src": "src/routes/model_doc/gpt2.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/gptj.mdx": { "file": "pages/model_doc/gptj.mdx-a69ba9a2.js", "src": "src/routes/model_doc/gptj.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/luke.mdx": { "file": "pages/model_doc/luke.mdx-33abb078.js", "src": "src/routes/model_doc/luke.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/swin.mdx": { "file": "pages/model_doc/swin.mdx-e4cc30a1.js", "src": "src/routes/model_doc/swin.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/vilt.mdx": { "file": "pages/model_doc/vilt.mdx-7817df10.js", "src": "src/routes/model_doc/vilt.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/xglm.mdx": { "file": "pages/model_doc/xglm.mdx-b5ed5977.js", "src": "src/routes/model_doc/xglm.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/yoso.mdx": { "file": "pages/model_doc/yoso.mdx-73a9322a.js", "src": "src/routes/model_doc/yoso.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", 
"_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/cpm.mdx": { "file": "pages/model_doc/cpm.mdx-a271ebe3.js", "src": "src/routes/model_doc/cpm.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Docstring-4f315ed9.js", "_IconCopyLink-4b81c553.js" ] }, "src/routes/model_doc/dit.mdx": { "file": "pages/model_doc/dit.mdx-28d290cd.js", "src": "src/routes/model_doc/dit.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_IconCopyLink-4b81c553.js", "_CodeBlock-6a3d1b46.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/dpr.mdx": { "file": "pages/model_doc/dpr.mdx-c851abb6.js", "src": "src/routes/model_doc/dpr.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/led.mdx": { "file": "pages/model_doc/led.mdx-d4a9a769.js", "src": "src/routes/model_doc/led.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/mt5.mdx": { "file": "pages/model_doc/mt5.mdx-4373368c.js", "src": "src/routes/model_doc/mt5.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/rag.mdx": { "file": "pages/model_doc/rag.mdx-2450ac40.js", "src": "src/routes/model_doc/rag.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/sew.mdx": { "file": "pages/model_doc/sew.mdx-a39f8b59.js", "src": "src/routes/model_doc/sew.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/vit.mdx": { "file": "pages/model_doc/vit.mdx-555822b3.js", "src": "src/routes/model_doc/vit.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/xlm.mdx": { "file": "pages/model_doc/xlm.mdx-b523781d.js", "src": "src/routes/model_doc/xlm.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/model_doc/t5.mdx": { "file": "pages/model_doc/t5.mdx-84f497d3.js", "src": "src/routes/model_doc/t5.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/notebooks.mdx": { "file": "pages/notebooks.mdx-229c9f2b.js", "src": "src/routes/notebooks.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_IconCopyLink-4b81c553.js" ] }, "src/routes/pr_checks.mdx": { "file": 
"pages/pr_checks.mdx-69a387c3.js", "src": "src/routes/pr_checks.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_IconCopyLink-4b81c553.js", "_CodeBlock-6a3d1b46.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/quicktour.mdx": { "file": "pages/quicktour.mdx-8a4295b9.js", "src": "src/routes/quicktour.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Youtube-27813aed.js", "_IconCopyLink-4b81c553.js", "_CodeBlock-6a3d1b46.js", "_CodeBlockFw-27a176a0.js", "_DocNotebookDropdown-ecff2a90.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/sagemaker.mdx": { "file": "pages/sagemaker.mdx-d221e67e.js", "src": "src/routes/sagemaker.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_IconCopyLink-4b81c553.js" ] }, "src/routes/glossary.mdx": { "file": "pages/glossary.mdx-04c6e6d1.js", "src": "src/routes/glossary.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Youtube-27813aed.js", "_IconCopyLink-4b81c553.js", "_CodeBlock-6a3d1b46.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/internal/tokenization_utils.mdx": { "file": "pages/internal/tokenization_utils.mdx-eeb10b9c.js", "src": "src/routes/internal/tokenization_utils.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/internal/generation_utils.mdx": { "file": "pages/internal/generation_utils.mdx-8b17d2b2.js", "src": "src/routes/internal/generation_utils.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/internal/pipelines_utils.mdx": { "file": "pages/internal/pipelines_utils.mdx-885320a3.js", "src": "src/routes/internal/pipelines_utils.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Docstring-4f315ed9.js", "_IconCopyLink-4b81c553.js" ] }, "src/routes/internal/modeling_utils.mdx": { "file": "pages/internal/modeling_utils.mdx-1b04e493.js", "src": "src/routes/internal/modeling_utils.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/internal/trainer_utils.mdx": { "file": "pages/internal/trainer_utils.mdx-b7a1b196.js", "src": "src/routes/internal/trainer_utils.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Docstring-4f315ed9.js", "_CodeBlock-6a3d1b46.js", "_IconCopyLink-4b81c553.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/internal/file_utils.mdx": { "file": "pages/internal/file_utils.mdx-23310578.js", "src": "src/routes/internal/file_utils.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Docstring-4f315ed9.js", "_IconCopyLink-4b81c553.js" ] }, "src/routes/training.mdx": { "file": "pages/training.mdx-378a8a6f.js", "src": "src/routes/training.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Youtube-27813aed.js", "_IconCopyLink-4b81c553.js", "_CodeBlock-6a3d1b46.js", "_DocNotebookDropdown-ecff2a90.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/testing.mdx": { "file": "pages/testing.mdx-614baedb.js", "src": "src/routes/testing.mdx", 
"isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_IconCopyLink-4b81c553.js", "_CodeBlock-6a3d1b46.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/tasks/sequence_classification.mdx": { "file": "pages/tasks/sequence_classification.mdx-d07399b9.js", "src": "src/routes/tasks/sequence_classification.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Youtube-27813aed.js", "_IconCopyLink-4b81c553.js", "_CodeBlock-6a3d1b46.js", "_CodeBlockFw-27a176a0.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/tasks/audio_classification.mdx": { "file": "pages/tasks/audio_classification.mdx-cab2e6ac.js", "src": "src/routes/tasks/audio_classification.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Youtube-27813aed.js", "_IconCopyLink-4b81c553.js", "_CodeBlock-6a3d1b46.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/tasks/image_classification.mdx": { "file": "pages/tasks/image_classification.mdx-e10d3b71.js", "src": "src/routes/tasks/image_classification.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Youtube-27813aed.js", "_IconCopyLink-4b81c553.js", "_CodeBlock-6a3d1b46.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/tasks/token_classification.mdx": { "file": "pages/tasks/token_classification.mdx-5446e455.js", "src": "src/routes/tasks/token_classification.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Youtube-27813aed.js", "_IconCopyLink-4b81c553.js", "_CodeBlock-6a3d1b46.js", "_CodeBlockFw-27a176a0.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/tasks/question_answering.mdx": { "file": "pages/tasks/question_answering.mdx-8babb2cc.js", "src": "src/routes/tasks/question_answering.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Youtube-27813aed.js", "_IconCopyLink-4b81c553.js", "_CodeBlock-6a3d1b46.js", "_CodeBlockFw-27a176a0.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/tasks/language_modeling.mdx": { "file": "pages/tasks/language_modeling.mdx-e8a233ab.js", "src": "src/routes/tasks/language_modeling.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Youtube-27813aed.js", "_IconCopyLink-4b81c553.js", "_CodeBlock-6a3d1b46.js", "_CodeBlockFw-27a176a0.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/tasks/multiple_choice.mdx": { "file": "pages/tasks/multiple_choice.mdx-7c0ab1b6.js", "src": "src/routes/tasks/multiple_choice.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_IconCopyLink-4b81c553.js", "_CodeBlock-6a3d1b46.js", "_CodeBlockFw-27a176a0.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/tasks/summarization.mdx": { "file": "pages/tasks/summarization.mdx-34e69920.js", "src": "src/routes/tasks/summarization.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Youtube-27813aed.js", "_IconCopyLink-4b81c553.js", "_CodeBlock-6a3d1b46.js", "_CodeBlockFw-27a176a0.js", "_CopyButton-dacfbfaf.js" ] }, "src/routes/tasks/translation.mdx": { "file": "pages/tasks/translation.mdx-9674fec0.js", "src": "src/routes/tasks/translation.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Youtube-27813aed.js", "_IconCopyLink-4b81c553.js", "_CodeBlock-6a3d1b46.js", "_CodeBlockFw-27a176a0.js", 
"_CopyButton-dacfbfaf.js" ] }, "src/routes/tasks/asr.mdx": { "file": "pages/tasks/asr.mdx-b010f320.js", "src": "src/routes/tasks/asr.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-4833417e.js", "_Tip-fffd6df1.js", "_Youtube-27813aed.js", "_IconCopyLink-4b81c553.js", "_CodeBlock-6a3d1b46.js", "_CopyButton-dacfbfaf.js" ] }, "_vendor-4833417e.js": { "file": "chunks/vendor-4833417e.js" }, "_paths-4b3c6e7e.js": { "file": "chunks/paths-4b3c6e7e.js" }, "_IconCopyLink-4b81c553.js": { "file": "chunks/IconCopyLink-4b81c553.js", "imports": [ "_vendor-4833417e.js" ] }, "_Tip-fffd6df1.js": { "file": "chunks/Tip-fffd6df1.js", "imports": [ "_vendor-4833417e.js" ] }, "_CodeBlock-6a3d1b46.js": { "file": "chunks/CodeBlock-6a3d1b46.js", "imports": [ "_vendor-4833417e.js", "_CopyButton-dacfbfaf.js" ] }, "_CopyButton-dacfbfaf.js": { "file": "chunks/CopyButton-dacfbfaf.js", "imports": [ "_vendor-4833417e.js" ] }, "_CodeBlockFw-27a176a0.js": { "file": "chunks/CodeBlockFw-27a176a0.js", "imports": [ "_vendor-4833417e.js", "_CopyButton-dacfbfaf.js" ] }, "_Youtube-27813aed.js": { "file": "chunks/Youtube-27813aed.js", "imports": [ "_vendor-4833417e.js" ] }, "_DocNotebookDropdown-ecff2a90.js": { "file": "chunks/DocNotebookDropdown-ecff2a90.js", "imports": [ "_vendor-4833417e.js" ] }, "_Docstring-4f315ed9.js": { "file": "chunks/Docstring-4f315ed9.js", "imports": [ "_vendor-4833417e.js", "_IconCopyLink-4b81c553.js" ] } }
230
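The JSON block above is the SvelteKit/Vite build manifest for this documentation build: every source route (an .mdx page such as src/routes/model_doc/dpr.mdx) maps to its compiled entry under pages/ together with the shared chunks it imports (vendor, Tip, Docstring, CodeBlock, IconCopyLink, CopyButton, and so on). Below is a minimal sketch of how such a manifest could be walked to list every file one page needs; the manifest.json path and the collectFiles helper are illustrative assumptions, not part of the build output.

// Sketch: resolve all chunk files needed by one route from a Vite-style manifest.
// Assumes the manifest shown above has been saved as manifest.json next to this script.
import { readFileSync } from "node:fs";

const manifest = JSON.parse(readFileSync("manifest.json", "utf8"));

function collectFiles(key, seen = new Set()) {
  const entry = manifest[key];
  if (!entry || seen.has(key)) return seen;
  seen.add(key);
  // Each entry lists its own output file plus the manifest keys of the chunks it imports.
  for (const dep of entry.imports ?? []) collectFiles(dep, seen);
  return seen;
}

const files = [...collectFiles("src/routes/model_doc/dpr.mdx")].map((k) => manifest[k].file);
console.log(files);
// e.g. pages/model_doc/dpr.mdx-c851abb6.js, chunks/vendor-4833417e.js, chunks/Tip-fffd6df1.js, ...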
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/chunks/Docstring-4f315ed9.js
var rt=Object.defineProperty,st=Object.defineProperties;var nt=Object.getOwnPropertyDescriptors;var be=Object.getOwnPropertySymbols;var it=Object.prototype.hasOwnProperty,ot=Object.prototype.propertyIsEnumerable;var ke=(a,e,t)=>e in a?rt(a,e,{enumerable:!0,configurable:!0,writable:!0,value:t}):a[e]=t,we=(a,e)=>{for(var t in e||(e={}))it.call(e,t)&&ke(a,t,e[t]);if(be)for(var t of be(e))ot.call(e,t)&&ke(a,t,e[t]);return a},Ee=(a,e)=>st(a,nt(e));import{S as Re,i as Xe,s as Ye,e as v,c as b,a as k,d as h,b as _,f as se,g as S,l as Q,L as ye,t as z,h as B,F as d,a0 as je,Q as he,j as ne,Y as qe,R as We,k as P,m as C,a1 as Ze,a2 as Fe,q as W,w as Oe,x as Ue,y as Qe,o as U,B as Je,n as Ke,p as $e,G as de,W as et,X as tt,v as ft,E as ct,Z as ut}from"./vendor-4833417e.js";import{I as lt}from"./IconCopyLink-4b81c553.js";function Le(a){let e,t;return{c(){e=v("div"),this.h()},l(l){e=b(l,"DIV",{class:!0,style:!0,id:!0});var s=k(e);s.forEach(h),this.h()},h(){_(e,"class",t="absolute text-base py-1.5 p-2 break-words bg-white border-2 border-black dark:border-gray-500 rounded shadow-alternate-xl z-50 "+(a[0].length>1e3?"max-w-lg":"max-w-xs")),se(e,"top",a[2]+15+"px"),se(e,"left",a[1]+15+"px"),_(e,"id",a[3])},m(l,s){S(l,e,s),e.innerHTML=a[0]},p(l,s){s&1&&(e.innerHTML=l[0]),s&1&&t!==(t="absolute text-base py-1.5 p-2 break-words bg-white border-2 border-black dark:border-gray-500 rounded shadow-alternate-xl z-50 "+(l[0].length>1e3?"max-w-lg":"max-w-xs"))&&_(e,"class",t),s&4&&se(e,"top",l[2]+15+"px"),s&2&&se(e,"left",l[1]+15+"px"),s&8&&_(e,"id",l[3])},d(l){l&&h(e)}}}function ht(a){let e,t=a[0]&&Le(a);return{c(){t&&t.c(),e=Q()},l(l){t&&t.l(l),e=Q()},m(l,s){t&&t.m(l,s),S(l,e,s)},p(l,[s]){l[0]?t?t.p(l,s):(t=Le(l),t.c(),t.m(e.parentNode,e)):t&&(t.d(1),t=null)},i:ye,o:ye,d(l){t&&t.d(l),l&&h(e)}}}function dt(a,e,t){let{txt:l}=e,{x:s}=e,{y:c}=e,{id:n}=e;return a.$$set=o=>{"txt"in o&&t(0,l=o.txt),"x"in o&&t(1,s=o.x),"y"in o&&t(2,c=o.y),"id"in o&&t(3,n=o.id)},[l,s,c,n]}class pt extends Re{constructor(e){super();Xe(this,e,dt,ht,Ye,{txt:0,x:1,y:2,id:3})}}const Pe="docstring-tooltip";function at(a,e){let t;function l(o){n(),t=new pt({props:{txt:e,x:o.pageX,y:o.pageY,id:Pe},target:document.body})}function s(o){t.$set({x:o.pageX,y:o.pageY})}function c(){t.$destroy()}function n(){var u;const o=document.getElementById(Pe);o&&((u=o.parentNode)==null||u.removeChild(o))}return a.addEventListener("mouseover",l),a.addEventListener("mouseleave",c),a.addEventListener("mousemove",s),{destroy(){a.removeEventListener("mouseover",l),a.removeEventListener("mouseleave",c),a.removeEventListener("mousemove",s)}}}function Ce(a,e,t){const l=a.slice();return l[16]=e[t].title,l[7]=e[t].parametersDescription,l}function Ae(a,e,t){const l=a.slice();return l[5]=e[t].anchor,l[19]=e[t].description,l}function Ne(a,e,t){const l=a.slice();return l[6]=e[t].name,l[22]=e[t].val,l}function Se(a){let e,t,l=a[6]+"",s,c,n=a[22]+"",o,u,p,r,i;function f(){return a[12](a[6])}return{c(){e=v("span"),t=v("span"),s=z(l),c=v("span"),o=z(n),this.h()},l(m){e=b(m,"SPAN",{class:!0});var w=k(e);t=b(w,"SPAN",{class:!0});var H=k(t);s=B(H,l),c=b(H,"SPAN",{class:!0});var G=k(c);o=B(G,n),G.forEach(h),H.forEach(h),w.forEach(h),this.h()},h(){_(c,"class","opacity-60"),_(t,"class","rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black"),_(e,"class",u="comma 
"+(a[10][a[6]]?"cursor-pointer":"cursor-default"))},m(m,w){S(m,e,w),d(e,t),d(t,s),d(t,c),d(c,o),r||(i=[je(p=at.call(null,e,a[10][a[6]]||"")),he(e,"click",Ze(Fe(f)))],r=!0)},p(m,w){a=m,w&1&&l!==(l=a[6]+"")&&ne(s,l),w&1&&n!==(n=a[22]+"")&&ne(o,n),w&1&&u!==(u="comma "+(a[10][a[6]]?"cursor-pointer":"cursor-default"))&&_(e,"class",u),p&&qe(p.update)&&w&1&&p.update.call(null,a[10][a[6]]||"")},d(m){m&&h(e),r=!1,We(i)}}}function Me(a){let e,t,l,s,c=Ge(a[3])+"",n,o,u,p;return{c(){e=v("span"),t=z("\u2192"),l=P(),s=v("span"),this.h()},l(r){e=b(r,"SPAN",{class:!0});var i=k(e);t=B(i,"\u2192"),i.forEach(h),l=C(r),s=b(r,"SPAN",{class:!0});var f=k(s);f.forEach(h),this.h()},h(){_(e,"class","font-bold"),_(s,"class",n="rounded hover:bg-gray-400 "+(a[2]?"cursor-pointer":"cursor-default"))},m(r,i){S(r,e,i),d(e,t),S(r,l,i),S(r,s,i),s.innerHTML=c,u||(p=[je(o=at.call(null,s,a[2]||"")),he(s,"click",Ze(Fe(a[13])))],u=!0)},p(r,i){i&8&&c!==(c=Ge(r[3])+"")&&(s.innerHTML=c),i&4&&n!==(n="rounded hover:bg-gray-400 "+(r[2]?"cursor-pointer":"cursor-default"))&&_(s,"class",n),o&&qe(o.update)&&i&4&&o.update.call(null,r[2]||"")},d(r){r&&h(e),r&&h(l),r&&h(s),u=!1,We(p)}}}function He(a){var p;let e,t,l,s=((p=a[7])==null?void 0:p.length)+"",c,n,o,u;return{c(){e=v("div"),t=v("button"),l=z("Expand "),c=z(s),n=z(" parameters"),this.h()},l(r){e=b(r,"DIV",{class:!0});var i=k(e);t=b(i,"BUTTON",{class:!0});var f=k(t);l=B(f,"Expand "),c=B(f,s),n=B(f," parameters"),f.forEach(h),i.forEach(h),this.h()},h(){_(t,"class","absolute leading-tight px-3 py-1.5 dark:bg-gray-900 bg-black text-gray-200 hover:text-white rounded-xl bottom-12 ring-offset-2 hover:ring-black hover:ring-2"),_(e,"class","absolute inset-0 bg-gradient-to-t from-white to-white/0 dark:from-gray-950 dark:to-gray-950/0 z-10 flex justify-center")},m(r,i){S(r,e,i),d(e,t),d(t,l),d(t,c),d(t,n),o||(u=he(t,"click",a[14]),o=!0)},p(r,i){var f;i&128&&s!==(s=((f=r[7])==null?void 0:f.length)+"")&&ne(c,s)},d(r){r&&h(e),o=!1,u()}}}function Te(a){let e,t,l,s,c,n,o=a[7],u=[];for(let r=0;r<o.length;r+=1)u[r]=xe(Ae(a,o,r));const p=r=>U(u[r],1,1,()=>{u[r]=null});return{c(){e=v("p"),t=z("Parameters "),l=v("span"),s=P(),c=v("ul");for(let r=0;r<u.length;r+=1)u[r].c();this.h()},l(r){e=b(r,"P",{class:!0});var i=k(e);t=B(i,"Parameters "),l=b(i,"SPAN",{class:!0}),k(l).forEach(h),i.forEach(h),s=C(r),c=b(r,"UL",{class:!0});var f=k(c);for(let m=0;m<u.length;m+=1)u[m].l(f);f.forEach(h),this.h()},h(){_(l,"class","flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"),_(e,"class","flex items-center font-semibold !mt-2 !mb-2 text-gray-800"),_(c,"class","px-2")},m(r,i){S(r,e,i),d(e,t),d(e,l),S(r,s,i),S(r,c,i);for(let f=0;f<u.length;f+=1)u[f].m(c,null);n=!0},p(r,i){if(i&128){o=r[7];let f;for(f=0;f<o.length;f+=1){const m=Ae(r,o,f);u[f]?(u[f].p(m,i),W(u[f],1)):(u[f]=xe(m),u[f].c(),W(u[f],1),u[f].m(c,null))}for(Ke(),f=o.length;f<u.length;f+=1)p(f);$e()}},i(r){if(!n){for(let i=0;i<o.length;i+=1)W(u[i]);n=!0}},o(r){u=u.filter(Boolean);for(let i=0;i<u.length;i+=1)U(u[i]);n=!1},d(r){r&&h(e),r&&h(s),r&&h(c),de(u,r)}}}function xe(a){let e,t,l,s,c,n,o,u,p,r=a[19]+"",i,f;return c=new lt({props:{classNames:"text-smd"}}),{c(){e=v("li"),t=v("span"),l=v("a"),s=v("span"),Oe(c.$$.fragment),u=P(),p=v("span"),i=P(),this.h()},l(m){e=b(m,"LI",{class:!0});var w=k(e);t=b(w,"SPAN",{class:!0});var H=k(t);l=b(H,"A",{id:!0,class:!0,href:!0});var G=k(l);s=b(G,"SPAN",{});var R=k(s);Ue(c.$$.fragment,R),R.forEach(h),G.forEach(h),u=C(H),p=b(H,"SPAN",{});var 
Z=k(p);Z.forEach(h),H.forEach(h),i=C(w),w.forEach(h),this.h()},h(){_(l,"id",n=a[5]),_(l,"class","header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),_(l,"href",o=`#${a[5]}`),_(t,"class","group flex space-x-1.5 items-start"),_(e,"class","text-base !pl-4 my-3")},m(m,w){S(m,e,w),d(e,t),d(t,l),d(l,s),Qe(c,s,null),d(t,u),d(t,p),p.innerHTML=r,d(e,i),f=!0},p(m,w){(!f||w&128&&n!==(n=m[5]))&&_(l,"id",n),(!f||w&128&&o!==(o=`#${m[5]}`))&&_(l,"href",o),(!f||w&128)&&r!==(r=m[19]+"")&&(p.innerHTML=r)},i(m){f||(W(c.$$.fragment,m),f=!0)},o(m){U(c.$$.fragment,m),f=!1},d(m){m&&h(e),Je(c)}}}function Ve(a){let e,t=a[1],l=[];for(let s=0;s<t.length;s+=1)l[s]=De(Ce(a,t,s));return{c(){for(let s=0;s<l.length;s+=1)l[s].c();e=Q()},l(s){for(let c=0;c<l.length;c+=1)l[c].l(s);e=Q()},m(s,c){for(let n=0;n<l.length;n+=1)l[n].m(s,c);S(s,e,c)},p(s,c){if(c&2){t=s[1];let n;for(n=0;n<t.length;n+=1){const o=Ce(s,t,n);l[n]?l[n].p(o,c):(l[n]=De(o),l[n].c(),l[n].m(e.parentNode,e))}for(;n<l.length;n+=1)l[n].d(1);l.length=t.length}},d(s){de(l,s),s&&h(e)}}}function De(a){let e,t=a[16]+"",l,s,c,n,o,u=a[7]+"";return{c(){e=v("p"),l=z(t),s=P(),c=v("span"),n=P(),o=v("p"),this.h()},l(p){e=b(p,"P",{class:!0});var r=k(e);l=B(r,t),s=C(r),c=b(r,"SPAN",{class:!0}),k(c).forEach(h),r.forEach(h),n=C(p),o=b(p,"P",{});var i=k(o);i.forEach(h),this.h()},h(){_(c,"class","flex-auto border-t-2 ml-3"),_(e,"class","flex items-center font-semibold")},m(p,r){S(p,e,r),d(e,l),d(e,s),d(e,c),S(p,n,r),S(p,o,r),o.innerHTML=u},p(p,r){r&2&&t!==(t=p[16]+"")&&ne(l,t),r&2&&u!==(u=p[7]+"")&&(o.innerHTML=u)},d(p){p&&h(e),p&&h(n),p&&h(o)}}}function Ie(a){let e,t,l,s,c,n,o,u,p,r=(a[2]||"")+"",i=!!a[3]&&ze(a);return{c(){e=v("div"),t=v("p"),l=z("Returns"),s=P(),i&&i.c(),c=P(),n=v("span"),u=P(),p=v("p"),this.h()},l(f){e=b(f,"DIV",{class:!0,id:!0});var m=k(e);t=b(m,"P",{class:!0});var w=k(t);l=B(w,"Returns"),w.forEach(h),s=C(m),i&&i.l(m),c=C(m),n=b(m,"SPAN",{class:!0}),k(n).forEach(h),m.forEach(h),u=C(f),p=b(f,"P",{class:!0});var H=k(p);H.forEach(h),this.h()},h(){_(t,"class","text-base"),_(n,"class","flex-auto border-t-2 border-gray-100 dark:border-gray-700"),_(e,"class","flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800"),_(e,"id",o=`${a[5]}.returns`),_(p,"class","text-base")},m(f,m){S(f,e,m),d(e,t),d(t,l),d(e,s),i&&i.m(e,null),d(e,c),d(e,n),S(f,u,m),S(f,p,m),p.innerHTML=r},p(f,m){f[3]?i?i.p(f,m):(i=ze(f),i.c(),i.m(e,c)):i&&(i.d(1),i=null),m&32&&o!==(o=`${f[5]}.returns`)&&_(e,"id",o),m&4&&r!==(r=(f[2]||"")+"")&&(p.innerHTML=r)},d(f){f&&h(e),i&&i.d(),f&&h(u),f&&h(p)}}}function ze(a){let e,t;return{c(){e=new et,t=Q(),this.h()},l(l){e=tt(l),t=Q(),this.h()},h(){e.a=t},m(l,s){e.m(a[3],l,s),S(l,t,s)},p(l,s){s&8&&e.p(l[3])},d(l){l&&h(t),l&&e.d()}}}function mt(a){let e,t,l,s=Be(a[6])+"",c,n,o,u,p,r,i,f,m,w,H,G,R,Z,E,A,Y,J,ie,ee,K,oe,fe,ce,M,te,le,ae,re,X;o=new lt({});let F=a[0],T=[];for(let g=0;g<F.length;g+=1)T[g]=Se(Ne(a,F,g));let x=a[3]&&Me(a),V=a[9]&&He(a),N=!!a[7]&&Te(a),D=a[1]&&Ve(a),I=!!a[3]&&Ie(a);return{c(){e=v("div"),t=v("span"),l=new et,c=P(),n=v("a"),Oe(o.$$.fragment),p=P(),r=v("a"),i=v("span"),f=z("<"),m=P(),w=v("span"),H=z("source"),G=P(),R=v("span"),Z=z(">"),E=P(),A=v("p"),Y=v("span"),J=z("("),ie=P();for(let g=0;g<T.length;g+=1)T[g].c();ee=P(),K=v("span"),oe=z(")"),fe=P(),x&&x.c(),ce=P(),M=v("div"),V&&V.c(),te=P(),N&&N.c(),le=P(),D&&D.c(),ae=P(),I&&I.c(),this.h()},l(g){e=b(g,"DIV",{});var y=k(e);t=b(y,"SPAN",{class:!0,id:!0});var 
L=k(t);l=tt(L),c=C(L),n=b(L,"A",{id:!0,class:!0,href:!0});var $=k(n);Ue(o.$$.fragment,$),$.forEach(h),p=C(L),r=b(L,"A",{class:!0,href:!0,target:!0});var O=k(r);i=b(O,"SPAN",{});var pe=k(i);f=B(pe,"<"),pe.forEach(h),m=C(O),w=b(O,"SPAN",{class:!0});var me=k(w);H=B(me,"source"),me.forEach(h),G=C(O),R=b(O,"SPAN",{});var _e=k(R);Z=B(_e,">"),_e.forEach(h),O.forEach(h),L.forEach(h),E=C(y),A=b(y,"P",{class:!0});var j=k(A);Y=b(j,"SPAN",{});var ge=k(Y);J=B(ge,"("),ge.forEach(h),ie=C(j);for(let ue=0;ue<T.length;ue+=1)T[ue].l(j);ee=C(j),K=b(j,"SPAN",{});var ve=k(K);oe=B(ve,")"),ve.forEach(h),fe=C(j),x&&x.l(j),j.forEach(h),ce=C(y),M=b(y,"DIV",{class:!0});var q=k(M);V&&V.l(q),te=C(q),N&&N.l(q),le=C(q),D&&D.l(q),ae=C(q),I&&I.l(q),q.forEach(h),y.forEach(h),this.h()},h(){l.a=c,_(n,"id",a[5]),_(n,"class","header-link invisible with-hover:group-hover:visible pr-2"),_(n,"href",u="#"+a[5]),_(w,"class","hidden md:block mx-0.5 hover:!underline"),_(r,"class","!ml-auto !text-gray-400 !no-underline text-sm flex items-center"),_(r,"href",a[4]),_(r,"target","_blank"),_(t,"class","group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5"),_(t,"id",a[5]),_(A,"class","font-mono text-xs md:text-sm !leading-relaxed !my-6"),_(M,"class",re="!mb-10 relative docstring-details "+(a[9]?"max-h-96 overflow-hidden":""))},m(g,y){S(g,e,y),d(e,t),l.m(s,t),d(t,c),d(t,n),Qe(o,n,null),d(t,p),d(t,r),d(r,i),d(i,f),d(r,m),d(r,w),d(w,H),d(r,G),d(r,R),d(R,Z),d(e,E),d(e,A),d(A,Y),d(Y,J),d(A,ie);for(let L=0;L<T.length;L+=1)T[L].m(A,null);d(A,ee),d(A,K),d(K,oe),d(A,fe),x&&x.m(A,null),d(e,ce),d(e,M),V&&V.m(M,null),d(M,te),N&&N.m(M,null),d(M,le),D&&D.m(M,null),d(M,ae),I&&I.m(M,null),a[15](M),X=!0},p(g,[y]){if((!X||y&64)&&s!==(s=Be(g[6])+"")&&l.p(s),(!X||y&32)&&_(n,"id",g[5]),(!X||y&32&&u!==(u="#"+g[5]))&&_(n,"href",u),(!X||y&16)&&_(r,"href",g[4]),(!X||y&32)&&_(t,"id",g[5]),y&3105){F=g[0];let L;for(L=0;L<F.length;L+=1){const $=Ne(g,F,L);T[L]?T[L].p($,y):(T[L]=Se($),T[L].c(),T[L].m(A,ee))}for(;L<T.length;L+=1)T[L].d(1);T.length=F.length}g[3]?x?x.p(g,y):(x=Me(g),x.c(),x.m(A,null)):x&&(x.d(1),x=null),g[9]?V?V.p(g,y):(V=He(g),V.c(),V.m(M,te)):V&&(V.d(1),V=null),g[7]?N?(N.p(g,y),y&128&&W(N,1)):(N=Te(g),N.c(),W(N,1),N.m(M,le)):N&&(Ke(),U(N,1,1,()=>{N=null}),$e()),g[1]?D?D.p(g,y):(D=Ve(g),D.c(),D.m(M,ae)):D&&(D.d(1),D=null),g[3]?I?I.p(g,y):(I=Ie(g),I.c(),I.m(M,null)):I&&(I.d(1),I=null),(!X||y&512&&re!==(re="!mb-10 relative docstring-details "+(g[9]?"max-h-96 overflow-hidden":"")))&&_(M,"class",re)},i(g){X||(W(o.$$.fragment,g),W(N),X=!0)},o(g){U(o.$$.fragment,g),U(N),X=!1},d(g){g&&h(e),Je(o),de(T,g),x&&x.d(),V&&V.d(),N&&N.d(),D&&D.d(),I&&I.d(),a[15](null)}}}function Be(a){if(a.startsWith("class ")){const e=a.substring(6).split("."),t=e.pop();return`<h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 
.27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">${e.join(".")}.</span><span class="font-semibold">${t}</span></span></h3>`}else return`<h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>${a}</span></h4>`}function Ge(a){const e=/\s*<p>(((?!<p>).)*)<\/p>\s*/gms;return a.replace(e,(t,l)=>`<span>${l}</span>`)}function _t(a,e,t){let{anchor:l}=e,{name:s}=e,{parameters:c=[]}=e,{parametersDescription:n}=e,{parameterGroups:o}=e,{returnDescription:u}=e,{returnType:p}=e,{source:r}=e,i,f=!1;const m=(n==null?void 0:n.reduce((E,A)=>{const{name:Y,description:J}=A;return Ee(we({},E),{[Y]:J})},{}))||{};ft(()=>{const{hash:E}=window.location,A=!!E&&(n==null?void 0:n.some(({anchor:Y})=>Y===E.substring(1)));t(9,f=!A&&i.clientHeight>500)});async function w(E,A){A&&(t(9,f=!1),await ct(),window.location.hash=E)}const H=E=>w(`${l}.${E}`,!!m[E]),G=()=>w(`${l}.returns`,!!u),R=()=>t(9,f=!1);function Z(E){ut[E?"unshift":"push"](()=>{i=E,t(8,i)})}return a.$$set=E=>{"anchor"in E&&t(5,l=E.anchor),"name"in E&&t(6,s=E.name),"parameters"in E&&t(0,c=E.parameters),"parametersDescription"in E&&t(7,n=E.parametersDescription),"parameterGroups"in E&&t(1,o=E.parameterGroups),"returnDescription"in E&&t(2,u=E.returnDescription),"returnType"in E&&t(3,p=E.returnType),"source"in E&&t(4,r=E.source)},[c,o,u,p,r,l,s,n,i,f,m,w,H,G,R,Z]}class kt extends Re{constructor(e){super();Xe(this,e,_t,mt,Ye,{anchor:5,name:6,parameters:0,parametersDescription:7,parameterGroups:1,returnDescription:2,returnType:3,source:4})}}export{kt as D};
231
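The chunk above is the compiled Svelte Docstring component that renders every API entry on these pages: the signature header with its source link, the clickable parameter list with tooltips, the expandable parameter descriptions, and the return section. A hedged sketch of mounting it with Svelte's client-side component API follows; only the prop names (anchor, name, parameters, parametersDescription, parameterGroups, returnDescription, returnType, source) come from the export above, and all values are made-up placeholders.

// Sketch: mounting the compiled Docstring chunk with Svelte's client component API.
// The import path matches the chunk above; every prop value is an illustrative placeholder.
import { D as Docstring } from "./chunks/Docstring-4f315ed9.js";

new Docstring({
  target: document.getElementById("api-docs"),
  props: {
    anchor: "transformers.DPRConfig",
    name: "class transformers.DPRConfig",
    source: "https://github.com/huggingface/transformers/blob/main/src/transformers/models/dpr/configuration_dpr.py",
    parameters: [{ name: "vocab_size", val: " = 30522" }],
    parametersDescription: [
      { name: "vocab_size", anchor: "transformers.DPRConfig.vocab_size", description: "Vocabulary size of the DPR model (placeholder text)." },
    ],
  },
});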
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/chunks/CodeBlock-6a3d1b46.js
import{S as b,i as C,s as B,e as m,w as E,k as M,c as p,a as _,x as k,d as h,m as q,b as v,g as w,F as g,y as D,Q as f,q as H,o as I,B as L,R as N}from"./vendor-4833417e.js";import{C as O}from"./CopyButton-dacfbfaf.js";function R(s){let e,n,a,u,i,r,d,l;return a=new O({props:{classNames:"transition duration-200 ease-in-out "+(s[2]&&"opacity-0"),label:"code excerpt",value:s[0]}}),{c(){e=m("div"),n=m("div"),E(a.$$.fragment),u=M(),i=m("pre"),this.h()},l(t){e=p(t,"DIV",{class:!0});var o=_(e);n=p(o,"DIV",{class:!0});var c=_(n);k(a.$$.fragment,c),c.forEach(h),u=q(o),i=p(o,"PRE",{});var y=_(i);y.forEach(h),o.forEach(h),this.h()},h(){v(n,"class","absolute top-2.5 right-4"),v(e,"class","code-block relative")},m(t,o){w(t,e,o),g(e,n),D(a,n,null),g(e,u),g(e,i),i.innerHTML=s[1],r=!0,d||(l=[f(e,"mouseover",s[3]),f(e,"focus",s[3]),f(e,"mouseout",s[4]),f(e,"blur",s[4])],d=!0)},p(t,[o]){const c={};o&4&&(c.classNames="transition duration-200 ease-in-out "+(t[2]&&"opacity-0")),o&1&&(c.value=t[0]),a.$set(c),(!r||o&2)&&(i.innerHTML=t[1])},i(t){r||(H(a.$$.fragment,t),r=!0)},o(t){I(a.$$.fragment,t),r=!1},d(t){t&&h(e),L(a),d=!1,N(l)}}}function S(s,e,n){let a=!0,{code:u=""}=e,{highlighted:i=""}=e;function r(){n(2,a=!1)}function d(){n(2,a=!0)}return s.$$set=l=>{"code"in l&&n(0,u=l.code),"highlighted"in l&&n(1,i=l.highlighted)},[u,i,a,r,d]}class F extends b{constructor(e){super();C(this,e,S,R,B,{code:0,highlighted:1})}}export{F as C};
232
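The CodeBlock chunk above is the component behind the copyable code samples: it takes the raw code string (handed to the copy button) and a pre-highlighted HTML string (injected into the pre element), and fades the copy button in on hover. A usage sketch with illustrative values only:

// Sketch: mounting the compiled CodeBlock chunk; `code` and `highlighted` are the two
// props exported above, the strings here are placeholders.
import { C as CodeBlock } from "./chunks/CodeBlock-6a3d1b46.js";

new CodeBlock({
  target: document.querySelector(".code-sample"),
  props: {
    code: "from transformers import DPRConfig",
    highlighted: '<span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DPRConfig',
  },
});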
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/chunks/CodeBlockFw-27a176a0.js
import{D as X,S as R,i as D,s as A,O as $,P as k,a as d,d as m,b as f,g as w,F as v,L as z,e as E,k as C,t as J,c as N,m as I,h as ee,Q as V,q as g,l as O,n as F,o as b,B as M,p as L,w as B,y as P,j as te,G as le,U as q,x,V as se,R as re,W as H,X as K}from"./vendor-4833417e.js";import{C as Y}from"./CopyButton-dacfbfaf.js";const S={};function Z(a){return S[a]||(S[a]=X("group1")),S[a]}function ne(a){let e,l,r,s,n,i;return{c(){e=$("svg"),l=$("defs"),r=$("clipPath"),s=$("rect"),n=$("g"),i=$("path"),this.h()},l(u){e=k(u,"svg",{class:!0,xmlns:!0,"xmlns:xlink":!0,"aria-hidden":!0,focusable:!0,role:!0,width:!0,height:!0,preserveAspectRatio:!0,viewBox:!0});var o=d(e);l=k(o,"defs",{});var t=d(l);r=k(t,"clipPath",{id:!0});var c=d(r);s=k(c,"rect",{x:!0,y:!0,width:!0,height:!0,fill:!0}),d(s).forEach(m),c.forEach(m),t.forEach(m),n=k(o,"g",{"clip-path":!0});var h=d(n);i=k(h,"path",{d:!0,fill:!0}),d(i).forEach(m),h.forEach(m),o.forEach(m),this.h()},h(){f(s,"x","3.05"),f(s,"y","0.5"),f(s,"width","25.73"),f(s,"height","31"),f(s,"fill","none"),f(r,"id","a"),f(i,"d","M24.94,9.51a12.81,12.81,0,0,1,0,18.16,12.68,12.68,0,0,1-18,0,12.81,12.81,0,0,1,0-18.16l9-9V5l-.84.83-6,6a9.58,9.58,0,1,0,13.55,0ZM20.44,9a1.68,1.68,0,1,1,1.67-1.67A1.68,1.68,0,0,1,20.44,9Z"),f(i,"fill","#ee4c2c"),f(n,"clip-path","url(#a)"),f(e,"class",a[0]),f(e,"xmlns","http://www.w3.org/2000/svg"),f(e,"xmlns:xlink","http://www.w3.org/1999/xlink"),f(e,"aria-hidden","true"),f(e,"focusable","false"),f(e,"role","img"),f(e,"width","1em"),f(e,"height","1em"),f(e,"preserveAspectRatio","xMidYMid meet"),f(e,"viewBox","0 0 32 32")},m(u,o){w(u,e,o),v(e,l),v(l,r),v(r,s),v(e,n),v(n,i)},p(u,[o]){o&1&&f(e,"class",u[0])},i:z,o:z,d(u){u&&m(e)}}}function ae(a,e,l){let{classNames:r=""}=e;return a.$$set=s=>{"classNames"in s&&l(0,r=s.classNames)},[r]}class ie extends R{constructor(e){super();D(this,e,ae,ne,A,{classNames:0})}}function oe(a){let e,l,r,s;return{c(){e=$("svg"),l=$("path"),r=$("path"),s=$("path"),this.h()},l(n){e=k(n,"svg",{class:!0,xmlns:!0,"xmlns:xlink":!0,"aria-hidden":!0,focusable:!0,role:!0,width:!0,height:!0,preserveAspectRatio:!0,viewBox:!0});var i=d(e);l=k(i,"path",{d:!0,fill:!0}),d(l).forEach(m),r=k(i,"path",{d:!0,fill:!0}),d(r).forEach(m),s=k(i,"path",{d:!0,fill:!0}),d(s).forEach(m),i.forEach(m),this.h()},h(){f(l,"d","M145.726 42.065v42.07l72.861 42.07v-42.07l-72.86-42.07zM0 84.135v42.07l36.43 21.03V105.17L0 84.135zm109.291 21.035l-36.43 21.034v126.2l36.43 21.035v-84.135l36.435 21.035v-42.07l-36.435-21.034V105.17z"),f(l,"fill","#E55B2D"),f(r,"d","M145.726 42.065L36.43 105.17v42.065l72.861-42.065v42.065l36.435-21.03v-84.14zM255.022 63.1l-36.435 21.035v42.07l36.435-21.035V63.1zm-72.865 84.135l-36.43 21.035v42.07l36.43-21.036v-42.07zm-36.43 63.104l-36.436-21.035v84.135l36.435-21.035V210.34z"),f(r,"fill","#ED8E24"),f(s,"d","M145.726 0L0 84.135l36.43 21.035l109.296-63.105l72.861 42.07L255.022 63.1L145.726 0zm0 126.204l-36.435 21.03l36.435 21.036l36.43-21.035l-36.43-21.03z"),f(s,"fill","#F8BF3C"),f(e,"class",a[0]),f(e,"xmlns","http://www.w3.org/2000/svg"),f(e,"xmlns:xlink","http://www.w3.org/1999/xlink"),f(e,"aria-hidden","true"),f(e,"focusable","false"),f(e,"role","img"),f(e,"width","0.94em"),f(e,"height","1em"),f(e,"preserveAspectRatio","xMidYMid meet"),f(e,"viewBox","0 0 256 274")},m(n,i){w(n,e,i),v(e,l),v(e,r),v(e,s)},p(n,[i]){i&1&&f(e,"class",n[0])},i:z,o:z,d(n){n&&m(e)}}}function ue(a,e,l){let{classNames:r=""}=e;return a.$$set=s=>{"classNames"in s&&l(0,r=s.classNames)},[r]}class ce extends 
R{constructor(e){super();D(this,e,ue,oe,A,{classNames:0})}}function T(a,e,l){const r=a.slice();return r[8]=e[l],r[10]=l,r}function G(a){let e,l,r;var s=a[8].icon;function n(i){return{props:{classNames:"mr-1.5"}}}return s&&(e=new s(n())),{c(){e&&B(e.$$.fragment),l=O()},l(i){e&&x(e.$$.fragment,i),l=O()},m(i,u){e&&P(e,i,u),w(i,l,u),r=!0},p(i,u){if(s!==(s=i[8].icon)){if(e){F();const o=e;b(o.$$.fragment,1,0,()=>{M(o,1)}),L()}s?(e=new s(n()),B(e.$$.fragment),g(e.$$.fragment,1),P(e,l.parentNode,l)):e=null}},i(i){r||(e&&g(e.$$.fragment,i),r=!0)},o(i){e&&b(e.$$.fragment,i),r=!1},d(i){i&&m(l),e&&M(e,i)}}}function U(a){let e,l,r,s=a[8].name+"",n,i,u,o,t,c,h,p=a[8].icon&&G(a);function W(){return a[6](a[8])}return{c(){e=E("button"),p&&p.c(),l=C(),r=E("p"),n=J(s),u=C(),this.h()},l(y){e=N(y,"BUTTON",{class:!0});var _=d(e);p&&p.l(_),l=I(_),r=N(_,"P",{class:!0});var j=d(r);n=ee(j,s),j.forEach(m),u=I(_),_.forEach(m),this.h()},h(){f(r,"class",i="!m-0 "+a[8].classNames),f(e,"class",o="flex justify-center py-1.5 px-2.5 focus:outline-none rounded-"+(a[10]?"r":"l")+" "+(a[8].group!==a[1]&&"text-gray-500 filter grayscale"))},m(y,_){w(y,e,_),p&&p.m(e,null),v(e,l),v(e,r),v(r,n),v(e,u),t=!0,c||(h=V(e,"click",W),c=!0)},p(y,_){a=y,a[8].icon?p?(p.p(a,_),_&1&&g(p,1)):(p=G(a),p.c(),g(p,1),p.m(e,l)):p&&(F(),b(p,1,1,()=>{p=null}),L()),(!t||_&1)&&s!==(s=a[8].name+"")&&te(n,s),(!t||_&1&&i!==(i="!m-0 "+a[8].classNames))&&f(r,"class",i),(!t||_&3&&o!==(o="flex justify-center py-1.5 px-2.5 focus:outline-none rounded-"+(a[10]?"r":"l")+" "+(a[8].group!==a[1]&&"text-gray-500 filter grayscale")))&&f(e,"class",o)},i(y){t||(g(p),t=!0)},o(y){b(p),t=!1},d(y){y&&m(e),p&&p.d(),c=!1,h()}}}function fe(a){let e,l,r,s=a[3].filter(a[5]),n=[];for(let u=0;u<s.length;u+=1)n[u]=U(T(a,s,u));const i=u=>b(n[u],1,1,()=>{n[u]=null});return{c(){e=E("div"),l=E("div");for(let u=0;u<n.length;u+=1)n[u].c();this.h()},l(u){e=N(u,"DIV",{});var o=d(e);l=N(o,"DIV",{class:!0});var t=d(l);for(let c=0;c<n.length;c+=1)n[c].l(t);t.forEach(m),o.forEach(m),this.h()},h(){f(l,"class","bg-white leading-none border border-gray-100 rounded-lg inline-flex p-0.5 text-sm mb-4 select-none")},m(u,o){w(u,e,o),v(e,l);for(let t=0;t<n.length;t+=1)n[t].m(l,null);r=!0},p(u,[o]){if(o&27){s=u[3].filter(u[5]);let t;for(t=0;t<s.length;t+=1){const c=T(u,s,t);n[t]?(n[t].p(c,o),g(n[t],1)):(n[t]=U(c),n[t].c(),g(n[t],1),n[t].m(l,null))}for(F(),t=s.length;t<n.length;t+=1)i(t);L()}},i(u){if(!r){for(let o=0;o<s.length;o+=1)g(n[o]);r=!0}},o(u){n=n.filter(Boolean);for(let o=0;o<n.length;o+=1)b(n[o]);r=!1},d(u){u&&m(e),le(n,u)}}}function he(a,e,l){let r,{ids:s}=e;const n=s.join("-"),i=Z(n);q(a,i,h=>l(1,r=h));const u=[{id:"pt",classNames:"",icon:ie,name:"Pytorch",group:"group1"},{id:"tf",classNames:"",icon:ce,name:"TensorFlow",group:"group2"},{id:"stringapi",classNames:"text-blue-600",name:"String API",group:"group1"},{id:"readinstruction",classNames:"text-blue-600",name:"ReadInstruction",group:"group2"}];function o(h){se(i,r=h,r)}const t=h=>s.includes(h.id),c=h=>o(h.group);return a.$$set=h=>{"ids"in h&&l(0,s=h.ids)},[s,r,i,u,o,t,c]}class Q extends R{constructor(e){super();D(this,e,he,fe,A,{ids:0})}}function pe(a){let e,l,r,s,n,i,u=a[1].highlighted+"",o;return l=new Y({props:{classNames:"transition duration-200 ease-in-out "+(a[2]&&"opacity-0"),title:"Copy code excerpt to clipboard",value:a[1].code}}),n=new Q({props:{ids:a[4]}}),{c(){e=E("div"),B(l.$$.fragment),r=C(),s=E("pre"),B(n.$$.fragment),i=new H,this.h()},l(t){e=N(t,"DIV",{class:!0});var 
c=d(e);x(l.$$.fragment,c),c.forEach(m),r=I(t),s=N(t,"PRE",{});var h=d(s);x(n.$$.fragment,h),i=K(h),h.forEach(m),this.h()},h(){f(e,"class","absolute top-2.5 right-4"),i.a=null},m(t,c){w(t,e,c),P(l,e,null),w(t,r,c),w(t,s,c),P(n,s,null),i.m(u,s),o=!0},p(t,c){const h={};c&4&&(h.classNames="transition duration-200 ease-in-out "+(t[2]&&"opacity-0")),c&2&&(h.value=t[1].code),l.$set(h),(!o||c&2)&&u!==(u=t[1].highlighted+"")&&i.p(u)},i(t){o||(g(l.$$.fragment,t),g(n.$$.fragment,t),o=!0)},o(t){b(l.$$.fragment,t),b(n.$$.fragment,t),o=!1},d(t){t&&m(e),M(l),t&&m(r),t&&m(s),M(n)}}}function me(a){let e,l,r,s,n,i,u=a[0].highlighted+"",o;return l=new Y({props:{classNames:"transition duration-200 ease-in-out "+(a[2]&&"opacity-0"),title:"Copy code excerpt to clipboard",value:a[0].code}}),n=new Q({props:{ids:a[4]}}),{c(){e=E("div"),B(l.$$.fragment),r=C(),s=E("pre"),B(n.$$.fragment),i=new H,this.h()},l(t){e=N(t,"DIV",{class:!0});var c=d(e);x(l.$$.fragment,c),c.forEach(m),r=I(t),s=N(t,"PRE",{});var h=d(s);x(n.$$.fragment,h),i=K(h),h.forEach(m),this.h()},h(){f(e,"class","absolute top-2.5 right-4"),i.a=null},m(t,c){w(t,e,c),P(l,e,null),w(t,r,c),w(t,s,c),P(n,s,null),i.m(u,s),o=!0},p(t,c){const h={};c&4&&(h.classNames="transition duration-200 ease-in-out "+(t[2]&&"opacity-0")),c&1&&(h.value=t[0].code),l.$set(h),(!o||c&1)&&u!==(u=t[0].highlighted+"")&&i.p(u)},i(t){o||(g(l.$$.fragment,t),g(n.$$.fragment,t),o=!0)},o(t){b(l.$$.fragment,t),b(n.$$.fragment,t),o=!1},d(t){t&&m(e),M(l),t&&m(r),t&&m(s),M(n)}}}function de(a){let e,l,r,s,n,i;const u=[me,pe],o=[];function t(c,h){return c[3]==="group1"?0:1}return l=t(a),r=o[l]=u[l](a),{c(){e=E("div"),r.c(),this.h()},l(c){e=N(c,"DIV",{class:!0});var h=d(e);r.l(h),h.forEach(m),this.h()},h(){f(e,"class","code-block relative")},m(c,h){w(c,e,h),o[l].m(e,null),s=!0,n||(i=[V(e,"mouseover",a[6]),V(e,"focus",a[6]),V(e,"mouseout",a[7]),V(e,"focus",a[7])],n=!0)},p(c,[h]){let p=l;l=t(c),l===p?o[l].p(c,h):(F(),b(o[p],1,1,()=>{o[p]=null}),L(),r=o[l],r?r.p(c,h):(r=o[l]=u[l](c),r.c()),g(r,1),r.m(e,null))},i(c){s||(g(r),s=!0)},o(c){b(r),s=!1},d(c){c&&m(e),o[l].d(),n=!1,re(i)}}}function ge(a,e,l){let r,{group1:s}=e,{group2:n}=e;const i=[s.id,n.id],u=i.join("-"),o=Z(u);q(a,o,p=>l(3,r=p));let t=!0;function c(){l(2,t=!1)}function h(){l(2,t=!0)}return a.$$set=p=>{"group1"in p&&l(0,s=p.group1),"group2"in p&&l(1,n=p.group2)},[s,n,t,r,i,o,c,h]}class we extends R{constructor(e){super();D(this,e,ge,de,A,{group1:0,group2:1})}}export{we as C};
233
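A readable sketch of the framework-toggle logic the minified CodeBlockFw chunk above appears to implement: one writable Svelte store is cached per id pair (for example "pt-tf"), defaulting to "group1" so the first framework's snippet is selected, and every code block sharing that pair switches together. Names below are mine; the compiled chunk only exports the component class.

import { writable } from "svelte/store";

// Cache of one store per code-block group, keyed by the joined ids ("pt-tf").
const groupStores = {};

export function getGroupStore(key) {
  if (!groupStores[key]) {
    // "group1" selects the first framework (e.g. PyTorch) by default.
    groupStores[key] = writable("group1");
  }
  return groupStores[key];
}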
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/chunks/paths-4b3c6e7e.js
let e="";function a(s){e=s.base,s.assets}export{e as b,a as s};
234
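A readable version of the tiny paths chunk above (names are mine): it keeps the app's base path in module scope and exposes a setter the SvelteKit runtime calls at startup; the assets value is read but not retained in this build.

// Base path used to prefix internal links, e.g. "/docs/transformers/pr_16143/en".
let base = "";

// Called once by the runtime with { base, assets }; only `base` is kept.
export function setPaths(paths) {
  base = paths.base;
}

export { base };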
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/chunks/IconCopyLink-4b81c553.js
import{S as m,i as u,s as d,O as n,P as o,a as h,d as i,b as a,g as p,F as g,L as c}from"./vendor-4833417e.js";function v(l){let e,r;return{c(){e=n("svg"),r=n("path"),this.h()},l(t){e=o(t,"svg",{class:!0,xmlns:!0,"xmlns:xlink":!0,"aria-hidden":!0,role:!0,width:!0,height:!0,preserveAspectRatio:!0,viewBox:!0});var s=h(e);r=o(s,"path",{d:!0,fill:!0}),h(r).forEach(i),s.forEach(i),this.h()},h(){a(r,"d","M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z"),a(r,"fill","currentColor"),a(e,"class",l[0]),a(e,"xmlns","http://www.w3.org/2000/svg"),a(e,"xmlns:xlink","http://www.w3.org/1999/xlink"),a(e,"aria-hidden","true"),a(e,"role","img"),a(e,"width","1em"),a(e,"height","1em"),a(e,"preserveAspectRatio","xMidYMid meet"),a(e,"viewBox","0 0 256 256")},m(t,s){p(t,e,s),g(e,r)},p(t,[s]){s&1&&a(e,"class",t[0])},i:c,o:c,d(t){t&&i(e)}}}function f(l,e,r){let{classNames:t=""}=e;return l.$$set=s=>{"classNames"in s&&r(0,t=s.classNames)},[t]}class x extends m{constructor(e){super();u(this,e,f,v,d,{classNames:0})}}export{x as I};
235
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/chunks/CopyButton-dacfbfaf.js
import{S as w,i as x,s as N,O as g,P as p,a as h,d as m,b as l,g as C,F as b,L as y,e as _,k as D,t as S,c as v,m as I,h as q,f as T,j as O,w as V,x as H,y as k,Q as R,q as B,o as A,B as M,T as Z}from"./vendor-4833417e.js";function j(n){const e=document.createElement("textarea");document.body.appendChild(e),e.value=n,e.select(),document.execCommand("copy"),document.body.removeChild(e)}function F(n){let e,t,r,s;return{c(){e=g("svg"),t=g("path"),r=g("path"),s=g("rect"),this.h()},l(i){e=p(i,"svg",{class:!0,xmlns:!0,"aria-hidden":!0,fill:!0,focusable:!0,role:!0,width:!0,height:!0,preserveAspectRatio:!0,viewBox:!0});var a=h(e);t=p(a,"path",{d:!0,transform:!0}),h(t).forEach(m),r=p(a,"path",{d:!0,transform:!0}),h(r).forEach(m),s=p(a,"rect",{fill:!0,width:!0,height:!0}),h(s).forEach(m),a.forEach(m),this.h()},h(){l(t,"d","M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z"),l(t,"transform","translate(0)"),l(r,"d","M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z"),l(r,"transform","translate(0)"),l(s,"fill","none"),l(s,"width","32"),l(s,"height","32"),l(e,"class",n[0]),l(e,"xmlns","http://www.w3.org/2000/svg"),l(e,"aria-hidden","true"),l(e,"fill","currentColor"),l(e,"focusable","false"),l(e,"role","img"),l(e,"width","1em"),l(e,"height","1em"),l(e,"preserveAspectRatio","xMidYMid meet"),l(e,"viewBox","0 0 32 32")},m(i,a){C(i,e,a),b(e,t),b(e,r),b(e,s)},p(i,[a]){a&1&&l(e,"class",i[0])},i:y,o:y,d(i){i&&m(e)}}}function L(n,e,t){let{classNames:r=""}=e;return n.$$set=s=>{"classNames"in s&&t(0,r=s.classNames)},[r]}class P extends w{constructor(e){super();x(this,e,L,F,N,{classNames:0})}}function Q(n){let e,t,r,s,i;return{c(){e=_("div"),t=_("div"),r=D(),s=S(n[1]),this.h()},l(a){e=v(a,"DIV",{class:!0});var c=h(e);t=v(c,"DIV",{class:!0,style:!0}),h(t).forEach(m),r=I(c),s=q(c,n[1]),c.forEach(m),this.h()},h(){l(t,"class","absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0"),T(t,"border-left-color","transparent"),T(t,"border-right-color","transparent"),l(e,"class",i="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow "+n[2]+" "+n[0])},m(a,c){C(a,e,c),b(e,t),b(e,r),b(e,s)},p(a,[c]){c&2&&O(s,a[1]),c&5&&i!==(i="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow "+a[2]+" "+a[0])&&l(e,"class",i)},i:y,o:y,d(a){a&&m(e)}}}function U(n,e,t){let{classNames:r=""}=e,{label:s="Copied"}=e,{position:i="left-1/2 top-full transform -translate-x-1/2 translate-y-2"}=e;return n.$$set=a=>{"classNames"in a&&t(0,r=a.classNames),"label"in a&&t(1,s=a.label),"position"in a&&t(2,i=a.position)},[r,s,i]}class Y extends w{constructor(e){super();x(this,e,U,Q,N,{classNames:0,label:1,position:2})}}function z(n){let e,t,r,s,i,a,c,d,f;return t=new P({}),s=new Y({props:{classNames:n[4]?"opacity-100":"opacity-0"}}),{c(){e=_("button"),V(t.$$.fragment),r=D(),V(s.$$.fragment),this.h()},l(o){e=v(o,"BUTTON",{class:!0,title:!0,type:!0});var u=h(e);H(t.$$.fragment,u),r=I(u),H(s.$$.fragment,u),u.forEach(m),this.h()},h(){l(e,"class",i="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none "+n[0]+" "+(n[2]==="text"?"mx-0.5":"")+" "+(n[2]==="button"?"btn":"")+" "+(n[2]==="button-clear"?"py-1 px-2 border rounded-lg shadow-sm":"")+" "+(!n[4]&&["button-clear","text"].includes(n[2])?"text-gray-600":"")+" "+(n[4]?"text-green-500":"")),l(e,"title",a=n[3]||n[1]||"Copy to 
clipboard"),l(e,"type","button")},m(o,u){C(o,e,u),k(t,e,null),b(e,r),k(s,e,null),c=!0,d||(f=R(e,"click",n[5]),d=!0)},p(o,[u]){const E={};u&16&&(E.classNames=o[4]?"opacity-100":"opacity-0"),s.$set(E),(!c||u&21&&i!==(i="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none "+o[0]+" "+(o[2]==="text"?"mx-0.5":"")+" "+(o[2]==="button"?"btn":"")+" "+(o[2]==="button-clear"?"py-1 px-2 border rounded-lg shadow-sm":"")+" "+(!o[4]&&["button-clear","text"].includes(o[2])?"text-gray-600":"")+" "+(o[4]?"text-green-500":"")))&&l(e,"class",i),(!c||u&10&&a!==(a=o[3]||o[1]||"Copy to clipboard"))&&l(e,"title",a)},i(o){c||(B(t.$$.fragment,o),B(s.$$.fragment,o),c=!0)},o(o){A(t.$$.fragment,o),A(s.$$.fragment,o),c=!1},d(o){o&&m(e),M(t),M(s),d=!1,f()}}}function G(n,e,t){let{classNames:r=""}=e,{label:s=""}=e,{style:i="text"}=e,{title:a=""}=e,{value:c}=e,d=!1,f;Z(()=>{f&&clearTimeout(f)});function o(){j(c),t(4,d=!0),f&&clearTimeout(f),f=setTimeout(()=>{t(4,d=!1)},1e3)}return n.$$set=u=>{"classNames"in u&&t(0,r=u.classNames),"label"in u&&t(1,s=u.label),"style"in u&&t(2,i=u.style),"title"in u&&t(3,a=u.title),"value"in u&&t(6,c=u.value)},[r,s,i,a,d,o,c]}class K extends w{constructor(e){super();x(this,e,G,z,N,{classNames:0,label:1,style:2,title:3,value:6})}}export{K as C};
236
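The CopyButton chunk above inlines a small execCommand-based clipboard helper; a readable sketch (the function name is mine):

function copyToClipboard(value) {
  // Legacy approach used by the chunk: copy the selection of a temporary textarea.
  const textarea = document.createElement("textarea");
  document.body.appendChild(textarea);
  textarea.value = value;
  textarea.select();
  document.execCommand("copy");
  document.body.removeChild(textarea);
}

After copying, the component shows its "Copied" tooltip and hides it again roughly one second later (setTimeout with a 1000 ms delay).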
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/chunks/Youtube-27813aed.js
import{S as u,i as n,s as m,e as d,c as f,a as h,d as o,b as s,N as c,g as p,L as l}from"./vendor-4833417e.js";function w(i){let e,r;return{c(){e=d("iframe"),this.h()},l(t){e=f(t,"IFRAME",{class:!0,src:!0,title:!0,frameborder:!0,allow:!0}),h(e).forEach(o),this.h()},h(){s(e,"class","w-full xl:w-4/6 h-80"),c(e.src,r="https://www.youtube-nocookie.com/embed/"+i[0])||s(e,"src",r),s(e,"title","YouTube video player"),s(e,"frameborder","0"),s(e,"allow","accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture"),e.allowFullscreen=!0},m(t,a){p(t,e,a)},p(t,[a]){a&1&&!c(e.src,r="https://www.youtube-nocookie.com/embed/"+t[0])&&s(e,"src",r)},i:l,o:l,d(t){t&&o(e)}}}function b(i,e,r){let{id:t}=e;return i.$$set=a=>{"id"in a&&r(0,t=a.id)},[t]}class y extends u{constructor(e){super();n(this,e,b,w,m,{id:0})}}export{y as Y};
237
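A DOM-level sketch of what the Youtube chunk above renders for a given video id (attribute values are taken from the minified markup; the helper name is mine):

function createYoutubeEmbed(id) {
  const iframe = document.createElement("iframe");
  iframe.className = "w-full xl:w-4/6 h-80";
  // Privacy-enhanced host used by the component.
  iframe.src = `https://www.youtube-nocookie.com/embed/${id}`;
  iframe.title = "YouTube video player";
  iframe.setAttribute("frameborder", "0");
  iframe.setAttribute(
    "allow",
    "accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture"
  );
  iframe.allowFullscreen = true;
  return iframe;
}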
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/chunks/DocNotebookDropdown-ecff2a90.js
import{S as Y,i as j,s as H,H as y,e as A,c as V,a as O,d as v,b as d,g as D,F as z,Q as K,Y as me,I as U,J as W,K as q,q as h,o as b,v as he,Z as P,O as Q,P as Z,f as Be,L as X,w as E,x as R,y as S,B as I,k as G,m as T,n as L,p as J,_ as ge,l as M,t as be,h as we,j as ke,$ as De,G as Ne,N as ve}from"./vendor-4833417e.js";function Ee(s){let e,n,l,t,o,a;const f=s[7].default,i=y(f,s,s[6],null);return{c(){e=A("div"),n=A("ul"),i&&i.c(),this.h()},l(r){e=V(r,"DIV",{class:!0});var _=O(e);n=V(_,"UL",{class:!0});var p=O(n);i&&i.l(p),p.forEach(v),_.forEach(v),this.h()},h(){d(n,"class","min-w-full w-auto"),d(e,"class",l="absolute top-full mt-1 min-w-full w-auto bg-white rounded-xl overflow-hidden shadow-lg z-10 border border-gray-100 "+(s[2]==="right"?"right-0":"left-0")+" "+s[0])},m(r,_){D(r,e,_),z(e,n),i&&i.m(n,null),s[8](e),t=!0,o||(a=K(e,"click",function(){me(s[1])&&s[1].apply(this,arguments)}),o=!0)},p(r,[_]){s=r,i&&i.p&&(!t||_&64)&&U(i,f,s,s[6],t?q(f,s[6],_,null):W(s[6]),null),(!t||_&5&&l!==(l="absolute top-full mt-1 min-w-full w-auto bg-white rounded-xl overflow-hidden shadow-lg z-10 border border-gray-100 "+(s[2]==="right"?"right-0":"left-0")+" "+s[0]))&&d(e,"class",l)},i(r){t||(h(i,r),t=!0)},o(r){b(i,r),t=!1},d(r){r&&v(e),i&&i.d(r),s[8](null),o=!1,a()}}}function Se(s,e,n){let{$$slots:l={},$$scope:t}=e,{classNames:o=""}=e,{dropdownElement:a=void 0}=e,{forceAlignement:f=void 0}=e,{onClose:i}=e,r=f!=null?f:"left",_;he(()=>{var g,u;if(document.addEventListener("click",p),!f){const c=document.documentElement.clientWidth,m=(_==null?void 0:_.getBoundingClientRect())||{},N=(g=m.left)!=null?g:0,B=(u=m.width)!=null?u:0;n(2,r=N+B>c?"right":"left")}return()=>{document.removeEventListener("click",p)}});function p(g){const u=g.target;u!==a&&!(a==null?void 0:a.contains(u))&&i()}function C(g){P[g?"unshift":"push"](()=>{_=g,n(3,_)})}return s.$$set=g=>{"classNames"in g&&n(0,o=g.classNames),"dropdownElement"in g&&n(4,a=g.dropdownElement),"forceAlignement"in g&&n(5,f=g.forceAlignement),"onClose"in g&&n(1,i=g.onClose),"$$scope"in g&&n(6,t=g.$$scope)},[o,i,r,_,a,f,t,l,C]}class Ie extends Y{constructor(e){super();j(this,e,Se,Ee,H,{classNames:0,dropdownElement:4,forceAlignement:5,onClose:1})}}function Le(s){let e,n;return{c(){e=Q("svg"),n=Q("path"),this.h()},l(l){e=Z(l,"svg",{class:!0,xmlns:!0,"xmlns:xlink":!0,"aria-hidden":!0,focusable:!0,role:!0,width:!0,height:!0,preserveAspectRatio:!0,viewBox:!0,style:!0});var t=O(e);n=Z(t,"path",{d:!0,fill:!0}),O(n).forEach(v),t.forEach(v),this.h()},h(){d(n,"d","M7 10l5 5l5-5z"),d(n,"fill","currentColor"),d(e,"class",s[0]),d(e,"xmlns","http://www.w3.org/2000/svg"),d(e,"xmlns:xlink","http://www.w3.org/1999/xlink"),d(e,"aria-hidden","true"),d(e,"focusable","false"),d(e,"role","img"),d(e,"width","1em"),d(e,"height","1em"),d(e,"preserveAspectRatio","xMidYMid meet"),d(e,"viewBox","0 0 24 24"),Be(e,"transform","rotate(360deg)")},m(l,t){D(l,e,t),z(e,n)},p(l,[t]){t&1&&d(e,"class",l[0])},i:X,o:X,d(l){l&&v(e)}}}function Je(s,e,n){let{classNames:l=""}=e;return s.$$set=t=>{"classNames"in t&&n(0,l=t.classNames)},[l]}class Me extends Y{constructor(e){super();j(this,e,Je,Le,H,{classNames:0})}}const Ae=s=>({}),x=s=>({}),Ve=s=>({}),$=s=>({});function Oe(s){let 
e,n,l,t,o,a=s[2]&&ee(s),f=s[10]&&te();return{c(){a&&a.c(),e=G(),n=be(s[4]),l=G(),f&&f.c(),t=M()},l(i){a&&a.l(i),e=T(i),n=we(i,s[4]),l=T(i),f&&f.l(i),t=M()},m(i,r){a&&a.m(i,r),D(i,e,r),D(i,n,r),D(i,l,r),f&&f.m(i,r),D(i,t,r),o=!0},p(i,r){i[2]?a?(a.p(i,r),r&4&&h(a,1)):(a=ee(i),a.c(),h(a,1),a.m(e.parentNode,e)):a&&(L(),b(a,1,1,()=>{a=null}),J()),(!o||r&16)&&ke(n,i[4]),i[10]?f?r&1024&&h(f,1):(f=te(),f.c(),h(f,1),f.m(t.parentNode,t)):f&&(L(),b(f,1,1,()=>{f=null}),J())},i(i){o||(h(a),h(f),o=!0)},o(i){b(a),b(f),o=!1},d(i){a&&a.d(i),i&&v(e),i&&v(n),i&&v(l),f&&f.d(i),i&&v(t)}}}function Re(s){let e;const n=s[14].button,l=y(n,s,s[18],$);return{c(){l&&l.c()},l(t){l&&l.l(t)},m(t,o){l&&l.m(t,o),e=!0},p(t,o){l&&l.p&&(!e||o&262144)&&U(l,n,t,t[18],e?q(n,t[18],o,Ve):W(t[18]),$)},i(t){e||(h(l,t),e=!0)},o(t){b(l,t),e=!1},d(t){l&&l.d(t)}}}function ee(s){let e,n,l;var t=s[2];function o(a){return{props:{classNames:"mr-1.5 "+a[3]}}}return t&&(e=new t(o(s))),{c(){e&&E(e.$$.fragment),n=M()},l(a){e&&R(e.$$.fragment,a),n=M()},m(a,f){e&&S(e,a,f),D(a,n,f),l=!0},p(a,f){const i={};if(f&8&&(i.classNames="mr-1.5 "+a[3]),t!==(t=a[2])){if(e){L();const r=e;b(r.$$.fragment,1,0,()=>{I(r,1)}),J()}t?(e=new t(o(a)),E(e.$$.fragment),h(e.$$.fragment,1),S(e,n.parentNode,n)):e=null}else t&&e.$set(i)},i(a){l||(e&&h(e.$$.fragment,a),l=!0)},o(a){e&&b(e.$$.fragment,a),l=!1},d(a){a&&v(n),e&&I(e,a)}}}function te(s){let e,n;return e=new Me({props:{classNames:"-mr-1 text-gray-500"}}),{c(){E(e.$$.fragment)},l(l){R(e.$$.fragment,l)},m(l,t){S(e,l,t),n=!0},i(l){n||(h(e.$$.fragment,l),n=!0)},o(l){b(e.$$.fragment,l),n=!1},d(l){I(e,l)}}}function le(s){let e,n;return e=new Ie({props:{classNames:s[6]+" "+(s[9]?"v2-dropdown-menu hidden":""),dropdownElement:s[11],forceAlignement:s[5],onClose:s[16],$$slots:{default:[ye]},$$scope:{ctx:s}}}),{c(){E(e.$$.fragment)},l(l){R(e.$$.fragment,l)},m(l,t){S(e,l,t),n=!0},p(l,t){const o={};t&576&&(o.classNames=l[6]+" "+(l[9]?"v2-dropdown-menu hidden":"")),t&2048&&(o.dropdownElement=l[11]),t&32&&(o.forceAlignement=l[5]),t&4096&&(o.onClose=l[16]),t&262144&&(o.$$scope={dirty:t,ctx:l}),e.$set(o)},i(l){n||(h(e.$$.fragment,l),n=!0)},o(l){b(e.$$.fragment,l),n=!1},d(l){I(e,l)}}}function ye(s){let e;const n=s[14].menu,l=y(n,s,s[18],x);return{c(){l&&l.c()},l(t){l&&l.l(t)},m(t,o){l&&l.m(t,o),e=!0},p(t,o){l&&l.p&&(!e||o&262144)&&U(l,n,t,t[18],e?q(n,t[18],o,Ae):W(t[18]),x)},i(t){e||(h(l,t),e=!0)},o(t){b(l,t),e=!1},d(t){l&&l.d(t)}}}function Ue(s){let e,n,l,t,o,a,f,i,r,_,p;const C=[Re,Oe],g=[];function u(m,N){return m[13].button?0:1}l=u(s),t=g[l]=C[l](s);let c=(s[12]||s[9])&&le(s);return{c(){e=A("div"),n=A("button"),t.c(),a=G(),c&&c.c(),this.h()},l(m){e=V(m,"DIV",{class:!0,"selected-value":!0});var N=O(e);n=V(N,"BUTTON",{class:!0,type:!0});var B=O(n);t.l(B),B.forEach(v),a=T(N),c&&c.l(N),N.forEach(v),this.h()},h(){d(n,"class",o=""+s[1]+" "+(s[7]?"":"cursor-pointer w-full btn text-sm")+" "+(s[9]?"v2-dropdown-button":"")),d(n,"type","button"),d(e,"class",f="relative "+s[0]+" "+(s[9]?"v2-dropdown":"")),d(e,"selected-value",i=s[8]||void 0)},m(m,N){D(m,e,N),z(e,n),g[l].m(n,null),z(e,a),c&&c.m(e,null),s[17](e),r=!0,_||(p=K(n,"click",s[15]),_=!0)},p(m,[N]){let B=l;l=u(m),l===B?g[l].p(m,N):(L(),b(g[B],1,1,()=>{g[B]=null}),J(),t=g[l],t?t.p(m,N):(t=g[l]=C[l](m),t.c()),h(t,1),t.m(n,null)),(!r||N&642&&o!==(o=""+m[1]+" "+(m[7]?"":"cursor-pointer w-full btn text-sm")+" 
"+(m[9]?"v2-dropdown-button":"")))&&d(n,"class",o),m[12]||m[9]?c?(c.p(m,N),N&4608&&h(c,1)):(c=le(m),c.c(),h(c,1),c.m(e,null)):c&&(L(),b(c,1,1,()=>{c=null}),J()),(!r||N&513&&f!==(f="relative "+m[0]+" "+(m[9]?"v2-dropdown":"")))&&d(e,"class",f),(!r||N&256&&i!==(i=m[8]||void 0))&&d(e,"selected-value",i)},i(m){r||(h(t),h(c),r=!0)},o(m){b(t),b(c),r=!1},d(m){m&&v(e),g[l].d(),c&&c.d(),s[17](null),_=!1,p()}}}function We(s,e,n){let{$$slots:l={},$$scope:t}=e;const o=ge(l);let{classNames:a=""}=e,{btnClassNames:f=""}=e,{btnIcon:i=void 0}=e,{btnIconClassNames:r=""}=e,{btnLabel:_=""}=e,{forceMenuAlignement:p=void 0}=e,{menuClassNames:C=""}=e,{noBtnClass:g=void 0}=e,{selectedValue:u=void 0}=e,{useDeprecatedJS:c=!0}=e,{withBtnCaret:m=!1}=e,N,B=!1;const w=()=>n(12,B=!B),F=()=>n(12,B=!1);function pe(k){P[k?"unshift":"push"](()=>{N=k,n(11,N)})}return s.$$set=k=>{"classNames"in k&&n(0,a=k.classNames),"btnClassNames"in k&&n(1,f=k.btnClassNames),"btnIcon"in k&&n(2,i=k.btnIcon),"btnIconClassNames"in k&&n(3,r=k.btnIconClassNames),"btnLabel"in k&&n(4,_=k.btnLabel),"forceMenuAlignement"in k&&n(5,p=k.forceMenuAlignement),"menuClassNames"in k&&n(6,C=k.menuClassNames),"noBtnClass"in k&&n(7,g=k.noBtnClass),"selectedValue"in k&&n(8,u=k.selectedValue),"useDeprecatedJS"in k&&n(9,c=k.useDeprecatedJS),"withBtnCaret"in k&&n(10,m=k.withBtnCaret),"$$scope"in k&&n(18,t=k.$$scope)},[a,f,i,r,_,p,C,g,u,c,m,N,B,o,l,w,F,pe,t]}class ne extends Y{constructor(e){super();j(this,e,We,Ue,H,{classNames:0,btnClassNames:1,btnIcon:2,btnIconClassNames:3,btnLabel:4,forceMenuAlignement:5,menuClassNames:6,noBtnClass:7,selectedValue:8,useDeprecatedJS:9,withBtnCaret:10})}}function qe(s){let e,n,l,t=s[5]&&se(s);return{c(){t&&t.c(),e=G(),n=be(s[7])},l(o){t&&t.l(o),e=T(o),n=we(o,s[7])},m(o,a){t&&t.m(o,a),D(o,e,a),D(o,n,a),l=!0},p(o,a){o[5]?t?(t.p(o,a),a&32&&h(t,1)):(t=se(o),t.c(),h(t,1),t.m(e.parentNode,e)):t&&(L(),b(t,1,1,()=>{t=null}),J()),(!l||a&128)&&ke(n,o[7])},i(o){l||(h(t),l=!0)},o(o){b(t),l=!1},d(o){t&&t.d(o),o&&v(e),o&&v(n)}}}function ze(s){let e;const n=s[15].default,l=y(n,s,s[14],null);return{c(){l&&l.c()},l(t){l&&l.l(t)},m(t,o){l&&l.m(t,o),e=!0},p(t,o){l&&l.p&&(!e||o&16384)&&U(l,n,t,t[14],e?q(n,t[14],o,null):W(t[14]),null)},i(t){e||(h(l,t),e=!0)},o(t){b(l,t),e=!1},d(t){l&&l.d(t)}}}function se(s){let e,n,l;var t=s[5];function o(a){return{props:{classNames:"mr-1.5 "+a[6]}}}return t&&(e=new t(o(s))),{c(){e&&E(e.$$.fragment),n=M()},l(a){e&&R(e.$$.fragment,a),n=M()},m(a,f){e&&S(e,a,f),D(a,n,f),l=!0},p(a,f){const i={};if(f&64&&(i.classNames="mr-1.5 "+a[6]),t!==(t=a[5])){if(e){L();const r=e;b(r.$$.fragment,1,0,()=>{I(r,1)}),J()}t?(e=new t(o(a)),E(e.$$.fragment),h(e.$$.fragment,1),S(e,n.parentNode,n)):e=null}else t&&e.$set(i)},i(a){l||(e&&h(e.$$.fragment,a),l=!0)},o(a){e&&b(e.$$.fragment,a),l=!1},d(a){a&&v(n),e&&I(e,a)}}}function Fe(s){let e,n,l,t,o,a,f,i,r,_;const p=[ze,qe],C=[];function g(u,c){return u[13].default?0:1}return l=g(s),t=C[l]=p[l](s),{c(){e=A("li"),n=A("a"),t.c(),this.h()},l(u){e=V(u,"LI",{});var c=O(e);n=V(c,"A",{class:!0,"data-label":!0,"data-url":!0,"data-value":!0,href:!0,rel:!0,target:!0});var m=O(n);t.l(m),m.forEach(v),c.forEach(v),this.h()},h(){d(n,"class",o="flex items-center hover:bg-gray-50 dark:hover:bg-gray-800 cursor-pointer px-3 py-1.5 whitespace-nowrap "+s[0]+" "+(s[9]?"hover:underline":"")+" "+(s[12]?"v2-dropdown-entry":"")),d(n,"data-label",s[1]),d(n,"data-url",s[2]),d(n,"data-value",s[3]),d(n,"href",s[4]),d(n,"rel",a=s[8]?"nofollow":void 0),d(n,"target",f=s[11]?"_blank":void 
0)},m(u,c){D(u,e,c),z(e,n),C[l].m(n,null),i=!0,r||(_=K(n,"click",function(){me(s[10])&&s[10].apply(this,arguments)}),r=!0)},p(u,[c]){s=u;let m=l;l=g(s),l===m?C[l].p(s,c):(L(),b(C[m],1,1,()=>{C[m]=null}),J(),t=C[l],t?t.p(s,c):(t=C[l]=p[l](s),t.c()),h(t,1),t.m(n,null)),(!i||c&4609&&o!==(o="flex items-center hover:bg-gray-50 dark:hover:bg-gray-800 cursor-pointer px-3 py-1.5 whitespace-nowrap "+s[0]+" "+(s[9]?"hover:underline":"")+" "+(s[12]?"v2-dropdown-entry":"")))&&d(n,"class",o),(!i||c&2)&&d(n,"data-label",s[1]),(!i||c&4)&&d(n,"data-url",s[2]),(!i||c&8)&&d(n,"data-value",s[3]),(!i||c&16)&&d(n,"href",s[4]),(!i||c&256&&a!==(a=s[8]?"nofollow":void 0))&&d(n,"rel",a),(!i||c&2048&&f!==(f=s[11]?"_blank":void 0))&&d(n,"target",f)},i(u){i||(h(t),i=!0)},o(u){b(t),i=!1},d(u){u&&v(e),C[l].d(),r=!1,_()}}}function Ge(s,e,n){let{$$slots:l={},$$scope:t}=e;const o=ge(l);let{classNames:a=""}=e,{dataLabel:f=void 0}=e,{dataUrl:i=void 0}=e,{dataValue:r=void 0}=e,{href:_=void 0}=e,{icon:p=void 0}=e,{iconClassNames:C=""}=e,{label:g=""}=e,{noFollow:u=!1}=e,{underline:c=!1}=e,{onClick:m=()=>{}}=e,{targetBlank:N=!1}=e,{useDeprecatedJS:B=!0}=e;return s.$$set=w=>{"classNames"in w&&n(0,a=w.classNames),"dataLabel"in w&&n(1,f=w.dataLabel),"dataUrl"in w&&n(2,i=w.dataUrl),"dataValue"in w&&n(3,r=w.dataValue),"href"in w&&n(4,_=w.href),"icon"in w&&n(5,p=w.icon),"iconClassNames"in w&&n(6,C=w.iconClassNames),"label"in w&&n(7,g=w.label),"noFollow"in w&&n(8,u=w.noFollow),"underline"in w&&n(9,c=w.underline),"onClick"in w&&n(10,m=w.onClick),"targetBlank"in w&&n(11,N=w.targetBlank),"useDeprecatedJS"in w&&n(12,B=w.useDeprecatedJS),"$$scope"in w&&n(14,t=w.$$scope)},[a,f,i,r,_,p,C,g,u,c,m,N,B,o,t,l]}class Ce extends Y{constructor(e){super();j(this,e,Ge,Fe,H,{classNames:0,dataLabel:1,dataUrl:2,dataValue:3,href:4,icon:5,iconClassNames:6,label:7,noFollow:8,underline:9,onClick:10,targetBlank:11,useDeprecatedJS:12})}}const{window:Te}=De,Ye=s=>({}),ae=s=>({slot:"button"});function oe(s,e,n){const l=s.slice();return l[11]=e[n].label,l[12]=e[n].value,l}const je=s=>({}),ie=s=>({slot:"menu"}),He=s=>({}),fe=s=>({slot:"button"});function re(s,e,n){const l=s.slice();return l[11]=e[n].label,l[12]=e[n].value,l}const Ke=s=>({}),ue=s=>({slot:"menu"});function Pe(s){let e,n;return{c(){e=A("img"),this.h()},l(l){e=V(l,"IMG",{alt:!0,class:!0,src:!0}),this.h()},h(){d(e,"alt","Open In Colab"),d(e,"class","!m-0"),ve(e.src,n="https://colab.research.google.com/assets/colab-badge.svg")||d(e,"src",n)},m(l,t){D(l,e,t)},d(l){l&&v(e)}}}function Qe(s){let e;const n=s[6].default,l=y(n,s,s[10],fe),t=l||Pe();return{c(){t&&t.c()},l(o){t&&t.l(o)},m(o,a){t&&t.m(o,a),e=!0},p(o,a){l&&l.p&&(!e||a&1024)&&U(l,n,o,o[10],e?q(n,o[10],a,He):W(o[10]),fe)},i(o){e||(h(t,o),e=!0)},o(o){b(t,o),e=!1},d(o){t&&t.d(o)}}}function ce(s){let e,n;function l(){return s[7](s[12])}return e=new Ce({props:{classNames:"text-sm !no-underline",iconClassNames:"text-gray-500",label:s[11],onClick:l,useDeprecatedJS:!1}}),{c(){E(e.$$.fragment)},l(t){R(e.$$.fragment,t)},m(t,o){S(e,t,o),n=!0},p(t,o){s=t},i(t){n||(h(e.$$.fragment,t),n=!0)},o(t){b(e.$$.fragment,t),n=!1},d(t){I(e,t)}}}function Ze(s){let e,n,l=s[2],t=[];for(let a=0;a<l.length;a+=1)t[a]=ce(re(s,l,a));const o=a=>b(t[a],1,1,()=>{t[a]=null});return{c(){for(let a=0;a<t.length;a+=1)t[a].c();e=M()},l(a){for(let f=0;f<t.length;f+=1)t[f].l(a);e=M()},m(a,f){for(let i=0;i<t.length;i+=1)t[i].m(a,f);D(a,e,f),n=!0},p(a,f){if(f&4){l=a[2];let i;for(i=0;i<l.length;i+=1){const 
r=re(a,l,i);t[i]?(t[i].p(r,f),h(t[i],1)):(t[i]=ce(r),t[i].c(),h(t[i],1),t[i].m(e.parentNode,e))}for(L(),i=l.length;i<t.length;i+=1)o(i);J()}},i(a){if(!n){for(let f=0;f<l.length;f+=1)h(t[f]);n=!0}},o(a){t=t.filter(Boolean);for(let f=0;f<t.length;f+=1)b(t[f]);n=!1},d(a){Ne(t,a),a&&v(e)}}}function Xe(s){let e;const n=s[6].default,l=y(n,s,s[10],ue),t=l||Ze(s);return{c(){t&&t.c()},l(o){t&&t.l(o)},m(o,a){t&&t.m(o,a),e=!0},p(o,a){l&&l.p&&(!e||a&1024)&&U(l,n,o,o[10],e?q(n,o[10],a,Ke):W(o[10]),ue)},i(o){e||(h(t,o),e=!0)},o(o){b(t,o),e=!1},d(o){t&&t.d(o)}}}function xe(s){let e,n;return{c(){e=A("img"),this.h()},l(l){e=V(l,"IMG",{alt:!0,class:!0,src:!0}),this.h()},h(){d(e,"alt","Open In Studio Lab"),d(e,"class","!m-0"),ve(e.src,n="https://studiolab.sagemaker.aws/studiolab.svg")||d(e,"src",n)},m(l,t){D(l,e,t)},d(l){l&&v(e)}}}function $e(s){let e;const n=s[6].default,l=y(n,s,s[10],ae),t=l||xe();return{c(){t&&t.c()},l(o){t&&t.l(o)},m(o,a){t&&t.m(o,a),e=!0},p(o,a){l&&l.p&&(!e||a&1024)&&U(l,n,o,o[10],e?q(n,o[10],a,Ye):W(o[10]),ae)},i(o){e||(h(t,o),e=!0)},o(o){b(t,o),e=!1},d(o){t&&t.d(o)}}}function de(s){let e,n;function l(){return s[8](s[12])}return e=new Ce({props:{classNames:"text-sm !no-underline",iconClassNames:"text-gray-500",label:s[11],onClick:l,useDeprecatedJS:!1}}),{c(){E(e.$$.fragment)},l(t){R(e.$$.fragment,t)},m(t,o){S(e,t,o),n=!0},p(t,o){s=t},i(t){n||(h(e.$$.fragment,t),n=!0)},o(t){b(e.$$.fragment,t),n=!1},d(t){I(e,t)}}}function et(s){let e,n,l=s[3],t=[];for(let a=0;a<l.length;a+=1)t[a]=de(oe(s,l,a));const o=a=>b(t[a],1,1,()=>{t[a]=null});return{c(){for(let a=0;a<t.length;a+=1)t[a].c();e=M()},l(a){for(let f=0;f<t.length;f+=1)t[f].l(a);e=M()},m(a,f){for(let i=0;i<t.length;i+=1)t[i].m(a,f);D(a,e,f),n=!0},p(a,f){if(f&8){l=a[3];let i;for(i=0;i<l.length;i+=1){const r=oe(a,l,i);t[i]?(t[i].p(r,f),h(t[i],1)):(t[i]=de(r),t[i].c(),h(t[i],1),t[i].m(e.parentNode,e))}for(L(),i=l.length;i<t.length;i+=1)o(i);J()}},i(a){if(!n){for(let f=0;f<l.length;f+=1)h(t[f]);n=!0}},o(a){t=t.filter(Boolean);for(let f=0;f<t.length;f+=1)b(t[f]);n=!1},d(a){Ne(t,a),a&&v(e)}}}function tt(s){let e;const n=s[6].default,l=y(n,s,s[10],ie),t=l||et(s);return{c(){t&&t.c()},l(o){t&&t.l(o)},m(o,a){t&&t.m(o,a),e=!0},p(o,a){l&&l.p&&(!e||a&1024)&&U(l,n,o,o[10],e?q(n,o[10],a,je):W(o[10]),ie)},i(o){e||(h(t,o),e=!0)},o(o){b(t,o),e=!1},d(o){t&&t.d(o)}}}function lt(s){let e,n,l,t,o,a,f,i;return n=new ne({props:{btnLabel:"",classNames:"colab-dropdown",noBtnClass:!0,useDeprecatedJS:!1,$$slots:{menu:[Xe],button:[Qe]},$$scope:{ctx:s}}}),t=new ne({props:{btnLabel:"",classNames:"colab-dropdown",noBtnClass:!0,useDeprecatedJS:!1,$$slots:{menu:[tt],button:[$e]},$$scope:{ctx:s}}}),{c(){e=A("div"),E(n.$$.fragment),l=G(),E(t.$$.fragment),this.h()},l(r){e=V(r,"DIV",{class:!0});var _=O(e);R(n.$$.fragment,_),l=T(_),R(t.$$.fragment,_),_.forEach(v),this.h()},h(){d(e,"class",o="flex space-x-1 "+s[0])},m(r,_){D(r,e,_),S(n,e,null),z(e,l),S(t,e,null),s[9](e),a=!0,f||(i=K(Te,"resize",s[4]),f=!0)},p(r,[_]){const p={};_&1024&&(p.$$scope={dirty:_,ctx:r}),n.$set(p);const C={};_&1024&&(C.$$scope={dirty:_,ctx:r}),t.$set(C),(!a||_&1&&o!==(o="flex space-x-1 "+r[0]))&&d(e,"class",o)},i(r){a||(h(n.$$.fragment,r),h(t.$$.fragment,r),a=!0)},o(r){b(n.$$.fragment,r),b(t.$$.fragment,r),a=!1},d(r){r&&v(e),I(n),I(t),s[9](null),f=!1,i()}}}function _e(s){window.open(s)}function nt(s,e,n){let{$$slots:l={},$$scope:t}=e,{options:o=[]}=e,{classNames:a=""}=e,f;const 
i=o.filter(u=>u.value.includes("colab.research.google.com")),r=o.filter(u=>u.value.includes("studiolab.sagemaker.aws"));function _(){const u=document.querySelector(".prose-doc h1"),c=document.querySelector(".prose-doc h1 > span");if(u&&c){const{width:m}=u.getBoundingClientRect(),{width:N}=c.getBoundingClientRect();let B=0;for(let F=0;F<f.children.length;F++)B+=f.children.item(F).clientWidth;const w=20;m-N<B+w?f.classList.remove("absolute"):f.classList.add("absolute")}}he(()=>{_()});const p=u=>_e(u),C=u=>_e(u);function g(u){P[u?"unshift":"push"](()=>{f=u,n(1,f)})}return s.$$set=u=>{"options"in u&&n(5,o=u.options),"classNames"in u&&n(0,a=u.classNames),"$$scope"in u&&n(10,t=u.$$scope)},[a,f,i,r,_,o,l,p,C,g,t]}class at extends Y{constructor(e){super();j(this,e,nt,lt,H,{options:5,classNames:0})}}export{at as D};
238
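A readable sketch of the page-level logic in the DocNotebookDropdown chunk above: the notebook options are split by host into a Colab dropdown and a SageMaker Studio Lab dropdown, and clicking an entry opens the URL in a new tab (helper names are mine; the { label, value } option shape comes from the custom_datasets page chunk further down).

function splitNotebookOptions(options) {
  return {
    colab: options.filter((o) => o.value.includes("colab.research.google.com")),
    studioLab: options.filter((o) => o.value.includes("studiolab.sagemaker.aws")),
  };
}

// Selecting an entry simply opens the notebook URL in a new browser tab.
function openNotebook(url) {
  window.open(url);
}

The chunk also measures the page title on mount and on window resize and toggles an "absolute" class, so the dropdowns only float beside the h1 when there is enough horizontal room.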
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/chunks/vendor-4833417e.js
function d(){}function Q(t,n){for(const e in n)t[e]=n[e];return t}function P(t){return t()}function C(){return Object.create(null)}function g(t){t.forEach(P)}function z(t){return typeof t=="function"}function U(t,n){return t!=t?n==n:t!==n||t&&typeof t=="object"||typeof t=="function"}let x;function ht(t,n){return x||(x=document.createElement("a")),x.href=n,t===x.href}function V(t){return Object.keys(t).length===0}function X(t,...n){if(t==null)return d;const e=t.subscribe(...n);return e.unsubscribe?()=>e.unsubscribe():e}function mt(t,n,e){t.$$.on_destroy.push(X(n,e))}function pt(t,n,e,i){if(t){const r=O(t,n,e,i);return t[0](r)}}function O(t,n,e,i){return t[1]&&i?Q(e.ctx.slice(),t[1](i(n))):e.ctx}function yt(t,n,e,i){if(t[2]&&i){const r=t[2](i(e));if(n.dirty===void 0)return r;if(typeof r=="object"){const u=[],s=Math.max(n.dirty.length,r.length);for(let o=0;o<s;o+=1)u[o]=n.dirty[o]|r[o];return u}return n.dirty|r}return n.dirty}function gt(t,n,e,i,r,u){if(r){const s=O(n,e,i,u);t.p(s,r)}}function bt(t){if(t.ctx.length>32){const n=[],e=t.ctx.length/32;for(let i=0;i<e;i++)n[i]=-1;return n}return-1}function xt(t){const n={};for(const e in t)n[e]=!0;return n}function wt(t,n,e){return t.set(e),n}function $t(t){return t&&z(t.destroy)?t.destroy:d}let A=!1;function Y(){A=!0}function Z(){A=!1}function tt(t,n,e,i){for(;t<n;){const r=t+(n-t>>1);e(r)<=i?t=r+1:n=r}return t}function nt(t){if(t.hydrate_init)return;t.hydrate_init=!0;let n=t.childNodes;if(t.nodeName==="HEAD"){const c=[];for(let l=0;l<n.length;l++){const f=n[l];f.claim_order!==void 0&&c.push(f)}n=c}const e=new Int32Array(n.length+1),i=new Int32Array(n.length);e[0]=-1;let r=0;for(let c=0;c<n.length;c++){const l=n[c].claim_order,f=(r>0&&n[e[r]].claim_order<=l?r+1:tt(1,r,b=>n[e[b]].claim_order,l))-1;i[c]=e[f]+1;const a=f+1;e[a]=c,r=Math.max(a,r)}const u=[],s=[];let o=n.length-1;for(let c=e[r]+1;c!=0;c=i[c-1]){for(u.push(n[c-1]);o>=c;o--)s.push(n[o]);o--}for(;o>=0;o--)s.push(n[o]);u.reverse(),s.sort((c,l)=>c.claim_order-l.claim_order);for(let c=0,l=0;c<s.length;c++){for(;l<u.length&&s[c].claim_order>=u[l].claim_order;)l++;const f=l<u.length?u[l]:null;t.insertBefore(s[c],f)}}function et(t,n){if(A){for(nt(t),(t.actual_end_child===void 0||t.actual_end_child!==null&&t.actual_end_child.parentElement!==t)&&(t.actual_end_child=t.firstChild);t.actual_end_child!==null&&t.actual_end_child.claim_order===void 0;)t.actual_end_child=t.actual_end_child.nextSibling;n!==t.actual_end_child?(n.claim_order!==void 0||n.parentNode!==t)&&t.insertBefore(n,t.actual_end_child):t.actual_end_child=n.nextSibling}else(n.parentNode!==t||n.nextSibling!==null)&&t.appendChild(n)}function it(t,n,e){t.insertBefore(n,e||null)}function rt(t,n,e){A&&!e?et(t,n):(n.parentNode!==t||n.nextSibling!=e)&&t.insertBefore(n,e||null)}function T(t){t.parentNode.removeChild(t)}function Et(t,n){for(let e=0;e<t.length;e+=1)t[e]&&t[e].d(n)}function G(t){return document.createElement(t)}function ct(t){return document.createElementNS("http://www.w3.org/2000/svg",t)}function v(t){return document.createTextNode(t)}function Tt(){return v(" ")}function At(){return v("")}function Nt(t,n,e,i){return t.addEventListener(n,e,i),()=>t.removeEventListener(n,e,i)}function St(t){return function(n){return n.preventDefault(),t.call(this,n)}}function kt(t){return function(n){return n.stopPropagation(),t.call(this,n)}}function jt(t,n,e){e==null?t.removeAttribute(n):t.getAttribute(n)!==e&&t.setAttribute(n,e)}function st(t){return Array.from(t.childNodes)}function F(t){t.claim_info===void 
0&&(t.claim_info={last_index:0,total_claimed:0})}function I(t,n,e,i,r=!1){F(t);const u=(()=>{for(let s=t.claim_info.last_index;s<t.length;s++){const o=t[s];if(n(o)){const c=e(o);return c===void 0?t.splice(s,1):t[s]=c,r||(t.claim_info.last_index=s),o}}for(let s=t.claim_info.last_index-1;s>=0;s--){const o=t[s];if(n(o)){const c=e(o);return c===void 0?t.splice(s,1):t[s]=c,r?c===void 0&&t.claim_info.last_index--:t.claim_info.last_index=s,o}}return i()})();return u.claim_order=t.claim_info.total_claimed,t.claim_info.total_claimed+=1,u}function R(t,n,e,i){return I(t,r=>r.nodeName===n,r=>{const u=[];for(let s=0;s<r.attributes.length;s++){const o=r.attributes[s];e[o.name]||u.push(o.name)}u.forEach(s=>r.removeAttribute(s))},()=>i(n))}function vt(t,n,e){return R(t,n,e,G)}function Mt(t,n,e){return R(t,n,e,ct)}function ot(t,n){return I(t,e=>e.nodeType===3,e=>{const i=""+n;if(e.data.startsWith(i)){if(e.data.length!==i.length)return e.splitText(i.length)}else e.data=i},()=>v(n),!0)}function qt(t){return ot(t," ")}function H(t,n,e){for(let i=e;i<t.length;i+=1){const r=t[i];if(r.nodeType===8&&r.textContent.trim()===n)return i}return t.length}function Ct(t){const n=H(t,"HTML_TAG_START",0),e=H(t,"HTML_TAG_END",n);if(n===e)return new L;F(t);const i=t.splice(n,e-n+1);T(i[0]),T(i[i.length-1]);const r=i.slice(1,i.length-1);for(const u of r)u.claim_order=t.claim_info.total_claimed,t.claim_info.total_claimed+=1;return new L(r)}function Ht(t,n){n=""+n,t.wholeText!==n&&(t.data=n)}function Lt(t,n,e,i){e===null?t.style.removeProperty(n):t.style.setProperty(n,e,i?"important":"")}function Bt(t,n=document.body){return Array.from(n.querySelectorAll(t))}class lt{constructor(){this.e=this.n=null}c(n){this.h(n)}m(n,e,i=null){this.e||(this.e=G(e.nodeName),this.t=e,this.c(n)),this.i(i)}h(n){this.e.innerHTML=n,this.n=Array.from(this.e.childNodes)}i(n){for(let e=0;e<this.n.length;e+=1)it(this.t,this.n[e],n)}p(n){this.d(),this.h(n),this.i(this.a)}d(){this.n.forEach(T)}}class L extends lt{constructor(n){super();this.e=this.n=null,this.l=n}c(n){this.l?this.n=this.l:super.c(n)}i(n){for(let e=0;e<this.n.length;e+=1)rt(this.t,this.n[e],n)}}let y;function p(t){y=t}function N(){if(!y)throw new Error("Function called outside component initialization");return y}function Dt(t){N().$$.on_mount.push(t)}function Pt(t){N().$$.after_update.push(t)}function zt(t){N().$$.on_destroy.push(t)}function Ot(t,n){N().$$.context.set(t,n)}const m=[],B=[],$=[],D=[],W=Promise.resolve();let k=!1;function J(){k||(k=!0,W.then(K))}function Gt(){return J(),W}function j(t){$.push(t)}const S=new Set;let w=0;function K(){const t=y;do{for(;w<m.length;){const n=m[w];w++,p(n),ut(n.$$)}for(p(null),m.length=0,w=0;B.length;)B.pop()();for(let n=0;n<$.length;n+=1){const e=$[n];S.has(e)||(S.add(e),e())}$.length=0}while(m.length);for(;D.length;)D.pop()();k=!1,S.clear(),p(t)}function ut(t){if(t.fragment!==null){t.update(),g(t.before_update);const n=t.dirty;t.dirty=[-1],t.fragment&&t.fragment.p(t.ctx,n),t.after_update.forEach(j)}}const E=new Set;let _;function Ft(){_={r:0,c:[],p:_}}function It(){_.r||g(_.c),_=_.p}function at(t,n){t&&t.i&&(E.delete(t),t.i(n))}function Rt(t,n,e,i){if(t&&t.o){if(E.has(t))return;E.add(t),_.c.push(()=>{E.delete(t),i&&(e&&t.d(1),i())}),t.o(n)}}const Wt=typeof window!="undefined"?window:typeof globalThis!="undefined"?globalThis:global;function Jt(t,n){const e={},i={},r={$$scope:1};let u=t.length;for(;u--;){const s=t[u],o=n[u];if(o){for(const c in s)c in o||(i[c]=1);for(const c in o)r[c]||(e[c]=o[c],r[c]=1);t[u]=o}else for(const c in 
s)r[c]=1}for(const s in i)s in e||(e[s]=void 0);return e}function Kt(t){return typeof t=="object"&&t!==null?t:{}}function Qt(t){t&&t.c()}function Ut(t,n){t&&t.l(n)}function ft(t,n,e,i){const{fragment:r,on_mount:u,on_destroy:s,after_update:o}=t.$$;r&&r.m(n,e),i||j(()=>{const c=u.map(P).filter(z);s?s.push(...c):g(c),t.$$.on_mount=[]}),o.forEach(j)}function _t(t,n){const e=t.$$;e.fragment!==null&&(g(e.on_destroy),e.fragment&&e.fragment.d(n),e.on_destroy=e.fragment=null,e.ctx=[])}function dt(t,n){t.$$.dirty[0]===-1&&(m.push(t),J(),t.$$.dirty.fill(0)),t.$$.dirty[n/31|0]|=1<<n%31}function Vt(t,n,e,i,r,u,s,o=[-1]){const c=y;p(t);const l=t.$$={fragment:null,ctx:null,props:u,update:d,not_equal:r,bound:C(),on_mount:[],on_destroy:[],on_disconnect:[],before_update:[],after_update:[],context:new Map(n.context||(c?c.$$.context:[])),callbacks:C(),dirty:o,skip_bound:!1,root:n.target||c.$$.root};s&&s(l.root);let f=!1;if(l.ctx=e?e(t,n.props||{},(a,b,...M)=>{const q=M.length?M[0]:b;return l.ctx&&r(l.ctx[a],l.ctx[a]=q)&&(!l.skip_bound&&l.bound[a]&&l.bound[a](q),f&&dt(t,a)),b}):[],l.update(),f=!0,g(l.before_update),l.fragment=i?i(l.ctx):!1,n.target){if(n.hydrate){Y();const a=st(n.target);l.fragment&&l.fragment.l(a),a.forEach(T)}else l.fragment&&l.fragment.c();n.intro&&at(t.$$.fragment),ft(t,n.target,n.anchor,n.customElement),Z(),K()}p(c)}class Xt{$destroy(){_t(this,1),this.$destroy=d}$on(n,e){const i=this.$$.callbacks[n]||(this.$$.callbacks[n]=[]);return i.push(e),()=>{const r=i.indexOf(e);r!==-1&&i.splice(r,1)}}$set(n){this.$$set&&!V(n)&&(this.$$.skip_bound=!0,this.$$set(n),this.$$.skip_bound=!1)}}const h=[];function Yt(t,n=d){let e;const i=new Set;function r(o){if(U(t,o)&&(t=o,e)){const c=!h.length;for(const l of i)l[1](),h.push(l,t);if(c){for(let l=0;l<h.length;l+=2)h[l][0](h[l+1]);h.length=0}}}function u(o){r(o(t))}function s(o,c=d){const l=[o,c];return i.add(l),i.size===1&&(e=n(r)||d),o(t),()=>{i.delete(l),i.size===0&&(e(),e=null)}}return{set:r,update:u,subscribe:s}}export{Wt as $,Kt as A,_t as B,Q as C,Yt as D,Gt as E,et as F,Et as G,pt as H,gt as I,bt as J,yt as K,d as L,Bt as M,ht as N,ct as O,Mt as P,Nt as Q,g as R,Xt as S,zt as T,mt as U,wt as V,L as W,Ct as X,z as Y,B as Z,xt as _,st as a,$t as a0,kt as a1,St as a2,jt as b,vt as c,T as d,G as e,Lt as f,rt as g,ot as h,Vt as i,Ht as j,Tt as k,At as l,qt as m,Ft as n,Rt as o,It as p,at as q,Ot as r,U as s,v as t,Pt as u,Dt as v,Qt as w,Ut as x,ft as y,Jt as z};
239
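The vendor chunk above bundles the compiled Svelte runtime; the store factory it exports as D (used by the CodeBlockFw sketch earlier) follows the standard svelte/store writable contract. A simplified sketch, which omits the batched subscriber queue and start/stop notifier of the real implementation:

function writable(value) {
  const subscribers = new Set();

  function set(next) {
    if (next !== value) {
      value = next;
      subscribers.forEach((run) => run(value));
    }
  }

  function update(fn) {
    set(fn(value));
  }

  function subscribe(run) {
    subscribers.add(run);
    run(value); // new subscribers immediately receive the current value
    return () => subscribers.delete(run);
  }

  return { set, update, subscribe };
}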
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/chunks/Tip-fffd6df1.js
import{S as f,i as d,s as u,H as _,e as g,c,a as m,d as l,b,g as h,I as p,J as k,K as w,q as v,o as y}from"./vendor-4833417e.js";function q(t){let a,s;const o=t[3].default,r=_(o,t,t[2],null);return{c(){a=g("div"),r&&r.c(),this.h()},l(e){a=c(e,"DIV",{class:!0});var n=m(a);r&&r.l(n),n.forEach(l),this.h()},h(){b(a,"class","course-tip "+(t[0]==="orange"?"course-tip-orange":"")+" bg-gradient-to-br dark:bg-gradient-to-r before:border-"+t[0]+"-500 dark:before:border-"+t[0]+"-800 from-"+t[0]+"-50 dark:from-gray-900 to-white dark:to-gray-950 border border-"+t[0]+"-50 text-"+t[0]+"-700 dark:text-gray-400")},m(e,n){h(e,a,n),r&&r.m(a,null),s=!0},p(e,[n]){r&&r.p&&(!s||n&4)&&p(r,o,e,e[2],s?w(o,e[2],n,null):k(e[2]),null)},i(e){s||(v(r,e),s=!0)},o(e){y(r,e),s=!1},d(e){e&&l(a),r&&r.d(e)}}}function I(t,a,s){let{$$slots:o={},$$scope:r}=a,{warning:e=!1}=a;const n=e?"orange":"green";return t.$$set=i=>{"warning"in i&&s(1,e=i.warning),"$$scope"in i&&s(2,r=i.$$scope)},[n,e,r,o]}class T extends f{constructor(a){super();d(this,a,I,q,u,{warning:1})}}export{T};
240
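The Tip chunk above derives its color scheme from a warning prop and interpolates it into Tailwind classes; a readable sketch of that class construction (the function name is mine):

function tipClasses(warning = false) {
  const color = warning ? "orange" : "green";
  return [
    "course-tip",
    warning ? "course-tip-orange" : "",
    "bg-gradient-to-br dark:bg-gradient-to-r",
    `before:border-${color}-500 dark:before:border-${color}-800`,
    `from-${color}-50 dark:from-gray-900 to-white dark:to-gray-950`,
    `border border-${color}-50 text-${color}-700 dark:text-gray-400`,
  ].join(" ");
}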
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages/custom_datasets.mdx-a1b148bf.js
import{S as dm,i as cm,s as mm,e as l,k as f,w as d,t as n,M as _m,c as i,d as t,m as h,a as p,x as c,h as o,b as u,F as s,g as r,y as m,q as _,o as w,B as g}from"../chunks/vendor-4833417e.js";import{T as Su}from"../chunks/Tip-fffd6df1.js";import{I as z}from"../chunks/IconCopyLink-4b81c553.js";import{C as b}from"../chunks/CodeBlock-6a3d1b46.js";import{D as wm}from"../chunks/DocNotebookDropdown-ecff2a90.js";import"../chunks/CopyButton-dacfbfaf.js";function gm(V){let $,q,k,y,E,v,P,T;return{c(){$=l("p"),q=n(`For a more in-depth example of how to fine-tune a model for text classification, take a look at the corresponding `),k=l("a"),y=n("PyTorch notebook"),E=n(` or `),v=l("a"),P=n("TensorFlow notebook"),T=n("."),this.h()},l(x){$=i(x,"P",{});var j=p($);q=o(j,`For a more in-depth example of how to fine-tune a model for text classification, take a look at the corresponding `),k=i(j,"A",{href:!0,rel:!0});var A=p(k);y=o(A,"PyTorch notebook"),A.forEach(t),E=o(j,` or `),v=i(j,"A",{href:!0,rel:!0});var C=p(v);P=o(C,"TensorFlow notebook"),C.forEach(t),T=o(j,"."),j.forEach(t),this.h()},h(){u(k,"href","https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/text_classification.ipynb"),u(k,"rel","nofollow"),u(v,"href","https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/text_classification-tf.ipynb"),u(v,"rel","nofollow")},m(x,j){r(x,$,j),s($,q),s($,k),s(k,y),s($,E),s($,v),s(v,P),s($,T)},d(x){x&&t($)}}}function bm(V){let $,q,k,y,E,v,P,T;return{c(){$=l("p"),q=n(`For a more in-depth example of how to fine-tune a model for token classification, take a look at the corresponding `),k=l("a"),y=n("PyTorch notebook"),E=n(` or `),v=l("a"),P=n("TensorFlow notebook"),T=n("."),this.h()},l(x){$=i(x,"P",{});var j=p($);q=o(j,`For a more in-depth example of how to fine-tune a model for token classification, take a look at the corresponding `),k=i(j,"A",{href:!0,rel:!0});var A=p(k);y=o(A,"PyTorch notebook"),A.forEach(t),E=o(j,` or `),v=i(j,"A",{href:!0,rel:!0});var C=p(v);P=o(C,"TensorFlow notebook"),C.forEach(t),T=o(j,"."),j.forEach(t),this.h()},h(){u(k,"href","https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/token_classification.ipynb"),u(k,"rel","nofollow"),u(v,"href","https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/token_classification-tf.ipynb"),u(v,"rel","nofollow")},m(x,j){r(x,$,j),s($,q),s($,k),s(k,y),s($,E),s($,v),s(v,P),s($,T)},d(x){x&&t($)}}}function $m(V){let $,q,k,y,E,v,P,T;return{c(){$=l("p"),q=n(`For a more in-depth example of how to fine-tune a model for question answering, take a look at the corresponding `),k=l("a"),y=n("PyTorch notebook"),E=n(` or `),v=l("a"),P=n("TensorFlow notebook"),T=n("."),this.h()},l(x){$=i(x,"P",{});var j=p($);q=o(j,`For a more in-depth example of how to fine-tune a model for question answering, take a look at the corresponding `),k=i(j,"A",{href:!0,rel:!0});var A=p(k);y=o(A,"PyTorch notebook"),A.forEach(t),E=o(j,` or `),v=i(j,"A",{href:!0,rel:!0});var C=p(v);P=o(C,"TensorFlow notebook"),C.forEach(t),T=o(j,"."),j.forEach(t),this.h()},h(){u(k,"href","https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/question_answering.ipynb"),u(k,"rel","nofollow"),u(v,"href","https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/question_answering-tf.ipynb"),u(v,"rel","nofollow")},m(x,j){r(x,$,j),s($,q),s($,k),s(k,y),s($,E),s($,v),s(v,P),s($,T)},d(x){x&&t($)}}}function km(V){let 
$,q,k,y,E,v,P,T,x,j,A,C,qs,gi,to,ue,bi,st,$i,ki,so,at,ao,Es,vi,no,I,Da,Ts,ji,yi,Sa,As,xi,qi,Na,zs,Ei,oo,Ps,ro,J,de,Ia,nt,Ti,Ma,Ai,lo,ce,zi,ot,Pi,Fi,io,me,po,K,_e,La,rt,Ci,Oa,Di,fo,Fs,Si,ho,lt,uo,we,Ni,Ba,Ii,Mi,co,it,mo,X,ge,Wa,pt,Li,Ha,Oi,_o,M,Bi,Cs,Wi,Hi,ft,Qi,Ri,wo,ht,go,Ds,Ui,bo,ut,$o,L,Yi,Qa,Gi,Vi,Ra,Ji,Ki,ko,dt,vo,F,Xi,Ua,Zi,ep,Ya,tp,sp,Ga,ap,np,Va,op,rp,jo,ct,yo,Z,be,Ja,mt,lp,Ka,ip,xo,$e,pp,Ss,fp,hp,qo,_t,Eo,Ns,up,To,O,wt,dp,Is,cp,mp,_p,gt,wp,Ms,gp,bp,$p,bt,kp,Xa,vp,jp,Ao,$t,zo,ee,ke,Za,kt,yp,en,xp,Po,Ls,qp,Fo,D,Ep,Os,Tp,Ap,tn,zp,Pp,sn,Fp,Cp,Co,vt,Do,S,Dp,an,Sp,Np,nn,Ip,Mp,on,Lp,Op,So,jt,No,Bs,Bp,Io,yt,Mo,ve,Wp,Ws,Hp,Qp,Lo,xt,Oo,Hs,Rp,Bo,qt,Wo,je,Up,rn,Yp,Gp,Ho,Et,Qo,Qs,Ro,te,ye,ln,Tt,Vp,pn,Jp,Uo,xe,Kp,At,Xp,Zp,Yo,qe,Go,se,Ee,fn,zt,ef,hn,tf,Vo,Rs,sf,Jo,Pt,Ko,Us,af,Xo,Ft,Zo,Ys,nf,er,Ct,tr,Gs,of,sr,B,Vs,un,rf,lf,pf,W,dn,ff,hf,cn,uf,df,mn,cf,mf,_f,Js,_n,wf,gf,ar,ae,Te,wn,Dt,bf,gn,$f,nr,Ae,kf,Ks,vf,jf,or,St,rr,ze,yf,bn,xf,qf,lr,Nt,ir,H,Ef,$n,Tf,Af,kn,zf,Pf,pr,Q,It,Ff,vn,Cf,Df,Sf,ne,Nf,jn,If,Mf,yn,Lf,Of,Bf,Mt,Wf,xn,Hf,Qf,fr,Xs,Rf,hr,Lt,ur,Pe,Uf,qn,Yf,Gf,dr,Ot,cr,Zs,Vf,mr,Bt,_r,oe,Fe,En,Wt,Jf,Tn,Kf,wr,Ce,Xf,ea,Zf,eh,gr,Ht,br,De,th,ta,sh,ah,$r,Qt,kr,Se,nh,sa,oh,rh,vr,Rt,jr,aa,lh,yr,Ut,xr,re,Ne,An,Yt,ih,zn,ph,qr,na,fh,Er,Gt,Tr,R,hh,Pn,uh,dh,Fn,ch,mh,Ar,Vt,zr,Ie,_h,oa,wh,gh,Pr,Jt,Fr,ra,bh,Cr,Kt,Dr,la,$h,Sr,Xt,Nr,Me,kh,Cn,vh,jh,Ir,Zt,Mr,ia,Lr,le,Le,Dn,es,yh,Sn,xh,Or,Oe,qh,ts,Eh,Th,Br,Be,Wr,ie,We,Nn,ss,Ah,In,zh,Hr,pa,Ph,Qr,as,Rr,fa,Fh,Ur,ns,Yr,pe,He,Mn,os,Ch,Ln,Dh,Gr,Qe,Sh,ha,Nh,Ih,Vr,rs,Jr,ua,Mh,Kr,U,N,Lh,On,Oh,Bh,Bn,Wh,Hh,Wn,Qh,Rh,Uh,ls,Yh,Hn,Gh,Vh,Jh,is,Kh,Qn,Xh,Zh,Xr,da,eu,Zr,ps,el,Re,tu,Rn,su,au,tl,fs,sl,ca,nu,al,hs,nl,fe,Ue,Un,us,ou,Yn,ru,ol,Ye,lu,ma,iu,pu,rl,ds,ll,Ge,fu,_a,hu,uu,il,cs,pl,Ve,du,wa,cu,mu,fl,ms,hl,ga,_u,ul,_s,dl,he,Je,Gn,ws,wu,Vn,gu,cl,ba,bu,ml,gs,_l,Y,$u,Jn,ku,vu,Kn,ju,yu,wl,bs,gl,$a,xu,bl,$s,$l,Ke,qu,ka,Eu,Tu,kl,ks,vl,va,Au,jl,vs,yl,Xe,zu,Xn,Pu,Fu,xl,js,ql;return v=new z({}),A=new wm({props:{classNames:"absolute z-10 right-0 top-0",options:[{label:"Mixed",value:"https://colab.research.google.com/github/huggingface/notebooks/blob/master/transformers_doc/custom_datasets.ipynb"},{label:"PyTorch",value:"https://colab.research.google.com/github/huggingface/notebooks/blob/master/transformers_doc/pytorch/custom_datasets.ipynb"},{label:"TensorFlow",value:"https://colab.research.google.com/github/huggingface/notebooks/blob/master/transformers_doc/tensorflow/custom_datasets.ipynb"},{label:"Mixed",value:"https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/transformers_doc/custom_datasets.ipynb"},{label:"PyTorch",value:"https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/transformers_doc/pytorch/custom_datasets.ipynb"},{label:"TensorFlow",value:"https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/transformers_doc/tensorflow/custom_datasets.ipynb"}]}}),at=new b({props:{code:"pip install datasets",highlighted:"pip install datasets"}}),nt=new z({}),me=new Su({props:{$$slots:{default:[gm]},$$scope:{ctx:V}}}),rt=new z({}),lt=new b({props:{code:`from datasets import load_dataset imdb = load_dataset("imdb")`,highlighted:`<span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset imdb = load_dataset(<span class="hljs-string">&quot;imdb&quot;</span>)`}}),it=new b({props:{code:`imdb["train"][0] { "label": 1, "text": "Bromwell High is a cartoon comedy. It ran at the same time as some other programs about school life, such as \\"Teachers\\". 
My 35 years in the teaching profession lead me to believe that Bromwell High's satire is much closer to reality than is \\"Teachers\\". The scramble to survive financially, the insightful students who can see right through their pathetic teachers' pomp, the pettiness of the whole situation, all remind me of the schools I knew and their students. When I saw the episode in which a student repeatedly tried to burn down the school, I immediately recalled ......... at .......... High. A classic line: INSPECTOR: I'm here to sack one of your teachers. STUDENT: Welcome to Bromwell High. I expect that many adults of my age think that Bromwell High is far fetched. What a pity that it isn't!", }`,highlighted:`imdb[<span class="hljs-string">&quot;train&quot;</span>][<span class="hljs-number">0</span>] { <span class="hljs-string">&quot;label&quot;</span>: <span class="hljs-number">1</span>, <span class="hljs-string">&quot;text&quot;</span>: <span class="hljs-string">&quot;Bromwell High is a cartoon comedy. It ran at the same time as some other programs about school life, such as \\&quot;Teachers\\&quot;. My 35 years in the teaching profession lead me to believe that Bromwell High&#x27;s satire is much closer to reality than is \\&quot;Teachers\\&quot;. The scramble to survive financially, the insightful students who can see right through their pathetic teachers&#x27; pomp, the pettiness of the whole situation, all remind me of the schools I knew and their students. When I saw the episode in which a student repeatedly tried to burn down the school, I immediately recalled ......... at .......... High. A classic line: INSPECTOR: I&#x27;m here to sack one of your teachers. STUDENT: Welcome to Bromwell High. I expect that many adults of my age think that Bromwell High is far fetched. 
What a pity that it isn&#x27;t!&quot;</span>, }`}}),pt=new z({}),ht=new b({props:{code:`from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")`,highlighted:`<span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>)`}}),ut=new b({props:{code:`def preprocess_function(examples): return tokenizer(examples["text"], truncation=True)`,highlighted:`<span class="hljs-keyword">def</span> <span class="hljs-title function_">preprocess_function</span>(<span class="hljs-params">examples</span>): <span class="hljs-keyword">return</span> tokenizer(examples[<span class="hljs-string">&quot;text&quot;</span>], truncation=<span class="hljs-literal">True</span>)`}}),dt=new b({props:{code:"tokenized_imdb = imdb.map(preprocess_function, batched=True)",highlighted:'tokenized_imdb = imdb.<span class="hljs-built_in">map</span>(preprocess_function, batched=<span class="hljs-literal">True</span>)'}}),ct=new b({props:{code:`from transformers import DataCollatorWithPadding data_collator = DataCollatorWithPadding(tokenizer=tokenizer)`,highlighted:`<span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DataCollatorWithPadding data_collator = DataCollatorWithPadding(tokenizer=tokenizer)`}}),mt=new z({}),_t=new b({props:{code:`from transformers import AutoModelForSequenceClassification model = AutoModelForSequenceClassification.from_pretrained("distilbert-base-uncased", num_labels=2)`,highlighted:`<span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModelForSequenceClassification model = AutoModelForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>, num_labels=<span class="hljs-number">2</span>)`}}),$t=new b({props:{code:`from transformers import TrainingArguments, Trainer training_args = TrainingArguments( output_dir="./results", learning_rate=2e-5, per_device_train_batch_size=16, per_device_eval_batch_size=16, num_train_epochs=5, weight_decay=0.01, ) trainer = Trainer( model=model, args=training_args, train_dataset=tokenized_imdb["train"], eval_dataset=tokenized_imdb["test"], tokenizer=tokenizer, data_collator=data_collator, ) trainer.train()`,highlighted:`<span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TrainingArguments, Trainer training_args = TrainingArguments( output_dir=<span class="hljs-string">&quot;./results&quot;</span>, learning_rate=<span class="hljs-number">2e-5</span>, per_device_train_batch_size=<span class="hljs-number">16</span>, per_device_eval_batch_size=<span class="hljs-number">16</span>, num_train_epochs=<span class="hljs-number">5</span>, weight_decay=<span class="hljs-number">0.01</span>, ) trainer = Trainer( model=model, args=training_args, train_dataset=tokenized_imdb[<span class="hljs-string">&quot;train&quot;</span>], eval_dataset=tokenized_imdb[<span class="hljs-string">&quot;test&quot;</span>], tokenizer=tokenizer, data_collator=data_collator, ) trainer.train()`}}),kt=new z({}),vt=new b({props:{code:`from transformers import DataCollatorWithPadding data_collator = DataCollatorWithPadding(tokenizer, return_tensors="tf")`,highlighted:`<span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DataCollatorWithPadding data_collator = DataCollatorWithPadding(tokenizer, 
return_tensors=<span class="hljs-string">&quot;tf&quot;</span>)`}}),jt=new b({props:{code:`tf_train_dataset = tokenized_imdb["train"].to_tf_dataset( columns=["attention_mask", "input_ids", "label"], shuffle=True, batch_size=16, collate_fn=data_collator, ) tf_validation_dataset = tokenized_imdb["train"].to_tf_dataset( columns=["attention_mask", "input_ids", "label"], shuffle=False, batch_size=16, collate_fn=data_collator, )`,highlighted:`tf_train_dataset = tokenized_imdb[<span class="hljs-string">&quot;train&quot;</span>].to_tf_dataset( columns=[<span class="hljs-string">&quot;attention_mask&quot;</span>, <span class="hljs-string">&quot;input_ids&quot;</span>, <span class="hljs-string">&quot;label&quot;</span>], shuffle=<span class="hljs-literal">True</span>, batch_size=<span class="hljs-number">16</span>, collate_fn=data_collator, ) tf_validation_dataset = tokenized_imdb[<span class="hljs-string">&quot;train&quot;</span>].to_tf_dataset( columns=[<span class="hljs-string">&quot;attention_mask&quot;</span>, <span class="hljs-string">&quot;input_ids&quot;</span>, <span class="hljs-string">&quot;label&quot;</span>], shuffle=<span class="hljs-literal">False</span>, batch_size=<span class="hljs-number">16</span>, collate_fn=data_collator, )`}}),yt=new b({props:{code:`from transformers import create_optimizer import tensorflow as tf batch_size = 16 num_epochs = 5 batches_per_epoch = len(tokenized_imdb["train"]) // batch_size total_train_steps = int(batches_per_epoch * num_epochs) optimizer, schedule = create_optimizer(init_lr=2e-5, num_warmup_steps=0, num_train_steps=total_train_steps)`,highlighted:`<span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> create_optimizer <span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf batch_size = <span class="hljs-number">16</span> num_epochs = <span class="hljs-number">5</span> batches_per_epoch = <span class="hljs-built_in">len</span>(tokenized_imdb[<span class="hljs-string">&quot;train&quot;</span>]) // batch_size total_train_steps = <span class="hljs-built_in">int</span>(batches_per_epoch * num_epochs) optimizer, schedule = create_optimizer(init_lr=<span class="hljs-number">2e-5</span>, num_warmup_steps=<span class="hljs-number">0</span>, num_train_steps=total_train_steps)`}}),xt=new b({props:{code:`from transformers import TFAutoModelForSequenceClassification model = TFAutoModelForSequenceClassification.from_pretrained("distilbert-base-uncased", num_labels=2)`,highlighted:`<span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TFAutoModelForSequenceClassification model = TFAutoModelForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>, num_labels=<span class="hljs-number">2</span>)`}}),qt=new b({props:{code:`import tensorflow as tf model.compile(optimizer=optimizer)`,highlighted:`<span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf model.<span class="hljs-built_in">compile</span>(optimizer=optimizer)`}}),Et=new b({props:{code:`model.fit( tf_train_set, validation_data=tf_validation_set, epochs=num_train_epochs, )`,highlighted:`model.fit( tf_train_set, validation_data=tf_validation_set, epochs=num_train_epochs, )`}}),Tt=new z({}),qe=new Su({props:{$$slots:{default:[bm]},$$scope:{ctx:V}}}),zt=new z({}),Pt=new b({props:{code:`from datasets import load_dataset wnut = load_dataset("wnut_17")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; 
</span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span>wnut = load_dataset(<span class="hljs-string">&quot;wnut_17&quot;</span>)`}}),Ft=new b({props:{code:'wnut["train"][0]',highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>wnut[<span class="hljs-string">&quot;train&quot;</span>][<span class="hljs-number">0</span>] {<span class="hljs-string">&#x27;id&#x27;</span>: <span class="hljs-string">&#x27;0&#x27;</span>, <span class="hljs-string">&#x27;ner_tags&#x27;</span>: [<span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">7</span>, <span class="hljs-number">8</span>, <span class="hljs-number">8</span>, <span class="hljs-number">0</span>, <span class="hljs-number">7</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>], <span class="hljs-string">&#x27;tokens&#x27;</span>: [<span class="hljs-string">&#x27;@paulwalk&#x27;</span>, <span class="hljs-string">&#x27;It&#x27;</span>, <span class="hljs-string">&quot;&#x27;s&quot;</span>, <span class="hljs-string">&#x27;the&#x27;</span>, <span class="hljs-string">&#x27;view&#x27;</span>, <span class="hljs-string">&#x27;from&#x27;</span>, <span class="hljs-string">&#x27;where&#x27;</span>, <span class="hljs-string">&#x27;I&#x27;</span>, <span class="hljs-string">&quot;&#x27;m&quot;</span>, <span class="hljs-string">&#x27;living&#x27;</span>, <span class="hljs-string">&#x27;for&#x27;</span>, <span class="hljs-string">&#x27;two&#x27;</span>, <span class="hljs-string">&#x27;weeks&#x27;</span>, <span class="hljs-string">&#x27;.&#x27;</span>, <span class="hljs-string">&#x27;Empire&#x27;</span>, <span class="hljs-string">&#x27;State&#x27;</span>, <span class="hljs-string">&#x27;Building&#x27;</span>, <span class="hljs-string">&#x27;=&#x27;</span>, <span class="hljs-string">&#x27;ESB&#x27;</span>, <span class="hljs-string">&#x27;.&#x27;</span>, <span class="hljs-string">&#x27;Pretty&#x27;</span>, <span class="hljs-string">&#x27;bad&#x27;</span>, <span class="hljs-string">&#x27;storm&#x27;</span>, <span class="hljs-string">&#x27;here&#x27;</span>, <span class="hljs-string">&#x27;last&#x27;</span>, <span class="hljs-string">&#x27;evening&#x27;</span>, <span class="hljs-string">&#x27;.&#x27;</span>] }`}}),Ct=new b({props:{code:`label_list = wnut["train"].features[f"ner_tags"].feature.names label_list`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>label_list = wnut[<span class="hljs-string">&quot;train&quot;</span>].features[<span class="hljs-string">f&quot;ner_tags&quot;</span>].feature.names <span class="hljs-meta">&gt;&gt;&gt; </span>label_list [ <span class="hljs-string">&quot;O&quot;</span>, <span class="hljs-string">&quot;B-corporation&quot;</span>, <span class="hljs-string">&quot;I-corporation&quot;</span>, <span 
class="hljs-string">&quot;B-creative-work&quot;</span>, <span class="hljs-string">&quot;I-creative-work&quot;</span>, <span class="hljs-string">&quot;B-group&quot;</span>, <span class="hljs-string">&quot;I-group&quot;</span>, <span class="hljs-string">&quot;B-location&quot;</span>, <span class="hljs-string">&quot;I-location&quot;</span>, <span class="hljs-string">&quot;B-person&quot;</span>, <span class="hljs-string">&quot;I-person&quot;</span>, <span class="hljs-string">&quot;B-product&quot;</span>, <span class="hljs-string">&quot;I-product&quot;</span>, ]`}}),Dt=new z({}),St=new b({props:{code:`from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")`,highlighted:`<span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>)`}}),Nt=new b({props:{code:`tokenized_input = tokenizer(example["tokens"], is_split_into_words=True) tokens = tokenizer.convert_ids_to_tokens(tokenized_input["input_ids"]) tokens`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>tokenized_input = tokenizer(example[<span class="hljs-string">&quot;tokens&quot;</span>], is_split_into_words=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokens = tokenizer.convert_ids_to_tokens(tokenized_input[<span class="hljs-string">&quot;input_ids&quot;</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>tokens [<span class="hljs-string">&#x27;[CLS]&#x27;</span>, <span class="hljs-string">&#x27;@&#x27;</span>, <span class="hljs-string">&#x27;paul&#x27;</span>, <span class="hljs-string">&#x27;##walk&#x27;</span>, <span class="hljs-string">&#x27;it&#x27;</span>, <span class="hljs-string">&quot;&#x27;&quot;</span>, <span class="hljs-string">&#x27;s&#x27;</span>, <span class="hljs-string">&#x27;the&#x27;</span>, <span class="hljs-string">&#x27;view&#x27;</span>, <span class="hljs-string">&#x27;from&#x27;</span>, <span class="hljs-string">&#x27;where&#x27;</span>, <span class="hljs-string">&#x27;i&#x27;</span>, <span class="hljs-string">&quot;&#x27;&quot;</span>, <span class="hljs-string">&#x27;m&#x27;</span>, <span class="hljs-string">&#x27;living&#x27;</span>, <span class="hljs-string">&#x27;for&#x27;</span>, <span class="hljs-string">&#x27;two&#x27;</span>, <span class="hljs-string">&#x27;weeks&#x27;</span>, <span class="hljs-string">&#x27;.&#x27;</span>, <span class="hljs-string">&#x27;empire&#x27;</span>, <span class="hljs-string">&#x27;state&#x27;</span>, <span class="hljs-string">&#x27;building&#x27;</span>, <span class="hljs-string">&#x27;=&#x27;</span>, <span class="hljs-string">&#x27;es&#x27;</span>, <span class="hljs-string">&#x27;##b&#x27;</span>, <span class="hljs-string">&#x27;.&#x27;</span>, <span class="hljs-string">&#x27;pretty&#x27;</span>, <span class="hljs-string">&#x27;bad&#x27;</span>, <span class="hljs-string">&#x27;storm&#x27;</span>, <span class="hljs-string">&#x27;here&#x27;</span>, <span class="hljs-string">&#x27;last&#x27;</span>, <span class="hljs-string">&#x27;evening&#x27;</span>, <span class="hljs-string">&#x27;.&#x27;</span>, <span class="hljs-string">&#x27;[SEP]&#x27;</span>]`}}),Lt=new b({props:{code:`def tokenize_and_align_labels(examples): tokenized_inputs = tokenizer(examples["tokens"], truncation=True, is_split_into_words=True) labels = [] for i, label in enumerate(examples[f"ner_tags"]): word_ids = tokenized_inputs.word_ids(batch_index=i) # Map tokens 
to their respective word. previous_word_idx = None label_ids = [] for word_idx in word_ids: # Set the special tokens to -100. if word_idx is None: label_ids.append(-100) elif word_idx != previous_word_idx: # Only label the first token of a given word. label_ids.append(label[word_idx]) else: label_ids.append(-100) previous_word_idx = word_idx labels.append(label_ids) tokenized_inputs["labels"] = labels return tokenized_inputs`,highlighted:`<span class="hljs-keyword">def</span> <span class="hljs-title function_">tokenize_and_align_labels</span>(<span class="hljs-params">examples</span>): tokenized_inputs = tokenizer(examples[<span class="hljs-string">&quot;tokens&quot;</span>], truncation=<span class="hljs-literal">True</span>, is_split_into_words=<span class="hljs-literal">True</span>) labels = [] <span class="hljs-keyword">for</span> i, label <span class="hljs-keyword">in</span> <span class="hljs-built_in">enumerate</span>(examples[<span class="hljs-string">f&quot;ner_tags&quot;</span>]): word_ids = tokenized_inputs.word_ids(batch_index=i) <span class="hljs-comment"># Map tokens to their respective word.</span> previous_word_idx = <span class="hljs-literal">None</span> label_ids = [] <span class="hljs-keyword">for</span> word_idx <span class="hljs-keyword">in</span> word_ids: <span class="hljs-comment"># Set the special tokens to -100.</span> <span class="hljs-keyword">if</span> word_idx <span class="hljs-keyword">is</span> <span class="hljs-literal">None</span>: label_ids.append(-<span class="hljs-number">100</span>) <span class="hljs-keyword">elif</span> word_idx != previous_word_idx: <span class="hljs-comment"># Only label the first token of a given word.</span> label_ids.append(label[word_idx]) <span class="hljs-keyword">else</span>: label_ids.append(-<span class="hljs-number">100</span>) previous_word_idx = word_idx labels.append(label_ids) tokenized_inputs[<span class="hljs-string">&quot;labels&quot;</span>] = labels <span class="hljs-keyword">return</span> tokenized_inputs`}}),Ot=new b({props:{code:"tokenized_wnut = wnut.map(tokenize_and_align_labels, batched=True)",highlighted:'tokenized_wnut = wnut.<span class="hljs-built_in">map</span>(tokenize_and_align_labels, batched=<span class="hljs-literal">True</span>)'}}),Bt=new b({props:{code:`from transformers import DataCollatorForTokenClassification data_collator = DataCollatorForTokenClassification(tokenizer)`,highlighted:`<span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DataCollatorForTokenClassification data_collator = DataCollatorForTokenClassification(tokenizer)`}}),Wt=new z({}),Ht=new b({props:{code:`from transformers import AutoModelForTokenClassification, TrainingArguments, Trainer model = AutoModelForTokenClassification.from_pretrained("distilbert-base-uncased", num_labels=len(label_list))`,highlighted:`<span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModelForTokenClassification, TrainingArguments, Trainer model = AutoModelForTokenClassification.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>, num_labels=<span class="hljs-built_in">len</span>(label_list))`}}),Qt=new b({props:{code:`training_args = TrainingArguments( output_dir="./results", evaluation_strategy="epoch", learning_rate=2e-5, per_device_train_batch_size=16, per_device_eval_batch_size=16, num_train_epochs=3, weight_decay=0.01, )`,highlighted:`training_args = TrainingArguments( output_dir=<span 
class="hljs-string">&quot;./results&quot;</span>, evaluation_strategy=<span class="hljs-string">&quot;epoch&quot;</span>, learning_rate=<span class="hljs-number">2e-5</span>, per_device_train_batch_size=<span class="hljs-number">16</span>, per_device_eval_batch_size=<span class="hljs-number">16</span>, num_train_epochs=<span class="hljs-number">3</span>, weight_decay=<span class="hljs-number">0.01</span>, )`}}),Rt=new b({props:{code:`trainer = Trainer( model=model, args=training_args, train_dataset=tokenized_wnut["train"], eval_dataset=tokenized_wnut["test"], data_collator=data_collator, tokenizer=tokenizer, )`,highlighted:`trainer = Trainer( model=model, args=training_args, train_dataset=tokenized_wnut[<span class="hljs-string">&quot;train&quot;</span>], eval_dataset=tokenized_wnut[<span class="hljs-string">&quot;test&quot;</span>], data_collator=data_collator, tokenizer=tokenizer, )`}}),Ut=new b({props:{code:"trainer.train()",highlighted:"trainer.train()"}}),Yt=new z({}),Gt=new b({props:{code:`from transformers import DataCollatorForTokenClassification data_collator = DataCollatorForTokenClassification(tokenizer, return_tensors="tf")`,highlighted:`<span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DataCollatorForTokenClassification data_collator = DataCollatorForTokenClassification(tokenizer, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>)`}}),Vt=new b({props:{code:`tf_train_set = tokenized_wnut["train"].to_tf_dataset( columns=["attention_mask", "input_ids", "labels"], shuffle=True, batch_size=16, collate_fn=data_collator, ) tf_validation_set = tokenized_wnut["validation"].to_tf_dataset( columns=["attention_mask", "input_ids", "labels"], shuffle=False, batch_size=16, collate_fn=data_collator, )`,highlighted:`tf_train_set = tokenized_wnut[<span class="hljs-string">&quot;train&quot;</span>].to_tf_dataset( columns=[<span class="hljs-string">&quot;attention_mask&quot;</span>, <span class="hljs-string">&quot;input_ids&quot;</span>, <span class="hljs-string">&quot;labels&quot;</span>], shuffle=<span class="hljs-literal">True</span>, batch_size=<span class="hljs-number">16</span>, collate_fn=data_collator, ) tf_validation_set = tokenized_wnut[<span class="hljs-string">&quot;validation&quot;</span>].to_tf_dataset( columns=[<span class="hljs-string">&quot;attention_mask&quot;</span>, <span class="hljs-string">&quot;input_ids&quot;</span>, <span class="hljs-string">&quot;labels&quot;</span>], shuffle=<span class="hljs-literal">False</span>, batch_size=<span class="hljs-number">16</span>, collate_fn=data_collator, )`}}),Jt=new b({props:{code:`from transformers import TFAutoModelForTokenClassification model = TFAutoModelForTokenClassification.from_pretrained("distilbert-base-uncased", num_labels=len(label_list))`,highlighted:`<span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TFAutoModelForTokenClassification model = TFAutoModelForTokenClassification.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>, num_labels=<span class="hljs-built_in">len</span>(label_list))`}}),Kt=new b({props:{code:`from transformers import create_optimizer batch_size = 16 num_train_epochs = 3 num_train_steps = (len(tokenized_datasets["train"]) // batch_size) * num_train_epochs optimizer, lr_schedule = create_optimizer( init_lr=2e-5, num_train_steps=num_train_steps, weight_decay_rate=0.01, num_warmup_steps=0, )`,highlighted:`<span class="hljs-keyword">from</span> transformers <span 
class="hljs-keyword">import</span> create_optimizer batch_size = <span class="hljs-number">16</span> num_train_epochs = <span class="hljs-number">3</span> num_train_steps = (<span class="hljs-built_in">len</span>(tokenized_datasets[<span class="hljs-string">&quot;train&quot;</span>]) // batch_size) * num_train_epochs optimizer, lr_schedule = create_optimizer( init_lr=<span class="hljs-number">2e-5</span>, num_train_steps=num_train_steps, weight_decay_rate=<span class="hljs-number">0.01</span>, num_warmup_steps=<span class="hljs-number">0</span>, )`}}),Xt=new b({props:{code:`import tensorflow as tf model.compile(optimizer=optimizer)`,highlighted:`<span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf model.<span class="hljs-built_in">compile</span>(optimizer=optimizer)`}}),Zt=new b({props:{code:`model.fit( tf_train_set, validation_data=tf_validation_set, epochs=num_train_epochs, )`,highlighted:`model.fit( tf_train_set, validation_data=tf_validation_set, epochs=num_train_epochs, )`}}),es=new z({}),Be=new Su({props:{$$slots:{default:[$m]},$$scope:{ctx:V}}}),ss=new z({}),as=new b({props:{code:`from datasets import load_dataset squad = load_dataset("squad")`,highlighted:`<span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset squad = load_dataset(<span class="hljs-string">&quot;squad&quot;</span>)`}}),ns=new b({props:{code:'squad["train"][0]',highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>squad[<span class="hljs-string">&quot;train&quot;</span>][<span class="hljs-number">0</span>] {<span class="hljs-string">&#x27;answers&#x27;</span>: {<span class="hljs-string">&#x27;answer_start&#x27;</span>: [<span class="hljs-number">515</span>], <span class="hljs-string">&#x27;text&#x27;</span>: [<span class="hljs-string">&#x27;Saint Bernadette Soubirous&#x27;</span>]}, <span class="hljs-string">&#x27;context&#x27;</span>: <span class="hljs-string">&#x27;Architecturally, the school has a Catholic character. Atop the Main Building\\&#x27;s gold dome is a golden statue of the Virgin Mary. Immediately in front of the Main Building and facing it, is a copper statue of Christ with arms upraised with the legend &quot;Venite Ad Me Omnes&quot;. Next to the Main Building is the Basilica of the Sacred Heart. Immediately behind the basilica is the Grotto, a Marian place of prayer and reflection. It is a replica of the grotto at Lourdes, France where the Virgin Mary reputedly appeared to Saint Bernadette Soubirous in 1858. 
At the end of the main drive (and in a direct line that connects through 3 statues and the Gold Dome), is a simple, modern stone statue of Mary.&#x27;</span>, <span class="hljs-string">&#x27;id&#x27;</span>: <span class="hljs-string">&#x27;5733be284776f41900661182&#x27;</span>, <span class="hljs-string">&#x27;question&#x27;</span>: <span class="hljs-string">&#x27;To whom did the Virgin Mary allegedly appear in 1858 in Lourdes France?&#x27;</span>, <span class="hljs-string">&#x27;title&#x27;</span>: <span class="hljs-string">&#x27;University_of_Notre_Dame&#x27;</span> }`}}),os=new z({}),rs=new b({props:{code:`from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")`,highlighted:`<span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>)`}}),ps=new b({props:{code:`def preprocess_function(examples): questions = [q.strip() for q in examples["question"]] inputs = tokenizer( questions, examples["context"], max_length=384, truncation="only_second", return_offsets_mapping=True, padding="max_length", ) offset_mapping = inputs.pop("offset_mapping") answers = examples["answers"] start_positions = [] end_positions = [] for i, offset in enumerate(offset_mapping): answer = answers[i] start_char = answer["answer_start"][0] end_char = answer["answer_start"][0] + len(answer["text"][0]) sequence_ids = inputs.sequence_ids(i) # Find the start and end of the context idx = 0 while sequence_ids[idx] != 1: idx += 1 context_start = idx while sequence_ids[idx] == 1: idx += 1 context_end = idx - 1 # If the answer is not fully inside the context, label it (0, 0) if offset[context_start][0] > end_char or offset[context_end][1] < start_char: start_positions.append(0) end_positions.append(0) else: # Otherwise it's the start and end token positions idx = context_start while idx <= context_end and offset[idx][0] <= start_char: idx += 1 start_positions.append(idx - 1) idx = context_end while idx >= context_start and offset[idx][1] >= end_char: idx -= 1 end_positions.append(idx + 1) inputs["start_positions"] = start_positions inputs["end_positions"] = end_positions return inputs`,highlighted:`<span class="hljs-keyword">def</span> <span class="hljs-title function_">preprocess_function</span>(<span class="hljs-params">examples</span>): questions = [q.strip() <span class="hljs-keyword">for</span> q <span class="hljs-keyword">in</span> examples[<span class="hljs-string">&quot;question&quot;</span>]] inputs = tokenizer( questions, examples[<span class="hljs-string">&quot;context&quot;</span>], max_length=<span class="hljs-number">384</span>, truncation=<span class="hljs-string">&quot;only_second&quot;</span>, return_offsets_mapping=<span class="hljs-literal">True</span>, padding=<span class="hljs-string">&quot;max_length&quot;</span>, ) offset_mapping = inputs.pop(<span class="hljs-string">&quot;offset_mapping&quot;</span>) answers = examples[<span class="hljs-string">&quot;answers&quot;</span>] start_positions = [] end_positions = [] <span class="hljs-keyword">for</span> i, offset <span class="hljs-keyword">in</span> <span class="hljs-built_in">enumerate</span>(offset_mapping): answer = answers[i] start_char = answer[<span class="hljs-string">&quot;answer_start&quot;</span>][<span class="hljs-number">0</span>] end_char = answer[<span class="hljs-string">&quot;answer_start&quot;</span>][<span class="hljs-number">0</span>] 
+ <span class="hljs-built_in">len</span>(answer[<span class="hljs-string">&quot;text&quot;</span>][<span class="hljs-number">0</span>]) sequence_ids = inputs.sequence_ids(i) <span class="hljs-comment"># Find the start and end of the context</span> idx = <span class="hljs-number">0</span> <span class="hljs-keyword">while</span> sequence_ids[idx] != <span class="hljs-number">1</span>: idx += <span class="hljs-number">1</span> context_start = idx <span class="hljs-keyword">while</span> sequence_ids[idx] == <span class="hljs-number">1</span>: idx += <span class="hljs-number">1</span> context_end = idx - <span class="hljs-number">1</span> <span class="hljs-comment"># If the answer is not fully inside the context, label it (0, 0)</span> <span class="hljs-keyword">if</span> offset[context_start][<span class="hljs-number">0</span>] &gt; end_char <span class="hljs-keyword">or</span> offset[context_end][<span class="hljs-number">1</span>] &lt; start_char: start_positions.append(<span class="hljs-number">0</span>) end_positions.append(<span class="hljs-number">0</span>) <span class="hljs-keyword">else</span>: <span class="hljs-comment"># Otherwise it&#x27;s the start and end token positions</span> idx = context_start <span class="hljs-keyword">while</span> idx &lt;= context_end <span class="hljs-keyword">and</span> offset[idx][<span class="hljs-number">0</span>] &lt;= start_char: idx += <span class="hljs-number">1</span> start_positions.append(idx - <span class="hljs-number">1</span>) idx = context_end <span class="hljs-keyword">while</span> idx &gt;= context_start <span class="hljs-keyword">and</span> offset[idx][<span class="hljs-number">1</span>] &gt;= end_char: idx -= <span class="hljs-number">1</span> end_positions.append(idx + <span class="hljs-number">1</span>) inputs[<span class="hljs-string">&quot;start_positions&quot;</span>] = start_positions inputs[<span class="hljs-string">&quot;end_positions&quot;</span>] = end_positions <span class="hljs-keyword">return</span> inputs`}}),fs=new b({props:{code:'tokenized_squad = squad.map(preprocess_function, batched=True, remove_columns=squad["train"].column_names)',highlighted:'tokenized_squad = squad.<span class="hljs-built_in">map</span>(preprocess_function, batched=<span class="hljs-literal">True</span>, remove_columns=squad[<span class="hljs-string">&quot;train&quot;</span>].column_names)'}}),hs=new b({props:{code:`from transformers import default_data_collator data_collator = default_data_collator`,highlighted:`<span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> default_data_collator data_collator = default_data_collator`}}),us=new z({}),ds=new b({props:{code:`from transformers import AutoModelForQuestionAnswering, TrainingArguments, Trainer model = AutoModelForQuestionAnswering.from_pretrained("distilbert-base-uncased")`,highlighted:`<span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModelForQuestionAnswering, TrainingArguments, Trainer model = AutoModelForQuestionAnswering.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>)`}}),cs=new b({props:{code:`training_args = TrainingArguments( output_dir="./results", evaluation_strategy="epoch", learning_rate=2e-5, per_device_train_batch_size=16, per_device_eval_batch_size=16, num_train_epochs=3, weight_decay=0.01, )`,highlighted:`training_args = TrainingArguments( output_dir=<span class="hljs-string">&quot;./results&quot;</span>, evaluation_strategy=<span 
class="hljs-string">&quot;epoch&quot;</span>, learning_rate=<span class="hljs-number">2e-5</span>, per_device_train_batch_size=<span class="hljs-number">16</span>, per_device_eval_batch_size=<span class="hljs-number">16</span>, num_train_epochs=<span class="hljs-number">3</span>, weight_decay=<span class="hljs-number">0.01</span>, )`}}),ms=new b({props:{code:`trainer = Trainer( model=model, args=training_args, train_dataset=tokenized_squad["train"], eval_dataset=tokenized_squad["validation"], data_collator=data_collator, tokenizer=tokenizer, )`,highlighted:`trainer = Trainer( model=model, args=training_args, train_dataset=tokenized_squad[<span class="hljs-string">&quot;train&quot;</span>], eval_dataset=tokenized_squad[<span class="hljs-string">&quot;validation&quot;</span>], data_collator=data_collator, tokenizer=tokenizer, )`}}),_s=new b({props:{code:"trainer.train()",highlighted:"trainer.train()"}}),ws=new z({}),gs=new b({props:{code:`from transformers.data.data_collator import tf_default_collator data_collator = tf_default_collator`,highlighted:`<span class="hljs-keyword">from</span> transformers.data.data_collator <span class="hljs-keyword">import</span> tf_default_collator data_collator = tf_default_collator`}}),bs=new b({props:{code:`tf_train_set = tokenized_squad["train"].to_tf_dataset( columns=["attention_mask", "input_ids", "start_positions", "end_positions"], dummy_labels=True, shuffle=True, batch_size=16, collate_fn=data_collator, ) tf_validation_set = tokenized_squad["validation"].to_tf_dataset( columns=["attention_mask", "input_ids", "start_positions", "end_positions"], dummy_labels=True, shuffle=False, batch_size=16, collate_fn=data_collator, )`,highlighted:`tf_train_set = tokenized_squad[<span class="hljs-string">&quot;train&quot;</span>].to_tf_dataset( columns=[<span class="hljs-string">&quot;attention_mask&quot;</span>, <span class="hljs-string">&quot;input_ids&quot;</span>, <span class="hljs-string">&quot;start_positions&quot;</span>, <span class="hljs-string">&quot;end_positions&quot;</span>], dummy_labels=<span class="hljs-literal">True</span>, shuffle=<span class="hljs-literal">True</span>, batch_size=<span class="hljs-number">16</span>, collate_fn=data_collator, ) tf_validation_set = tokenized_squad[<span class="hljs-string">&quot;validation&quot;</span>].to_tf_dataset( columns=[<span class="hljs-string">&quot;attention_mask&quot;</span>, <span class="hljs-string">&quot;input_ids&quot;</span>, <span class="hljs-string">&quot;start_positions&quot;</span>, <span class="hljs-string">&quot;end_positions&quot;</span>], dummy_labels=<span class="hljs-literal">True</span>, shuffle=<span class="hljs-literal">False</span>, batch_size=<span class="hljs-number">16</span>, collate_fn=data_collator, )`}}),$s=new b({props:{code:`from transformers import create_optimizer batch_size = 16 num_epochs = 2 total_train_steps = (len(tokenized_squad["train"]) // batch_size) * num_epochs optimizer, schedule = create_optimizer( init_lr=2e-5, num_warmup_steps=0, num_train_steps=total_train_steps, )`,highlighted:`<span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> create_optimizer batch_size = <span class="hljs-number">16</span> num_epochs = <span class="hljs-number">2</span> total_train_steps = (<span class="hljs-built_in">len</span>(tokenized_squad[<span class="hljs-string">&quot;train&quot;</span>]) // batch_size) * num_epochs optimizer, schedule = create_optimizer( init_lr=<span class="hljs-number">2e-5</span>, num_warmup_steps=<span 
class="hljs-number">0</span>, num_train_steps=total_train_steps, )`}}),ks=new b({props:{code:`from transformers import TFAutoModelForQuestionAnswering model = TFAutoModelForQuestionAnswering("distilbert-base-uncased")`,highlighted:`<span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TFAutoModelForQuestionAnswering model = TFAutoModelForQuestionAnswering(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>)`}}),vs=new b({props:{code:`import tensorflow as tf model.compile(optimizer=optimizer)`,highlighted:`<span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf model.<span class="hljs-built_in">compile</span>(optimizer=optimizer)`}}),js=new b({props:{code:`model.fit( tf_train_set, validation_data=tf_validation_set, epochs=num_train_epochs, )`,highlighted:`model.fit( tf_train_set, validation_data=tf_validation_set, epochs=num_train_epochs, )`}}),{c(){$=l("meta"),q=f(),k=l("h1"),y=l("a"),E=l("span"),d(v.$$.fragment),P=f(),T=l("span"),x=n("How to fine-tune a model for common downstream tasks"),j=f(),d(A.$$.fragment),C=f(),qs=l("p"),gi=n(`This guide will show you how to fine-tune \u{1F917} Transformers models for common downstream tasks. You will use the \u{1F917} Datasets library to quickly load and preprocess the datasets, getting them ready for training with PyTorch and TensorFlow.`),to=f(),ue=l("p"),bi=n(`Before you begin, make sure you have the \u{1F917} Datasets library installed. For more detailed installation instructions, refer to the \u{1F917} Datasets `),st=l("a"),$i=n("installation page"),ki=n(`. All of the examples in this guide will use \u{1F917} Datasets to load and preprocess a dataset.`),so=f(),d(at.$$.fragment),ao=f(),Es=l("p"),vi=n("Learn how to fine-tune a model for:"),no=f(),I=l("ul"),Da=l("li"),Ts=l("a"),ji=n("seq_imdb"),yi=f(),Sa=l("li"),As=l("a"),xi=n("tok_ner"),qi=f(),Na=l("li"),zs=l("a"),Ei=n("qa_squad"),oo=f(),Ps=l("a"),ro=f(),J=l("h2"),de=l("a"),Ia=l("span"),d(nt.$$.fragment),Ti=f(),Ma=l("span"),Ai=n("Sequence classification with IMDb reviews"),lo=f(),ce=l("p"),zi=n(`Sequence classification refers to the task of classifying sequences of text according to a given number of classes. In this example, learn how to fine-tune a model on the `),ot=l("a"),Pi=n("IMDb dataset"),Fi=n(` to determine whether a review is positive or negative.`),io=f(),d(me.$$.fragment),po=f(),K=l("h3"),_e=l("a"),La=l("span"),d(rt.$$.fragment),Ci=f(),Oa=l("span"),Di=n("Load IMDb dataset"),fo=f(),Fs=l("p"),Si=n("The \u{1F917} Datasets library makes it simple to load a dataset:"),ho=f(),d(lt.$$.fragment),uo=f(),we=l("p"),Ni=n("This loads a "),Ba=l("code"),Ii=n("DatasetDict"),Mi=n(" object which you can index into to view an example:"),co=f(),d(it.$$.fragment),mo=f(),X=l("h3"),ge=l("a"),Wa=l("span"),d(pt.$$.fragment),Li=f(),Ha=l("span"),Oi=n("Preprocess"),_o=f(),M=l("p"),Bi=n(`The next step is to tokenize the text into a readable format by the model. It is important to load the same tokenizer a model was trained with to ensure appropriately tokenized words. Load the DistilBERT tokenizer with the `),Cs=l("a"),Wi=n("AutoTokenizer"),Hi=n(" because we will eventually train a classifier using a pretrained "),ft=l("a"),Qi=n("DistilBERT"),Ri=n(" model:"),wo=f(),d(ht.$$.fragment),go=f(),Ds=l("p"),Ui=n(`Now that you have instantiated a tokenizer, create a function that will tokenize the text. 
You should also truncate longer sequences in the text to be no longer than the model\u2019s maximum input length:`),bo=f(),d(ut.$$.fragment),$o=f(),L=l("p"),Yi=n("Use \u{1F917} Datasets "),Qa=l("code"),Gi=n("map"),Vi=n(` function to apply the preprocessing function to the entire dataset. You can also set `),Ra=l("code"),Ji=n("batched=True"),Ki=n(` to apply the preprocessing function to multiple elements of the dataset at once for faster preprocessing:`),ko=f(),d(dt.$$.fragment),vo=f(),F=l("p"),Xi=n("Lastly, pad your text so they are a uniform length. While it is possible to pad your text in the "),Ua=l("code"),Zi=n("tokenizer"),ep=n(` function by setting `),Ya=l("code"),tp=n("padding=True"),sp=n(`, it is more efficient to only pad the text to the length of the longest element in its batch. This is known as `),Ga=l("strong"),ap=n("dynamic padding"),np=n(". You can do this with the "),Va=l("code"),op=n("DataCollatorWithPadding"),rp=n(" function:"),jo=f(),d(ct.$$.fragment),yo=f(),Z=l("h3"),be=l("a"),Ja=l("span"),d(mt.$$.fragment),lp=f(),Ka=l("span"),ip=n("Fine-tune with the Trainer API"),xo=f(),$e=l("p"),pp=n("Now load your model with the "),Ss=l("a"),fp=n("AutoModelForSequenceClassification"),hp=n(" class along with the number of expected labels:"),qo=f(),d(_t.$$.fragment),Eo=f(),Ns=l("p"),up=n("At this point, only three steps remain:"),To=f(),O=l("ol"),wt=l("li"),dp=n("Define your training hyperparameters in "),Is=l("a"),cp=n("TrainingArguments"),mp=n("."),_p=f(),gt=l("li"),wp=n("Pass the training arguments to a "),Ms=l("a"),gp=n("Trainer"),bp=n(" along with the model, dataset, tokenizer, and data collator."),$p=f(),bt=l("li"),kp=n("Call "),Xa=l("code"),vp=n("Trainer.train()"),jp=n(" to fine-tune your model."),Ao=f(),d($t.$$.fragment),zo=f(),ee=l("h3"),ke=l("a"),Za=l("span"),d(kt.$$.fragment),yp=f(),en=l("span"),xp=n("Fine-tune with TensorFlow"),Po=f(),Ls=l("p"),qp=n("Fine-tuning with TensorFlow is just as easy, with only a few differences."),Fo=f(),D=l("p"),Ep=n("Start by batching the processed examples together with dynamic padding using the "),Os=l("a"),Tp=n("DataCollatorWithPadding"),Ap=n(` function. Make sure you set `),tn=l("code"),zp=n('return_tensors="tf"'),Pp=n(" to return "),sn=l("code"),Fp=n("tf.Tensor"),Cp=n(" outputs instead of PyTorch tensors!"),Co=f(),d(vt.$$.fragment),Do=f(),S=l("p"),Dp=n("Next, convert your datasets to the "),an=l("code"),Sp=n("tf.data.Dataset"),Np=n(" format with "),nn=l("code"),Ip=n("to_tf_dataset"),Mp=n(`. Specify inputs and labels in the `),on=l("code"),Lp=n("columns"),Op=n(" argument:"),So=f(),d(jt.$$.fragment),No=f(),Bs=l("p"),Bp=n("Set up an optimizer function, learning rate schedule, and some training hyperparameters:"),Io=f(),d(yt.$$.fragment),Mo=f(),ve=l("p"),Wp=n("Load your model with the "),Ws=l("a"),Hp=n("TFAutoModelForSequenceClassification"),Qp=n(" class along with the number of expected labels:"),Lo=f(),d(xt.$$.fragment),Oo=f(),Hs=l("p"),Rp=n("Compile the model:"),Bo=f(),d(qt.$$.fragment),Wo=f(),je=l("p"),Up=n("Finally, fine-tune the model by calling "),rn=l("code"),Yp=n("model.fit"),Gp=n(":"),Ho=f(),d(Et.$$.fragment),Qo=f(),Qs=l("a"),Ro=f(),te=l("h2"),ye=l("a"),ln=l("span"),d(Tt.$$.fragment),Vp=f(),pn=l("span"),Jp=n("Token classification with WNUT emerging entities"),Uo=f(),xe=l("p"),Kp=n(`Token classification refers to the task of classifying individual tokens in a sentence. One of the most common token classification tasks is Named Entity Recognition (NER). 
NER attempts to find a label for each entity in a sentence, such as a person, location, or organization. In this example, learn how to fine-tune a model on the `),At=l("a"),Xp=n("WNUT 17"),Zp=n(" dataset to detect new entities."),Yo=f(),d(qe.$$.fragment),Go=f(),se=l("h3"),Ee=l("a"),fn=l("span"),d(zt.$$.fragment),ef=f(),hn=l("span"),tf=n("Load WNUT 17 dataset"),Vo=f(),Rs=l("p"),sf=n("Load the WNUT 17 dataset from the \u{1F917} Datasets library:"),Jo=f(),d(Pt.$$.fragment),Ko=f(),Us=l("p"),af=n("A quick look at the dataset shows the labels associated with each word in the sentence:"),Xo=f(),d(Ft.$$.fragment),Zo=f(),Ys=l("p"),nf=n("View the specific NER tags by:"),er=f(),d(Ct.$$.fragment),tr=f(),Gs=l("p"),of=n("A letter prefixes each NER tag which can mean:"),sr=f(),B=l("ul"),Vs=l("li"),un=l("code"),rf=n("B-"),lf=n(" indicates the beginning of an entity."),pf=f(),W=l("li"),dn=l("code"),ff=n("I-"),hf=n(" indicates a token is contained inside the same entity (e.g., the "),cn=l("code"),uf=n("State"),df=n(` token is a part of an entity like `),mn=l("code"),cf=n("Empire State Building"),mf=n(")."),_f=f(),Js=l("li"),_n=l("code"),wf=n("0"),gf=n(" indicates the token doesn\u2019t correspond to any entity."),ar=f(),ae=l("h3"),Te=l("a"),wn=l("span"),d(Dt.$$.fragment),bf=f(),gn=l("span"),$f=n("Preprocess"),nr=f(),Ae=l("p"),kf=n("Now you need to tokenize the text. Load the DistilBERT tokenizer with an "),Ks=l("a"),vf=n("AutoTokenizer"),jf=n(":"),or=f(),d(St.$$.fragment),rr=f(),ze=l("p"),yf=n("Since the input has already been split into words, set "),bn=l("code"),xf=n("is_split_into_words=True"),qf=n(` to tokenize the words into subwords:`),lr=f(),d(Nt.$$.fragment),ir=f(),H=l("p"),Ef=n("The addition of the special tokens "),$n=l("code"),Tf=n("[CLS]"),Af=n(" and "),kn=l("code"),zf=n("[SEP]"),Pf=n(` and subword tokenization creates a mismatch between the input and labels. Realign the labels and tokens by:`),pr=f(),Q=l("ol"),It=l("li"),Ff=n("Mapping all tokens to their corresponding word with the "),vn=l("code"),Cf=n("word_ids"),Df=n(" method."),Sf=f(),ne=l("li"),Nf=n("Assigning the label "),jn=l("code"),If=n("-100"),Mf=n(" to the special tokens "),yn=l("code"),Lf=n("[CLS]"),Of=n(" and \u201C[SEP]``` so the PyTorch loss function ignores\nthem."),Bf=f(),Mt=l("li"),Wf=n("Only labeling the first token of a given word. 
Assign "),xn=l("code"),Hf=n("-100"),Qf=n(" to the other subtokens from the same word."),fr=f(),Xs=l("p"),Rf=n("Here is how you can create a function that will realign the labels and tokens:"),hr=f(),d(Lt.$$.fragment),ur=f(),Pe=l("p"),Uf=n("Now tokenize and align the labels over the entire dataset with \u{1F917} Datasets "),qn=l("code"),Yf=n("map"),Gf=n(" function:"),dr=f(),d(Ot.$$.fragment),cr=f(),Zs=l("p"),Vf=n("Finally, pad your text and labels, so they are a uniform length:"),mr=f(),d(Bt.$$.fragment),_r=f(),oe=l("h3"),Fe=l("a"),En=l("span"),d(Wt.$$.fragment),Jf=f(),Tn=l("span"),Kf=n("Fine-tune with the Trainer API"),wr=f(),Ce=l("p"),Xf=n("Load your model with the "),ea=l("a"),Zf=n("AutoModelForTokenClassification"),eh=n(" class along with the number of expected labels:"),gr=f(),d(Ht.$$.fragment),br=f(),De=l("p"),th=n("Gather your training arguments in "),ta=l("a"),sh=n("TrainingArguments"),ah=n(":"),$r=f(),d(Qt.$$.fragment),kr=f(),Se=l("p"),nh=n("Collect your model, training arguments, dataset, data collator, and tokenizer in "),sa=l("a"),oh=n("Trainer"),rh=n(":"),vr=f(),d(Rt.$$.fragment),jr=f(),aa=l("p"),lh=n("Fine-tune your model:"),yr=f(),d(Ut.$$.fragment),xr=f(),re=l("h3"),Ne=l("a"),An=l("span"),d(Yt.$$.fragment),ih=f(),zn=l("span"),ph=n("Fine-tune with TensorFlow"),qr=f(),na=l("p"),fh=n("Batch your examples together and pad your text and labels, so they are a uniform length:"),Er=f(),d(Gt.$$.fragment),Tr=f(),R=l("p"),hh=n("Convert your datasets to the "),Pn=l("code"),uh=n("tf.data.Dataset"),dh=n(" format with "),Fn=l("code"),ch=n("to_tf_dataset"),mh=n(":"),Ar=f(),d(Vt.$$.fragment),zr=f(),Ie=l("p"),_h=n("Load the model with the "),oa=l("a"),wh=n("TFAutoModelForTokenClassification"),gh=n(" class along with the number of expected labels:"),Pr=f(),d(Jt.$$.fragment),Fr=f(),ra=l("p"),bh=n("Set up an optimizer function, learning rate schedule, and some training hyperparameters:"),Cr=f(),d(Kt.$$.fragment),Dr=f(),la=l("p"),$h=n("Compile the model:"),Sr=f(),d(Xt.$$.fragment),Nr=f(),Me=l("p"),kh=n("Call "),Cn=l("code"),vh=n("model.fit"),jh=n(" to fine-tune your model:"),Ir=f(),d(Zt.$$.fragment),Mr=f(),ia=l("a"),Lr=f(),le=l("h2"),Le=l("a"),Dn=l("span"),d(es.$$.fragment),yh=f(),Sn=l("span"),xh=n("Question Answering with SQuAD"),Or=f(),Oe=l("p"),qh=n(`There are many types of question answering (QA) tasks. Extractive QA focuses on identifying the answer from the text given a question. In this example, learn how to fine-tune a model on the `),ts=l("a"),Eh=n("SQuAD"),Th=n(" dataset."),Br=f(),d(Be.$$.fragment),Wr=f(),ie=l("h3"),We=l("a"),Nn=l("span"),d(ss.$$.fragment),Ah=f(),In=l("span"),zh=n("Load SQuAD dataset"),Hr=f(),pa=l("p"),Ph=n("Load the SQuAD dataset from the \u{1F917} Datasets library:"),Qr=f(),d(as.$$.fragment),Rr=f(),fa=l("p"),Fh=n("Take a look at an example from the dataset:"),Ur=f(),d(ns.$$.fragment),Yr=f(),pe=l("h3"),He=l("a"),Mn=l("span"),d(os.$$.fragment),Ch=f(),Ln=l("span"),Dh=n("Preprocess"),Gr=f(),Qe=l("p"),Sh=n("Load the DistilBERT tokenizer with an "),ha=l("a"),Nh=n("AutoTokenizer"),Ih=n(":"),Vr=f(),d(rs.$$.fragment),Jr=f(),ua=l("p"),Mh=n("There are a few things to be aware of when preprocessing text for question answering:"),Kr=f(),U=l("ol"),N=l("li"),Lh=n("Some examples in a dataset may have a very long "),On=l("code"),Oh=n("context"),Bh=n(` that exceeds the maximum input length of the model. 
You can deal with this by truncating the `),Bn=l("code"),Wh=n("context"),Hh=n(" and set "),Wn=l("code"),Qh=n('truncation="only_second"'),Rh=n("."),Uh=f(),ls=l("li"),Yh=n(`Next, you need to map the start and end positions of the answer to the original context. Set `),Hn=l("code"),Gh=n("return_offset_mapping=True"),Vh=n(" to handle this."),Jh=f(),is=l("li"),Kh=n("With the mapping in hand, you can find the start and end tokens of the answer. Use the "),Qn=l("code"),Xh=n("sequence_ids"),Zh=n(` method to find which part of the offset corresponds to the question, and which part of the offset corresponds to the context.`),Xr=f(),da=l("p"),eu=n("Assemble everything in a preprocessing function as shown below:"),Zr=f(),d(ps.$$.fragment),el=f(),Re=l("p"),tu=n("Apply the preprocessing function over the entire dataset with \u{1F917} Datasets "),Rn=l("code"),su=n("map"),au=n(" function:"),tl=f(),d(fs.$$.fragment),sl=f(),ca=l("p"),nu=n("Batch the processed examples together:"),al=f(),d(hs.$$.fragment),nl=f(),fe=l("h3"),Ue=l("a"),Un=l("span"),d(us.$$.fragment),ou=f(),Yn=l("span"),ru=n("Fine-tune with the Trainer API"),ol=f(),Ye=l("p"),lu=n("Load your model with the "),ma=l("a"),iu=n("AutoModelForQuestionAnswering"),pu=n(" class:"),rl=f(),d(ds.$$.fragment),ll=f(),Ge=l("p"),fu=n("Gather your training arguments in "),_a=l("a"),hu=n("TrainingArguments"),uu=n(":"),il=f(),d(cs.$$.fragment),pl=f(),Ve=l("p"),du=n("Collect your model, training arguments, dataset, data collator, and tokenizer in "),wa=l("a"),cu=n("Trainer"),mu=n(":"),fl=f(),d(ms.$$.fragment),hl=f(),ga=l("p"),_u=n("Fine-tune your model:"),ul=f(),d(_s.$$.fragment),dl=f(),he=l("h3"),Je=l("a"),Gn=l("span"),d(ws.$$.fragment),wu=f(),Vn=l("span"),gu=n("Fine-tune with TensorFlow"),cl=f(),ba=l("p"),bu=n("Batch the processed examples together with a TensorFlow default data collator:"),ml=f(),d(gs.$$.fragment),_l=f(),Y=l("p"),$u=n("Convert your datasets to the "),Jn=l("code"),ku=n("tf.data.Dataset"),vu=n(" format with the "),Kn=l("code"),ju=n("to_tf_dataset"),yu=n(" function:"),wl=f(),d(bs.$$.fragment),gl=f(),$a=l("p"),xu=n("Set up an optimizer function, learning rate schedule, and some training hyperparameters:"),bl=f(),d($s.$$.fragment),$l=f(),Ke=l("p"),qu=n("Load your model with the "),ka=l("a"),Eu=n("TFAutoModelForQuestionAnswering"),Tu=n(" class:"),kl=f(),d(ks.$$.fragment),vl=f(),va=l("p"),Au=n("Compile the model:"),jl=f(),d(vs.$$.fragment),yl=f(),Xe=l("p"),zu=n("Call "),Xn=l("code"),Pu=n("model.fit"),Fu=n(" to fine-tune the model:"),xl=f(),d(js.$$.fragment),this.h()},l(e){const a=_m('[data-svelte="svelte-1phssyn"]',document.head);$=i(a,"META",{name:!0,content:!0}),a.forEach(t),q=h(e),k=i(e,"H1",{class:!0});var ys=p(k);y=i(ys,"A",{id:!0,class:!0,href:!0});var Zn=p(y);E=i(Zn,"SPAN",{});var eo=p(E);c(v.$$.fragment,eo),eo.forEach(t),Zn.forEach(t),P=h(ys),T=i(ys,"SPAN",{});var Nu=p(T);x=o(Nu,"How to fine-tune a model for common downstream tasks"),Nu.forEach(t),ys.forEach(t),j=h(e),c(A.$$.fragment,e),C=h(e),qs=i(e,"P",{});var Iu=p(qs);gi=o(Iu,`This guide will show you how to fine-tune \u{1F917} Transformers models for common downstream tasks. You will use the \u{1F917} Datasets library to quickly load and preprocess the datasets, getting them ready for training with PyTorch and TensorFlow.`),Iu.forEach(t),to=h(e),ue=i(e,"P",{});var El=p(ue);bi=o(El,`Before you begin, make sure you have the \u{1F917} Datasets library installed. 
For more detailed installation instructions, refer to the \u{1F917} Datasets `),st=i(El,"A",{href:!0,rel:!0});var Mu=p(st);$i=o(Mu,"installation page"),Mu.forEach(t),ki=o(El,`. All of the examples in this guide will use \u{1F917} Datasets to load and preprocess a dataset.`),El.forEach(t),so=h(e),c(at.$$.fragment,e),ao=h(e),Es=i(e,"P",{});var Lu=p(Es);vi=o(Lu,"Learn how to fine-tune a model for:"),Lu.forEach(t),no=h(e),I=i(e,"UL",{});var ja=p(I);Da=i(ja,"LI",{});var Ou=p(Da);Ts=i(Ou,"A",{href:!0});var Bu=p(Ts);ji=o(Bu,"seq_imdb"),Bu.forEach(t),Ou.forEach(t),yi=h(ja),Sa=i(ja,"LI",{});var Wu=p(Sa);As=i(Wu,"A",{href:!0});var Hu=p(As);xi=o(Hu,"tok_ner"),Hu.forEach(t),Wu.forEach(t),qi=h(ja),Na=i(ja,"LI",{});var Qu=p(Na);zs=i(Qu,"A",{href:!0});var Ru=p(zs);Ei=o(Ru,"qa_squad"),Ru.forEach(t),Qu.forEach(t),ja.forEach(t),oo=h(e),Ps=i(e,"A",{id:!0}),p(Ps).forEach(t),ro=h(e),J=i(e,"H2",{class:!0});var Tl=p(J);de=i(Tl,"A",{id:!0,class:!0,href:!0});var Uu=p(de);Ia=i(Uu,"SPAN",{});var Yu=p(Ia);c(nt.$$.fragment,Yu),Yu.forEach(t),Uu.forEach(t),Ti=h(Tl),Ma=i(Tl,"SPAN",{});var Gu=p(Ma);Ai=o(Gu,"Sequence classification with IMDb reviews"),Gu.forEach(t),Tl.forEach(t),lo=h(e),ce=i(e,"P",{});var Al=p(ce);zi=o(Al,`Sequence classification refers to the task of classifying sequences of text according to a given number of classes. In this example, learn how to fine-tune a model on the `),ot=i(Al,"A",{href:!0,rel:!0});var Vu=p(ot);Pi=o(Vu,"IMDb dataset"),Vu.forEach(t),Fi=o(Al,` to determine whether a review is positive or negative.`),Al.forEach(t),io=h(e),c(me.$$.fragment,e),po=h(e),K=i(e,"H3",{class:!0});var zl=p(K);_e=i(zl,"A",{id:!0,class:!0,href:!0});var Ju=p(_e);La=i(Ju,"SPAN",{});var Ku=p(La);c(rt.$$.fragment,Ku),Ku.forEach(t),Ju.forEach(t),Ci=h(zl),Oa=i(zl,"SPAN",{});var Xu=p(Oa);Di=o(Xu,"Load IMDb dataset"),Xu.forEach(t),zl.forEach(t),fo=h(e),Fs=i(e,"P",{});var Zu=p(Fs);Si=o(Zu,"The \u{1F917} Datasets library makes it simple to load a dataset:"),Zu.forEach(t),ho=h(e),c(lt.$$.fragment,e),uo=h(e),we=i(e,"P",{});var Pl=p(we);Ni=o(Pl,"This loads a "),Ba=i(Pl,"CODE",{});var ed=p(Ba);Ii=o(ed,"DatasetDict"),ed.forEach(t),Mi=o(Pl," object which you can index into to view an example:"),Pl.forEach(t),co=h(e),c(it.$$.fragment,e),mo=h(e),X=i(e,"H3",{class:!0});var Fl=p(X);ge=i(Fl,"A",{id:!0,class:!0,href:!0});var td=p(ge);Wa=i(td,"SPAN",{});var sd=p(Wa);c(pt.$$.fragment,sd),sd.forEach(t),td.forEach(t),Li=h(Fl),Ha=i(Fl,"SPAN",{});var ad=p(Ha);Oi=o(ad,"Preprocess"),ad.forEach(t),Fl.forEach(t),_o=h(e),M=i(e,"P",{});var ya=p(M);Bi=o(ya,`The next step is to tokenize the text into a readable format by the model. It is important to load the same tokenizer a model was trained with to ensure appropriately tokenized words. Load the DistilBERT tokenizer with the `),Cs=i(ya,"A",{href:!0});var nd=p(Cs);Wi=o(nd,"AutoTokenizer"),nd.forEach(t),Hi=o(ya," because we will eventually train a classifier using a pretrained "),ft=i(ya,"A",{href:!0,rel:!0});var od=p(ft);Qi=o(od,"DistilBERT"),od.forEach(t),Ri=o(ya," model:"),ya.forEach(t),wo=h(e),c(ht.$$.fragment,e),go=h(e),Ds=i(e,"P",{});var rd=p(Ds);Ui=o(rd,`Now that you have instantiated a tokenizer, create a function that will tokenize the text. 
You should also truncate longer sequences in the text to be no longer than the model\u2019s maximum input length:`),rd.forEach(t),bo=h(e),c(ut.$$.fragment,e),$o=h(e),L=i(e,"P",{});var xa=p(L);Yi=o(xa,"Use \u{1F917} Datasets "),Qa=i(xa,"CODE",{});var ld=p(Qa);Gi=o(ld,"map"),ld.forEach(t),Vi=o(xa,` function to apply the preprocessing function to the entire dataset. You can also set `),Ra=i(xa,"CODE",{});var id=p(Ra);Ji=o(id,"batched=True"),id.forEach(t),Ki=o(xa,` to apply the preprocessing function to multiple elements of the dataset at once for faster preprocessing:`),xa.forEach(t),ko=h(e),c(dt.$$.fragment,e),vo=h(e),F=i(e,"P",{});var G=p(F);Xi=o(G,"Lastly, pad your text so they are a uniform length. While it is possible to pad your text in the "),Ua=i(G,"CODE",{});var pd=p(Ua);Zi=o(pd,"tokenizer"),pd.forEach(t),ep=o(G,` function by setting `),Ya=i(G,"CODE",{});var fd=p(Ya);tp=o(fd,"padding=True"),fd.forEach(t),sp=o(G,`, it is more efficient to only pad the text to the length of the longest element in its batch. This is known as `),Ga=i(G,"STRONG",{});var hd=p(Ga);ap=o(hd,"dynamic padding"),hd.forEach(t),np=o(G,". You can do this with the "),Va=i(G,"CODE",{});var ud=p(Va);op=o(ud,"DataCollatorWithPadding"),ud.forEach(t),rp=o(G," function:"),G.forEach(t),jo=h(e),c(ct.$$.fragment,e),yo=h(e),Z=i(e,"H3",{class:!0});var Cl=p(Z);be=i(Cl,"A",{id:!0,class:!0,href:!0});var dd=p(be);Ja=i(dd,"SPAN",{});var cd=p(Ja);c(mt.$$.fragment,cd),cd.forEach(t),dd.forEach(t),lp=h(Cl),Ka=i(Cl,"SPAN",{});var md=p(Ka);ip=o(md,"Fine-tune with the Trainer API"),md.forEach(t),Cl.forEach(t),xo=h(e),$e=i(e,"P",{});var Dl=p($e);pp=o(Dl,"Now load your model with the "),Ss=i(Dl,"A",{href:!0});var _d=p(Ss);fp=o(_d,"AutoModelForSequenceClassification"),_d.forEach(t),hp=o(Dl," class along with the number of expected labels:"),Dl.forEach(t),qo=h(e),c(_t.$$.fragment,e),Eo=h(e),Ns=i(e,"P",{});var wd=p(Ns);up=o(wd,"At this point, only three steps remain:"),wd.forEach(t),To=h(e),O=i(e,"OL",{});var qa=p(O);wt=i(qa,"LI",{});var Sl=p(wt);dp=o(Sl,"Define your training hyperparameters in "),Is=i(Sl,"A",{href:!0});var gd=p(Is);cp=o(gd,"TrainingArguments"),gd.forEach(t),mp=o(Sl,"."),Sl.forEach(t),_p=h(qa),gt=i(qa,"LI",{});var Nl=p(gt);wp=o(Nl,"Pass the training arguments to a "),Ms=i(Nl,"A",{href:!0});var bd=p(Ms);gp=o(bd,"Trainer"),bd.forEach(t),bp=o(Nl," along with the model, dataset, tokenizer, and data collator."),Nl.forEach(t),$p=h(qa),bt=i(qa,"LI",{});var Il=p(bt);kp=o(Il,"Call "),Xa=i(Il,"CODE",{});var $d=p(Xa);vp=o($d,"Trainer.train()"),$d.forEach(t),jp=o(Il," to fine-tune your model."),Il.forEach(t),qa.forEach(t),Ao=h(e),c($t.$$.fragment,e),zo=h(e),ee=i(e,"H3",{class:!0});var Ml=p(ee);ke=i(Ml,"A",{id:!0,class:!0,href:!0});var kd=p(ke);Za=i(kd,"SPAN",{});var vd=p(Za);c(kt.$$.fragment,vd),vd.forEach(t),kd.forEach(t),yp=h(Ml),en=i(Ml,"SPAN",{});var jd=p(en);xp=o(jd,"Fine-tune with TensorFlow"),jd.forEach(t),Ml.forEach(t),Po=h(e),Ls=i(e,"P",{});var yd=p(Ls);qp=o(yd,"Fine-tuning with TensorFlow is just as easy, with only a few differences."),yd.forEach(t),Fo=h(e),D=i(e,"P",{});var Ze=p(D);Ep=o(Ze,"Start by batching the processed examples together with dynamic padding using the "),Os=i(Ze,"A",{href:!0});var xd=p(Os);Tp=o(xd,"DataCollatorWithPadding"),xd.forEach(t),Ap=o(Ze,` function. 
Make sure you set `),tn=i(Ze,"CODE",{});var qd=p(tn);zp=o(qd,'return_tensors="tf"'),qd.forEach(t),Pp=o(Ze," to return "),sn=i(Ze,"CODE",{});var Ed=p(sn);Fp=o(Ed,"tf.Tensor"),Ed.forEach(t),Cp=o(Ze," outputs instead of PyTorch tensors!"),Ze.forEach(t),Co=h(e),c(vt.$$.fragment,e),Do=h(e),S=i(e,"P",{});var et=p(S);Dp=o(et,"Next, convert your datasets to the "),an=i(et,"CODE",{});var Td=p(an);Sp=o(Td,"tf.data.Dataset"),Td.forEach(t),Np=o(et," format with "),nn=i(et,"CODE",{});var Ad=p(nn);Ip=o(Ad,"to_tf_dataset"),Ad.forEach(t),Mp=o(et,`. Specify inputs and labels in the `),on=i(et,"CODE",{});var zd=p(on);Lp=o(zd,"columns"),zd.forEach(t),Op=o(et," argument:"),et.forEach(t),So=h(e),c(jt.$$.fragment,e),No=h(e),Bs=i(e,"P",{});var Pd=p(Bs);Bp=o(Pd,"Set up an optimizer function, learning rate schedule, and some training hyperparameters:"),Pd.forEach(t),Io=h(e),c(yt.$$.fragment,e),Mo=h(e),ve=i(e,"P",{});var Ll=p(ve);Wp=o(Ll,"Load your model with the "),Ws=i(Ll,"A",{href:!0});var Fd=p(Ws);Hp=o(Fd,"TFAutoModelForSequenceClassification"),Fd.forEach(t),Qp=o(Ll," class along with the number of expected labels:"),Ll.forEach(t),Lo=h(e),c(xt.$$.fragment,e),Oo=h(e),Hs=i(e,"P",{});var Cd=p(Hs);Rp=o(Cd,"Compile the model:"),Cd.forEach(t),Bo=h(e),c(qt.$$.fragment,e),Wo=h(e),je=i(e,"P",{});var Ol=p(je);Up=o(Ol,"Finally, fine-tune the model by calling "),rn=i(Ol,"CODE",{});var Dd=p(rn);Yp=o(Dd,"model.fit"),Dd.forEach(t),Gp=o(Ol,":"),Ol.forEach(t),Ho=h(e),c(Et.$$.fragment,e),Qo=h(e),Qs=i(e,"A",{id:!0}),p(Qs).forEach(t),Ro=h(e),te=i(e,"H2",{class:!0});var Bl=p(te);ye=i(Bl,"A",{id:!0,class:!0,href:!0});var Sd=p(ye);ln=i(Sd,"SPAN",{});var Nd=p(ln);c(Tt.$$.fragment,Nd),Nd.forEach(t),Sd.forEach(t),Vp=h(Bl),pn=i(Bl,"SPAN",{});var Id=p(pn);Jp=o(Id,"Token classification with WNUT emerging entities"),Id.forEach(t),Bl.forEach(t),Uo=h(e),xe=i(e,"P",{});var Wl=p(xe);Kp=o(Wl,`Token classification refers to the task of classifying individual tokens in a sentence. One of the most common token classification tasks is Named Entity Recognition (NER). NER attempts to find a label for each entity in a sentence, such as a person, location, or organization. 
In this example, learn how to fine-tune a model on the `),At=i(Wl,"A",{href:!0,rel:!0});var Md=p(At);Xp=o(Md,"WNUT 17"),Md.forEach(t),Zp=o(Wl," dataset to detect new entities."),Wl.forEach(t),Yo=h(e),c(qe.$$.fragment,e),Go=h(e),se=i(e,"H3",{class:!0});var Hl=p(se);Ee=i(Hl,"A",{id:!0,class:!0,href:!0});var Ld=p(Ee);fn=i(Ld,"SPAN",{});var Od=p(fn);c(zt.$$.fragment,Od),Od.forEach(t),Ld.forEach(t),ef=h(Hl),hn=i(Hl,"SPAN",{});var Bd=p(hn);tf=o(Bd,"Load WNUT 17 dataset"),Bd.forEach(t),Hl.forEach(t),Vo=h(e),Rs=i(e,"P",{});var Wd=p(Rs);sf=o(Wd,"Load the WNUT 17 dataset from the \u{1F917} Datasets library:"),Wd.forEach(t),Jo=h(e),c(Pt.$$.fragment,e),Ko=h(e),Us=i(e,"P",{});var Hd=p(Us);af=o(Hd,"A quick look at the dataset shows the labels associated with each word in the sentence:"),Hd.forEach(t),Xo=h(e),c(Ft.$$.fragment,e),Zo=h(e),Ys=i(e,"P",{});var Qd=p(Ys);nf=o(Qd,"View the specific NER tags by:"),Qd.forEach(t),er=h(e),c(Ct.$$.fragment,e),tr=h(e),Gs=i(e,"P",{});var Rd=p(Gs);of=o(Rd,"A letter prefixes each NER tag which can mean:"),Rd.forEach(t),sr=h(e),B=i(e,"UL",{});var Ea=p(B);Vs=i(Ea,"LI",{});var Cu=p(Vs);un=i(Cu,"CODE",{});var Ud=p(un);rf=o(Ud,"B-"),Ud.forEach(t),lf=o(Cu," indicates the beginning of an entity."),Cu.forEach(t),pf=h(Ea),W=i(Ea,"LI",{});var xs=p(W);dn=i(xs,"CODE",{});var Yd=p(dn);ff=o(Yd,"I-"),Yd.forEach(t),hf=o(xs," indicates a token is contained inside the same entity (e.g., the "),cn=i(xs,"CODE",{});var Gd=p(cn);uf=o(Gd,"State"),Gd.forEach(t),df=o(xs,` token is a part of an entity like `),mn=i(xs,"CODE",{});var Vd=p(mn);cf=o(Vd,"Empire State Building"),Vd.forEach(t),mf=o(xs,")."),xs.forEach(t),_f=h(Ea),Js=i(Ea,"LI",{});var Du=p(Js);_n=i(Du,"CODE",{});var Jd=p(_n);wf=o(Jd,"0"),Jd.forEach(t),gf=o(Du," indicates the token doesn\u2019t correspond to any entity."),Du.forEach(t),Ea.forEach(t),ar=h(e),ae=i(e,"H3",{class:!0});var Ql=p(ae);Te=i(Ql,"A",{id:!0,class:!0,href:!0});var Kd=p(Te);wn=i(Kd,"SPAN",{});var Xd=p(wn);c(Dt.$$.fragment,Xd),Xd.forEach(t),Kd.forEach(t),bf=h(Ql),gn=i(Ql,"SPAN",{});var Zd=p(gn);$f=o(Zd,"Preprocess"),Zd.forEach(t),Ql.forEach(t),nr=h(e),Ae=i(e,"P",{});var Rl=p(Ae);kf=o(Rl,"Now you need to tokenize the text. Load the DistilBERT tokenizer with an "),Ks=i(Rl,"A",{href:!0});var ec=p(Ks);vf=o(ec,"AutoTokenizer"),ec.forEach(t),jf=o(Rl,":"),Rl.forEach(t),or=h(e),c(St.$$.fragment,e),rr=h(e),ze=i(e,"P",{});var Ul=p(ze);yf=o(Ul,"Since the input has already been split into words, set "),bn=i(Ul,"CODE",{});var tc=p(bn);xf=o(tc,"is_split_into_words=True"),tc.forEach(t),qf=o(Ul,` to tokenize the words into subwords:`),Ul.forEach(t),lr=h(e),c(Nt.$$.fragment,e),ir=h(e),H=i(e,"P",{});var Ta=p(H);Ef=o(Ta,"The addition of the special tokens "),$n=i(Ta,"CODE",{});var sc=p($n);Tf=o(sc,"[CLS]"),sc.forEach(t),Af=o(Ta," and "),kn=i(Ta,"CODE",{});var ac=p(kn);zf=o(ac,"[SEP]"),ac.forEach(t),Pf=o(Ta,` and subword tokenization creates a mismatch between the input and labels. 
Realign the labels and tokens by:`),Ta.forEach(t),pr=h(e),Q=i(e,"OL",{});var Aa=p(Q);It=i(Aa,"LI",{});var Yl=p(It);Ff=o(Yl,"Mapping all tokens to their corresponding word with the "),vn=i(Yl,"CODE",{});var nc=p(vn);Cf=o(nc,"word_ids"),nc.forEach(t),Df=o(Yl," method."),Yl.forEach(t),Sf=h(Aa),ne=i(Aa,"LI",{});var za=p(ne);Nf=o(za,"Assigning the label "),jn=i(za,"CODE",{});var oc=p(jn);If=o(oc,"-100"),oc.forEach(t),Mf=o(za," to the special tokens "),yn=i(za,"CODE",{});var rc=p(yn);Lf=o(rc,"[CLS]"),rc.forEach(t),Of=o(za," and \u201C[SEP]``` so the PyTorch loss function ignores\nthem."),za.forEach(t),Bf=h(Aa),Mt=i(Aa,"LI",{});var Gl=p(Mt);Wf=o(Gl,"Only labeling the first token of a given word. Assign "),xn=i(Gl,"CODE",{});var lc=p(xn);Hf=o(lc,"-100"),lc.forEach(t),Qf=o(Gl," to the other subtokens from the same word."),Gl.forEach(t),Aa.forEach(t),fr=h(e),Xs=i(e,"P",{});var ic=p(Xs);Rf=o(ic,"Here is how you can create a function that will realign the labels and tokens:"),ic.forEach(t),hr=h(e),c(Lt.$$.fragment,e),ur=h(e),Pe=i(e,"P",{});var Vl=p(Pe);Uf=o(Vl,"Now tokenize and align the labels over the entire dataset with \u{1F917} Datasets "),qn=i(Vl,"CODE",{});var pc=p(qn);Yf=o(pc,"map"),pc.forEach(t),Gf=o(Vl," function:"),Vl.forEach(t),dr=h(e),c(Ot.$$.fragment,e),cr=h(e),Zs=i(e,"P",{});var fc=p(Zs);Vf=o(fc,"Finally, pad your text and labels, so they are a uniform length:"),fc.forEach(t),mr=h(e),c(Bt.$$.fragment,e),_r=h(e),oe=i(e,"H3",{class:!0});var Jl=p(oe);Fe=i(Jl,"A",{id:!0,class:!0,href:!0});var hc=p(Fe);En=i(hc,"SPAN",{});var uc=p(En);c(Wt.$$.fragment,uc),uc.forEach(t),hc.forEach(t),Jf=h(Jl),Tn=i(Jl,"SPAN",{});var dc=p(Tn);Kf=o(dc,"Fine-tune with the Trainer API"),dc.forEach(t),Jl.forEach(t),wr=h(e),Ce=i(e,"P",{});var Kl=p(Ce);Xf=o(Kl,"Load your model with the "),ea=i(Kl,"A",{href:!0});var cc=p(ea);Zf=o(cc,"AutoModelForTokenClassification"),cc.forEach(t),eh=o(Kl," class along with the number of expected labels:"),Kl.forEach(t),gr=h(e),c(Ht.$$.fragment,e),br=h(e),De=i(e,"P",{});var Xl=p(De);th=o(Xl,"Gather your training arguments in "),ta=i(Xl,"A",{href:!0});var mc=p(ta);sh=o(mc,"TrainingArguments"),mc.forEach(t),ah=o(Xl,":"),Xl.forEach(t),$r=h(e),c(Qt.$$.fragment,e),kr=h(e),Se=i(e,"P",{});var Zl=p(Se);nh=o(Zl,"Collect your model, training arguments, dataset, data collator, and tokenizer in "),sa=i(Zl,"A",{href:!0});var _c=p(sa);oh=o(_c,"Trainer"),_c.forEach(t),rh=o(Zl,":"),Zl.forEach(t),vr=h(e),c(Rt.$$.fragment,e),jr=h(e),aa=i(e,"P",{});var wc=p(aa);lh=o(wc,"Fine-tune your model:"),wc.forEach(t),yr=h(e),c(Ut.$$.fragment,e),xr=h(e),re=i(e,"H3",{class:!0});var ei=p(re);Ne=i(ei,"A",{id:!0,class:!0,href:!0});var gc=p(Ne);An=i(gc,"SPAN",{});var bc=p(An);c(Yt.$$.fragment,bc),bc.forEach(t),gc.forEach(t),ih=h(ei),zn=i(ei,"SPAN",{});var $c=p(zn);ph=o($c,"Fine-tune with TensorFlow"),$c.forEach(t),ei.forEach(t),qr=h(e),na=i(e,"P",{});var kc=p(na);fh=o(kc,"Batch your examples together and pad your text and labels, so they are a uniform length:"),kc.forEach(t),Er=h(e),c(Gt.$$.fragment,e),Tr=h(e),R=i(e,"P",{});var Pa=p(R);hh=o(Pa,"Convert your datasets to the "),Pn=i(Pa,"CODE",{});var vc=p(Pn);uh=o(vc,"tf.data.Dataset"),vc.forEach(t),dh=o(Pa," format with "),Fn=i(Pa,"CODE",{});var jc=p(Fn);ch=o(jc,"to_tf_dataset"),jc.forEach(t),mh=o(Pa,":"),Pa.forEach(t),Ar=h(e),c(Vt.$$.fragment,e),zr=h(e),Ie=i(e,"P",{});var ti=p(Ie);_h=o(ti,"Load the model with the "),oa=i(ti,"A",{href:!0});var yc=p(oa);wh=o(yc,"TFAutoModelForTokenClassification"),yc.forEach(t),gh=o(ti," class along with the number of expected 
labels:"),ti.forEach(t),Pr=h(e),c(Jt.$$.fragment,e),Fr=h(e),ra=i(e,"P",{});var xc=p(ra);bh=o(xc,"Set up an optimizer function, learning rate schedule, and some training hyperparameters:"),xc.forEach(t),Cr=h(e),c(Kt.$$.fragment,e),Dr=h(e),la=i(e,"P",{});var qc=p(la);$h=o(qc,"Compile the model:"),qc.forEach(t),Sr=h(e),c(Xt.$$.fragment,e),Nr=h(e),Me=i(e,"P",{});var si=p(Me);kh=o(si,"Call "),Cn=i(si,"CODE",{});var Ec=p(Cn);vh=o(Ec,"model.fit"),Ec.forEach(t),jh=o(si," to fine-tune your model:"),si.forEach(t),Ir=h(e),c(Zt.$$.fragment,e),Mr=h(e),ia=i(e,"A",{id:!0}),p(ia).forEach(t),Lr=h(e),le=i(e,"H2",{class:!0});var ai=p(le);Le=i(ai,"A",{id:!0,class:!0,href:!0});var Tc=p(Le);Dn=i(Tc,"SPAN",{});var Ac=p(Dn);c(es.$$.fragment,Ac),Ac.forEach(t),Tc.forEach(t),yh=h(ai),Sn=i(ai,"SPAN",{});var zc=p(Sn);xh=o(zc,"Question Answering with SQuAD"),zc.forEach(t),ai.forEach(t),Or=h(e),Oe=i(e,"P",{});var ni=p(Oe);qh=o(ni,`There are many types of question answering (QA) tasks. Extractive QA focuses on identifying the answer from the text given a question. In this example, learn how to fine-tune a model on the `),ts=i(ni,"A",{href:!0,rel:!0});var Pc=p(ts);Eh=o(Pc,"SQuAD"),Pc.forEach(t),Th=o(ni," dataset."),ni.forEach(t),Br=h(e),c(Be.$$.fragment,e),Wr=h(e),ie=i(e,"H3",{class:!0});var oi=p(ie);We=i(oi,"A",{id:!0,class:!0,href:!0});var Fc=p(We);Nn=i(Fc,"SPAN",{});var Cc=p(Nn);c(ss.$$.fragment,Cc),Cc.forEach(t),Fc.forEach(t),Ah=h(oi),In=i(oi,"SPAN",{});var Dc=p(In);zh=o(Dc,"Load SQuAD dataset"),Dc.forEach(t),oi.forEach(t),Hr=h(e),pa=i(e,"P",{});var Sc=p(pa);Ph=o(Sc,"Load the SQuAD dataset from the \u{1F917} Datasets library:"),Sc.forEach(t),Qr=h(e),c(as.$$.fragment,e),Rr=h(e),fa=i(e,"P",{});var Nc=p(fa);Fh=o(Nc,"Take a look at an example from the dataset:"),Nc.forEach(t),Ur=h(e),c(ns.$$.fragment,e),Yr=h(e),pe=i(e,"H3",{class:!0});var ri=p(pe);He=i(ri,"A",{id:!0,class:!0,href:!0});var Ic=p(He);Mn=i(Ic,"SPAN",{});var Mc=p(Mn);c(os.$$.fragment,Mc),Mc.forEach(t),Ic.forEach(t),Ch=h(ri),Ln=i(ri,"SPAN",{});var Lc=p(Ln);Dh=o(Lc,"Preprocess"),Lc.forEach(t),ri.forEach(t),Gr=h(e),Qe=i(e,"P",{});var li=p(Qe);Sh=o(li,"Load the DistilBERT tokenizer with an "),ha=i(li,"A",{href:!0});var Oc=p(ha);Nh=o(Oc,"AutoTokenizer"),Oc.forEach(t),Ih=o(li,":"),li.forEach(t),Vr=h(e),c(rs.$$.fragment,e),Jr=h(e),ua=i(e,"P",{});var Bc=p(ua);Mh=o(Bc,"There are a few things to be aware of when preprocessing text for question answering:"),Bc.forEach(t),Kr=h(e),U=i(e,"OL",{});var Fa=p(U);N=i(Fa,"LI",{});var tt=p(N);Lh=o(tt,"Some examples in a dataset may have a very long "),On=i(tt,"CODE",{});var Wc=p(On);Oh=o(Wc,"context"),Wc.forEach(t),Bh=o(tt,` that exceeds the maximum input length of the model. You can deal with this by truncating the `),Bn=i(tt,"CODE",{});var Hc=p(Bn);Wh=o(Hc,"context"),Hc.forEach(t),Hh=o(tt," and set "),Wn=i(tt,"CODE",{});var Qc=p(Wn);Qh=o(Qc,'truncation="only_second"'),Qc.forEach(t),Rh=o(tt,"."),tt.forEach(t),Uh=h(Fa),ls=i(Fa,"LI",{});var ii=p(ls);Yh=o(ii,`Next, you need to map the start and end positions of the answer to the original context. Set `),Hn=i(ii,"CODE",{});var Rc=p(Hn);Gh=o(Rc,"return_offset_mapping=True"),Rc.forEach(t),Vh=o(ii," to handle this."),ii.forEach(t),Jh=h(Fa),is=i(Fa,"LI",{});var pi=p(is);Kh=o(pi,"With the mapping in hand, you can find the start and end tokens of the answer. 
Use the "),Qn=i(pi,"CODE",{});var Uc=p(Qn);Xh=o(Uc,"sequence_ids"),Uc.forEach(t),Zh=o(pi,` method to find which part of the offset corresponds to the question, and which part of the offset corresponds to the context.`),pi.forEach(t),Fa.forEach(t),Xr=h(e),da=i(e,"P",{});var Yc=p(da);eu=o(Yc,"Assemble everything in a preprocessing function as shown below:"),Yc.forEach(t),Zr=h(e),c(ps.$$.fragment,e),el=h(e),Re=i(e,"P",{});var fi=p(Re);tu=o(fi,"Apply the preprocessing function over the entire dataset with \u{1F917} Datasets "),Rn=i(fi,"CODE",{});var Gc=p(Rn);su=o(Gc,"map"),Gc.forEach(t),au=o(fi," function:"),fi.forEach(t),tl=h(e),c(fs.$$.fragment,e),sl=h(e),ca=i(e,"P",{});var Vc=p(ca);nu=o(Vc,"Batch the processed examples together:"),Vc.forEach(t),al=h(e),c(hs.$$.fragment,e),nl=h(e),fe=i(e,"H3",{class:!0});var hi=p(fe);Ue=i(hi,"A",{id:!0,class:!0,href:!0});var Jc=p(Ue);Un=i(Jc,"SPAN",{});var Kc=p(Un);c(us.$$.fragment,Kc),Kc.forEach(t),Jc.forEach(t),ou=h(hi),Yn=i(hi,"SPAN",{});var Xc=p(Yn);ru=o(Xc,"Fine-tune with the Trainer API"),Xc.forEach(t),hi.forEach(t),ol=h(e),Ye=i(e,"P",{});var ui=p(Ye);lu=o(ui,"Load your model with the "),ma=i(ui,"A",{href:!0});var Zc=p(ma);iu=o(Zc,"AutoModelForQuestionAnswering"),Zc.forEach(t),pu=o(ui," class:"),ui.forEach(t),rl=h(e),c(ds.$$.fragment,e),ll=h(e),Ge=i(e,"P",{});var di=p(Ge);fu=o(di,"Gather your training arguments in "),_a=i(di,"A",{href:!0});var em=p(_a);hu=o(em,"TrainingArguments"),em.forEach(t),uu=o(di,":"),di.forEach(t),il=h(e),c(cs.$$.fragment,e),pl=h(e),Ve=i(e,"P",{});var ci=p(Ve);du=o(ci,"Collect your model, training arguments, dataset, data collator, and tokenizer in "),wa=i(ci,"A",{href:!0});var tm=p(wa);cu=o(tm,"Trainer"),tm.forEach(t),mu=o(ci,":"),ci.forEach(t),fl=h(e),c(ms.$$.fragment,e),hl=h(e),ga=i(e,"P",{});var sm=p(ga);_u=o(sm,"Fine-tune your model:"),sm.forEach(t),ul=h(e),c(_s.$$.fragment,e),dl=h(e),he=i(e,"H3",{class:!0});var mi=p(he);Je=i(mi,"A",{id:!0,class:!0,href:!0});var am=p(Je);Gn=i(am,"SPAN",{});var nm=p(Gn);c(ws.$$.fragment,nm),nm.forEach(t),am.forEach(t),wu=h(mi),Vn=i(mi,"SPAN",{});var om=p(Vn);gu=o(om,"Fine-tune with TensorFlow"),om.forEach(t),mi.forEach(t),cl=h(e),ba=i(e,"P",{});var rm=p(ba);bu=o(rm,"Batch the processed examples together with a TensorFlow default data collator:"),rm.forEach(t),ml=h(e),c(gs.$$.fragment,e),_l=h(e),Y=i(e,"P",{});var Ca=p(Y);$u=o(Ca,"Convert your datasets to the "),Jn=i(Ca,"CODE",{});var lm=p(Jn);ku=o(lm,"tf.data.Dataset"),lm.forEach(t),vu=o(Ca," format with the "),Kn=i(Ca,"CODE",{});var im=p(Kn);ju=o(im,"to_tf_dataset"),im.forEach(t),yu=o(Ca," function:"),Ca.forEach(t),wl=h(e),c(bs.$$.fragment,e),gl=h(e),$a=i(e,"P",{});var pm=p($a);xu=o(pm,"Set up an optimizer function, learning rate schedule, and some training hyperparameters:"),pm.forEach(t),bl=h(e),c($s.$$.fragment,e),$l=h(e),Ke=i(e,"P",{});var _i=p(Ke);qu=o(_i,"Load your model with the "),ka=i(_i,"A",{href:!0});var fm=p(ka);Eu=o(fm,"TFAutoModelForQuestionAnswering"),fm.forEach(t),Tu=o(_i," class:"),_i.forEach(t),kl=h(e),c(ks.$$.fragment,e),vl=h(e),va=i(e,"P",{});var hm=p(va);Au=o(hm,"Compile the model:"),hm.forEach(t),jl=h(e),c(vs.$$.fragment,e),yl=h(e),Xe=i(e,"P",{});var wi=p(Xe);zu=o(wi,"Call "),Xn=i(wi,"CODE",{});var um=p(Xn);Pu=o(um,"model.fit"),um.forEach(t),Fu=o(wi," to fine-tune the model:"),wi.forEach(t),xl=h(e),c(js.$$.fragment,e),this.h()},h(){u($,"name","hf:doc:metadata"),u($,"content",JSON.stringify(vm)),u(y,"id","how-to-finetune-a-model-for-common-downstream-tasks"),u(y,"class","header-link block pr-1.5 text-lg no-hover:hidden 
with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(y,"href","#how-to-finetune-a-model-for-common-downstream-tasks"),u(k,"class","relative group"),u(st,"href","https://huggingface.co/docs/datasets/installation.html"),u(st,"rel","nofollow"),u(Ts,"href","#seq_imdb"),u(As,"href","#tok_ner"),u(zs,"href","#qa_squad"),u(Ps,"id","seq_imdb"),u(de,"id","sequence-classification-with-imdb-reviews"),u(de,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(de,"href","#sequence-classification-with-imdb-reviews"),u(J,"class","relative group"),u(ot,"href","https://huggingface.co/datasets/imdb"),u(ot,"rel","nofollow"),u(_e,"id","load-imdb-dataset"),u(_e,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(_e,"href","#load-imdb-dataset"),u(K,"class","relative group"),u(ge,"id","preprocess"),u(ge,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(ge,"href","#preprocess"),u(X,"class","relative group"),u(Cs,"href","/docs/transformers/pr_16143/en/model_doc/auto#transformers.AutoTokenizer"),u(ft,"href","https://huggingface.co/distilbert-base-uncased"),u(ft,"rel","nofollow"),u(be,"id","finetune-with-the-trainer-api"),u(be,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(be,"href","#finetune-with-the-trainer-api"),u(Z,"class","relative group"),u(Ss,"href","/docs/transformers/pr_16143/en/model_doc/auto#transformers.AutoModelForSequenceClassification"),u(Is,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.TrainingArguments"),u(Ms,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer"),u(ke,"id","finetune-with-tensorflow"),u(ke,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(ke,"href","#finetune-with-tensorflow"),u(ee,"class","relative group"),u(Os,"href","/docs/transformers/pr_16143/en/main_classes/data_collator#transformers.DataCollatorWithPadding"),u(Ws,"href","/docs/transformers/pr_16143/en/model_doc/auto#transformers.TFAutoModelForSequenceClassification"),u(Qs,"id","tok_ner"),u(ye,"id","token-classification-with-wnut-emerging-entities"),u(ye,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(ye,"href","#token-classification-with-wnut-emerging-entities"),u(te,"class","relative group"),u(At,"href","https://huggingface.co/datasets/wnut_17"),u(At,"rel","nofollow"),u(Ee,"id","load-wnut-17-dataset"),u(Ee,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(Ee,"href","#load-wnut-17-dataset"),u(se,"class","relative group"),u(Te,"id","preprocess"),u(Te,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full"),u(Te,"href","#preprocess"),u(ae,"class","relative group"),u(Ks,"href","/docs/transformers/pr_16143/en/model_doc/auto#transformers.AutoTokenizer"),u(Fe,"id","finetune-with-the-trainer-api"),u(Fe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(Fe,"href","#finetune-with-the-trainer-api"),u(oe,"class","relative group"),u(ea,"href","/docs/transformers/pr_16143/en/model_doc/auto#transformers.AutoModelForTokenClassification"),u(ta,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.TrainingArguments"),u(sa,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer"),u(Ne,"id","finetune-with-tensorflow"),u(Ne,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(Ne,"href","#finetune-with-tensorflow"),u(re,"class","relative group"),u(oa,"href","/docs/transformers/pr_16143/en/model_doc/auto#transformers.TFAutoModelForTokenClassification"),u(ia,"id","qa_squad"),u(Le,"id","question-answering-with-squad"),u(Le,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(Le,"href","#question-answering-with-squad"),u(le,"class","relative group"),u(ts,"href","https://huggingface.co/datasets/squad"),u(ts,"rel","nofollow"),u(We,"id","load-squad-dataset"),u(We,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(We,"href","#load-squad-dataset"),u(ie,"class","relative group"),u(He,"id","preprocess"),u(He,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(He,"href","#preprocess"),u(pe,"class","relative group"),u(ha,"href","/docs/transformers/pr_16143/en/model_doc/auto#transformers.AutoTokenizer"),u(Ue,"id","finetune-with-the-trainer-api"),u(Ue,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(Ue,"href","#finetune-with-the-trainer-api"),u(fe,"class","relative group"),u(ma,"href","/docs/transformers/pr_16143/en/model_doc/auto#transformers.AutoModelForQuestionAnswering"),u(_a,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.TrainingArguments"),u(wa,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer"),u(Je,"id","finetune-with-tensorflow"),u(Je,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(Je,"href","#finetune-with-tensorflow"),u(he,"class","relative 
group"),u(ka,"href","/docs/transformers/pr_16143/en/model_doc/auto#transformers.TFAutoModelForQuestionAnswering")},m(e,a){s(document.head,$),r(e,q,a),r(e,k,a),s(k,y),s(y,E),m(v,E,null),s(k,P),s(k,T),s(T,x),r(e,j,a),m(A,e,a),r(e,C,a),r(e,qs,a),s(qs,gi),r(e,to,a),r(e,ue,a),s(ue,bi),s(ue,st),s(st,$i),s(ue,ki),r(e,so,a),m(at,e,a),r(e,ao,a),r(e,Es,a),s(Es,vi),r(e,no,a),r(e,I,a),s(I,Da),s(Da,Ts),s(Ts,ji),s(I,yi),s(I,Sa),s(Sa,As),s(As,xi),s(I,qi),s(I,Na),s(Na,zs),s(zs,Ei),r(e,oo,a),r(e,Ps,a),r(e,ro,a),r(e,J,a),s(J,de),s(de,Ia),m(nt,Ia,null),s(J,Ti),s(J,Ma),s(Ma,Ai),r(e,lo,a),r(e,ce,a),s(ce,zi),s(ce,ot),s(ot,Pi),s(ce,Fi),r(e,io,a),m(me,e,a),r(e,po,a),r(e,K,a),s(K,_e),s(_e,La),m(rt,La,null),s(K,Ci),s(K,Oa),s(Oa,Di),r(e,fo,a),r(e,Fs,a),s(Fs,Si),r(e,ho,a),m(lt,e,a),r(e,uo,a),r(e,we,a),s(we,Ni),s(we,Ba),s(Ba,Ii),s(we,Mi),r(e,co,a),m(it,e,a),r(e,mo,a),r(e,X,a),s(X,ge),s(ge,Wa),m(pt,Wa,null),s(X,Li),s(X,Ha),s(Ha,Oi),r(e,_o,a),r(e,M,a),s(M,Bi),s(M,Cs),s(Cs,Wi),s(M,Hi),s(M,ft),s(ft,Qi),s(M,Ri),r(e,wo,a),m(ht,e,a),r(e,go,a),r(e,Ds,a),s(Ds,Ui),r(e,bo,a),m(ut,e,a),r(e,$o,a),r(e,L,a),s(L,Yi),s(L,Qa),s(Qa,Gi),s(L,Vi),s(L,Ra),s(Ra,Ji),s(L,Ki),r(e,ko,a),m(dt,e,a),r(e,vo,a),r(e,F,a),s(F,Xi),s(F,Ua),s(Ua,Zi),s(F,ep),s(F,Ya),s(Ya,tp),s(F,sp),s(F,Ga),s(Ga,ap),s(F,np),s(F,Va),s(Va,op),s(F,rp),r(e,jo,a),m(ct,e,a),r(e,yo,a),r(e,Z,a),s(Z,be),s(be,Ja),m(mt,Ja,null),s(Z,lp),s(Z,Ka),s(Ka,ip),r(e,xo,a),r(e,$e,a),s($e,pp),s($e,Ss),s(Ss,fp),s($e,hp),r(e,qo,a),m(_t,e,a),r(e,Eo,a),r(e,Ns,a),s(Ns,up),r(e,To,a),r(e,O,a),s(O,wt),s(wt,dp),s(wt,Is),s(Is,cp),s(wt,mp),s(O,_p),s(O,gt),s(gt,wp),s(gt,Ms),s(Ms,gp),s(gt,bp),s(O,$p),s(O,bt),s(bt,kp),s(bt,Xa),s(Xa,vp),s(bt,jp),r(e,Ao,a),m($t,e,a),r(e,zo,a),r(e,ee,a),s(ee,ke),s(ke,Za),m(kt,Za,null),s(ee,yp),s(ee,en),s(en,xp),r(e,Po,a),r(e,Ls,a),s(Ls,qp),r(e,Fo,a),r(e,D,a),s(D,Ep),s(D,Os),s(Os,Tp),s(D,Ap),s(D,tn),s(tn,zp),s(D,Pp),s(D,sn),s(sn,Fp),s(D,Cp),r(e,Co,a),m(vt,e,a),r(e,Do,a),r(e,S,a),s(S,Dp),s(S,an),s(an,Sp),s(S,Np),s(S,nn),s(nn,Ip),s(S,Mp),s(S,on),s(on,Lp),s(S,Op),r(e,So,a),m(jt,e,a),r(e,No,a),r(e,Bs,a),s(Bs,Bp),r(e,Io,a),m(yt,e,a),r(e,Mo,a),r(e,ve,a),s(ve,Wp),s(ve,Ws),s(Ws,Hp),s(ve,Qp),r(e,Lo,a),m(xt,e,a),r(e,Oo,a),r(e,Hs,a),s(Hs,Rp),r(e,Bo,a),m(qt,e,a),r(e,Wo,a),r(e,je,a),s(je,Up),s(je,rn),s(rn,Yp),s(je,Gp),r(e,Ho,a),m(Et,e,a),r(e,Qo,a),r(e,Qs,a),r(e,Ro,a),r(e,te,a),s(te,ye),s(ye,ln),m(Tt,ln,null),s(te,Vp),s(te,pn),s(pn,Jp),r(e,Uo,a),r(e,xe,a),s(xe,Kp),s(xe,At),s(At,Xp),s(xe,Zp),r(e,Yo,a),m(qe,e,a),r(e,Go,a),r(e,se,a),s(se,Ee),s(Ee,fn),m(zt,fn,null),s(se,ef),s(se,hn),s(hn,tf),r(e,Vo,a),r(e,Rs,a),s(Rs,sf),r(e,Jo,a),m(Pt,e,a),r(e,Ko,a),r(e,Us,a),s(Us,af),r(e,Xo,a),m(Ft,e,a),r(e,Zo,a),r(e,Ys,a),s(Ys,nf),r(e,er,a),m(Ct,e,a),r(e,tr,a),r(e,Gs,a),s(Gs,of),r(e,sr,a),r(e,B,a),s(B,Vs),s(Vs,un),s(un,rf),s(Vs,lf),s(B,pf),s(B,W),s(W,dn),s(dn,ff),s(W,hf),s(W,cn),s(cn,uf),s(W,df),s(W,mn),s(mn,cf),s(W,mf),s(B,_f),s(B,Js),s(Js,_n),s(_n,wf),s(Js,gf),r(e,ar,a),r(e,ae,a),s(ae,Te),s(Te,wn),m(Dt,wn,null),s(ae,bf),s(ae,gn),s(gn,$f),r(e,nr,a),r(e,Ae,a),s(Ae,kf),s(Ae,Ks),s(Ks,vf),s(Ae,jf),r(e,or,a),m(St,e,a),r(e,rr,a),r(e,ze,a),s(ze,yf),s(ze,bn),s(bn,xf),s(ze,qf),r(e,lr,a),m(Nt,e,a),r(e,ir,a),r(e,H,a),s(H,Ef),s(H,$n),s($n,Tf),s(H,Af),s(H,kn),s(kn,zf),s(H,Pf),r(e,pr,a),r(e,Q,a),s(Q,It),s(It,Ff),s(It,vn),s(vn,Cf),s(It,Df),s(Q,Sf),s(Q,ne),s(ne,Nf),s(ne,jn),s(jn,If),s(ne,Mf),s(ne,yn),s(yn,Lf),s(ne,Of),s(Q,Bf),s(Q,Mt),s(Mt,Wf),s(Mt,xn),s(xn,Hf),s(Mt,Qf),r(e,fr,a),r(e,Xs,a),s(Xs,Rf),r(e,hr,a),m(Lt,e,a),r(e,ur,a),r(e,Pe,a),s(Pe,Uf),s(Pe,qn),s(qn,Yf),s(Pe,Gf),r(e,dr,a),m(Ot,e,a),r(e,cr,a),r(e,Zs,a),s(Zs,Vf),r(e,mr,
a),m(Bt,e,a),r(e,_r,a),r(e,oe,a),s(oe,Fe),s(Fe,En),m(Wt,En,null),s(oe,Jf),s(oe,Tn),s(Tn,Kf),r(e,wr,a),r(e,Ce,a),s(Ce,Xf),s(Ce,ea),s(ea,Zf),s(Ce,eh),r(e,gr,a),m(Ht,e,a),r(e,br,a),r(e,De,a),s(De,th),s(De,ta),s(ta,sh),s(De,ah),r(e,$r,a),m(Qt,e,a),r(e,kr,a),r(e,Se,a),s(Se,nh),s(Se,sa),s(sa,oh),s(Se,rh),r(e,vr,a),m(Rt,e,a),r(e,jr,a),r(e,aa,a),s(aa,lh),r(e,yr,a),m(Ut,e,a),r(e,xr,a),r(e,re,a),s(re,Ne),s(Ne,An),m(Yt,An,null),s(re,ih),s(re,zn),s(zn,ph),r(e,qr,a),r(e,na,a),s(na,fh),r(e,Er,a),m(Gt,e,a),r(e,Tr,a),r(e,R,a),s(R,hh),s(R,Pn),s(Pn,uh),s(R,dh),s(R,Fn),s(Fn,ch),s(R,mh),r(e,Ar,a),m(Vt,e,a),r(e,zr,a),r(e,Ie,a),s(Ie,_h),s(Ie,oa),s(oa,wh),s(Ie,gh),r(e,Pr,a),m(Jt,e,a),r(e,Fr,a),r(e,ra,a),s(ra,bh),r(e,Cr,a),m(Kt,e,a),r(e,Dr,a),r(e,la,a),s(la,$h),r(e,Sr,a),m(Xt,e,a),r(e,Nr,a),r(e,Me,a),s(Me,kh),s(Me,Cn),s(Cn,vh),s(Me,jh),r(e,Ir,a),m(Zt,e,a),r(e,Mr,a),r(e,ia,a),r(e,Lr,a),r(e,le,a),s(le,Le),s(Le,Dn),m(es,Dn,null),s(le,yh),s(le,Sn),s(Sn,xh),r(e,Or,a),r(e,Oe,a),s(Oe,qh),s(Oe,ts),s(ts,Eh),s(Oe,Th),r(e,Br,a),m(Be,e,a),r(e,Wr,a),r(e,ie,a),s(ie,We),s(We,Nn),m(ss,Nn,null),s(ie,Ah),s(ie,In),s(In,zh),r(e,Hr,a),r(e,pa,a),s(pa,Ph),r(e,Qr,a),m(as,e,a),r(e,Rr,a),r(e,fa,a),s(fa,Fh),r(e,Ur,a),m(ns,e,a),r(e,Yr,a),r(e,pe,a),s(pe,He),s(He,Mn),m(os,Mn,null),s(pe,Ch),s(pe,Ln),s(Ln,Dh),r(e,Gr,a),r(e,Qe,a),s(Qe,Sh),s(Qe,ha),s(ha,Nh),s(Qe,Ih),r(e,Vr,a),m(rs,e,a),r(e,Jr,a),r(e,ua,a),s(ua,Mh),r(e,Kr,a),r(e,U,a),s(U,N),s(N,Lh),s(N,On),s(On,Oh),s(N,Bh),s(N,Bn),s(Bn,Wh),s(N,Hh),s(N,Wn),s(Wn,Qh),s(N,Rh),s(U,Uh),s(U,ls),s(ls,Yh),s(ls,Hn),s(Hn,Gh),s(ls,Vh),s(U,Jh),s(U,is),s(is,Kh),s(is,Qn),s(Qn,Xh),s(is,Zh),r(e,Xr,a),r(e,da,a),s(da,eu),r(e,Zr,a),m(ps,e,a),r(e,el,a),r(e,Re,a),s(Re,tu),s(Re,Rn),s(Rn,su),s(Re,au),r(e,tl,a),m(fs,e,a),r(e,sl,a),r(e,ca,a),s(ca,nu),r(e,al,a),m(hs,e,a),r(e,nl,a),r(e,fe,a),s(fe,Ue),s(Ue,Un),m(us,Un,null),s(fe,ou),s(fe,Yn),s(Yn,ru),r(e,ol,a),r(e,Ye,a),s(Ye,lu),s(Ye,ma),s(ma,iu),s(Ye,pu),r(e,rl,a),m(ds,e,a),r(e,ll,a),r(e,Ge,a),s(Ge,fu),s(Ge,_a),s(_a,hu),s(Ge,uu),r(e,il,a),m(cs,e,a),r(e,pl,a),r(e,Ve,a),s(Ve,du),s(Ve,wa),s(wa,cu),s(Ve,mu),r(e,fl,a),m(ms,e,a),r(e,hl,a),r(e,ga,a),s(ga,_u),r(e,ul,a),m(_s,e,a),r(e,dl,a),r(e,he,a),s(he,Je),s(Je,Gn),m(ws,Gn,null),s(he,wu),s(he,Vn),s(Vn,gu),r(e,cl,a),r(e,ba,a),s(ba,bu),r(e,ml,a),m(gs,e,a),r(e,_l,a),r(e,Y,a),s(Y,$u),s(Y,Jn),s(Jn,ku),s(Y,vu),s(Y,Kn),s(Kn,ju),s(Y,yu),r(e,wl,a),m(bs,e,a),r(e,gl,a),r(e,$a,a),s($a,xu),r(e,bl,a),m($s,e,a),r(e,$l,a),r(e,Ke,a),s(Ke,qu),s(Ke,ka),s(ka,Eu),s(Ke,Tu),r(e,kl,a),m(ks,e,a),r(e,vl,a),r(e,va,a),s(va,Au),r(e,jl,a),m(vs,e,a),r(e,yl,a),r(e,Xe,a),s(Xe,zu),s(Xe,Xn),s(Xn,Pu),s(Xe,Fu),r(e,xl,a),m(js,e,a),ql=!0},p(e,[a]){const ys={};a&2&&(ys.$$scope={dirty:a,ctx:e}),me.$set(ys);const Zn={};a&2&&(Zn.$$scope={dirty:a,ctx:e}),qe.$set(Zn);const 
eo={};a&2&&(eo.$$scope={dirty:a,ctx:e}),Be.$set(eo)},i(e){ql||(_(v.$$.fragment,e),_(A.$$.fragment,e),_(at.$$.fragment,e),_(nt.$$.fragment,e),_(me.$$.fragment,e),_(rt.$$.fragment,e),_(lt.$$.fragment,e),_(it.$$.fragment,e),_(pt.$$.fragment,e),_(ht.$$.fragment,e),_(ut.$$.fragment,e),_(dt.$$.fragment,e),_(ct.$$.fragment,e),_(mt.$$.fragment,e),_(_t.$$.fragment,e),_($t.$$.fragment,e),_(kt.$$.fragment,e),_(vt.$$.fragment,e),_(jt.$$.fragment,e),_(yt.$$.fragment,e),_(xt.$$.fragment,e),_(qt.$$.fragment,e),_(Et.$$.fragment,e),_(Tt.$$.fragment,e),_(qe.$$.fragment,e),_(zt.$$.fragment,e),_(Pt.$$.fragment,e),_(Ft.$$.fragment,e),_(Ct.$$.fragment,e),_(Dt.$$.fragment,e),_(St.$$.fragment,e),_(Nt.$$.fragment,e),_(Lt.$$.fragment,e),_(Ot.$$.fragment,e),_(Bt.$$.fragment,e),_(Wt.$$.fragment,e),_(Ht.$$.fragment,e),_(Qt.$$.fragment,e),_(Rt.$$.fragment,e),_(Ut.$$.fragment,e),_(Yt.$$.fragment,e),_(Gt.$$.fragment,e),_(Vt.$$.fragment,e),_(Jt.$$.fragment,e),_(Kt.$$.fragment,e),_(Xt.$$.fragment,e),_(Zt.$$.fragment,e),_(es.$$.fragment,e),_(Be.$$.fragment,e),_(ss.$$.fragment,e),_(as.$$.fragment,e),_(ns.$$.fragment,e),_(os.$$.fragment,e),_(rs.$$.fragment,e),_(ps.$$.fragment,e),_(fs.$$.fragment,e),_(hs.$$.fragment,e),_(us.$$.fragment,e),_(ds.$$.fragment,e),_(cs.$$.fragment,e),_(ms.$$.fragment,e),_(_s.$$.fragment,e),_(ws.$$.fragment,e),_(gs.$$.fragment,e),_(bs.$$.fragment,e),_($s.$$.fragment,e),_(ks.$$.fragment,e),_(vs.$$.fragment,e),_(js.$$.fragment,e),ql=!0)},o(e){w(v.$$.fragment,e),w(A.$$.fragment,e),w(at.$$.fragment,e),w(nt.$$.fragment,e),w(me.$$.fragment,e),w(rt.$$.fragment,e),w(lt.$$.fragment,e),w(it.$$.fragment,e),w(pt.$$.fragment,e),w(ht.$$.fragment,e),w(ut.$$.fragment,e),w(dt.$$.fragment,e),w(ct.$$.fragment,e),w(mt.$$.fragment,e),w(_t.$$.fragment,e),w($t.$$.fragment,e),w(kt.$$.fragment,e),w(vt.$$.fragment,e),w(jt.$$.fragment,e),w(yt.$$.fragment,e),w(xt.$$.fragment,e),w(qt.$$.fragment,e),w(Et.$$.fragment,e),w(Tt.$$.fragment,e),w(qe.$$.fragment,e),w(zt.$$.fragment,e),w(Pt.$$.fragment,e),w(Ft.$$.fragment,e),w(Ct.$$.fragment,e),w(Dt.$$.fragment,e),w(St.$$.fragment,e),w(Nt.$$.fragment,e),w(Lt.$$.fragment,e),w(Ot.$$.fragment,e),w(Bt.$$.fragment,e),w(Wt.$$.fragment,e),w(Ht.$$.fragment,e),w(Qt.$$.fragment,e),w(Rt.$$.fragment,e),w(Ut.$$.fragment,e),w(Yt.$$.fragment,e),w(Gt.$$.fragment,e),w(Vt.$$.fragment,e),w(Jt.$$.fragment,e),w(Kt.$$.fragment,e),w(Xt.$$.fragment,e),w(Zt.$$.fragment,e),w(es.$$.fragment,e),w(Be.$$.fragment,e),w(ss.$$.fragment,e),w(as.$$.fragment,e),w(ns.$$.fragment,e),w(os.$$.fragment,e),w(rs.$$.fragment,e),w(ps.$$.fragment,e),w(fs.$$.fragment,e),w(hs.$$.fragment,e),w(us.$$.fragment,e),w(ds.$$.fragment,e),w(cs.$$.fragment,e),w(ms.$$.fragment,e),w(_s.$$.fragment,e),w(ws.$$.fragment,e),w(gs.$$.fragment,e),w(bs.$$.fragment,e),w($s.$$.fragment,e),w(ks.$$.fragment,e),w(vs.$$.fragment,e),w(js.$$.fragment,e),ql=!1},d(e){t($),e&&t(q),e&&t(k),g(v),e&&t(j),g(A,e),e&&t(C),e&&t(qs),e&&t(to),e&&t(ue),e&&t(so),g(at,e),e&&t(ao),e&&t(Es),e&&t(no),e&&t(I),e&&t(oo),e&&t(Ps),e&&t(ro),e&&t(J),g(nt),e&&t(lo),e&&t(ce),e&&t(io),g(me,e),e&&t(po),e&&t(K),g(rt),e&&t(fo),e&&t(Fs),e&&t(ho),g(lt,e),e&&t(uo),e&&t(we),e&&t(co),g(it,e),e&&t(mo),e&&t(X),g(pt),e&&t(_o),e&&t(M),e&&t(wo),g(ht,e),e&&t(go),e&&t(Ds),e&&t(bo),g(ut,e),e&&t($o),e&&t(L),e&&t(ko),g(dt,e),e&&t(vo),e&&t(F),e&&t(jo),g(ct,e),e&&t(yo),e&&t(Z),g(mt),e&&t(xo),e&&t($e),e&&t(qo),g(_t,e),e&&t(Eo),e&&t(Ns),e&&t(To),e&&t(O),e&&t(Ao),g($t,e),e&&t(zo),e&&t(ee),g(kt),e&&t(Po),e&&t(Ls),e&&t(Fo),e&&t(D),e&&t(Co),g(vt,e),e&&t(Do),e&&t(S),e&&t(So),g(jt,e),e&&t(No),e&&t(Bs),e&&t(Io),g(yt,e)
,e&&t(Mo),e&&t(ve),e&&t(Lo),g(xt,e),e&&t(Oo),e&&t(Hs),e&&t(Bo),g(qt,e),e&&t(Wo),e&&t(je),e&&t(Ho),g(Et,e),e&&t(Qo),e&&t(Qs),e&&t(Ro),e&&t(te),g(Tt),e&&t(Uo),e&&t(xe),e&&t(Yo),g(qe,e),e&&t(Go),e&&t(se),g(zt),e&&t(Vo),e&&t(Rs),e&&t(Jo),g(Pt,e),e&&t(Ko),e&&t(Us),e&&t(Xo),g(Ft,e),e&&t(Zo),e&&t(Ys),e&&t(er),g(Ct,e),e&&t(tr),e&&t(Gs),e&&t(sr),e&&t(B),e&&t(ar),e&&t(ae),g(Dt),e&&t(nr),e&&t(Ae),e&&t(or),g(St,e),e&&t(rr),e&&t(ze),e&&t(lr),g(Nt,e),e&&t(ir),e&&t(H),e&&t(pr),e&&t(Q),e&&t(fr),e&&t(Xs),e&&t(hr),g(Lt,e),e&&t(ur),e&&t(Pe),e&&t(dr),g(Ot,e),e&&t(cr),e&&t(Zs),e&&t(mr),g(Bt,e),e&&t(_r),e&&t(oe),g(Wt),e&&t(wr),e&&t(Ce),e&&t(gr),g(Ht,e),e&&t(br),e&&t(De),e&&t($r),g(Qt,e),e&&t(kr),e&&t(Se),e&&t(vr),g(Rt,e),e&&t(jr),e&&t(aa),e&&t(yr),g(Ut,e),e&&t(xr),e&&t(re),g(Yt),e&&t(qr),e&&t(na),e&&t(Er),g(Gt,e),e&&t(Tr),e&&t(R),e&&t(Ar),g(Vt,e),e&&t(zr),e&&t(Ie),e&&t(Pr),g(Jt,e),e&&t(Fr),e&&t(ra),e&&t(Cr),g(Kt,e),e&&t(Dr),e&&t(la),e&&t(Sr),g(Xt,e),e&&t(Nr),e&&t(Me),e&&t(Ir),g(Zt,e),e&&t(Mr),e&&t(ia),e&&t(Lr),e&&t(le),g(es),e&&t(Or),e&&t(Oe),e&&t(Br),g(Be,e),e&&t(Wr),e&&t(ie),g(ss),e&&t(Hr),e&&t(pa),e&&t(Qr),g(as,e),e&&t(Rr),e&&t(fa),e&&t(Ur),g(ns,e),e&&t(Yr),e&&t(pe),g(os),e&&t(Gr),e&&t(Qe),e&&t(Vr),g(rs,e),e&&t(Jr),e&&t(ua),e&&t(Kr),e&&t(U),e&&t(Xr),e&&t(da),e&&t(Zr),g(ps,e),e&&t(el),e&&t(Re),e&&t(tl),g(fs,e),e&&t(sl),e&&t(ca),e&&t(al),g(hs,e),e&&t(nl),e&&t(fe),g(us),e&&t(ol),e&&t(Ye),e&&t(rl),g(ds,e),e&&t(ll),e&&t(Ge),e&&t(il),g(cs,e),e&&t(pl),e&&t(Ve),e&&t(fl),g(ms,e),e&&t(hl),e&&t(ga),e&&t(ul),g(_s,e),e&&t(dl),e&&t(he),g(ws),e&&t(cl),e&&t(ba),e&&t(ml),g(gs,e),e&&t(_l),e&&t(Y),e&&t(wl),g(bs,e),e&&t(gl),e&&t($a),e&&t(bl),g($s,e),e&&t($l),e&&t(Ke),e&&t(kl),g(ks,e),e&&t(vl),e&&t(va),e&&t(jl),g(vs,e),e&&t(yl),e&&t(Xe),e&&t(xl),g(js,e)}}}const vm={local:"how-to-finetune-a-model-for-common-downstream-tasks",sections:[{local:"sequence-classification-with-imdb-reviews",sections:[{local:"load-imdb-dataset",title:"Load IMDb dataset"},{local:"preprocess",title:"Preprocess"},{local:"finetune-with-the-trainer-api",title:"Fine-tune with the Trainer API"},{local:"finetune-with-tensorflow",title:"Fine-tune with TensorFlow"}],title:"Sequence classification with IMDb reviews"},{local:"token-classification-with-wnut-emerging-entities",sections:[{local:"load-wnut-17-dataset",title:"Load WNUT 17 dataset"},{local:"preprocess",title:"Preprocess"},{local:"finetune-with-the-trainer-api",title:"Fine-tune with the Trainer API"},{local:"finetune-with-tensorflow",title:"Fine-tune with TensorFlow"}],title:"Token classification with WNUT emerging entities"},{local:"question-answering-with-squad",sections:[{local:"load-squad-dataset",title:"Load SQuAD dataset"},{local:"preprocess",title:"Preprocess"},{local:"finetune-with-the-trainer-api",title:"Fine-tune with the Trainer API"},{local:"finetune-with-tensorflow",title:"Fine-tune with TensorFlow"}],title:"Question Answering with SQuAD"}],title:"How to fine-tune a model for common downstream tasks"};function jm(V,$,q){let{fw:k}=$;return V.$$set=y=>{"fw"in y&&q(0,k=y.fw)},[k]}class zm extends dm{constructor($){super();cm(this,$,jm,km,mm,{fw:0})}}export{zm as default,vm as metadata};
241
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages/pr_checks.mdx-69a387c3.js
import{S as As,i as Ps,s as Ts,e as s,k as c,w as m,t as l,M as xs,c as a,d as o,m as f,a as r,x as y,h as i,b as p,F as t,g as h,y as v,L as Cs,q as _,o as b,B as k}from"../chunks/vendor-4833417e.js";import{I as kt}from"../chunks/IconCopyLink-4b81c553.js";import{C as I}from"../chunks/CodeBlock-6a3d1b46.js";import"../chunks/CopyButton-dacfbfaf.js";function Ds(wo){let T,ue,w,P,Le,z,Eo,Re,go,$t,me,Ao,wt,E,qe,Po,To,Se,xo,Co,Ne,Do,Io,je,Oo,Et,ye,Lo,gt,ve,Ro,At,G,Pt,_e,qo,Tt,Y,xt,be,So,Ct,O,N,Be,K,No,Me,jo,Dt,x,Bo,Fe,Mo,Fo,He,Ho,Wo,It,ke,Jo,Ot,Q,Lt,$e,Uo,Rt,g,We,zo,Go,Je,Yo,Ko,Ue,Qo,Vo,ze,Xo,qt,j,Zo,Ge,el,tl,St,V,Nt,we,ol,jt,L,B,Ye,X,ll,Ke,il,Bt,C,sl,Qe,al,rl,Ve,nl,hl,Mt,Z,Ft,Ee,cl,Ht,R,M,Xe,ee,fl,Ze,pl,Wt,d,dl,et,ul,ml,tt,yl,vl,ot,_l,bl,lt,kl,$l,it,wl,El,st,gl,Al,Jt,te,Ut,D,Pl,at,Tl,xl,rt,Cl,Dl,zt,oe,Gt,ge,Il,Yt,le,Kt,Ae,Ol,Qt,q,F,nt,ie,Ll,ht,Rl,Vt,H,ql,ct,Sl,Nl,Xt,se,Zt,Pe,jl,eo,u,ae,Bl,ft,Ml,Fl,Hl,S,Wl,pt,Jl,Ul,dt,zl,Gl,Yl,re,Kl,ut,Ql,Vl,Xl,ne,Zl,mt,ei,ti,oi,he,li,yt,ii,si,ai,ce,ri,vt,ni,hi,to,Te,ci,oo,fe,lo,xe,fi,io,W,pe,pi,_t,di,ui,mi,de,yi,bt,vi,_i,so;return z=new kt({}),G=new I({props:{code:"pip install transformers[dev]",highlighted:"pip install transformers[dev]"}}),Y=new I({props:{code:"pip install -e .[dev]",highlighted:"pip install -e .[dev]"}}),K=new kt({}),Q=new I({props:{code:"python utils/tests_fetcher.py",highlighted:"python utils/tests_fetcher.py"}}),V=new I({props:{code:"python -m pytest -n 8 --dist=loadfile -rA -s $(cat test_list.txt)",highlighted:'python -m pytest -n 8 --dist=loadfile -rA -s $(<span class="hljs-built_in">cat</span> test_list.txt)'}}),X=new kt({}),Z=new I({props:{code:"make html",highlighted:"make html"}}),ee=new kt({}),te=new I({props:{code:"make style",highlighted:"make style"}}),oe=new I({props:{code:"make quality",highlighted:"make quality"}}),le=new I({props:{code:"make fixup",highlighted:"make fixup"}}),ie=new kt({}),se=new I({props:{code:"make repo-consistency",highlighted:"make repo-consistency"}}),fe=new I({props:{code:"make fix-copies",highlighted:"make fix-copies"}}),{c(){T=s("meta"),ue=c(),w=s("h1"),P=s("a"),Le=s("span"),m(z.$$.fragment),Eo=c(),Re=s("span"),go=l("Checks on a Pull Request"),$t=c(),me=s("p"),Ao=l("When you open a pull request on \u{1F917} Transformers, a fair number of checks will be run to make sure the patch you are adding is not breaking anything existing. Those checks are of four types:"),wt=c(),E=s("ul"),qe=s("li"),Po=l("regular tests"),To=c(),Se=s("li"),xo=l("documentation build"),Co=c(),Ne=s("li"),Do=l("code and documentation style"),Io=c(),je=s("li"),Oo=l("general repository consistency"),Et=c(),ye=s("p"),Lo=l("In this document, we will take a stab at explaining what those various checks are and the reason behind them, as well as how to debug them locally if one of them fails on your PR."),gt=c(),ve=s("p"),Ro=l("Note that they all require you to have a dev install:"),At=c(),m(G.$$.fragment),Pt=c(),_e=s("p"),qo=l("or for an editable install:"),Tt=c(),m(Y.$$.fragment),xt=c(),be=s("p"),So=l("inside the Transformers repo."),Ct=c(),O=s("h2"),N=s("a"),Be=s("span"),m(K.$$.fragment),No=c(),Me=s("span"),jo=l("Tests"),Dt=c(),x=s("p"),Bo=l("All the jobs that begin with "),Fe=s("code"),Mo=l("ci/circleci: run_tests_"),Fo=l(" run parts of the Transformers testing suite. 
Each of those jobs focuses on a part of the library in a certain environment: for instance "),He=s("code"),Ho=l("ci/circleci: run_tests_pipelines_tf"),Wo=l(" runs the pipelines test in an environment where TensorFlow only is installed."),It=c(),ke=s("p"),Jo=l("Note that to avoid running tests when there is no real change in the modules they are testing, only part of the test suite is run each time: a utility is run to determine the differences in the library between before and after the PR (what GitHub shows you in the \u201CFiles changes\u201D tab) and picks the tests impacted by that diff. That utility can be run locally with:"),Ot=c(),m(Q.$$.fragment),Lt=c(),$e=s("p"),Uo=l("from the root of the Transformers repo. It will:"),Rt=c(),g=s("ol"),We=s("li"),zo=l("Check for each file in the diff if the changes are in the code or only in comments or docstrings. Only the files with real code changes are kept."),Go=c(),Je=s("li"),Yo=l("Build an internal map that gives for each file of the source code of the library all the files it recursively impacts. Module A is said to impact module B if module B imports module A. For the recursive impact, we need a chain of modules going from module A to module B in which each module imports the previous one."),Ko=c(),Ue=s("li"),Qo=l("Apply this map on the files gathered in step 1, which gives us the list of model files impacted by the PR."),Vo=c(),ze=s("li"),Xo=l("Map each of those files to their corresponding test file(s) and get the list of tests to run."),qt=c(),j=s("p"),Zo=l("When executing the script locally, you should get the results of step 1, 3 and 4 printed and thus know which tests are run. The script will also create a file named "),Ge=s("code"),el=l("test_list.txt"),tl=l(" which contains the list of tests to run, and you can run them locally with the following command:"),St=c(),m(V.$$.fragment),Nt=c(),we=s("p"),ol=l("Just in case anything slipped through the cracks, the full test suite is also run daily."),jt=c(),L=s("h2"),B=s("a"),Ye=s("span"),m(X.$$.fragment),ll=c(),Ke=s("span"),il=l("Documentation build"),Bt=c(),C=s("p"),sl=l("The job "),Qe=s("code"),al=l("ci/circleci: build_doc"),rl=l(" runs a build of the documentation just to make sure everything will be okay once your PR is merged. If that steps fails, you can inspect it locally by going into the "),Ve=s("code"),nl=l("docs"),hl=l(" folder of the Transformers repo and then typing"),Mt=c(),m(Z.$$.fragment),Ft=c(),Ee=s("p"),cl=l("Sphinx is not known for its helpful error messages, so you might have to try a few things to really find the source of the error."),Ht=c(),R=s("h2"),M=s("a"),Xe=s("span"),m(ee.$$.fragment),fl=c(),Ze=s("span"),pl=l("Code and documentation style"),Wt=c(),d=s("p"),dl=l("Code formatting is applied to all the source files, the examples and the tests using "),et=s("code"),ul=l("black"),ml=l(" and "),tt=s("code"),yl=l("isort"),vl=l(". We also have a custom tool taking care of the formatting of docstrings and "),ot=s("code"),_l=l("rst"),bl=l(" files ("),lt=s("code"),kl=l("utils/style_doc.py"),$l=l("), as well as the order of the lazy imports performed in the Transformers "),it=s("code"),wl=l("__init__.py"),El=l(" files ("),st=s("code"),gl=l("utils/custom_init_isort.py"),Al=l("). All of this can be launched by executing"),Jt=c(),m(te.$$.fragment),Ut=c(),D=s("p"),Pl=l("The CI checks those have been applied inside the "),at=s("code"),Tl=l("ci/circleci: check_code_quality"),xl=l(" check. 
It also runs "),rt=s("code"),Cl=l("flake8"),Dl=l(", that will have a basic look at your code and will complain if it finds an undefined variable, or one that is not used. To run that check locally, use"),zt=c(),m(oe.$$.fragment),Gt=c(),ge=s("p"),Il=l("This can take a lot of time, so to run the same thing on only the files you modified in the current branch, run"),Yt=c(),m(le.$$.fragment),Kt=c(),Ae=s("p"),Ol=l("This last command will also run all the additional checks for the repository consistency. Let\u2019s have a look at them."),Qt=c(),q=s("h2"),F=s("a"),nt=s("span"),m(ie.$$.fragment),Ll=c(),ht=s("span"),Rl=l("Repository consistency"),Vt=c(),H=s("p"),ql=l("This regroups all the tests to make sure your PR leaves the repository in a good state, and is performed by the "),ct=s("code"),Sl=l("ci/circleci: check_repository_consistency"),Nl=l(" check. You can locally run that check by executing the following:"),Xt=c(),m(se.$$.fragment),Zt=c(),Pe=s("p"),jl=l("This checks that:"),eo=c(),u=s("ul"),ae=s("li"),Bl=l("All objects added to the init are documented (performed by "),ft=s("code"),Ml=l("utils/check_repo.py"),Fl=l(")"),Hl=c(),S=s("li"),Wl=l("All "),pt=s("code"),Jl=l("__init__.py"),Ul=l(" files have the same content in their two sections (performed by "),dt=s("code"),zl=l("utils/check_inits.py"),Gl=l(")"),Yl=c(),re=s("li"),Kl=l("All code identified as a copy from another module is consistent with the original (performed by "),ut=s("code"),Ql=l("utils/check_copies.py"),Vl=l(")"),Xl=c(),ne=s("li"),Zl=l("The translations of the READMEs and the index of the doc have the same model list as the main README (performed by "),mt=s("code"),ei=l("utils/check_copies.py"),ti=l(")"),oi=c(),he=s("li"),li=l("The auto-generated tables in the documentation are up to date (performed by "),yt=s("code"),ii=l("utils/check_table.py"),si=l(")"),ai=c(),ce=s("li"),ri=l("The library has all objects available even if not all optional dependencies are installed (performed by "),vt=s("code"),ni=l("utils/check_dummies.py"),hi=l(")"),to=c(),Te=s("p"),ci=l("Should this check fail, the first two items require manual fixing, the last four can be fixed automatically for you by running the command"),oo=c(),m(fe.$$.fragment),lo=c(),xe=s("p"),fi=l("Additional checks concern PRs that add new models, mainly that:"),io=c(),W=s("ul"),pe=s("li"),pi=l("All models added are in an Auto-mapping (performed by "),_t=s("code"),di=l("utils/check_repo.py"),ui=l(")"),mi=c(),de=s("li"),yi=l("All models are properly tested (performed by "),bt=s("code"),vi=l("utils/check_repo.py"),_i=l(")"),this.h()},l(e){const n=xs('[data-svelte="svelte-1phssyn"]',document.head);T=a(n,"META",{name:!0,content:!0}),n.forEach(o),ue=f(e),w=a(e,"H1",{class:!0});var ao=r(w);P=a(ao,"A",{id:!0,class:!0,href:!0});var bi=r(P);Le=a(bi,"SPAN",{});var ki=r(Le);y(z.$$.fragment,ki),ki.forEach(o),bi.forEach(o),Eo=f(ao),Re=a(ao,"SPAN",{});var $i=r(Re);go=i($i,"Checks on a Pull Request"),$i.forEach(o),ao.forEach(o),$t=f(e),me=a(e,"P",{});var wi=r(me);Ao=i(wi,"When you open a pull request on \u{1F917} Transformers, a fair number of checks will be run to make sure the patch you are adding is not breaking anything existing. 
Those checks are of four types:"),wi.forEach(o),wt=f(e),E=a(e,"UL",{});var J=r(E);qe=a(J,"LI",{});var Ei=r(qe);Po=i(Ei,"regular tests"),Ei.forEach(o),To=f(J),Se=a(J,"LI",{});var gi=r(Se);xo=i(gi,"documentation build"),gi.forEach(o),Co=f(J),Ne=a(J,"LI",{});var Ai=r(Ne);Do=i(Ai,"code and documentation style"),Ai.forEach(o),Io=f(J),je=a(J,"LI",{});var Pi=r(je);Oo=i(Pi,"general repository consistency"),Pi.forEach(o),J.forEach(o),Et=f(e),ye=a(e,"P",{});var Ti=r(ye);Lo=i(Ti,"In this document, we will take a stab at explaining what those various checks are and the reason behind them, as well as how to debug them locally if one of them fails on your PR."),Ti.forEach(o),gt=f(e),ve=a(e,"P",{});var xi=r(ve);Ro=i(xi,"Note that they all require you to have a dev install:"),xi.forEach(o),At=f(e),y(G.$$.fragment,e),Pt=f(e),_e=a(e,"P",{});var Ci=r(_e);qo=i(Ci,"or for an editable install:"),Ci.forEach(o),Tt=f(e),y(Y.$$.fragment,e),xt=f(e),be=a(e,"P",{});var Di=r(be);So=i(Di,"inside the Transformers repo."),Di.forEach(o),Ct=f(e),O=a(e,"H2",{class:!0});var ro=r(O);N=a(ro,"A",{id:!0,class:!0,href:!0});var Ii=r(N);Be=a(Ii,"SPAN",{});var Oi=r(Be);y(K.$$.fragment,Oi),Oi.forEach(o),Ii.forEach(o),No=f(ro),Me=a(ro,"SPAN",{});var Li=r(Me);jo=i(Li,"Tests"),Li.forEach(o),ro.forEach(o),Dt=f(e),x=a(e,"P",{});var Ce=r(x);Bo=i(Ce,"All the jobs that begin with "),Fe=a(Ce,"CODE",{});var Ri=r(Fe);Mo=i(Ri,"ci/circleci: run_tests_"),Ri.forEach(o),Fo=i(Ce," run parts of the Transformers testing suite. Each of those jobs focuses on a part of the library in a certain environment: for instance "),He=a(Ce,"CODE",{});var qi=r(He);Ho=i(qi,"ci/circleci: run_tests_pipelines_tf"),qi.forEach(o),Wo=i(Ce," runs the pipelines test in an environment where TensorFlow only is installed."),Ce.forEach(o),It=f(e),ke=a(e,"P",{});var Si=r(ke);Jo=i(Si,"Note that to avoid running tests when there is no real change in the modules they are testing, only part of the test suite is run each time: a utility is run to determine the differences in the library between before and after the PR (what GitHub shows you in the \u201CFiles changes\u201D tab) and picks the tests impacted by that diff. That utility can be run locally with:"),Si.forEach(o),Ot=f(e),y(Q.$$.fragment,e),Lt=f(e),$e=a(e,"P",{});var Ni=r($e);Uo=i(Ni,"from the root of the Transformers repo. It will:"),Ni.forEach(o),Rt=f(e),g=a(e,"OL",{});var U=r(g);We=a(U,"LI",{});var ji=r(We);zo=i(ji,"Check for each file in the diff if the changes are in the code or only in comments or docstrings. Only the files with real code changes are kept."),ji.forEach(o),Go=f(U),Je=a(U,"LI",{});var Bi=r(Je);Yo=i(Bi,"Build an internal map that gives for each file of the source code of the library all the files it recursively impacts. Module A is said to impact module B if module B imports module A. For the recursive impact, we need a chain of modules going from module A to module B in which each module imports the previous one."),Bi.forEach(o),Ko=f(U),Ue=a(U,"LI",{});var Mi=r(Ue);Qo=i(Mi,"Apply this map on the files gathered in step 1, which gives us the list of model files impacted by the PR."),Mi.forEach(o),Vo=f(U),ze=a(U,"LI",{});var Fi=r(ze);Xo=i(Fi,"Map each of those files to their corresponding test file(s) and get the list of tests to run."),Fi.forEach(o),U.forEach(o),qt=f(e),j=a(e,"P",{});var no=r(j);Zo=i(no,"When executing the script locally, you should get the results of step 1, 3 and 4 printed and thus know which tests are run. 
The script will also create a file named "),Ge=a(no,"CODE",{});var Hi=r(Ge);el=i(Hi,"test_list.txt"),Hi.forEach(o),tl=i(no," which contains the list of tests to run, and you can run them locally with the following command:"),no.forEach(o),St=f(e),y(V.$$.fragment,e),Nt=f(e),we=a(e,"P",{});var Wi=r(we);ol=i(Wi,"Just in case anything slipped through the cracks, the full test suite is also run daily."),Wi.forEach(o),jt=f(e),L=a(e,"H2",{class:!0});var ho=r(L);B=a(ho,"A",{id:!0,class:!0,href:!0});var Ji=r(B);Ye=a(Ji,"SPAN",{});var Ui=r(Ye);y(X.$$.fragment,Ui),Ui.forEach(o),Ji.forEach(o),ll=f(ho),Ke=a(ho,"SPAN",{});var zi=r(Ke);il=i(zi,"Documentation build"),zi.forEach(o),ho.forEach(o),Bt=f(e),C=a(e,"P",{});var De=r(C);sl=i(De,"The job "),Qe=a(De,"CODE",{});var Gi=r(Qe);al=i(Gi,"ci/circleci: build_doc"),Gi.forEach(o),rl=i(De," runs a build of the documentation just to make sure everything will be okay once your PR is merged. If that steps fails, you can inspect it locally by going into the "),Ve=a(De,"CODE",{});var Yi=r(Ve);nl=i(Yi,"docs"),Yi.forEach(o),hl=i(De," folder of the Transformers repo and then typing"),De.forEach(o),Mt=f(e),y(Z.$$.fragment,e),Ft=f(e),Ee=a(e,"P",{});var Ki=r(Ee);cl=i(Ki,"Sphinx is not known for its helpful error messages, so you might have to try a few things to really find the source of the error."),Ki.forEach(o),Ht=f(e),R=a(e,"H2",{class:!0});var co=r(R);M=a(co,"A",{id:!0,class:!0,href:!0});var Qi=r(M);Xe=a(Qi,"SPAN",{});var Vi=r(Xe);y(ee.$$.fragment,Vi),Vi.forEach(o),Qi.forEach(o),fl=f(co),Ze=a(co,"SPAN",{});var Xi=r(Ze);pl=i(Xi,"Code and documentation style"),Xi.forEach(o),co.forEach(o),Wt=f(e),d=a(e,"P",{});var $=r(d);dl=i($,"Code formatting is applied to all the source files, the examples and the tests using "),et=a($,"CODE",{});var Zi=r(et);ul=i(Zi,"black"),Zi.forEach(o),ml=i($," and "),tt=a($,"CODE",{});var es=r(tt);yl=i(es,"isort"),es.forEach(o),vl=i($,". We also have a custom tool taking care of the formatting of docstrings and "),ot=a($,"CODE",{});var ts=r(ot);_l=i(ts,"rst"),ts.forEach(o),bl=i($," files ("),lt=a($,"CODE",{});var os=r(lt);kl=i(os,"utils/style_doc.py"),os.forEach(o),$l=i($,"), as well as the order of the lazy imports performed in the Transformers "),it=a($,"CODE",{});var ls=r(it);wl=i(ls,"__init__.py"),ls.forEach(o),El=i($," files ("),st=a($,"CODE",{});var is=r(st);gl=i(is,"utils/custom_init_isort.py"),is.forEach(o),Al=i($,"). All of this can be launched by executing"),$.forEach(o),Jt=f(e),y(te.$$.fragment,e),Ut=f(e),D=a(e,"P",{});var Ie=r(D);Pl=i(Ie,"The CI checks those have been applied inside the "),at=a(Ie,"CODE",{});var ss=r(at);Tl=i(ss,"ci/circleci: check_code_quality"),ss.forEach(o),xl=i(Ie," check. It also runs "),rt=a(Ie,"CODE",{});var as=r(rt);Cl=i(as,"flake8"),as.forEach(o),Dl=i(Ie,", that will have a basic look at your code and will complain if it finds an undefined variable, or one that is not used. To run that check locally, use"),Ie.forEach(o),zt=f(e),y(oe.$$.fragment,e),Gt=f(e),ge=a(e,"P",{});var rs=r(ge);Il=i(rs,"This can take a lot of time, so to run the same thing on only the files you modified in the current branch, run"),rs.forEach(o),Yt=f(e),y(le.$$.fragment,e),Kt=f(e),Ae=a(e,"P",{});var ns=r(Ae);Ol=i(ns,"This last command will also run all the additional checks for the repository consistency. 
Let\u2019s have a look at them."),ns.forEach(o),Qt=f(e),q=a(e,"H2",{class:!0});var fo=r(q);F=a(fo,"A",{id:!0,class:!0,href:!0});var hs=r(F);nt=a(hs,"SPAN",{});var cs=r(nt);y(ie.$$.fragment,cs),cs.forEach(o),hs.forEach(o),Ll=f(fo),ht=a(fo,"SPAN",{});var fs=r(ht);Rl=i(fs,"Repository consistency"),fs.forEach(o),fo.forEach(o),Vt=f(e),H=a(e,"P",{});var po=r(H);ql=i(po,"This regroups all the tests to make sure your PR leaves the repository in a good state, and is performed by the "),ct=a(po,"CODE",{});var ps=r(ct);Sl=i(ps,"ci/circleci: check_repository_consistency"),ps.forEach(o),Nl=i(po," check. You can locally run that check by executing the following:"),po.forEach(o),Xt=f(e),y(se.$$.fragment,e),Zt=f(e),Pe=a(e,"P",{});var ds=r(Pe);jl=i(ds,"This checks that:"),ds.forEach(o),eo=f(e),u=a(e,"UL",{});var A=r(u);ae=a(A,"LI",{});var uo=r(ae);Bl=i(uo,"All objects added to the init are documented (performed by "),ft=a(uo,"CODE",{});var us=r(ft);Ml=i(us,"utils/check_repo.py"),us.forEach(o),Fl=i(uo,")"),uo.forEach(o),Hl=f(A),S=a(A,"LI",{});var Oe=r(S);Wl=i(Oe,"All "),pt=a(Oe,"CODE",{});var ms=r(pt);Jl=i(ms,"__init__.py"),ms.forEach(o),Ul=i(Oe," files have the same content in their two sections (performed by "),dt=a(Oe,"CODE",{});var ys=r(dt);zl=i(ys,"utils/check_inits.py"),ys.forEach(o),Gl=i(Oe,")"),Oe.forEach(o),Yl=f(A),re=a(A,"LI",{});var mo=r(re);Kl=i(mo,"All code identified as a copy from another module is consistent with the original (performed by "),ut=a(mo,"CODE",{});var vs=r(ut);Ql=i(vs,"utils/check_copies.py"),vs.forEach(o),Vl=i(mo,")"),mo.forEach(o),Xl=f(A),ne=a(A,"LI",{});var yo=r(ne);Zl=i(yo,"The translations of the READMEs and the index of the doc have the same model list as the main README (performed by "),mt=a(yo,"CODE",{});var _s=r(mt);ei=i(_s,"utils/check_copies.py"),_s.forEach(o),ti=i(yo,")"),yo.forEach(o),oi=f(A),he=a(A,"LI",{});var vo=r(he);li=i(vo,"The auto-generated tables in the documentation are up to date (performed by "),yt=a(vo,"CODE",{});var bs=r(yt);ii=i(bs,"utils/check_table.py"),bs.forEach(o),si=i(vo,")"),vo.forEach(o),ai=f(A),ce=a(A,"LI",{});var _o=r(ce);ri=i(_o,"The library has all objects available even if not all optional dependencies are installed (performed by "),vt=a(_o,"CODE",{});var ks=r(vt);ni=i(ks,"utils/check_dummies.py"),ks.forEach(o),hi=i(_o,")"),_o.forEach(o),A.forEach(o),to=f(e),Te=a(e,"P",{});var $s=r(Te);ci=i($s,"Should this check fail, the first two items require manual fixing, the last four can be fixed automatically for you by running the command"),$s.forEach(o),oo=f(e),y(fe.$$.fragment,e),lo=f(e),xe=a(e,"P",{});var ws=r(xe);fi=i(ws,"Additional checks concern PRs that add new models, mainly that:"),ws.forEach(o),io=f(e),W=a(e,"UL",{});var bo=r(W);pe=a(bo,"LI",{});var ko=r(pe);pi=i(ko,"All models added are in an Auto-mapping (performed by "),_t=a(ko,"CODE",{});var Es=r(_t);di=i(Es,"utils/check_repo.py"),Es.forEach(o),ui=i(ko,")"),ko.forEach(o),mi=f(bo),de=a(bo,"LI",{});var $o=r(de);yi=i($o,"All models are properly tested (performed by "),bt=a($o,"CODE",{});var gs=r(bt);vi=i(gs,"utils/check_repo.py"),gs.forEach(o),_i=i($o,")"),$o.forEach(o),bo.forEach(o),this.h()},h(){p(T,"name","hf:doc:metadata"),p(T,"content",JSON.stringify(Is)),p(P,"id","checks-on-a-pull-request"),p(P,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(P,"href","#checks-on-a-pull-request"),p(w,"class","relative group"),p(N,"id","tests"),p(N,"class","header-link 
block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(N,"href","#tests"),p(O,"class","relative group"),p(B,"id","documentation-build"),p(B,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(B,"href","#documentation-build"),p(L,"class","relative group"),p(M,"id","code-and-documentation-style"),p(M,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(M,"href","#code-and-documentation-style"),p(R,"class","relative group"),p(F,"id","repository-consistency"),p(F,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(F,"href","#repository-consistency"),p(q,"class","relative group")},m(e,n){t(document.head,T),h(e,ue,n),h(e,w,n),t(w,P),t(P,Le),v(z,Le,null),t(w,Eo),t(w,Re),t(Re,go),h(e,$t,n),h(e,me,n),t(me,Ao),h(e,wt,n),h(e,E,n),t(E,qe),t(qe,Po),t(E,To),t(E,Se),t(Se,xo),t(E,Co),t(E,Ne),t(Ne,Do),t(E,Io),t(E,je),t(je,Oo),h(e,Et,n),h(e,ye,n),t(ye,Lo),h(e,gt,n),h(e,ve,n),t(ve,Ro),h(e,At,n),v(G,e,n),h(e,Pt,n),h(e,_e,n),t(_e,qo),h(e,Tt,n),v(Y,e,n),h(e,xt,n),h(e,be,n),t(be,So),h(e,Ct,n),h(e,O,n),t(O,N),t(N,Be),v(K,Be,null),t(O,No),t(O,Me),t(Me,jo),h(e,Dt,n),h(e,x,n),t(x,Bo),t(x,Fe),t(Fe,Mo),t(x,Fo),t(x,He),t(He,Ho),t(x,Wo),h(e,It,n),h(e,ke,n),t(ke,Jo),h(e,Ot,n),v(Q,e,n),h(e,Lt,n),h(e,$e,n),t($e,Uo),h(e,Rt,n),h(e,g,n),t(g,We),t(We,zo),t(g,Go),t(g,Je),t(Je,Yo),t(g,Ko),t(g,Ue),t(Ue,Qo),t(g,Vo),t(g,ze),t(ze,Xo),h(e,qt,n),h(e,j,n),t(j,Zo),t(j,Ge),t(Ge,el),t(j,tl),h(e,St,n),v(V,e,n),h(e,Nt,n),h(e,we,n),t(we,ol),h(e,jt,n),h(e,L,n),t(L,B),t(B,Ye),v(X,Ye,null),t(L,ll),t(L,Ke),t(Ke,il),h(e,Bt,n),h(e,C,n),t(C,sl),t(C,Qe),t(Qe,al),t(C,rl),t(C,Ve),t(Ve,nl),t(C,hl),h(e,Mt,n),v(Z,e,n),h(e,Ft,n),h(e,Ee,n),t(Ee,cl),h(e,Ht,n),h(e,R,n),t(R,M),t(M,Xe),v(ee,Xe,null),t(R,fl),t(R,Ze),t(Ze,pl),h(e,Wt,n),h(e,d,n),t(d,dl),t(d,et),t(et,ul),t(d,ml),t(d,tt),t(tt,yl),t(d,vl),t(d,ot),t(ot,_l),t(d,bl),t(d,lt),t(lt,kl),t(d,$l),t(d,it),t(it,wl),t(d,El),t(d,st),t(st,gl),t(d,Al),h(e,Jt,n),v(te,e,n),h(e,Ut,n),h(e,D,n),t(D,Pl),t(D,at),t(at,Tl),t(D,xl),t(D,rt),t(rt,Cl),t(D,Dl),h(e,zt,n),v(oe,e,n),h(e,Gt,n),h(e,ge,n),t(ge,Il),h(e,Yt,n),v(le,e,n),h(e,Kt,n),h(e,Ae,n),t(Ae,Ol),h(e,Qt,n),h(e,q,n),t(q,F),t(F,nt),v(ie,nt,null),t(q,Ll),t(q,ht),t(ht,Rl),h(e,Vt,n),h(e,H,n),t(H,ql),t(H,ct),t(ct,Sl),t(H,Nl),h(e,Xt,n),v(se,e,n),h(e,Zt,n),h(e,Pe,n),t(Pe,jl),h(e,eo,n),h(e,u,n),t(u,ae),t(ae,Bl),t(ae,ft),t(ft,Ml),t(ae,Fl),t(u,Hl),t(u,S),t(S,Wl),t(S,pt),t(pt,Jl),t(S,Ul),t(S,dt),t(dt,zl),t(S,Gl),t(u,Yl),t(u,re),t(re,Kl),t(re,ut),t(ut,Ql),t(re,Vl),t(u,Xl),t(u,ne),t(ne,Zl),t(ne,mt),t(mt,ei),t(ne,ti),t(u,oi),t(u,he),t(he,li),t(he,yt),t(yt,ii),t(he,si),t(u,ai),t(u,ce),t(ce,ri),t(ce,vt),t(vt,ni),t(ce,hi),h(e,to,n),h(e,Te,n),t(Te,ci),h(e,oo,n),v(fe,e,n),h(e,lo,n),h(e,xe,n),t(xe,fi),h(e,io,n),h(e,W,n),t(W,pe),t(pe,pi),t(pe,_t),t(_t,di),t(pe,ui),t(W,mi),t(W,de),t(de,yi),t(de,bt),t(bt,vi),t(de,_i),so=!0},p:Cs,i(e){so||(_(z.$$.fragment,e),_(G.$$.fragment,e),_(Y.$$.fragment,e),_(K.$$.fragment,e),_(Q.$$.fragment,e),_(V.$$.fragment,e),_(X.$$.fragment,e),_(Z.$$.fragment,e),_(ee.$$.fragment,e),_(te.$$.fragment,e),_(oe.$$.fragment,e),_(le.$$.fragment,e),_(ie.$$.fragment,e),_(se.$$.fragment,e),_(fe.$$.fragment,e),s
o=!0)},o(e){b(z.$$.fragment,e),b(G.$$.fragment,e),b(Y.$$.fragment,e),b(K.$$.fragment,e),b(Q.$$.fragment,e),b(V.$$.fragment,e),b(X.$$.fragment,e),b(Z.$$.fragment,e),b(ee.$$.fragment,e),b(te.$$.fragment,e),b(oe.$$.fragment,e),b(le.$$.fragment,e),b(ie.$$.fragment,e),b(se.$$.fragment,e),b(fe.$$.fragment,e),so=!1},d(e){o(T),e&&o(ue),e&&o(w),k(z),e&&o($t),e&&o(me),e&&o(wt),e&&o(E),e&&o(Et),e&&o(ye),e&&o(gt),e&&o(ve),e&&o(At),k(G,e),e&&o(Pt),e&&o(_e),e&&o(Tt),k(Y,e),e&&o(xt),e&&o(be),e&&o(Ct),e&&o(O),k(K),e&&o(Dt),e&&o(x),e&&o(It),e&&o(ke),e&&o(Ot),k(Q,e),e&&o(Lt),e&&o($e),e&&o(Rt),e&&o(g),e&&o(qt),e&&o(j),e&&o(St),k(V,e),e&&o(Nt),e&&o(we),e&&o(jt),e&&o(L),k(X),e&&o(Bt),e&&o(C),e&&o(Mt),k(Z,e),e&&o(Ft),e&&o(Ee),e&&o(Ht),e&&o(R),k(ee),e&&o(Wt),e&&o(d),e&&o(Jt),k(te,e),e&&o(Ut),e&&o(D),e&&o(zt),k(oe,e),e&&o(Gt),e&&o(ge),e&&o(Yt),k(le,e),e&&o(Kt),e&&o(Ae),e&&o(Qt),e&&o(q),k(ie),e&&o(Vt),e&&o(H),e&&o(Xt),k(se,e),e&&o(Zt),e&&o(Pe),e&&o(eo),e&&o(u),e&&o(to),e&&o(Te),e&&o(oo),k(fe,e),e&&o(lo),e&&o(xe),e&&o(io),e&&o(W)}}}const Is={local:"checks-on-a-pull-request",sections:[{local:"tests",title:"Tests"},{local:"documentation-build",title:"Documentation build"},{local:"code-and-documentation-style",title:"Code and documentation style"},{local:"repository-consistency",title:"Repository consistency"}],title:"Checks on a Pull Request"};function Os(wo,T,ue){let{fw:w}=T;return wo.$$set=P=>{"fw"in P&&ue(0,w=P.fw)},[w]}class Ns extends As{constructor(T){super();Ps(this,T,Os,Ds,Ts,{fw:0})}}export{Ns as default,Is as metadata};
242
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages/bertology.mdx-1663513a.js
import{S as De,i as Me,s as $e,e as r,k as c,w as Ne,t as s,M as Oe,c as l,d as a,m as u,a as o,x as Ue,h as n,b as i,F as t,g as p,y as Ke,L as je,q as qe,o as He,B as We}from"../chunks/vendor-4833417e.js";import{I as Fe}from"../chunks/IconCopyLink-4b81c553.js";function Je(Q){let v,A,f,d,D,_,V,M,X,O,L,Y,U,m,k,Z,w,ee,te,G,ae,b,re,le,I,oe,T,se,K,y,ne,x,ie,he,j,g,$,fe,pe,N,de,ce,P,ue,B,ve,me,q,E,ge,R,ye,Ee,H;return _=new Fe({}),{c(){v=r("meta"),A=c(),f=r("h1"),d=r("a"),D=r("span"),Ne(_.$$.fragment),V=c(),M=r("span"),X=s("BERTology"),O=c(),L=r("p"),Y=s(`There is a growing field of study concerned with investigating the inner working of large-scale transformers like BERT (that some call \u201CBERTology\u201D). Some good examples of this field are:`),U=c(),m=r("ul"),k=r("li"),Z=s(`BERT Rediscovers the Classical NLP Pipeline by Ian Tenney, Dipanjan Das, Ellie Pavlick: `),w=r("a"),ee=s("https://arxiv.org/abs/1905.05950"),te=c(),G=r("li"),ae=s("Are Sixteen Heads Really Better than One? by Paul Michel, Omer Levy, Graham Neubig: "),b=r("a"),re=s("https://arxiv.org/abs/1905.10650"),le=c(),I=r("li"),oe=s(`What Does BERT Look At? An Analysis of BERT\u2019s Attention by Kevin Clark, Urvashi Khandelwal, Omer Levy, Christopher D. Manning: `),T=r("a"),se=s("https://arxiv.org/abs/1906.04341"),K=c(),y=r("p"),ne=s(`In order to help this new field develop, we have included a few additional features in the BERT/GPT/GPT-2 models to help people access the inner representations, mainly adapted from the great work of Paul Michel (`),x=r("a"),ie=s("https://arxiv.org/abs/1905.10650"),he=s("):"),j=c(),g=r("ul"),$=r("li"),fe=s("accessing all the hidden-states of BERT/GPT/GPT-2,"),pe=c(),N=r("li"),de=s("accessing all the attention weights for each head of BERT/GPT/GPT-2,"),ce=c(),P=r("li"),ue=s(`retrieving heads output values and gradients to be able to compute head importance score and prune head as explained in `),B=r("a"),ve=s("https://arxiv.org/abs/1905.10650"),me=s("."),q=c(),E=r("p"),ge=s("To help you understand and use these features, we have added a specific example script: "),R=r("a"),ye=s("bertology.py"),Ee=s(` while extract information and prune a model pre-trained on GLUE.`),this.h()},l(e){const h=Oe('[data-svelte="svelte-1phssyn"]',document.head);v=l(h,"META",{name:!0,content:!0}),h.forEach(a),A=u(e),f=l(e,"H1",{class:!0});var W=o(f);d=l(W,"A",{id:!0,class:!0,href:!0});var Te=o(d);D=l(Te,"SPAN",{});var xe=o(D);Ue(_.$$.fragment,xe),xe.forEach(a),Te.forEach(a),V=u(W),M=l(W,"SPAN",{});var Pe=o(M);X=n(Pe,"BERTology"),Pe.forEach(a),W.forEach(a),O=u(e),L=l(e,"P",{});var Be=o(L);Y=n(Be,`There is a growing field of study concerned with investigating the inner working of large-scale transformers like BERT (that some call \u201CBERTology\u201D). Some good examples of this field are:`),Be.forEach(a),U=u(e),m=l(e,"UL",{});var S=o(m);k=l(S,"LI",{});var _e=o(k);Z=n(_e,`BERT Rediscovers the Classical NLP Pipeline by Ian Tenney, Dipanjan Das, Ellie Pavlick: `),w=l(_e,"A",{href:!0,rel:!0});var Re=o(w);ee=n(Re,"https://arxiv.org/abs/1905.05950"),Re.forEach(a),_e.forEach(a),te=u(S),G=l(S,"LI",{});var we=o(G);ae=n(we,"Are Sixteen Heads Really Better than One? by Paul Michel, Omer Levy, Graham Neubig: "),b=l(we,"A",{href:!0,rel:!0});var Ae=o(b);re=n(Ae,"https://arxiv.org/abs/1905.10650"),Ae.forEach(a),we.forEach(a),le=u(S),I=l(S,"LI",{});var be=o(I);oe=n(be,`What Does BERT Look At? An Analysis of BERT\u2019s Attention by Kevin Clark, Urvashi Khandelwal, Omer Levy, Christopher D. 
Manning: `),T=l(be,"A",{href:!0,rel:!0});var Le=o(T);se=n(Le,"https://arxiv.org/abs/1906.04341"),Le.forEach(a),be.forEach(a),S.forEach(a),K=u(e),y=l(e,"P",{});var F=o(y);ne=n(F,`In order to help this new field develop, we have included a few additional features in the BERT/GPT/GPT-2 models to help people access the inner representations, mainly adapted from the great work of Paul Michel (`),x=l(F,"A",{href:!0,rel:!0});var ke=o(x);ie=n(ke,"https://arxiv.org/abs/1905.10650"),ke.forEach(a),he=n(F,"):"),F.forEach(a),j=u(e),g=l(e,"UL",{});var C=o(g);$=l(C,"LI",{});var Ge=o($);fe=n(Ge,"accessing all the hidden-states of BERT/GPT/GPT-2,"),Ge.forEach(a),pe=u(C),N=l(C,"LI",{});var Ie=o(N);de=n(Ie,"accessing all the attention weights for each head of BERT/GPT/GPT-2,"),Ie.forEach(a),ce=u(C),P=l(C,"LI",{});var J=o(P);ue=n(J,`retrieving heads output values and gradients to be able to compute head importance score and prune head as explained in `),B=l(J,"A",{href:!0,rel:!0});var Se=o(B);ve=n(Se,"https://arxiv.org/abs/1905.10650"),Se.forEach(a),me=n(J,"."),J.forEach(a),C.forEach(a),q=u(e),E=l(e,"P",{});var z=o(E);ge=n(z,"To help you understand and use these features, we have added a specific example script: "),R=l(z,"A",{href:!0,rel:!0});var Ce=o(R);ye=n(Ce,"bertology.py"),Ce.forEach(a),Ee=n(z,` while extract information and prune a model pre-trained on GLUE.`),z.forEach(a),this.h()},h(){i(v,"name","hf:doc:metadata"),i(v,"content",JSON.stringify(ze)),i(d,"id","bertology"),i(d,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),i(d,"href","#bertology"),i(f,"class","relative group"),i(w,"href","https://arxiv.org/abs/1905.05950"),i(w,"rel","nofollow"),i(b,"href","https://arxiv.org/abs/1905.10650"),i(b,"rel","nofollow"),i(T,"href","https://arxiv.org/abs/1906.04341"),i(T,"rel","nofollow"),i(x,"href","https://arxiv.org/abs/1905.10650"),i(x,"rel","nofollow"),i(B,"href","https://arxiv.org/abs/1905.10650"),i(B,"rel","nofollow"),i(R,"href","https://github.com/huggingface/transformers/tree/master/examples/research_projects/bertology/run_bertology.py"),i(R,"rel","nofollow")},m(e,h){t(document.head,v),p(e,A,h),p(e,f,h),t(f,d),t(d,D),Ke(_,D,null),t(f,V),t(f,M),t(M,X),p(e,O,h),p(e,L,h),t(L,Y),p(e,U,h),p(e,m,h),t(m,k),t(k,Z),t(k,w),t(w,ee),t(m,te),t(m,G),t(G,ae),t(G,b),t(b,re),t(m,le),t(m,I),t(I,oe),t(I,T),t(T,se),p(e,K,h),p(e,y,h),t(y,ne),t(y,x),t(x,ie),t(y,he),p(e,j,h),p(e,g,h),t(g,$),t($,fe),t(g,pe),t(g,N),t(N,de),t(g,ce),t(g,P),t(P,ue),t(P,B),t(B,ve),t(P,me),p(e,q,h),p(e,E,h),t(E,ge),t(E,R),t(R,ye),t(E,Ee),H=!0},p:je,i(e){H||(qe(_.$$.fragment,e),H=!0)},o(e){He(_.$$.fragment,e),H=!1},d(e){a(v),e&&a(A),e&&a(f),We(_),e&&a(O),e&&a(L),e&&a(U),e&&a(m),e&&a(K),e&&a(y),e&&a(j),e&&a(g),e&&a(q),e&&a(E)}}}const ze={local:"bertology",title:"BERTology"};function Qe(Q,v,A){let{fw:f}=v;return Q.$$set=d=>{"fw"in d&&A(0,f=d.fw)},[f]}class Ye extends De{constructor(v){super();Me(this,v,Qe,Je,$e,{fw:0})}}export{Ye as default,ze as metadata};
243
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages/parallelism.mdx-f6d6de66.js
import{S as R3,i as Z3,s as C3,e as a,k as h,w as v,t as r,M as j3,c as i,d as t,m as f,a as o,x as P,h as s,b as d,N as K,F as l,g as p,y as w,L as F3,q as y,o as b,B as _}from"../chunks/vendor-4833417e.js";import{I as O}from"../chunks/IconCopyLink-4b81c553.js";import{C as Wh}from"../chunks/CodeBlock-6a3d1b46.js";import"../chunks/CopyButton-dacfbfaf.js";function B3(Vh){let $,El,U,M,Zi,Xe,Hh,Ci,Xh,ms,Q,me,ji,Ye,Yh,Fi,Jh,vs,gl,Kh,Ps,ve,Bi,Qh,ef,qi,tf,ws,Ll,lf,ys,Dl,af,bs,ee,Pe,Wi,Je,of,Vi,rf,_s,Gl,sf,Es,E,Hi,nf,pf,Xi,hf,ff,Yi,df,cf,Ji,uf,mf,Ki,vf,gs,te,we,Qi,Ke,Pf,eo,wf,Ls,x,yf,to,bf,_f,lo,Ef,gf,Ds,le,ye,ao,Qe,Lf,io,Df,Gs,ae,Gf,et,Uf,kf,Ul,E1,Us,be,Tf,oo,If,Sf,ks,kl,Af,Ts,tt,Is,Tl,Of,Ss,Il,Mf,As,lt,Os,Sl,$f,Ms,Al,xf,$s,at,xs,Ol,zf,zs,Ml,Nf,Ns,$l,Rf,Rs,xl,Zf,Zs,zl,Cf,Cs,Nl,jf,js,Rl,Ff,Fs,Zl,Bf,Bs,Cl,qf,qs,z,ro,Wf,Vf,so,Hf,Xf,no,Yf,Ws,jl,Jf,Vs,Fl,Kf,Hs,Bl,Qf,Xs,ql,ed,Ys,Wl,td,Js,N,Vl,it,ld,ad,id,Hl,ot,od,rd,sd,po,rt,ho,nd,pd,Ks,ie,_e,fo,st,hd,co,fd,Qs,Ee,dd,uo,cd,ud,en,Xl,md,tn,nt,ln,Yl,vd,an,Jl,Pd,on,Kl,wd,rn,Ql,yd,sn,ge,mo,bd,_d,vo,Ed,nn,ea,gd,pn,Le,Ld,pt,Dd,Gd,hn,ta,la,g1,fn,aa,Ud,dn,ia,kd,cn,R,Td,Po,Id,Sd,wo,Ad,Od,un,De,Md,yo,$d,xd,mn,Ge,zd,bo,Nd,Rd,vn,Z,Zd,_o,Cd,jd,Eo,Fd,Bd,Pn,oa,qd,wn,C,Wd,go,Vd,Hd,Lo,Xd,Yd,yn,k,Jd,Do,Kd,Qd,Go,ec,tc,Uo,lc,ac,bn,ra,ic,_n,sa,oc,En,T,ko,rc,sc,To,nc,pc,Io,hc,fc,So,dc,gn,na,cc,Ln,Ue,Ao,uc,mc,Oo,vc,Dn,pa,Pc,Gn,I,ht,wc,Mo,yc,bc,_c,ha,Ec,ft,gc,Lc,$o,Dc,Gc,xo,Uc,Un,fa,kc,kn,da,Tc,Tn,u,dt,ct,Ic,Sc,ut,Ac,Oc,zo,mt,Mc,$c,No,vt,xc,zc,ca,Pt,Nc,Rc,Zc,Ro,wt,Cc,jc,ua,yt,Fc,Bc,qc,ma,bt,Wc,Vc,In,ke,Hc,Zo,Xc,Yc,Sn,va,Jc,An,oe,Kc,_t,Qc,eu,Pa,L1,On,wa,tu,Mn,ya,lu,$n,Te,au,Co,iu,ou,xn,re,Ie,jo,Et,ru,Fo,su,zn,ba,nu,Nn,j,pu,gt,hu,fu,Lt,du,cu,Rn,F,uu,Bo,mu,vu,qo,Pu,wu,Zn,g,yu,Wo,bu,_u,Vo,Eu,gu,Ho,Lu,Du,Xo,Gu,Uu,Cn,Dt,ku,_a,D1,jn,c,Tu,Yo,Iu,Su,Jo,Au,Ou,Ko,Mu,$u,Qo,xu,zu,er,Nu,Ru,tr,Zu,Cu,lr,ju,Fu,Ea,G1,Fn,Gt,Bu,ga,U1,Bn,Ut,qu,La,k1,qn,Da,Wu,Wn,B,Vu,kt,Hu,Xu,Tt,Yu,Ju,Vn,Ga,Ku,Hn,Ua,Qu,Xn,ka,Ta,em,It,tm,Yn,Ia,lm,Jn,S,Sa,St,am,im,om,Aa,At,rm,sm,nm,Oa,Ot,pm,hm,fm,Ma,Mt,dm,cm,Kn,$a,um,Qn,q,ar,mm,vm,$t,Pm,xt,wm,ym,bm,xa,_m,zt,Em,ep,se,Se,ir,Nt,gm,or,Lm,tp,Ae,Dm,Rt,Gm,Um,lp,za,Na,T1,ap,Ra,km,ip,Za,Tm,op,Ca,Im,rp,L,rr,Zt,Sm,Am,sr,Ct,Om,Mm,nr,jt,$m,xm,pr,Ft,zm,Nm,hr,Bt,Rm,sp,ja,Zm,np,ne,Oe,fr,qt,Cm,dr,jm,pp,Fa,Fm,hp,Ba,qa,I1,fp,Me,Bm,Wt,qm,Wm,dp,Wa,Vm,cp,Va,Hm,up,D,Ha,Vt,Xm,Ym,Jm,cr,Ht,Km,Qm,ur,Xt,ev,tv,mr,Yt,lv,av,vr,Jt,iv,mp,Xa,ov,vp,pe,$e,Pr,Kt,rv,wr,sv,Pp,xe,nv,Ya,pv,hv,wp,Ja,fv,yp,Ka,dv,bp,ze,cv,yr,uv,mv,_p,Qa,vv,Ep,ei,Pv,gp,ti,wv,Lp,Ne,Re,Qt,yv,bv,el,_v,Ev,gv,br,tl,Lv,Dp,li,Dv,Gp,ai,_r,ll,Gv,Up,ii,Uv,kp,he,Ze,Er,al,kv,gr,Tv,Tp,il,ol,Iv,Sv,Ip,rl,Av,sl,Ov,Sp,oi,Mv,Ap,A,Lr,$v,xv,Dr,zv,Nv,Gr,Rv,Zv,Ur,Cv,Op,ri,jv,Mp,si,kr,Fv,$p,ni,Bv,xp,pi,Tr,qv,zp,hi,Wv,Np,fi,Ir,Vv,Rp,di,Hv,Zp,ci,Sr,Xv,Cp,ui,Yv,jp,mi,vi,S1,Fp,Pi,Jv,Bp,wi,Kv,qp,yi,Qv,Wp,Ce,eP,nl,tP,lP,Vp,fe,je,Ar,pl,aP,Or,iP,Hp,bi,oP,Xp,_i,Mr,rP,Yp,W,hl,$r,sP,nP,xr,zr,pP,hP,fl,Nr,fP,dP,dl,Rr,cP,uP,Zr,mP,vP,Cr,jr,PP,Jp,Ei,de,wP,cl,yP,bP,Fr,_P,EP,Kp,gi,Br,gP,Qp,V,ul,qr,LP,DP,ml,Wr,GP,UP,Vr,kP,TP,vl,Hr,IP,SP,ce,Xr,Yr,AP,OP,Jr,Kr,MP,$P,ue,Qr,xP,zP,es,NP,RP,ts,ZP,CP,Pl,ls,jP,FP,wl,as,BP,qP,is,WP,eh,Li,os,VP,th,Fe,yl,rs,HP,XP,bl,ss,YP,JP,ns,KP,QP,_l,ps,e1,t1,hs,fs,l1,lh;return Xe=new O({}),Ye=new O({}),Je=new O({}),Ke=new O({}),Qe=new O({}),tt=new Wh({props:{code:`La | Lb | Lc ---|----|--- a0 | b0 | c0 a1 | b1 | c1 a2 | b2 | c2`,highlighted:`La |<span class="hljs-string"> Lb </span>|<span class="hljs-string"> Lc ---</span>|<span class="hljs-string">----</span>|<span class="hljs-string">--- a0 </span>|<span class="hljs-string"> b0 </span>|<span 
class="hljs-string"> c0 a1 </span>|<span class="hljs-string"> b1 </span>|<span class="hljs-string"> c1 a2 </span>|<span class="hljs-string"> b2 </span>|<span class="hljs-string"> c2</span>`}}),lt=new Wh({props:{code:`GPU0: La | Lb | Lc ---|----|--- a0 | b0 | c0 GPU1: La | Lb | Lc ---|----|--- a1 | b1 | c1 GPU2: La | Lb | Lc ---|----|--- a2 | b2 | c2`,highlighted:`GPU0: La |<span class="hljs-string"> Lb </span>|<span class="hljs-string"> Lc ---</span>|<span class="hljs-string">----</span>|<span class="hljs-string">--- a0 </span>|<span class="hljs-string"> b0 </span>|<span class="hljs-string"> c0 GPU1: La </span>|<span class="hljs-string"> Lb </span>|<span class="hljs-string"> Lc ---</span>|<span class="hljs-string">----</span>|<span class="hljs-string">--- a1 </span>|<span class="hljs-string"> b1 </span>|<span class="hljs-string"> c1 GPU2: La </span>|<span class="hljs-string"> Lb </span>|<span class="hljs-string"> Lc ---</span>|<span class="hljs-string">----</span>|<span class="hljs-string">--- a2 </span>|<span class="hljs-string"> b2 </span>|<span class="hljs-string"> c2</span>`}}),at=new Wh({props:{code:`x0 => GPU0 x1 => GPU1 x2 => GPU2`,highlighted:`<span class="hljs-attribute">x0</span> <span class="hljs-operator">=</span>&gt; GPU0 <span class="hljs-attribute">x1</span> <span class="hljs-operator">=</span>&gt; GPU1 <span class="hljs-attribute">x2</span> <span class="hljs-operator">=</span>&gt; GPU2`}}),st=new O({}),nt=new Wh({props:{code:`=================== =================== | 0 | 1 | 2 | 3 | | 4 | 5 | 6 | 7 | =================== =================== gpu0 gpu1`,highlighted:`=================== =================== |<span class="hljs-string"> 0 </span>|<span class="hljs-string"> 1 </span>|<span class="hljs-string"> 2 </span>|<span class="hljs-string"> 3 </span>|<span class="hljs-string"> </span>|<span class="hljs-string"> 4 </span>|<span class="hljs-string"> 5 </span>|<span class="hljs-string"> 6 </span>|<span class="hljs-string"> 7 </span>| =================== =================== gpu0 gpu1`}}),Et=new O({}),Nt=new O({}),qt=new O({}),Kt=new O({}),al=new O({}),pl=new O({}),{c(){$=a("meta"),El=h(),U=a("h1"),M=a("a"),Zi=a("span"),v(Xe.$$.fragment),Hh=h(),Ci=a("span"),Xh=r("Model Parallelism"),ms=h(),Q=a("h2"),me=a("a"),ji=a("span"),v(Ye.$$.fragment),Yh=h(),Fi=a("span"),Jh=r("Parallelism overview"),vs=h(),gl=a("p"),Kh=r("In the modern machine learning the various approaches to parallelism are used to:"),Ps=h(),ve=a("ol"),Bi=a("li"),Qh=r("fit very large models onto limited hardware - e.g. t5-11b is 45GB in just model params"),ef=h(),qi=a("li"),tf=r("significantly speed up training - finish training that would take a year in hours"),ws=h(),Ll=a("p"),lf=r("We will first discuss in depth various 1D parallelism techniques and their pros and cons and then look at how they can be combined into 2D and 3D parallelism to enable an even faster training and to support even bigger models. Various other powerful alternative approaches will be presented."),ys=h(),Dl=a("p"),af=r("While the main concepts most likely will apply to any other framework, this article is focused on PyTorch-based implementations."),bs=h(),ee=a("h2"),Pe=a("a"),Wi=a("span"),v(Je.$$.fragment),of=h(),Vi=a("span"),rf=r("Concepts"),_s=h(),Gl=a("p"),sf=r("The following is the brief description of the main concepts that will be described later in depth in this document."),Es=h(),E=a("ol"),Hi=a("li"),nf=r("DataParallel (DP) - the same setup is replicated multiple times, and each being fed a slice of the data. 
The processing is done in parallel and all setups are synchronized at the end of each training step."),pf=h(),Xi=a("li"),hf=r("TensorParallel (TP) - each tensor is split up into multiple chunks, so instead of having the whole tensor reside on a single gpu, each shard of the tensor resides on its designated gpu. During processing each shard gets processed separately and in parallel on different GPUs and the results are synced at the end of the step. This is what one may call horizontal parallelism, as the splitting happens on horizontal level."),ff=h(),Yi=a("li"),df=r("PipelineParallel (PP) - the model is split up vertically (layer-level) across multiple GPUs, so that only one or several layers of the model are places on a single gpu. Each gpu processes in parallel different stages of the pipeline and working on a small chunk of the batch."),cf=h(),Ji=a("li"),uf=r("Zero Redundancy Optimizer (ZeRO) - Also performs sharding of the tensors somewhat similar to TP, except the whole tensor gets reconstructed in time for a forward or backward computation, therefore the model doesn\u2019t need to be modified. It also supports various offloading techniques to compensate for limited GPU memory."),mf=h(),Ki=a("li"),vf=r("Sharded DDP - is another name for the foundational ZeRO concept as used by various other implementations of ZeRO."),gs=h(),te=a("h2"),we=a("a"),Qi=a("span"),v(Ke.$$.fragment),Pf=h(),eo=a("span"),wf=r("Data Parallelism"),Ls=h(),x=a("p"),yf=r("Most users with just 2 GPUs already enjoy the increased training speed up thanks to "),to=a("code"),bf=r("DataParallel"),_f=r(" (DP) and "),lo=a("code"),Ef=r("DistributedDataParallel"),gf=r(" (DDP) that are almost trivial to use. This is a built-in feature of Pytorch."),Ds=h(),le=a("h2"),ye=a("a"),ao=a("span"),v(Qe.$$.fragment),Lf=h(),io=a("span"),Df=r("ZeRO Data Parallelism"),Gs=h(),ae=a("p"),Gf=r("ZeRO-powered data parallelism (ZeRO-DP) is described on the following diagram from this "),et=a("a"),Uf=r("blog post"),kf=h(),Ul=a("img"),Us=h(),be=a("p"),Tf=r("It can be difficult to wrap one\u2019s head around it, but in reality the concept is quite simple. This is just the usual "),oo=a("code"),If=r("DataParallel"),Sf=r(" (DP), except, instead of replicating the full model params, gradients and optimizer states, each GPU stores only a slice of it. And then at run-time when the full layer params are needed just for the given layer, all GPUs synchronize to give each other parts that they miss - this is it."),ks=h(),kl=a("p"),Af=r("Consider this simple model with 3 layers, where each layer has 3 params:"),Ts=h(),v(tt.$$.fragment),Is=h(),Tl=a("p"),Of=r("Layer La has weights a0, a1 and a2."),Ss=h(),Il=a("p"),Mf=r("If we have 3 GPUs, the Sharded DDP (= Zero-DP) splits the model onto 3 GPUs like so:"),As=h(),v(lt.$$.fragment),Os=h(),Sl=a("p"),$f=r("In a way this is the same horizontal slicing, as tensor parallelism, if you imagine the typical DNN diagram. Vertical slicing is where one puts whole layer-groups on different GPUs. 
But it\u2019s just the starting point."),Ms=h(),Al=a("p"),xf=r("Now each of these GPUs will get the usual mini-batch as it works in DP:"),$s=h(),v(at.$$.fragment),xs=h(),Ol=a("p"),zf=r("The inputs are unmodified - they think they are going to be processed by the normal model."),zs=h(),Ml=a("p"),Nf=r("First, the inputs hit the layer La."),Ns=h(),$l=a("p"),Rf=r("Let\u2019s focus just on GPU0: x0 needs a0, a1, a2 params to do its forward path, but GPU0 has only a0 - it gets sent a1 from GPU1 and a2 from GPU2, bringing all pieces of the model together."),Rs=h(),xl=a("p"),Zf=r("In parallel, GPU1 gets mini-batch x1 and it only has a1, but needs a0 and a2 params, so it gets those from GPU0 and GPU2."),Zs=h(),zl=a("p"),Cf=r("Same happens to GPU2 that gets input x2. It gets a0 and a1 from GPU0 and GPU1, and with its a2 it reconstructs the full tensor."),Cs=h(),Nl=a("p"),jf=r("All 3 GPUs get the full tensors reconstructed and a forward happens."),js=h(),Rl=a("p"),Ff=r("As soon as the calculation is done, the data that is no longer needed gets dropped - it\u2019s only used during the calculation. The reconstruction is done efficiently via a pre-fetch."),Fs=h(),Zl=a("p"),Bf=r("And the whole process is repeated for layer Lb, then Lc forward-wise, and then backward Lc -> Lb -> La."),Bs=h(),Cl=a("p"),qf=r("To me this sounds like an efficient group backpacking weight distribution strategy:"),qs=h(),z=a("ol"),ro=a("li"),Wf=r("person A carries the tent"),Vf=h(),so=a("li"),Hf=r("person B carries the stove"),Xf=h(),no=a("li"),Yf=r("person C carries the axe"),Ws=h(),jl=a("p"),Jf=r("Now each night they all share what they have with others and get from others what they don\u2019t have, and in the morning they pack up their allocated type of gear and continue on their way. This is Sharded DDP / Zero DP."),Vs=h(),Fl=a("p"),Kf=r("Compare this strategy to the simple one where each person has to carry their own tent, stove and axe, which would be far more inefficient. This is DataParallel (DP and DDP) in Pytorch."),Hs=h(),Bl=a("p"),Qf=r("While reading the literature on this topic you may encounter the following synonyms: Sharded, Partitioned."),Xs=h(),ql=a("p"),ed=r("If you pay close attention the way ZeRO partitions the model\u2019s weights - it looks very similar to tensor parallelism which will be discussed later. This is because it partitions/shards each layer\u2019s weights, unlike vertical model parallelism which is discussed next."),Ys=h(),Wl=a("p"),td=r("Implementations:"),Js=h(),N=a("ul"),Vl=a("li"),it=a("a"),ld=r("DeepSpeed"),ad=r(" ZeRO-DP stages 1+2+3"),id=h(),Hl=a("li"),ot=a("a"),od=r("Fairscale"),rd=r(" ZeRO-DP stages 1+2+3"),sd=h(),po=a("li"),rt=a("a"),ho=a("code"),nd=r("transformers"),pd=r(" integration"),Ks=h(),ie=a("h2"),_e=a("a"),fo=a("span"),v(st.$$.fragment),hd=h(),co=a("span"),fd=r("Naive Model Parallelism (Vertical) and Pipeline Parallelism"),Qs=h(),Ee=a("p"),dd=r("Naive Model Parallelism (MP) is where one spreads groups of model layers across multiple GPUs. The mechanism is relatively simple - switch the desired layers "),uo=a("code"),cd=r(".to()"),ud=r(" the desired devices and now whenever the data goes in and out those layers switch the data to the same device as the layer and leave the rest unmodified."),en=h(),Xl=a("p"),md=r("We refer to it as Vertical MP, because if you remember how most models are drawn, we slice the layers vertically. 
For example, if the following diagram shows an 8-layer model:"),tn=h(),v(nt.$$.fragment),ln=h(),Yl=a("p"),vd=r("we just sliced it in 2 vertically, placing layers 0-3 onto GPU0 and 4-7 to GPU1."),an=h(),Jl=a("p"),Pd=r("Now while data travels from layer 0 to 1, 1 to 2 and 2 to 3 this is just the normal model. But when data needs to pass from layer 3 to layer 4 it needs to travel from GPU0 to GPU1 which introduces a communication overhead. If the participating GPUs are on the same compute node (e.g. same physical machine) this copying is pretty fast, but if the GPUs are located on different compute nodes (e.g. multiple machines) the communication overhead could be significantly larger."),on=h(),Kl=a("p"),wd=r("Then layers 4 to 5 to 6 to 7 are as a normal model would have and when the 7th layer completes we often need to send the data back to layer 0 where the labels are (or alternatively send the labels to the last layer). Now the loss can be computed and the optimizer can do its work."),rn=h(),Ql=a("p"),yd=r("Problems:"),sn=h(),ge=a("ul"),mo=a("li"),bd=r("the main deficiency and why this one is called \u201Cnaive\u201D MP, is that all but one GPU is idle at any given moment. So if 4 GPUs are used, it\u2019s almost identical to quadrupling the amount of memory of a single GPU, and ignoring the rest of the hardware. Plus there is the overhead of copying the data between devices. So 4x 6GB cards will be able to accommodate the same size as 1x 24GB card using naive MP, except the latter will complete the training faster, since it doesn\u2019t have the data copying overhead. But, say, if you have 40GB cards and need to fit a 45GB model you can with 4x 40GB cards (but barely because of the gradient and optimizer states)"),_d=h(),vo=a("li"),Ed=r("shared embeddings may need to get copied back and forth between GPUs."),nn=h(),ea=a("p"),gd=r("Pipeline Parallelism (PP) is almost identical to a naive MP, but it solves the GPU idling problem, by chunking the incoming batch into micro-batches and artificially creating a pipeline, which allows different GPUs to concurrently participate in the computation process."),pn=h(),Le=a("p"),Ld=r("The following illustration from the "),pt=a("a"),Dd=r("GPipe paper"),Gd=r(" shows the naive MP on the top, and PP on the bottom:"),hn=h(),ta=a("p"),la=a("img"),fn=h(),aa=a("p"),Ud=r("It\u2019s easy to see from the bottom diagram how PP has less dead zones, where GPUs are idle. The idle parts are referred to as the \u201Cbubble\u201D."),dn=h(),ia=a("p"),kd=r("Both parts of the diagram show a parallelism that is of degree 4. That is 4 GPUs are participating in the pipeline. So there is the forward path of 4 pipe stages F0, F1, F2 and F3 and then the return reverse order backward path of B3, B2, B1 and B0."),cn=h(),R=a("p"),Td=r("PP introduces a new hyper-parameter to tune and it\u2019s "),Po=a("code"),Id=r("chunks"),Sd=r(" which defines how many chunks of data are sent in a sequence through the same pipe stage. For example, in the bottomw diagram you can see that "),wo=a("code"),Ad=r("chunks=4"),Od=r(". GPU0 performs the same forward path on chunk 0, 1, 2 and 3 (F0,0, F0,1, F0,2, F0,3) and then it waits for other GPUs to do their work and only when their work is starting to be complete, GPU0 starts to work again doing the backward path for chunks 3, 2, 1 and 0 (B0,3, B0,2, B0,1, B0,0)."),un=h(),De=a("p"),Md=r("Note that conceptually this is the same concept as gradient accumulation steps (GAS). 
Pytorch uses "),yo=a("code"),$d=r("chunks"),xd=r(", whereas DeepSpeed refers to the same hyper-parameter as GAS."),mn=h(),Ge=a("p"),zd=r("Because of the chunks, PP introduces the concept of micro-batches (MBS). DP splits the global data batch size into mini-batches, so if you have a DP degree of 4, a global batch size of 1024 gets split up into 4 mini-batches of 256 each (1024/4). And if the number of "),bo=a("code"),Nd=r("chunks"),Rd=r(" (or GAS) is 32 we end up with a micro-batch size of 8 (256/32). Each Pipeline stage works with a single micro-batch at a time."),vn=h(),Z=a("p"),Zd=r("To calculate the global batch size of the DP + PP setup we then do: "),_o=a("code"),Cd=r("mbs*chunks*dp_degree"),jd=r(" ("),Eo=a("code"),Fd=r("8*32*4=1024"),Bd=r(")."),Pn=h(),oa=a("p"),qd=r("Let\u2019s go back to the diagram."),wn=h(),C=a("p"),Wd=r("With "),go=a("code"),Vd=r("chunks=1"),Hd=r(" you end up with the naive MP, which is very inefficient. With a very large "),Lo=a("code"),Xd=r("chunks"),Yd=r(" value you end up with tiny micro-batch sizes which could be not every efficient either. So one has to experiment to find the value that leads to the highest efficient utilization of the gpus."),yn=h(),k=a("p"),Jd=r("While the diagram shows that there is a bubble of \u201Cdead\u201D time that can\u2019t be parallelized because the last "),Do=a("code"),Kd=r("forward"),Qd=r(" stage has to wait for "),Go=a("code"),ec=r("backward"),tc=r(" to complete the pipeline, the purpose of finding the best value for "),Uo=a("code"),lc=r("chunks"),ac=r(" is to enable a high concurrent GPU utilization across all participating GPUs which translates to minimizing the size of the bubble."),bn=h(),ra=a("p"),ic=r("There are 2 groups of solutions - the traditional Pipeline API and the more modern solutions that make things much easier for the end user."),_n=h(),sa=a("p"),oc=r("Traditional Pipeline API solutions:"),En=h(),T=a("ul"),ko=a("li"),rc=r("PyTorch"),sc=h(),To=a("li"),nc=r("FairScale"),pc=h(),Io=a("li"),hc=r("DeepSpeed"),fc=h(),So=a("li"),dc=r("Megatron-LM"),gn=h(),na=a("p"),cc=r("Modern solutions:"),Ln=h(),Ue=a("ul"),Ao=a("li"),uc=r("Varuna"),mc=h(),Oo=a("li"),vc=r("Sagemaker"),Dn=h(),pa=a("p"),Pc=r("Problems with traditional Pipeline API solutions:"),Gn=h(),I=a("ul"),ht=a("li"),wc=r("have to modify the model quite heavily, because Pipeline requires one to rewrite the normal flow of modules into a "),Mo=a("code"),yc=r("nn.Sequential"),bc=r(" sequence of the same, which may require changes to the design of the model."),_c=h(),ha=a("li"),Ec=r("currently the Pipeline API is very restricted. If you had a bunch of python variables being passed in the very first stage of the Pipeline, you will have to find a way around it. Currently, the pipeline interface requires either a single Tensor or a tuple of Tensors as the only input and output. These tensors must have a batch size as the very first dimension, since pipeline is going to chunk the mini batch into micro-batches. 
Possible improvements are being discussed here "),ft=a("a"),gc=r("https://github.com/pytorch/pytorch/pull/50693"),Lc=h(),$o=a("li"),Dc=r("conditional control flow at the level of pipe stages is not possible - e.g., Encoder-Decoder models like T5 require special workarounds to handle a conditional encoder stage."),Gc=h(),xo=a("li"),Uc=r("have to arrange each layer so that the output of one model becomes an input to the other model."),Un=h(),fa=a("p"),kc=r("We are yet to experiment with Varuna and SageMaker but their papers report that they have overcome the list of problems mentioned above and that they require much smaller changes to the user\u2019s model."),kn=h(),da=a("p"),Tc=r("Implementations:"),Tn=h(),u=a("ul"),dt=a("li"),ct=a("a"),Ic=r("Pytorch"),Sc=r(" (initial support in pytorch-1.8, and progressively getting improved in 1.9 and more so in 1.10). Some "),ut=a("a"),Ac=r("examples"),Oc=h(),zo=a("li"),mt=a("a"),Mc=r("FairScale"),$c=h(),No=a("li"),vt=a("a"),xc=r("DeepSpeed"),zc=h(),ca=a("li"),Pt=a("a"),Nc=r("Megatron-LM"),Rc=r(" has an internal implementation - no API."),Zc=h(),Ro=a("li"),wt=a("a"),Cc=r("Varuna"),jc=h(),ua=a("li"),yt=a("a"),Fc=r("SageMaker"),Bc=r(" - this is a proprietary solution that can only be used on AWS."),qc=h(),ma=a("li"),bt=a("a"),Wc=r("OSLO"),Vc=r(" - this is implemented based on the Hugging Face Transformers."),In=h(),ke=a("p"),Hc=r("\u{1F917} Transformers status: as of this writing none of the models supports full-PP. GPT2 and T5 models have naive MP support. The main obstacle is being unable to convert the models to "),Zo=a("code"),Xc=r("nn.Sequential"),Yc=r(" and have all the inputs to be Tensors. This is because currently the models include many features that make the conversion very complicated, and will need to be removed to accomplish that."),Sn=h(),va=a("p"),Jc=r("Other approaches:"),An=h(),oe=a("p"),Kc=r("DeepSpeed, Varuna and SageMaker use the concept of an "),_t=a("a"),Qc=r("Interleaved Pipeline"),eu=h(),Pa=a("img"),On=h(),wa=a("p"),tu=r("Here the bubble (idle time) is further minimized by prioritizing backward passes."),Mn=h(),ya=a("p"),lu=r("Varuna further tries to improve the schedule by using simulations to discover the most efficient scheduling."),$n=h(),Te=a("p"),au=r("OSLO has pipeline parallelism implementation based on the Transformers without "),Co=a("code"),iu=r("nn.Sequential"),ou=r(" converting."),xn=h(),re=a("h2"),Ie=a("a"),jo=a("span"),v(Et.$$.fragment),ru=h(),Fo=a("span"),su=r("Tensor Parallelism"),zn=h(),ba=a("p"),nu=r("In Tensor Parallelism each GPU processes only a slice of a tensor and only aggregates the full tensor for operations that require the whole thing."),Nn=h(),j=a("p"),pu=r("In this section we use concepts and diagrams from the "),gt=a("a"),hu=r("Megatron-LM"),fu=r(" paper: "),Lt=a("a"),du=r("Efficient Large-Scale Language Model Training on GPU Clusters"),cu=r("."),Rn=h(),F=a("p"),uu=r("The main building block of any transformer is a fully connected "),Bo=a("code"),mu=r("nn.Linear"),vu=r(" followed by a nonlinear activation "),qo=a("code"),Pu=r("GeLU"),wu=r("."),Zn=h(),g=a("p"),yu=r("Following the Megatron\u2019s paper notation, we can write the dot-product part of it as "),Wo=a("code"),bu=r("Y = GeLU(XA)"),_u=r(", where "),Vo=a("code"),Eu=r("X"),gu=r(" and "),Ho=a("code"),Lu=r("Y"),Du=r(" are the input and output vectors, and "),Xo=a("code"),Gu=r("A"),Uu=r(" is the weight matrix."),Cn=h(),Dt=a("p"),ku=r(`If we look at the computation in matrix form, it\u2019s easy to see how the matrix multiplication can be split 
between multiple GPUs: `),_a=a("img"),jn=h(),c=a("p"),Tu=r("If we split the weight matrix "),Yo=a("code"),Iu=r("A"),Su=r(" column-wise across "),Jo=a("code"),Au=r("N"),Ou=r(" GPUs and perform matrix multiplications "),Ko=a("code"),Mu=r("XA_1"),$u=r(" through "),Qo=a("code"),xu=r("XA_n"),zu=r(" in parallel, then we will end up with "),er=a("code"),Nu=r("N"),Ru=r(" output vectors "),tr=a("code"),Zu=r("Y_1, Y_2, ..., Y_n"),Cu=r(" which can be fed into "),lr=a("code"),ju=r("GeLU"),Fu=r(` independently: `),Ea=a("img"),Fn=h(),Gt=a("p"),Bu=r(`Using this principle, we can update an MLP of arbitrary depth, without the need for any synchronization between GPUs until the very end, where we need to reconstruct the output vector from shards. The Megatron-LM paper authors provide a helpful illustration for that: `),ga=a("img"),Bn=h(),Ut=a("p"),qu=r(`Parallelizing the multi-headed attention layers is even simpler, since they are already inherently parallel, due to having multiple independent heads! `),La=a("img"),qn=h(),Da=a("p"),Wu=r("Special considerations: TP requires very fast network, and therefore it\u2019s not advisable to do TP across more than one node. Practically, if a node has 4 GPUs, the highest TP degree is therefore 4. If you need a TP degree of 8, you need to use nodes that have at least 8 GPUs."),Wn=h(),B=a("p"),Vu=r("This section is based on the original much more "),kt=a("a"),Hu=r("detailed TP overview"),Xu=r(`. by `),Tt=a("a"),Yu=r("@anton-l"),Ju=r("."),Vn=h(),Ga=a("p"),Ku=r("SageMaker combines TP with DP for a more efficient processing."),Hn=h(),Ua=a("p"),Qu=r("Alternative names:"),Xn=h(),ka=a("ul"),Ta=a("li"),em=r("DeepSpeed calls it "),It=a("a"),tm=r("tensor slicing"),Yn=h(),Ia=a("p"),lm=r("Implementations:"),Jn=h(),S=a("ul"),Sa=a("li"),St=a("a"),am=r("Megatron-LM"),im=r(" has an internal implementation, as it\u2019s very model-specific"),om=h(),Aa=a("li"),At=a("a"),rm=r("parallelformers"),sm=r(" (only inference at the moment)"),nm=h(),Oa=a("li"),Ot=a("a"),pm=r("SageMaker"),hm=r(" - this is a proprietary solution that can only be used on AWS."),fm=h(),Ma=a("li"),Mt=a("a"),dm=r("OSLO"),cm=r(" has the tensor parallelism implementation based on the Transformers."),Kn=h(),$a=a("p"),um=r("\u{1F917} Transformers status:"),Qn=h(),q=a("ul"),ar=a("li"),mm=r("core: not yet implemented in the core"),vm=h(),$t=a("li"),Pm=r("but if you want inference "),xt=a("a"),wm=r("parallelformers"),ym=r(" provides this support for most of our models. So until this is implemented in the core you can use theirs. And hopefully training mode will be supported too."),bm=h(),xa=a("li"),_m=r("Deepspeed-Inference also supports our BERT, GPT-2, and GPT-Neo models in their super-fast CUDA-kernel-based inference mode, see more "),zt=a("a"),Em=r("here"),ep=h(),se=a("h2"),Se=a("a"),ir=a("span"),v(Nt.$$.fragment),gm=h(),or=a("span"),Lm=r("DP+PP"),tp=h(),Ae=a("p"),Dm=r("The following diagram from the DeepSpeed "),Rt=a("a"),Gm=r("pipeline tutorial"),Um=r(" demonstrates how one combines DP with PP."),lp=h(),za=a("p"),Na=a("img"),ap=h(),Ra=a("p"),km=r("Here it\u2019s important to see how DP rank 0 doesn\u2019t see GPU2 and DP rank 1 doesn\u2019t see GPU3. To DP there is just GPUs 0 and 1 where it feeds data as if there were just 2 GPUs. GPU0 \u201Csecretly\u201D offloads some of its load to GPU2 using PP. 
And GPU1 does the same by enlisting GPU3 to its aid."),ip=h(),Za=a("p"),Tm=r("Since each dimension requires at least 2 GPUs, here you\u2019d need at least 4 GPUs."),op=h(),Ca=a("p"),Im=r("Implementations:"),rp=h(),L=a("ul"),rr=a("li"),Zt=a("a"),Sm=r("DeepSpeed"),Am=h(),sr=a("li"),Ct=a("a"),Om=r("Megatron-LM"),Mm=h(),nr=a("li"),jt=a("a"),$m=r("Varuna"),xm=h(),pr=a("li"),Ft=a("a"),zm=r("SageMaker"),Nm=h(),hr=a("li"),Bt=a("a"),Rm=r("OSLO"),sp=h(),ja=a("p"),Zm=r("\u{1F917} Transformers status: not yet implemented"),np=h(),ne=a("h2"),Oe=a("a"),fr=a("span"),v(qt.$$.fragment),Cm=h(),dr=a("span"),jm=r("DP+PP+TP"),pp=h(),Fa=a("p"),Fm=r("To get an even more efficient training a 3D parallelism is used where PP is combined with TP and DP. This can be seen in the following diagram."),hp=h(),Ba=a("p"),qa=a("img"),fp=h(),Me=a("p"),Bm=r("This diagram is from a blog post "),Wt=a("a"),qm=r("3D parallelism: Scaling to trillion-parameter models"),Wm=r(", which is a good read as well."),dp=h(),Wa=a("p"),Vm=r("Since each dimension requires at least 2 GPUs, here you\u2019d need at least 8 GPUs."),cp=h(),Va=a("p"),Hm=r("Implementations:"),up=h(),D=a("ul"),Ha=a("li"),Vt=a("a"),Xm=r("DeepSpeed"),Ym=r(" - DeepSpeed also includes an even more efficient DP, which they call ZeRO-DP."),Jm=h(),cr=a("li"),Ht=a("a"),Km=r("Megatron-LM"),Qm=h(),ur=a("li"),Xt=a("a"),ev=r("Varuna"),tv=h(),mr=a("li"),Yt=a("a"),lv=r("SageMaker"),av=h(),vr=a("li"),Jt=a("a"),iv=r("OSLO"),mp=h(),Xa=a("p"),ov=r("\u{1F917} Transformers status: not yet implemented, since we have no PP and TP."),vp=h(),pe=a("h2"),$e=a("a"),Pr=a("span"),v(Kt.$$.fragment),rv=h(),wr=a("span"),sv=r("ZeRO DP+PP+TP"),Pp=h(),xe=a("p"),nv=r("One of the main features of DeepSpeed is ZeRO, which is a super-scalable extension of DP. It has already been discussed in "),Ya=a("a"),pv=r("ZeRO Data Parallelism"),hv=r(". Normally it\u2019s a standalone feature that doesn\u2019t require PP or TP. But it can be combined with PP and TP."),wp=h(),Ja=a("p"),fv=r("When ZeRO-DP is combined with PP (and optionally TP) it typically enables only ZeRO stage 1 (optimizer sharding)."),yp=h(),Ka=a("p"),dv=r("While it\u2019s theoretically possible to use ZeRO stage 2 (gradient sharding) with Pipeline Parallelism, it will have bad performance impacts. There would need to be an additional reduce-scatter collective for every micro-batch to aggregate the gradients before sharding, which adds a potentially significant communication overhead. By nature of Pipeline Parallelism, small micro-batches are used and instead the focus is on trying to balance arithmetic intensity (micro-batch size) with minimizing the Pipeline bubble (number of micro-batches). Therefore those communication costs are going to hurt."),bp=h(),ze=a("p"),cv=r("In addition, There are already fewer layers than normal due to PP and so the memory savings won\u2019t be huge. PP already reduces gradient size by "),yr=a("code"),uv=r("1/PP"),mv=r(", and so gradient sharding savings on top of that are less significant than pure DP."),_p=h(),Qa=a("p"),vv=r("ZeRO stage 3 is not a good choice either for the same reason - more inter-node communications required."),Ep=h(),ei=a("p"),Pv=r("And since we have ZeRO, the other benefit is ZeRO-Offload. 
Since this is stage 1 optimizer states can be offloaded to CPU."),gp=h(),ti=a("p"),wv=r("Implementations:"),Lp=h(),Ne=a("ul"),Re=a("li"),Qt=a("a"),yv=r("Megatron-DeepSpeed"),bv=r(" and "),el=a("a"),_v=r("Megatron-Deepspeed from BigScience"),Ev=r(", which is the fork of the former repo."),gv=h(),br=a("li"),tl=a("a"),Lv=r("OSLO"),Dp=h(),li=a("p"),Dv=r("Important papers:"),Gp=h(),ai=a("ul"),_r=a("li"),ll=a("a"),Gv=r("Using DeepSpeed and Megatron to Train Megatron-Turing NLG 530B, A Large-Scale Generative Language Model"),Up=h(),ii=a("p"),Uv=r("\u{1F917} Transformers status: not yet implemented, since we have no PP and TP."),kp=h(),he=a("h2"),Ze=a("a"),Er=a("span"),v(al.$$.fragment),kv=h(),gr=a("span"),Tv=r("FlexFlow"),Tp=h(),il=a("p"),ol=a("a"),Iv=r("FlexFlow"),Sv=r(" also solves the parallelization problem in a slightly different approach."),Ip=h(),rl=a("p"),Av=r("Paper: "),sl=a("a"),Ov=r("\u201CBeyond Data and Model Parallelism for Deep Neural Networks\u201D by Zhihao Jia, Matei Zaharia, Alex Aiken"),Sp=h(),oi=a("p"),Mv=r("It performs a sort of 4D Parallelism over Sample-Operator-Attribute-Parameter."),Ap=h(),A=a("ol"),Lr=a("li"),$v=r("Sample = Data Parallelism (sample-wise parallel)"),xv=h(),Dr=a("li"),zv=r("Operator = Parallelize a single operation into several sub-operations"),Nv=h(),Gr=a("li"),Rv=r("Attribute = Data Parallelism (length-wise parallel)"),Zv=h(),Ur=a("li"),Cv=r("Parameter = Model Parallelism (regardless of dimension - horizontal or vertical)"),Op=h(),ri=a("p"),jv=r("Examples:"),Mp=h(),si=a("ul"),kr=a("li"),Fv=r("Sample"),$p=h(),ni=a("p"),Bv=r("Let\u2019s take 10 batches of sequence length 512. If we parallelize them by sample dimension into 2 devices, we get 10 x 512 which becomes be 5 x 2 x 512."),xp=h(),pi=a("ul"),Tr=a("li"),qv=r("Operator"),zp=h(),hi=a("p"),Wv=r("If we perform layer normalization, we compute std first and mean second, and then we can normalize data. Operator parallelism allows computing std and mean in parallel. So if we parallelize them by operator dimension into 2 devices (cuda:0, cuda:1), first we copy input data into both devices, and cuda:0 computes std, cuda:1 computes mean at the same time."),Np=h(),fi=a("ul"),Ir=a("li"),Vv=r("Attribute"),Rp=h(),di=a("p"),Hv=r("We have 10 batches of 512 length. If we parallelize them by attribute dimension into 2 devices, 10 x 512 will be 10 x 2 x 256."),Zp=h(),ci=a("ul"),Sr=a("li"),Xv=r("Parameter"),Cp=h(),ui=a("p"),Yv=r("It is similar with tensor model parallelism or naive layer-wise model parallelism."),jp=h(),mi=a("p"),vi=a("img"),Fp=h(),Pi=a("p"),Jv=r("The significance of this framework is that it takes resources like (1) GPU/TPU/CPU vs. (2) RAM/DRAM vs. (3) fast-intra-connect/slow-inter-connect and it automatically optimizes all these algorithmically deciding which parallelisation to use where."),Bp=h(),wi=a("p"),Kv=r("One very important aspect is that FlexFlow is designed for optimizing DNN parallelizations for models with static and fixed workloads, since models with dynamic behavior may prefer different parallelization strategies across iterations."),qp=h(),yi=a("p"),Qv=r("So the promise is very attractive - it runs a 30min simulation on the cluster of choice and it comes up with the best strategy to utilise this specific environment. If you add/remove/replace any parts it\u2019ll run and re-optimize the plan for that. And then you can train. A different setup will have its own custom optimization."),Wp=h(),Ce=a("p"),eP=r("\u{1F917} Transformers status: not yet integrated. 
We already have our models FX-trace-able via "),nl=a("a"),tP=r("transformers.utils.fx"),lP=r(", which is a prerequisite for FlexFlow, so someone needs to figure out what needs to be done to make FlexFlow work with our models."),Vp=h(),fe=a("h2"),je=a("a"),Ar=a("span"),v(pl.$$.fragment),aP=h(),Or=a("span"),iP=r("Which Strategy To Use When"),Hp=h(),bi=a("p"),oP=r("Here is a very rough outline at which parallelism strategy to use when. The first on each list is typically faster."),Xp=h(),_i=a("p"),Mr=a("strong"),rP=r("\u21E8 Single GPU"),Yp=h(),W=a("ul"),hl=a("li"),$r=a("p"),sP=r("Model fits onto a single GPU:"),nP=h(),xr=a("ol"),zr=a("li"),pP=r("Normal use"),hP=h(),fl=a("li"),Nr=a("p"),fP=r("Model doesn\u2019t fit onto a single GPU:"),dP=h(),dl=a("ol"),Rr=a("li"),cP=r("ZeRO + Offload CPU and optionally NVMe"),uP=h(),Zr=a("li"),mP=r("as above plus Memory Centric Tiling (see below for details) if the largest layer can\u2019t fit into a single GPU"),vP=h(),Cr=a("li"),jr=a("p"),PP=r("Largest Layer not fitting into a single GPU:"),Jp=h(),Ei=a("ol"),de=a("li"),wP=r("ZeRO - Enable "),cl=a("a"),yP=r("Memory Centric Tiling"),bP=r(" (MCT). It allows you to run arbitrarily large layers by automatically splitting them and executing them sequentially. MCT reduces the number of parameters that are live on a GPU, but it does not affect the activation memory. As this need is very rare as of this writing a manual override of "),Fr=a("code"),_P=r("torch.nn.Linear"),EP=r(" needs to be done by the user."),Kp=h(),gi=a("p"),Br=a("strong"),gP=r("\u21E8 Single Node / Multi-GPU"),Qp=h(),V=a("ul"),ul=a("li"),qr=a("p"),LP=r("Model fits onto a single GPU:"),DP=h(),ml=a("ol"),Wr=a("li"),GP=r("DDP - Distributed DP"),UP=h(),Vr=a("li"),kP=r("ZeRO - may or may not be faster depending on the situation and configuration used"),TP=h(),vl=a("li"),Hr=a("p"),IP=r("Model doesn\u2019t fit onto a single GPU:"),SP=h(),ce=a("ol"),Xr=a("li"),Yr=a("p"),AP=r("PP"),OP=h(),Jr=a("li"),Kr=a("p"),MP=r("ZeRO"),$P=h(),ue=a("li"),Qr=a("p"),xP=r("TP"),zP=h(),es=a("p"),NP=r("With very fast intra-node connectivity of NVLINK or NVSwitch all three should be mostly on par, without these PP will be faster than TP or ZeRO. The degree of TP may also make a difference. Best to experiment to find the winner on your particular setup."),RP=h(),ts=a("p"),ZP=r("TP is almost always used within a single node. 
That is TP size <= gpus per node."),CP=h(),Pl=a("li"),ls=a("p"),jP=r("Largest Layer not fitting into a single GPU:"),FP=h(),wl=a("ol"),as=a("li"),BP=r("If not using ZeRO - must use TP, as PP alone won\u2019t be able to fit."),qP=h(),is=a("li"),WP=r("With ZeRO see the same entry for \u201CSingle GPU\u201D above"),eh=h(),Li=a("p"),os=a("strong"),VP=r("\u21E8 Multi-Node / Multi-GPU"),th=h(),Fe=a("ul"),yl=a("li"),rs=a("p"),HP=r("When you have fast inter-node connectivity:"),XP=h(),bl=a("ol"),ss=a("li"),YP=r("ZeRO - as it requires close to no modifications to the model"),JP=h(),ns=a("li"),KP=r("PP+TP+DP - less communications, but requires massive changes to the model"),QP=h(),_l=a("li"),ps=a("p"),e1=r("when you have slow inter-node connectivity and still low on GPU memory:"),t1=h(),hs=a("ol"),fs=a("li"),l1=r("DP+PP+TP+ZeRO-1"),this.h()},l(e){const n=j3('[data-svelte="svelte-1phssyn"]',document.head);$=i(n,"META",{name:!0,content:!0}),n.forEach(t),El=f(e),U=i(e,"H1",{class:!0});var ah=o(U);M=i(ah,"A",{id:!0,class:!0,href:!0});var A1=o(M);Zi=i(A1,"SPAN",{});var O1=o(Zi);P(Xe.$$.fragment,O1),O1.forEach(t),A1.forEach(t),Hh=f(ah),Ci=i(ah,"SPAN",{});var M1=o(Ci);Xh=s(M1,"Model Parallelism"),M1.forEach(t),ah.forEach(t),ms=f(e),Q=i(e,"H2",{class:!0});var ih=o(Q);me=i(ih,"A",{id:!0,class:!0,href:!0});var $1=o(me);ji=i($1,"SPAN",{});var x1=o(ji);P(Ye.$$.fragment,x1),x1.forEach(t),$1.forEach(t),Yh=f(ih),Fi=i(ih,"SPAN",{});var z1=o(Fi);Jh=s(z1,"Parallelism overview"),z1.forEach(t),ih.forEach(t),vs=f(e),gl=i(e,"P",{});var N1=o(gl);Kh=s(N1,"In the modern machine learning the various approaches to parallelism are used to:"),N1.forEach(t),Ps=f(e),ve=i(e,"OL",{});var oh=o(ve);Bi=i(oh,"LI",{});var R1=o(Bi);Qh=s(R1,"fit very large models onto limited hardware - e.g. t5-11b is 45GB in just model params"),R1.forEach(t),ef=f(oh),qi=i(oh,"LI",{});var Z1=o(qi);tf=s(Z1,"significantly speed up training - finish training that would take a year in hours"),Z1.forEach(t),oh.forEach(t),ws=f(e),Ll=i(e,"P",{});var C1=o(Ll);lf=s(C1,"We will first discuss in depth various 1D parallelism techniques and their pros and cons and then look at how they can be combined into 2D and 3D parallelism to enable an even faster training and to support even bigger models. Various other powerful alternative approaches will be presented."),C1.forEach(t),ys=f(e),Dl=i(e,"P",{});var j1=o(Dl);af=s(j1,"While the main concepts most likely will apply to any other framework, this article is focused on PyTorch-based implementations."),j1.forEach(t),bs=f(e),ee=i(e,"H2",{class:!0});var rh=o(ee);Pe=i(rh,"A",{id:!0,class:!0,href:!0});var F1=o(Pe);Wi=i(F1,"SPAN",{});var B1=o(Wi);P(Je.$$.fragment,B1),B1.forEach(t),F1.forEach(t),of=f(rh),Vi=i(rh,"SPAN",{});var q1=o(Vi);rf=s(q1,"Concepts"),q1.forEach(t),rh.forEach(t),_s=f(e),Gl=i(e,"P",{});var W1=o(Gl);sf=s(W1,"The following is the brief description of the main concepts that will be described later in depth in this document."),W1.forEach(t),Es=f(e),E=i(e,"OL",{});var H=o(E);Hi=i(H,"LI",{});var V1=o(Hi);nf=s(V1,"DataParallel (DP) - the same setup is replicated multiple times, and each being fed a slice of the data. The processing is done in parallel and all setups are synchronized at the end of each training step."),V1.forEach(t),pf=f(H),Xi=i(H,"LI",{});var H1=o(Xi);hf=s(H1,"TensorParallel (TP) - each tensor is split up into multiple chunks, so instead of having the whole tensor reside on a single gpu, each shard of the tensor resides on its designated gpu. 
During processing each shard gets processed separately and in parallel on different GPUs and the results are synced at the end of the step. This is what one may call horizontal parallelism, as the splitting happens on horizontal level."),H1.forEach(t),ff=f(H),Yi=i(H,"LI",{});var X1=o(Yi);df=s(X1,"PipelineParallel (PP) - the model is split up vertically (layer-level) across multiple GPUs, so that only one or several layers of the model are places on a single gpu. Each gpu processes in parallel different stages of the pipeline and working on a small chunk of the batch."),X1.forEach(t),cf=f(H),Ji=i(H,"LI",{});var Y1=o(Ji);uf=s(Y1,"Zero Redundancy Optimizer (ZeRO) - Also performs sharding of the tensors somewhat similar to TP, except the whole tensor gets reconstructed in time for a forward or backward computation, therefore the model doesn\u2019t need to be modified. It also supports various offloading techniques to compensate for limited GPU memory."),Y1.forEach(t),mf=f(H),Ki=i(H,"LI",{});var J1=o(Ki);vf=s(J1,"Sharded DDP - is another name for the foundational ZeRO concept as used by various other implementations of ZeRO."),J1.forEach(t),H.forEach(t),gs=f(e),te=i(e,"H2",{class:!0});var sh=o(te);we=i(sh,"A",{id:!0,class:!0,href:!0});var K1=o(we);Qi=i(K1,"SPAN",{});var Q1=o(Qi);P(Ke.$$.fragment,Q1),Q1.forEach(t),K1.forEach(t),Pf=f(sh),eo=i(sh,"SPAN",{});var ew=o(eo);wf=s(ew,"Data Parallelism"),ew.forEach(t),sh.forEach(t),Ls=f(e),x=i(e,"P",{});var Di=o(x);yf=s(Di,"Most users with just 2 GPUs already enjoy the increased training speed up thanks to "),to=i(Di,"CODE",{});var tw=o(to);bf=s(tw,"DataParallel"),tw.forEach(t),_f=s(Di," (DP) and "),lo=i(Di,"CODE",{});var lw=o(lo);Ef=s(lw,"DistributedDataParallel"),lw.forEach(t),gf=s(Di," (DDP) that are almost trivial to use. This is a built-in feature of Pytorch."),Di.forEach(t),Ds=f(e),le=i(e,"H2",{class:!0});var nh=o(le);ye=i(nh,"A",{id:!0,class:!0,href:!0});var aw=o(ye);ao=i(aw,"SPAN",{});var iw=o(ao);P(Qe.$$.fragment,iw),iw.forEach(t),aw.forEach(t),Lf=f(nh),io=i(nh,"SPAN",{});var ow=o(io);Df=s(ow,"ZeRO Data Parallelism"),ow.forEach(t),nh.forEach(t),Gs=f(e),ae=i(e,"P",{});var ds=o(ae);Gf=s(ds,"ZeRO-powered data parallelism (ZeRO-DP) is described on the following diagram from this "),et=i(ds,"A",{href:!0,rel:!0});var rw=o(et);Uf=s(rw,"blog post"),rw.forEach(t),kf=f(ds),Ul=i(ds,"IMG",{src:!0,alt:!0}),ds.forEach(t),Us=f(e),be=i(e,"P",{});var ph=o(be);Tf=s(ph,"It can be difficult to wrap one\u2019s head around it, but in reality the concept is quite simple. This is just the usual "),oo=i(ph,"CODE",{});var sw=o(oo);If=s(sw,"DataParallel"),sw.forEach(t),Sf=s(ph," (DP), except, instead of replicating the full model params, gradients and optimizer states, each GPU stores only a slice of it. And then at run-time when the full layer params are needed just for the given layer, all GPUs synchronize to give each other parts that they miss - this is it."),ph.forEach(t),ks=f(e),kl=i(e,"P",{});var nw=o(kl);Af=s(nw,"Consider this simple model with 3 layers, where each layer has 3 params:"),nw.forEach(t),Ts=f(e),P(tt.$$.fragment,e),Is=f(e),Tl=i(e,"P",{});var pw=o(Tl);Of=s(pw,"Layer La has weights a0, a1 and a2."),pw.forEach(t),Ss=f(e),Il=i(e,"P",{});var hw=o(Il);Mf=s(hw,"If we have 3 GPUs, the Sharded DDP (= Zero-DP) splits the model onto 3 GPUs like so:"),hw.forEach(t),As=f(e),P(lt.$$.fragment,e),Os=f(e),Sl=i(e,"P",{});var fw=o(Sl);$f=s(fw,"In a way this is the same horizontal slicing, as tensor parallelism, if you imagine the typical DNN diagram. 
Vertical slicing is where one puts whole layer-groups on different GPUs. But it\u2019s just the starting point."),fw.forEach(t),Ms=f(e),Al=i(e,"P",{});var dw=o(Al);xf=s(dw,"Now each of these GPUs will get the usual mini-batch as it works in DP:"),dw.forEach(t),$s=f(e),P(at.$$.fragment,e),xs=f(e),Ol=i(e,"P",{});var cw=o(Ol);zf=s(cw,"The inputs are unmodified - they think they are going to be processed by the normal model."),cw.forEach(t),zs=f(e),Ml=i(e,"P",{});var uw=o(Ml);Nf=s(uw,"First, the inputs hit the layer La."),uw.forEach(t),Ns=f(e),$l=i(e,"P",{});var mw=o($l);Rf=s(mw,"Let\u2019s focus just on GPU0: x0 needs a0, a1, a2 params to do its forward path, but GPU0 has only a0 - it gets sent a1 from GPU1 and a2 from GPU2, bringing all pieces of the model together."),mw.forEach(t),Rs=f(e),xl=i(e,"P",{});var vw=o(xl);Zf=s(vw,"In parallel, GPU1 gets mini-batch x1 and it only has a1, but needs a0 and a2 params, so it gets those from GPU0 and GPU2."),vw.forEach(t),Zs=f(e),zl=i(e,"P",{});var Pw=o(zl);Cf=s(Pw,"Same happens to GPU2 that gets input x2. It gets a0 and a1 from GPU0 and GPU1, and with its a2 it reconstructs the full tensor."),Pw.forEach(t),Cs=f(e),Nl=i(e,"P",{});var ww=o(Nl);jf=s(ww,"All 3 GPUs get the full tensors reconstructed and a forward happens."),ww.forEach(t),js=f(e),Rl=i(e,"P",{});var yw=o(Rl);Ff=s(yw,"As soon as the calculation is done, the data that is no longer needed gets dropped - it\u2019s only used during the calculation. The reconstruction is done efficiently via a pre-fetch."),yw.forEach(t),Fs=f(e),Zl=i(e,"P",{});var bw=o(Zl);Bf=s(bw,"And the whole process is repeated for layer Lb, then Lc forward-wise, and then backward Lc -> Lb -> La."),bw.forEach(t),Bs=f(e),Cl=i(e,"P",{});var _w=o(Cl);qf=s(_w,"To me this sounds like an efficient group backpacking weight distribution strategy:"),_w.forEach(t),qs=f(e),z=i(e,"OL",{});var Gi=o(z);ro=i(Gi,"LI",{});var Ew=o(ro);Wf=s(Ew,"person A carries the tent"),Ew.forEach(t),Vf=f(Gi),so=i(Gi,"LI",{});var gw=o(so);Hf=s(gw,"person B carries the stove"),gw.forEach(t),Xf=f(Gi),no=i(Gi,"LI",{});var Lw=o(no);Yf=s(Lw,"person C carries the axe"),Lw.forEach(t),Gi.forEach(t),Ws=f(e),jl=i(e,"P",{});var Dw=o(jl);Jf=s(Dw,"Now each night they all share what they have with others and get from others what they don\u2019t have, and in the morning they pack up their allocated type of gear and continue on their way. This is Sharded DDP / Zero DP."),Dw.forEach(t),Vs=f(e),Fl=i(e,"P",{});var Gw=o(Fl);Kf=s(Gw,"Compare this strategy to the simple one where each person has to carry their own tent, stove and axe, which would be far more inefficient. This is DataParallel (DP and DDP) in Pytorch."),Gw.forEach(t),Hs=f(e),Bl=i(e,"P",{});var Uw=o(Bl);Qf=s(Uw,"While reading the literature on this topic you may encounter the following synonyms: Sharded, Partitioned."),Uw.forEach(t),Xs=f(e),ql=i(e,"P",{});var kw=o(ql);ed=s(kw,"If you pay close attention the way ZeRO partitions the model\u2019s weights - it looks very similar to tensor parallelism which will be discussed later. 
This is because it partitions/shards each layer\u2019s weights, unlike vertical model parallelism which is discussed next."),kw.forEach(t),Ys=f(e),Wl=i(e,"P",{});var Tw=o(Wl);td=s(Tw,"Implementations:"),Tw.forEach(t),Js=f(e),N=i(e,"UL",{});var Ui=o(N);Vl=i(Ui,"LI",{});var a1=o(Vl);it=i(a1,"A",{href:!0,rel:!0});var Iw=o(it);ld=s(Iw,"DeepSpeed"),Iw.forEach(t),ad=s(a1," ZeRO-DP stages 1+2+3"),a1.forEach(t),id=f(Ui),Hl=i(Ui,"LI",{});var i1=o(Hl);ot=i(i1,"A",{href:!0,rel:!0});var Sw=o(ot);od=s(Sw,"Fairscale"),Sw.forEach(t),rd=s(i1," ZeRO-DP stages 1+2+3"),i1.forEach(t),sd=f(Ui),po=i(Ui,"LI",{});var Aw=o(po);rt=i(Aw,"A",{href:!0});var o1=o(rt);ho=i(o1,"CODE",{});var Ow=o(ho);nd=s(Ow,"transformers"),Ow.forEach(t),pd=s(o1," integration"),o1.forEach(t),Aw.forEach(t),Ui.forEach(t),Ks=f(e),ie=i(e,"H2",{class:!0});var hh=o(ie);_e=i(hh,"A",{id:!0,class:!0,href:!0});var Mw=o(_e);fo=i(Mw,"SPAN",{});var $w=o(fo);P(st.$$.fragment,$w),$w.forEach(t),Mw.forEach(t),hd=f(hh),co=i(hh,"SPAN",{});var xw=o(co);fd=s(xw,"Naive Model Parallelism (Vertical) and Pipeline Parallelism"),xw.forEach(t),hh.forEach(t),Qs=f(e),Ee=i(e,"P",{});var fh=o(Ee);dd=s(fh,"Naive Model Parallelism (MP) is where one spreads groups of model layers across multiple GPUs. The mechanism is relatively simple - switch the desired layers "),uo=i(fh,"CODE",{});var zw=o(uo);cd=s(zw,".to()"),zw.forEach(t),ud=s(fh," the desired devices and now whenever the data goes in and out those layers switch the data to the same device as the layer and leave the rest unmodified."),fh.forEach(t),en=f(e),Xl=i(e,"P",{});var Nw=o(Xl);md=s(Nw,"We refer to it as Vertical MP, because if you remember how most models are drawn, we slice the layers vertically. For example, if the following diagram shows an 8-layer model:"),Nw.forEach(t),tn=f(e),P(nt.$$.fragment,e),ln=f(e),Yl=i(e,"P",{});var Rw=o(Yl);vd=s(Rw,"we just sliced it in 2 vertically, placing layers 0-3 onto GPU0 and 4-7 to GPU1."),Rw.forEach(t),an=f(e),Jl=i(e,"P",{});var Zw=o(Jl);Pd=s(Zw,"Now while data travels from layer 0 to 1, 1 to 2 and 2 to 3 this is just the normal model. But when data needs to pass from layer 3 to layer 4 it needs to travel from GPU0 to GPU1 which introduces a communication overhead. If the participating GPUs are on the same compute node (e.g. same physical machine) this copying is pretty fast, but if the GPUs are located on different compute nodes (e.g. multiple machines) the communication overhead could be significantly larger."),Zw.forEach(t),on=f(e),Kl=i(e,"P",{});var Cw=o(Kl);wd=s(Cw,"Then layers 4 to 5 to 6 to 7 are as a normal model would have and when the 7th layer completes we often need to send the data back to layer 0 where the labels are (or alternatively send the labels to the last layer). Now the loss can be computed and the optimizer can do its work."),Cw.forEach(t),rn=f(e),Ql=i(e,"P",{});var jw=o(Ql);yd=s(jw,"Problems:"),jw.forEach(t),sn=f(e),ge=i(e,"UL",{});var dh=o(ge);mo=i(dh,"LI",{});var Fw=o(mo);bd=s(Fw,"the main deficiency and why this one is called \u201Cnaive\u201D MP, is that all but one GPU is idle at any given moment. So if 4 GPUs are used, it\u2019s almost identical to quadrupling the amount of memory of a single GPU, and ignoring the rest of the hardware. Plus there is the overhead of copying the data between devices. So 4x 6GB cards will be able to accommodate the same size as 1x 24GB card using naive MP, except the latter will complete the training faster, since it doesn\u2019t have the data copying overhead. 
But, say, if you have 40GB cards and need to fit a 45GB model you can with 4x 40GB cards (but barely because of the gradient and optimizer states)"),Fw.forEach(t),_d=f(dh),vo=i(dh,"LI",{});var Bw=o(vo);Ed=s(Bw,"shared embeddings may need to get copied back and forth between GPUs."),Bw.forEach(t),dh.forEach(t),nn=f(e),ea=i(e,"P",{});var qw=o(ea);gd=s(qw,"Pipeline Parallelism (PP) is almost identical to a naive MP, but it solves the GPU idling problem, by chunking the incoming batch into micro-batches and artificially creating a pipeline, which allows different GPUs to concurrently participate in the computation process."),qw.forEach(t),pn=f(e),Le=i(e,"P",{});var ch=o(Le);Ld=s(ch,"The following illustration from the "),pt=i(ch,"A",{href:!0,rel:!0});var Ww=o(pt);Dd=s(Ww,"GPipe paper"),Ww.forEach(t),Gd=s(ch," shows the naive MP on the top, and PP on the bottom:"),ch.forEach(t),hn=f(e),ta=i(e,"P",{});var Vw=o(ta);la=i(Vw,"IMG",{src:!0,alt:!0}),Vw.forEach(t),fn=f(e),aa=i(e,"P",{});var Hw=o(aa);Ud=s(Hw,"It\u2019s easy to see from the bottom diagram how PP has less dead zones, where GPUs are idle. The idle parts are referred to as the \u201Cbubble\u201D."),Hw.forEach(t),dn=f(e),ia=i(e,"P",{});var Xw=o(ia);kd=s(Xw,"Both parts of the diagram show a parallelism that is of degree 4. That is 4 GPUs are participating in the pipeline. So there is the forward path of 4 pipe stages F0, F1, F2 and F3 and then the return reverse order backward path of B3, B2, B1 and B0."),Xw.forEach(t),cn=f(e),R=i(e,"P",{});var ki=o(R);Td=s(ki,"PP introduces a new hyper-parameter to tune and it\u2019s "),Po=i(ki,"CODE",{});var Yw=o(Po);Id=s(Yw,"chunks"),Yw.forEach(t),Sd=s(ki," which defines how many chunks of data are sent in a sequence through the same pipe stage. For example, in the bottomw diagram you can see that "),wo=i(ki,"CODE",{});var Jw=o(wo);Ad=s(Jw,"chunks=4"),Jw.forEach(t),Od=s(ki,". GPU0 performs the same forward path on chunk 0, 1, 2 and 3 (F0,0, F0,1, F0,2, F0,3) and then it waits for other GPUs to do their work and only when their work is starting to be complete, GPU0 starts to work again doing the backward path for chunks 3, 2, 1 and 0 (B0,3, B0,2, B0,1, B0,0)."),ki.forEach(t),un=f(e),De=i(e,"P",{});var uh=o(De);Md=s(uh,"Note that conceptually this is the same concept as gradient accumulation steps (GAS). Pytorch uses "),yo=i(uh,"CODE",{});var Kw=o(yo);$d=s(Kw,"chunks"),Kw.forEach(t),xd=s(uh,", whereas DeepSpeed refers to the same hyper-parameter as GAS."),uh.forEach(t),mn=f(e),Ge=i(e,"P",{});var mh=o(Ge);zd=s(mh,"Because of the chunks, PP introduces the concept of micro-batches (MBS). DP splits the global data batch size into mini-batches, so if you have a DP degree of 4, a global batch size of 1024 gets split up into 4 mini-batches of 256 each (1024/4). And if the number of "),bo=i(mh,"CODE",{});var Qw=o(bo);Nd=s(Qw,"chunks"),Qw.forEach(t),Rd=s(mh," (or GAS) is 32 we end up with a micro-batch size of 8 (256/32). 
Each Pipeline stage works with a single micro-batch at a time."),mh.forEach(t),vn=f(e),Z=i(e,"P",{});var Ti=o(Z);Zd=s(Ti,"To calculate the global batch size of the DP + PP setup we then do: "),_o=i(Ti,"CODE",{});var e2=o(_o);Cd=s(e2,"mbs*chunks*dp_degree"),e2.forEach(t),jd=s(Ti," ("),Eo=i(Ti,"CODE",{});var t2=o(Eo);Fd=s(t2,"8*32*4=1024"),t2.forEach(t),Bd=s(Ti,")."),Ti.forEach(t),Pn=f(e),oa=i(e,"P",{});var l2=o(oa);qd=s(l2,"Let\u2019s go back to the diagram."),l2.forEach(t),wn=f(e),C=i(e,"P",{});var Ii=o(C);Wd=s(Ii,"With "),go=i(Ii,"CODE",{});var a2=o(go);Vd=s(a2,"chunks=1"),a2.forEach(t),Hd=s(Ii," you end up with the naive MP, which is very inefficient. With a very large "),Lo=i(Ii,"CODE",{});var i2=o(Lo);Xd=s(i2,"chunks"),i2.forEach(t),Yd=s(Ii," value you end up with tiny micro-batch sizes which could be not every efficient either. So one has to experiment to find the value that leads to the highest efficient utilization of the gpus."),Ii.forEach(t),yn=f(e),k=i(e,"P",{});var Be=o(k);Jd=s(Be,"While the diagram shows that there is a bubble of \u201Cdead\u201D time that can\u2019t be parallelized because the last "),Do=i(Be,"CODE",{});var o2=o(Do);Kd=s(o2,"forward"),o2.forEach(t),Qd=s(Be," stage has to wait for "),Go=i(Be,"CODE",{});var r2=o(Go);ec=s(r2,"backward"),r2.forEach(t),tc=s(Be," to complete the pipeline, the purpose of finding the best value for "),Uo=i(Be,"CODE",{});var s2=o(Uo);lc=s(s2,"chunks"),s2.forEach(t),ac=s(Be," is to enable a high concurrent GPU utilization across all participating GPUs which translates to minimizing the size of the bubble."),Be.forEach(t),bn=f(e),ra=i(e,"P",{});var n2=o(ra);ic=s(n2,"There are 2 groups of solutions - the traditional Pipeline API and the more modern solutions that make things much easier for the end user."),n2.forEach(t),_n=f(e),sa=i(e,"P",{});var p2=o(sa);oc=s(p2,"Traditional Pipeline API solutions:"),p2.forEach(t),En=f(e),T=i(e,"UL",{});var qe=o(T);ko=i(qe,"LI",{});var h2=o(ko);rc=s(h2,"PyTorch"),h2.forEach(t),sc=f(qe),To=i(qe,"LI",{});var f2=o(To);nc=s(f2,"FairScale"),f2.forEach(t),pc=f(qe),Io=i(qe,"LI",{});var d2=o(Io);hc=s(d2,"DeepSpeed"),d2.forEach(t),fc=f(qe),So=i(qe,"LI",{});var c2=o(So);dc=s(c2,"Megatron-LM"),c2.forEach(t),qe.forEach(t),gn=f(e),na=i(e,"P",{});var u2=o(na);cc=s(u2,"Modern solutions:"),u2.forEach(t),Ln=f(e),Ue=i(e,"UL",{});var vh=o(Ue);Ao=i(vh,"LI",{});var m2=o(Ao);uc=s(m2,"Varuna"),m2.forEach(t),mc=f(vh),Oo=i(vh,"LI",{});var v2=o(Oo);vc=s(v2,"Sagemaker"),v2.forEach(t),vh.forEach(t),Dn=f(e),pa=i(e,"P",{});var P2=o(pa);Pc=s(P2,"Problems with traditional Pipeline API solutions:"),P2.forEach(t),Gn=f(e),I=i(e,"UL",{});var We=o(I);ht=i(We,"LI",{});var Ph=o(ht);wc=s(Ph,"have to modify the model quite heavily, because Pipeline requires one to rewrite the normal flow of modules into a "),Mo=i(Ph,"CODE",{});var w2=o(Mo);yc=s(w2,"nn.Sequential"),w2.forEach(t),bc=s(Ph," sequence of the same, which may require changes to the design of the model."),Ph.forEach(t),_c=f(We),ha=i(We,"LI",{});var r1=o(ha);Ec=s(r1,"currently the Pipeline API is very restricted. If you had a bunch of python variables being passed in the very first stage of the Pipeline, you will have to find a way around it. Currently, the pipeline interface requires either a single Tensor or a tuple of Tensors as the only input and output. These tensors must have a batch size as the very first dimension, since pipeline is going to chunk the mini batch into micro-batches. 
Possible improvements are being discussed here "),ft=i(r1,"A",{href:!0,rel:!0});var y2=o(ft);gc=s(y2,"https://github.com/pytorch/pytorch/pull/50693"),y2.forEach(t),r1.forEach(t),Lc=f(We),$o=i(We,"LI",{});var b2=o($o);Dc=s(b2,"conditional control flow at the level of pipe stages is not possible - e.g., Encoder-Decoder models like T5 require special workarounds to handle a conditional encoder stage."),b2.forEach(t),Gc=f(We),xo=i(We,"LI",{});var _2=o(xo);Uc=s(_2,"have to arrange each layer so that the output of one model becomes an input to the other model."),_2.forEach(t),We.forEach(t),Un=f(e),fa=i(e,"P",{});var E2=o(fa);kc=s(E2,"We are yet to experiment with Varuna and SageMaker but their papers report that they have overcome the list of problems mentioned above and that they require much smaller changes to the user\u2019s model."),E2.forEach(t),kn=f(e),da=i(e,"P",{});var g2=o(da);Tc=s(g2,"Implementations:"),g2.forEach(t),Tn=f(e),u=i(e,"UL",{});var G=o(u);dt=i(G,"LI",{});var wh=o(dt);ct=i(wh,"A",{href:!0,rel:!0});var L2=o(ct);Ic=s(L2,"Pytorch"),L2.forEach(t),Sc=s(wh," (initial support in pytorch-1.8, and progressively getting improved in 1.9 and more so in 1.10). Some "),ut=i(wh,"A",{href:!0,rel:!0});var D2=o(ut);Ac=s(D2,"examples"),D2.forEach(t),wh.forEach(t),Oc=f(G),zo=i(G,"LI",{});var G2=o(zo);mt=i(G2,"A",{href:!0,rel:!0});var U2=o(mt);Mc=s(U2,"FairScale"),U2.forEach(t),G2.forEach(t),$c=f(G),No=i(G,"LI",{});var k2=o(No);vt=i(k2,"A",{href:!0,rel:!0});var T2=o(vt);xc=s(T2,"DeepSpeed"),T2.forEach(t),k2.forEach(t),zc=f(G),ca=i(G,"LI",{});var s1=o(ca);Pt=i(s1,"A",{href:!0,rel:!0});var I2=o(Pt);Nc=s(I2,"Megatron-LM"),I2.forEach(t),Rc=s(s1," has an internal implementation - no API."),s1.forEach(t),Zc=f(G),Ro=i(G,"LI",{});var S2=o(Ro);wt=i(S2,"A",{href:!0,rel:!0});var A2=o(wt);Cc=s(A2,"Varuna"),A2.forEach(t),S2.forEach(t),jc=f(G),ua=i(G,"LI",{});var n1=o(ua);yt=i(n1,"A",{href:!0,rel:!0});var O2=o(yt);Fc=s(O2,"SageMaker"),O2.forEach(t),Bc=s(n1," - this is a proprietary solution that can only be used on AWS."),n1.forEach(t),qc=f(G),ma=i(G,"LI",{});var p1=o(ma);bt=i(p1,"A",{href:!0,rel:!0});var M2=o(bt);Wc=s(M2,"OSLO"),M2.forEach(t),Vc=s(p1," - this is implemented based on the Hugging Face Transformers."),p1.forEach(t),G.forEach(t),In=f(e),ke=i(e,"P",{});var yh=o(ke);Hc=s(yh,"\u{1F917} Transformers status: as of this writing none of the models supports full-PP. GPT2 and T5 models have naive MP support. The main obstacle is being unable to convert the models to "),Zo=i(yh,"CODE",{});var $2=o(Zo);Xc=s($2,"nn.Sequential"),$2.forEach(t),Yc=s(yh," and have all the inputs to be Tensors. 
This is because currently the models include many features that make the conversion very complicated, and will need to be removed to accomplish that."),yh.forEach(t),Sn=f(e),va=i(e,"P",{});var x2=o(va);Jc=s(x2,"Other approaches:"),x2.forEach(t),An=f(e),oe=i(e,"P",{});var cs=o(oe);Kc=s(cs,"DeepSpeed, Varuna and SageMaker use the concept of an "),_t=i(cs,"A",{href:!0,rel:!0});var z2=o(_t);Qc=s(z2,"Interleaved Pipeline"),z2.forEach(t),eu=f(cs),Pa=i(cs,"IMG",{src:!0,alt:!0}),cs.forEach(t),On=f(e),wa=i(e,"P",{});var N2=o(wa);tu=s(N2,"Here the bubble (idle time) is further minimized by prioritizing backward passes."),N2.forEach(t),Mn=f(e),ya=i(e,"P",{});var R2=o(ya);lu=s(R2,"Varuna further tries to improve the schedule by using simulations to discover the most efficient scheduling."),R2.forEach(t),$n=f(e),Te=i(e,"P",{});var bh=o(Te);au=s(bh,"OSLO has pipeline parallelism implementation based on the Transformers without "),Co=i(bh,"CODE",{});var Z2=o(Co);iu=s(Z2,"nn.Sequential"),Z2.forEach(t),ou=s(bh," converting."),bh.forEach(t),xn=f(e),re=i(e,"H2",{class:!0});var _h=o(re);Ie=i(_h,"A",{id:!0,class:!0,href:!0});var C2=o(Ie);jo=i(C2,"SPAN",{});var j2=o(jo);P(Et.$$.fragment,j2),j2.forEach(t),C2.forEach(t),ru=f(_h),Fo=i(_h,"SPAN",{});var F2=o(Fo);su=s(F2,"Tensor Parallelism"),F2.forEach(t),_h.forEach(t),zn=f(e),ba=i(e,"P",{});var B2=o(ba);nu=s(B2,"In Tensor Parallelism each GPU processes only a slice of a tensor and only aggregates the full tensor for operations that require the whole thing."),B2.forEach(t),Nn=f(e),j=i(e,"P",{});var Si=o(j);pu=s(Si,"In this section we use concepts and diagrams from the "),gt=i(Si,"A",{href:!0,rel:!0});var q2=o(gt);hu=s(q2,"Megatron-LM"),q2.forEach(t),fu=s(Si," paper: "),Lt=i(Si,"A",{href:!0,rel:!0});var W2=o(Lt);du=s(W2,"Efficient Large-Scale Language Model Training on GPU Clusters"),W2.forEach(t),cu=s(Si,"."),Si.forEach(t),Rn=f(e),F=i(e,"P",{});var Ai=o(F);uu=s(Ai,"The main building block of any transformer is a fully connected "),Bo=i(Ai,"CODE",{});var V2=o(Bo);mu=s(V2,"nn.Linear"),V2.forEach(t),vu=s(Ai," followed by a nonlinear activation "),qo=i(Ai,"CODE",{});var H2=o(qo);Pu=s(H2,"GeLU"),H2.forEach(t),wu=s(Ai,"."),Ai.forEach(t),Zn=f(e),g=i(e,"P",{});var X=o(g);yu=s(X,"Following the Megatron\u2019s paper notation, we can write the dot-product part of it as "),Wo=i(X,"CODE",{});var X2=o(Wo);bu=s(X2,"Y = GeLU(XA)"),X2.forEach(t),_u=s(X,", where "),Vo=i(X,"CODE",{});var Y2=o(Vo);Eu=s(Y2,"X"),Y2.forEach(t),gu=s(X," and "),Ho=i(X,"CODE",{});var J2=o(Ho);Lu=s(J2,"Y"),J2.forEach(t),Du=s(X," are the input and output vectors, and "),Xo=i(X,"CODE",{});var K2=o(Xo);Gu=s(K2,"A"),K2.forEach(t),Uu=s(X," is the weight matrix."),X.forEach(t),Cn=f(e),Dt=i(e,"P",{});var h1=o(Dt);ku=s(h1,`If we look at the computation in matrix form, it\u2019s easy to see how the matrix multiplication can be split between multiple GPUs: `),_a=i(h1,"IMG",{src:!0,alt:!0}),h1.forEach(t),jn=f(e),c=i(e,"P",{});var m=o(c);Tu=s(m,"If we split the weight matrix "),Yo=i(m,"CODE",{});var Q2=o(Yo);Iu=s(Q2,"A"),Q2.forEach(t),Su=s(m," column-wise across "),Jo=i(m,"CODE",{});var ey=o(Jo);Au=s(ey,"N"),ey.forEach(t),Ou=s(m," GPUs and perform matrix multiplications "),Ko=i(m,"CODE",{});var ty=o(Ko);Mu=s(ty,"XA_1"),ty.forEach(t),$u=s(m," through "),Qo=i(m,"CODE",{});var ly=o(Qo);xu=s(ly,"XA_n"),ly.forEach(t),zu=s(m," in parallel, then we will end up with "),er=i(m,"CODE",{});var ay=o(er);Nu=s(ay,"N"),ay.forEach(t),Ru=s(m," output vectors "),tr=i(m,"CODE",{});var iy=o(tr);Zu=s(iy,"Y_1, Y_2, ..., 
Y_n"),iy.forEach(t),Cu=s(m," which can be fed into "),lr=i(m,"CODE",{});var oy=o(lr);ju=s(oy,"GeLU"),oy.forEach(t),Fu=s(m,` independently: `),Ea=i(m,"IMG",{src:!0,alt:!0}),m.forEach(t),Fn=f(e),Gt=i(e,"P",{});var f1=o(Gt);Bu=s(f1,`Using this principle, we can update an MLP of arbitrary depth, without the need for any synchronization between GPUs until the very end, where we need to reconstruct the output vector from shards. The Megatron-LM paper authors provide a helpful illustration for that: `),ga=i(f1,"IMG",{src:!0,alt:!0}),f1.forEach(t),Bn=f(e),Ut=i(e,"P",{});var d1=o(Ut);qu=s(d1,`Parallelizing the multi-headed attention layers is even simpler, since they are already inherently parallel, due to having multiple independent heads! `),La=i(d1,"IMG",{src:!0,alt:!0}),d1.forEach(t),qn=f(e),Da=i(e,"P",{});var ry=o(Da);Wu=s(ry,"Special considerations: TP requires very fast network, and therefore it\u2019s not advisable to do TP across more than one node. Practically, if a node has 4 GPUs, the highest TP degree is therefore 4. If you need a TP degree of 8, you need to use nodes that have at least 8 GPUs."),ry.forEach(t),Wn=f(e),B=i(e,"P",{});var Oi=o(B);Vu=s(Oi,"This section is based on the original much more "),kt=i(Oi,"A",{href:!0,rel:!0});var sy=o(kt);Hu=s(sy,"detailed TP overview"),sy.forEach(t),Xu=s(Oi,`. by `),Tt=i(Oi,"A",{href:!0,rel:!0});var ny=o(Tt);Yu=s(ny,"@anton-l"),ny.forEach(t),Ju=s(Oi,"."),Oi.forEach(t),Vn=f(e),Ga=i(e,"P",{});var py=o(Ga);Ku=s(py,"SageMaker combines TP with DP for a more efficient processing."),py.forEach(t),Hn=f(e),Ua=i(e,"P",{});var hy=o(Ua);Qu=s(hy,"Alternative names:"),hy.forEach(t),Xn=f(e),ka=i(e,"UL",{});var fy=o(ka);Ta=i(fy,"LI",{});var c1=o(Ta);em=s(c1,"DeepSpeed calls it "),It=i(c1,"A",{href:!0,rel:!0});var dy=o(It);tm=s(dy,"tensor slicing"),dy.forEach(t),c1.forEach(t),fy.forEach(t),Yn=f(e),Ia=i(e,"P",{});var cy=o(Ia);lm=s(cy,"Implementations:"),cy.forEach(t),Jn=f(e),S=i(e,"UL",{});var Ve=o(S);Sa=i(Ve,"LI",{});var u1=o(Sa);St=i(u1,"A",{href:!0,rel:!0});var uy=o(St);am=s(uy,"Megatron-LM"),uy.forEach(t),im=s(u1," has an internal implementation, as it\u2019s very model-specific"),u1.forEach(t),om=f(Ve),Aa=i(Ve,"LI",{});var m1=o(Aa);At=i(m1,"A",{href:!0,rel:!0});var my=o(At);rm=s(my,"parallelformers"),my.forEach(t),sm=s(m1," (only inference at the moment)"),m1.forEach(t),nm=f(Ve),Oa=i(Ve,"LI",{});var v1=o(Oa);Ot=i(v1,"A",{href:!0,rel:!0});var vy=o(Ot);pm=s(vy,"SageMaker"),vy.forEach(t),hm=s(v1," - this is a proprietary solution that can only be used on AWS."),v1.forEach(t),fm=f(Ve),Ma=i(Ve,"LI",{});var P1=o(Ma);Mt=i(P1,"A",{href:!0,rel:!0});var Py=o(Mt);dm=s(Py,"OSLO"),Py.forEach(t),cm=s(P1," has the tensor parallelism implementation based on the Transformers."),P1.forEach(t),Ve.forEach(t),Kn=f(e),$a=i(e,"P",{});var wy=o($a);um=s(wy,"\u{1F917} Transformers status:"),wy.forEach(t),Qn=f(e),q=i(e,"UL",{});var Mi=o(q);ar=i(Mi,"LI",{});var yy=o(ar);mm=s(yy,"core: not yet implemented in the core"),yy.forEach(t),vm=f(Mi),$t=i(Mi,"LI",{});var Eh=o($t);Pm=s(Eh,"but if you want inference "),xt=i(Eh,"A",{href:!0,rel:!0});var by=o(xt);wm=s(by,"parallelformers"),by.forEach(t),ym=s(Eh," provides this support for most of our models. So until this is implemented in the core you can use theirs. 
And hopefully training mode will be supported too."),Eh.forEach(t),bm=f(Mi),xa=i(Mi,"LI",{});var w1=o(xa);_m=s(w1,"Deepspeed-Inference also supports our BERT, GPT-2, and GPT-Neo models in their super-fast CUDA-kernel-based inference mode, see more "),zt=i(w1,"A",{href:!0,rel:!0});var _y=o(zt);Em=s(_y,"here"),_y.forEach(t),w1.forEach(t),Mi.forEach(t),ep=f(e),se=i(e,"H2",{class:!0});var gh=o(se);Se=i(gh,"A",{id:!0,class:!0,href:!0});var Ey=o(Se);ir=i(Ey,"SPAN",{});var gy=o(ir);P(Nt.$$.fragment,gy),gy.forEach(t),Ey.forEach(t),gm=f(gh),or=i(gh,"SPAN",{});var Ly=o(or);Lm=s(Ly,"DP+PP"),Ly.forEach(t),gh.forEach(t),tp=f(e),Ae=i(e,"P",{});var Lh=o(Ae);Dm=s(Lh,"The following diagram from the DeepSpeed "),Rt=i(Lh,"A",{href:!0,rel:!0});var Dy=o(Rt);Gm=s(Dy,"pipeline tutorial"),Dy.forEach(t),Um=s(Lh," demonstrates how one combines DP with PP."),Lh.forEach(t),lp=f(e),za=i(e,"P",{});var Gy=o(za);Na=i(Gy,"IMG",{src:!0,alt:!0}),Gy.forEach(t),ap=f(e),Ra=i(e,"P",{});var Uy=o(Ra);km=s(Uy,"Here it\u2019s important to see how DP rank 0 doesn\u2019t see GPU2 and DP rank 1 doesn\u2019t see GPU3. To DP there is just GPUs 0 and 1 where it feeds data as if there were just 2 GPUs. GPU0 \u201Csecretly\u201D offloads some of its load to GPU2 using PP. And GPU1 does the same by enlisting GPU3 to its aid."),Uy.forEach(t),ip=f(e),Za=i(e,"P",{});var ky=o(Za);Tm=s(ky,"Since each dimension requires at least 2 GPUs, here you\u2019d need at least 4 GPUs."),ky.forEach(t),op=f(e),Ca=i(e,"P",{});var Ty=o(Ca);Im=s(Ty,"Implementations:"),Ty.forEach(t),rp=f(e),L=i(e,"UL",{});var Y=o(L);rr=i(Y,"LI",{});var Iy=o(rr);Zt=i(Iy,"A",{href:!0,rel:!0});var Sy=o(Zt);Sm=s(Sy,"DeepSpeed"),Sy.forEach(t),Iy.forEach(t),Am=f(Y),sr=i(Y,"LI",{});var Ay=o(sr);Ct=i(Ay,"A",{href:!0,rel:!0});var Oy=o(Ct);Om=s(Oy,"Megatron-LM"),Oy.forEach(t),Ay.forEach(t),Mm=f(Y),nr=i(Y,"LI",{});var My=o(nr);jt=i(My,"A",{href:!0,rel:!0});var $y=o(jt);$m=s($y,"Varuna"),$y.forEach(t),My.forEach(t),xm=f(Y),pr=i(Y,"LI",{});var xy=o(pr);Ft=i(xy,"A",{href:!0,rel:!0});var zy=o(Ft);zm=s(zy,"SageMaker"),zy.forEach(t),xy.forEach(t),Nm=f(Y),hr=i(Y,"LI",{});var Ny=o(hr);Bt=i(Ny,"A",{href:!0,rel:!0});var Ry=o(Bt);Rm=s(Ry,"OSLO"),Ry.forEach(t),Ny.forEach(t),Y.forEach(t),sp=f(e),ja=i(e,"P",{});var Zy=o(ja);Zm=s(Zy,"\u{1F917} Transformers status: not yet implemented"),Zy.forEach(t),np=f(e),ne=i(e,"H2",{class:!0});var Dh=o(ne);Oe=i(Dh,"A",{id:!0,class:!0,href:!0});var Cy=o(Oe);fr=i(Cy,"SPAN",{});var jy=o(fr);P(qt.$$.fragment,jy),jy.forEach(t),Cy.forEach(t),Cm=f(Dh),dr=i(Dh,"SPAN",{});var Fy=o(dr);jm=s(Fy,"DP+PP+TP"),Fy.forEach(t),Dh.forEach(t),pp=f(e),Fa=i(e,"P",{});var By=o(Fa);Fm=s(By,"To get an even more efficient training a 3D parallelism is used where PP is combined with TP and DP. 
This can be seen in the following diagram."),By.forEach(t),hp=f(e),Ba=i(e,"P",{});var qy=o(Ba);qa=i(qy,"IMG",{src:!0,alt:!0}),qy.forEach(t),fp=f(e),Me=i(e,"P",{});var Gh=o(Me);Bm=s(Gh,"This diagram is from a blog post "),Wt=i(Gh,"A",{href:!0,rel:!0});var Wy=o(Wt);qm=s(Wy,"3D parallelism: Scaling to trillion-parameter models"),Wy.forEach(t),Wm=s(Gh,", which is a good read as well."),Gh.forEach(t),dp=f(e),Wa=i(e,"P",{});var Vy=o(Wa);Vm=s(Vy,"Since each dimension requires at least 2 GPUs, here you\u2019d need at least 8 GPUs."),Vy.forEach(t),cp=f(e),Va=i(e,"P",{});var Hy=o(Va);Hm=s(Hy,"Implementations:"),Hy.forEach(t),up=f(e),D=i(e,"UL",{});var J=o(D);Ha=i(J,"LI",{});var y1=o(Ha);Vt=i(y1,"A",{href:!0,rel:!0});var Xy=o(Vt);Xm=s(Xy,"DeepSpeed"),Xy.forEach(t),Ym=s(y1," - DeepSpeed also includes an even more efficient DP, which they call ZeRO-DP."),y1.forEach(t),Jm=f(J),cr=i(J,"LI",{});var Yy=o(cr);Ht=i(Yy,"A",{href:!0,rel:!0});var Jy=o(Ht);Km=s(Jy,"Megatron-LM"),Jy.forEach(t),Yy.forEach(t),Qm=f(J),ur=i(J,"LI",{});var Ky=o(ur);Xt=i(Ky,"A",{href:!0,rel:!0});var Qy=o(Xt);ev=s(Qy,"Varuna"),Qy.forEach(t),Ky.forEach(t),tv=f(J),mr=i(J,"LI",{});var eb=o(mr);Yt=i(eb,"A",{href:!0,rel:!0});var tb=o(Yt);lv=s(tb,"SageMaker"),tb.forEach(t),eb.forEach(t),av=f(J),vr=i(J,"LI",{});var lb=o(vr);Jt=i(lb,"A",{href:!0,rel:!0});var ab=o(Jt);iv=s(ab,"OSLO"),ab.forEach(t),lb.forEach(t),J.forEach(t),mp=f(e),Xa=i(e,"P",{});var ib=o(Xa);ov=s(ib,"\u{1F917} Transformers status: not yet implemented, since we have no PP and TP."),ib.forEach(t),vp=f(e),pe=i(e,"H2",{class:!0});var Uh=o(pe);$e=i(Uh,"A",{id:!0,class:!0,href:!0});var ob=o($e);Pr=i(ob,"SPAN",{});var rb=o(Pr);P(Kt.$$.fragment,rb),rb.forEach(t),ob.forEach(t),rv=f(Uh),wr=i(Uh,"SPAN",{});var sb=o(wr);sv=s(sb,"ZeRO DP+PP+TP"),sb.forEach(t),Uh.forEach(t),Pp=f(e),xe=i(e,"P",{});var kh=o(xe);nv=s(kh,"One of the main features of DeepSpeed is ZeRO, which is a super-scalable extension of DP. It has already been discussed in "),Ya=i(kh,"A",{href:!0});var nb=o(Ya);pv=s(nb,"ZeRO Data Parallelism"),nb.forEach(t),hv=s(kh,". Normally it\u2019s a standalone feature that doesn\u2019t require PP or TP. But it can be combined with PP and TP."),kh.forEach(t),wp=f(e),Ja=i(e,"P",{});var pb=o(Ja);fv=s(pb,"When ZeRO-DP is combined with PP (and optionally TP) it typically enables only ZeRO stage 1 (optimizer sharding)."),pb.forEach(t),yp=f(e),Ka=i(e,"P",{});var hb=o(Ka);dv=s(hb,"While it\u2019s theoretically possible to use ZeRO stage 2 (gradient sharding) with Pipeline Parallelism, it will have bad performance impacts. There would need to be an additional reduce-scatter collective for every micro-batch to aggregate the gradients before sharding, which adds a potentially significant communication overhead. By nature of Pipeline Parallelism, small micro-batches are used and instead the focus is on trying to balance arithmetic intensity (micro-batch size) with minimizing the Pipeline bubble (number of micro-batches). Therefore those communication costs are going to hurt."),hb.forEach(t),bp=f(e),ze=i(e,"P",{});var Th=o(ze);cv=s(Th,"In addition, There are already fewer layers than normal due to PP and so the memory savings won\u2019t be huge. 
PP already reduces gradient size by "),yr=i(Th,"CODE",{});var fb=o(yr);uv=s(fb,"1/PP"),fb.forEach(t),mv=s(Th,", and so gradient sharding savings on top of that are less significant than pure DP."),Th.forEach(t),_p=f(e),Qa=i(e,"P",{});var db=o(Qa);vv=s(db,"ZeRO stage 3 is not a good choice either for the same reason - more inter-node communications required."),db.forEach(t),Ep=f(e),ei=i(e,"P",{});var cb=o(ei);Pv=s(cb,"And since we have ZeRO, the other benefit is ZeRO-Offload. Since this is stage 1 optimizer states can be offloaded to CPU."),cb.forEach(t),gp=f(e),ti=i(e,"P",{});var ub=o(ti);wv=s(ub,"Implementations:"),ub.forEach(t),Lp=f(e),Ne=i(e,"UL",{});var Ih=o(Ne);Re=i(Ih,"LI",{});var us=o(Re);Qt=i(us,"A",{href:!0,rel:!0});var mb=o(Qt);yv=s(mb,"Megatron-DeepSpeed"),mb.forEach(t),bv=s(us," and "),el=i(us,"A",{href:!0,rel:!0});var vb=o(el);_v=s(vb,"Megatron-Deepspeed from BigScience"),vb.forEach(t),Ev=s(us,", which is the fork of the former repo."),us.forEach(t),gv=f(Ih),br=i(Ih,"LI",{});var Pb=o(br);tl=i(Pb,"A",{href:!0,rel:!0});var wb=o(tl);Lv=s(wb,"OSLO"),wb.forEach(t),Pb.forEach(t),Ih.forEach(t),Dp=f(e),li=i(e,"P",{});var yb=o(li);Dv=s(yb,"Important papers:"),yb.forEach(t),Gp=f(e),ai=i(e,"UL",{});var bb=o(ai);_r=i(bb,"LI",{});var _b=o(_r);ll=i(_b,"A",{href:!0,rel:!0});var Eb=o(ll);Gv=s(Eb,"Using DeepSpeed and Megatron to Train Megatron-Turing NLG 530B, A Large-Scale Generative Language Model"),Eb.forEach(t),_b.forEach(t),bb.forEach(t),Up=f(e),ii=i(e,"P",{});var gb=o(ii);Uv=s(gb,"\u{1F917} Transformers status: not yet implemented, since we have no PP and TP."),gb.forEach(t),kp=f(e),he=i(e,"H2",{class:!0});var Sh=o(he);Ze=i(Sh,"A",{id:!0,class:!0,href:!0});var Lb=o(Ze);Er=i(Lb,"SPAN",{});var Db=o(Er);P(al.$$.fragment,Db),Db.forEach(t),Lb.forEach(t),kv=f(Sh),gr=i(Sh,"SPAN",{});var Gb=o(gr);Tv=s(Gb,"FlexFlow"),Gb.forEach(t),Sh.forEach(t),Tp=f(e),il=i(e,"P",{});var b1=o(il);ol=i(b1,"A",{href:!0,rel:!0});var Ub=o(ol);Iv=s(Ub,"FlexFlow"),Ub.forEach(t),Sv=s(b1," also solves the parallelization problem in a slightly different approach."),b1.forEach(t),Ip=f(e),rl=i(e,"P",{});var _1=o(rl);Av=s(_1,"Paper: "),sl=i(_1,"A",{href:!0,rel:!0});var kb=o(sl);Ov=s(kb,"\u201CBeyond Data and Model Parallelism for Deep Neural Networks\u201D by Zhihao Jia, Matei Zaharia, Alex Aiken"),kb.forEach(t),_1.forEach(t),Sp=f(e),oi=i(e,"P",{});var Tb=o(oi);Mv=s(Tb,"It performs a sort of 4D Parallelism over Sample-Operator-Attribute-Parameter."),Tb.forEach(t),Ap=f(e),A=i(e,"OL",{});var He=o(A);Lr=i(He,"LI",{});var Ib=o(Lr);$v=s(Ib,"Sample = Data Parallelism (sample-wise parallel)"),Ib.forEach(t),xv=f(He),Dr=i(He,"LI",{});var Sb=o(Dr);zv=s(Sb,"Operator = Parallelize a single operation into several sub-operations"),Sb.forEach(t),Nv=f(He),Gr=i(He,"LI",{});var Ab=o(Gr);Rv=s(Ab,"Attribute = Data Parallelism (length-wise parallel)"),Ab.forEach(t),Zv=f(He),Ur=i(He,"LI",{});var Ob=o(Ur);Cv=s(Ob,"Parameter = Model Parallelism (regardless of dimension - horizontal or vertical)"),Ob.forEach(t),He.forEach(t),Op=f(e),ri=i(e,"P",{});var Mb=o(ri);jv=s(Mb,"Examples:"),Mb.forEach(t),Mp=f(e),si=i(e,"UL",{});var $b=o(si);kr=i($b,"LI",{});var xb=o(kr);Fv=s(xb,"Sample"),xb.forEach(t),$b.forEach(t),$p=f(e),ni=i(e,"P",{});var zb=o(ni);Bv=s(zb,"Let\u2019s take 10 batches of sequence length 512. 
If we parallelize them by sample dimension into 2 devices, we get 10 x 512 which becomes 5 x 2 x 512."),zb.forEach(t),xp=f(e),pi=i(e,"UL",{});var Nb=o(pi);Tr=i(Nb,"LI",{});var Rb=o(Tr);qv=s(Rb,"Operator"),Rb.forEach(t),Nb.forEach(t),zp=f(e),hi=i(e,"P",{});var Zb=o(hi);Wv=s(Zb,"If we perform layer normalization, we compute std first and mean second, and then we can normalize data. Operator parallelism allows computing std and mean in parallel. So if we parallelize them by operator dimension into 2 devices (cuda:0, cuda:1), first we copy input data into both devices, and cuda:0 computes std, cuda:1 computes mean at the same time."),Zb.forEach(t),Np=f(e),fi=i(e,"UL",{});var Cb=o(fi);Ir=i(Cb,"LI",{});var jb=o(Ir);Vv=s(jb,"Attribute"),jb.forEach(t),Cb.forEach(t),Rp=f(e),di=i(e,"P",{});var Fb=o(di);Hv=s(Fb,"We have 10 batches of 512 length. If we parallelize them by attribute dimension into 2 devices, 10 x 512 will be 10 x 2 x 256."),Fb.forEach(t),Zp=f(e),ci=i(e,"UL",{});var Bb=o(ci);Sr=i(Bb,"LI",{});var qb=o(Sr);Xv=s(qb,"Parameter"),qb.forEach(t),Bb.forEach(t),Cp=f(e),ui=i(e,"P",{});var Wb=o(ui);Yv=s(Wb,"It is similar to tensor model parallelism or naive layer-wise model parallelism."),Wb.forEach(t),jp=f(e),mi=i(e,"P",{});var Vb=o(mi);vi=i(Vb,"IMG",{src:!0,alt:!0}),Vb.forEach(t),Fp=f(e),Pi=i(e,"P",{});var Hb=o(Pi);Jv=s(Hb,"The significance of this framework is that it takes resources like (1) GPU/TPU/CPU vs. (2) RAM/DRAM vs. (3) fast-intra-connect/slow-inter-connect and it automatically optimizes all these algorithmically, deciding which parallelisation to use where."),Hb.forEach(t),Bp=f(e),wi=i(e,"P",{});var Xb=o(wi);Kv=s(Xb,"One very important aspect is that FlexFlow is designed for optimizing DNN parallelizations for models with static and fixed workloads, since models with dynamic behavior may prefer different parallelization strategies across iterations."),Xb.forEach(t),qp=f(e),yi=i(e,"P",{});var Yb=o(yi);Qv=s(Yb,"So the promise is very attractive - it runs a 30min simulation on the cluster of choice and it comes up with the best strategy to utilise this specific environment. If you add/remove/replace any parts it\u2019ll run and re-optimize the plan for that. And then you can train. A different setup will have its own custom optimization."),Yb.forEach(t),Wp=f(e),Ce=i(e,"P",{});var Ah=o(Ce);eP=s(Ah,"\u{1F917} Transformers status: not yet integrated. We already have our models FX-trace-able via "),nl=i(Ah,"A",{href:!0,rel:!0});var Jb=o(nl);tP=s(Jb,"transformers.utils.fx"),Jb.forEach(t),lP=s(Ah,", which is a prerequisite for FlexFlow, so someone needs to figure out what needs to be done to make FlexFlow work with our models."),Ah.forEach(t),Vp=f(e),fe=i(e,"H2",{class:!0});var Oh=o(fe);je=i(Oh,"A",{id:!0,class:!0,href:!0});var Kb=o(je);Ar=i(Kb,"SPAN",{});var Qb=o(Ar);P(pl.$$.fragment,Qb),Qb.forEach(t),Kb.forEach(t),aP=f(Oh),Or=i(Oh,"SPAN",{});var e3=o(Or);iP=s(e3,"Which Strategy To Use When"),e3.forEach(t),Oh.forEach(t),Hp=f(e),bi=i(e,"P",{});var t3=o(bi);oP=s(t3,"Here is a very rough outline of which parallelism strategy to use when. 
The first on each list is typically faster."),t3.forEach(t),Xp=f(e),_i=i(e,"P",{});var l3=o(_i);Mr=i(l3,"STRONG",{});var a3=o(Mr);rP=s(a3,"\u21E8 Single GPU"),a3.forEach(t),l3.forEach(t),Yp=f(e),W=i(e,"UL",{});var $i=o(W);hl=i($i,"LI",{});var Mh=o(hl);$r=i(Mh,"P",{});var i3=o($r);sP=s(i3,"Model fits onto a single GPU:"),i3.forEach(t),nP=f(Mh),xr=i(Mh,"OL",{});var o3=o(xr);zr=i(o3,"LI",{});var r3=o(zr);pP=s(r3,"Normal use"),r3.forEach(t),o3.forEach(t),Mh.forEach(t),hP=f($i),fl=i($i,"LI",{});var $h=o(fl);Nr=i($h,"P",{});var s3=o(Nr);fP=s(s3,"Model doesn\u2019t fit onto a single GPU:"),s3.forEach(t),dP=f($h),dl=i($h,"OL",{});var xh=o(dl);Rr=i(xh,"LI",{});var n3=o(Rr);cP=s(n3,"ZeRO + Offload CPU and optionally NVMe"),n3.forEach(t),uP=f(xh),Zr=i(xh,"LI",{});var p3=o(Zr);mP=s(p3,"as above plus Memory Centric Tiling (see below for details) if the largest layer can\u2019t fit into a single GPU"),p3.forEach(t),xh.forEach(t),$h.forEach(t),vP=f($i),Cr=i($i,"LI",{});var h3=o(Cr);jr=i(h3,"P",{});var f3=o(jr);PP=s(f3,"Largest Layer not fitting into a single GPU:"),f3.forEach(t),h3.forEach(t),$i.forEach(t),Jp=f(e),Ei=i(e,"OL",{});var d3=o(Ei);de=i(d3,"LI",{});var xi=o(de);wP=s(xi,"ZeRO - Enable "),cl=i(xi,"A",{href:!0,rel:!0});var c3=o(cl);yP=s(c3,"Memory Centric Tiling"),c3.forEach(t),bP=s(xi," (MCT). It allows you to run arbitrarily large layers by automatically splitting them and executing them sequentially. MCT reduces the number of parameters that are live on a GPU, but it does not affect the activation memory. As this need is very rare as of this writing a manual override of "),Fr=i(xi,"CODE",{});var u3=o(Fr);_P=s(u3,"torch.nn.Linear"),u3.forEach(t),EP=s(xi," needs to be done by the user."),xi.forEach(t),d3.forEach(t),Kp=f(e),gi=i(e,"P",{});var m3=o(gi);Br=i(m3,"STRONG",{});var v3=o(Br);gP=s(v3,"\u21E8 Single Node / Multi-GPU"),v3.forEach(t),m3.forEach(t),Qp=f(e),V=i(e,"UL",{});var zi=o(V);ul=i(zi,"LI",{});var zh=o(ul);qr=i(zh,"P",{});var P3=o(qr);LP=s(P3,"Model fits onto a single GPU:"),P3.forEach(t),DP=f(zh),ml=i(zh,"OL",{});var Nh=o(ml);Wr=i(Nh,"LI",{});var w3=o(Wr);GP=s(w3,"DDP - Distributed DP"),w3.forEach(t),UP=f(Nh),Vr=i(Nh,"LI",{});var y3=o(Vr);kP=s(y3,"ZeRO - may or may not be faster depending on the situation and configuration used"),y3.forEach(t),Nh.forEach(t),zh.forEach(t),TP=f(zi),vl=i(zi,"LI",{});var Rh=o(vl);Hr=i(Rh,"P",{});var b3=o(Hr);IP=s(b3,"Model doesn\u2019t fit onto a single GPU:"),b3.forEach(t),SP=f(Rh),ce=i(Rh,"OL",{});var Ni=o(ce);Xr=i(Ni,"LI",{});var _3=o(Xr);Yr=i(_3,"P",{});var E3=o(Yr);AP=s(E3,"PP"),E3.forEach(t),_3.forEach(t),OP=f(Ni),Jr=i(Ni,"LI",{});var g3=o(Jr);Kr=i(g3,"P",{});var L3=o(Kr);MP=s(L3,"ZeRO"),L3.forEach(t),g3.forEach(t),$P=f(Ni),ue=i(Ni,"LI",{});var Ri=o(ue);Qr=i(Ri,"P",{});var D3=o(Qr);xP=s(D3,"TP"),D3.forEach(t),zP=f(Ri),es=i(Ri,"P",{});var G3=o(es);NP=s(G3,"With very fast intra-node connectivity of NVLINK or NVSwitch all three should be mostly on par, without these PP will be faster than TP or ZeRO. The degree of TP may also make a difference. Best to experiment to find the winner on your particular setup."),G3.forEach(t),RP=f(Ri),ts=i(Ri,"P",{});var U3=o(ts);ZP=s(U3,"TP is almost always used within a single node. 
That is TP size <= gpus per node."),U3.forEach(t),Ri.forEach(t),Ni.forEach(t),Rh.forEach(t),CP=f(zi),Pl=i(zi,"LI",{});var Zh=o(Pl);ls=i(Zh,"P",{});var k3=o(ls);jP=s(k3,"Largest Layer not fitting into a single GPU:"),k3.forEach(t),FP=f(Zh),wl=i(Zh,"OL",{});var Ch=o(wl);as=i(Ch,"LI",{});var T3=o(as);BP=s(T3,"If not using ZeRO - must use TP, as PP alone won\u2019t be able to fit."),T3.forEach(t),qP=f(Ch),is=i(Ch,"LI",{});var I3=o(is);WP=s(I3,"With ZeRO see the same entry for \u201CSingle GPU\u201D above"),I3.forEach(t),Ch.forEach(t),Zh.forEach(t),zi.forEach(t),eh=f(e),Li=i(e,"P",{});var S3=o(Li);os=i(S3,"STRONG",{});var A3=o(os);VP=s(A3,"\u21E8 Multi-Node / Multi-GPU"),A3.forEach(t),S3.forEach(t),th=f(e),Fe=i(e,"UL",{});var jh=o(Fe);yl=i(jh,"LI",{});var Fh=o(yl);rs=i(Fh,"P",{});var O3=o(rs);HP=s(O3,"When you have fast inter-node connectivity:"),O3.forEach(t),XP=f(Fh),bl=i(Fh,"OL",{});var Bh=o(bl);ss=i(Bh,"LI",{});var M3=o(ss);YP=s(M3,"ZeRO - as it requires close to no modifications to the model"),M3.forEach(t),JP=f(Bh),ns=i(Bh,"LI",{});var $3=o(ns);KP=s($3,"PP+TP+DP - less communications, but requires massive changes to the model"),$3.forEach(t),Bh.forEach(t),Fh.forEach(t),QP=f(jh),_l=i(jh,"LI",{});var qh=o(_l);ps=i(qh,"P",{});var x3=o(ps);e1=s(x3,"when you have slow inter-node connectivity and still low on GPU memory:"),x3.forEach(t),t1=f(qh),hs=i(qh,"OL",{});var z3=o(hs);fs=i(z3,"LI",{});var N3=o(fs);l1=s(N3,"DP+PP+TP+ZeRO-1"),N3.forEach(t),z3.forEach(t),qh.forEach(t),jh.forEach(t),this.h()},h(){d($,"name","hf:doc:metadata"),d($,"content",JSON.stringify(q3)),d(M,"id","model-parallelism"),d(M,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(M,"href","#model-parallelism"),d(U,"class","relative group"),d(me,"id","parallelism-overview"),d(me,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(me,"href","#parallelism-overview"),d(Q,"class","relative group"),d(Pe,"id","concepts"),d(Pe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(Pe,"href","#concepts"),d(ee,"class","relative group"),d(we,"id","data-parallelism"),d(we,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(we,"href","#data-parallelism"),d(te,"class","relative group"),d(ye,"id","zero-data-parallelism"),d(ye,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(ye,"href","#zero-data-parallelism"),d(le,"class","relative 
group"),d(et,"href","https://www.microsoft.com/en-us/research/blog/zero-deepspeed-new-system-optimizations-enable-training-models-with-over-100-billion-parameters/"),d(et,"rel","nofollow"),K(Ul.src,E1="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/parallelism-zero.png")||d(Ul,"src",E1),d(Ul,"alt","DeepSpeed-Image-1"),d(it,"href","https://www.deepspeed.ai/features/#the-zero-redundancy-optimizer"),d(it,"rel","nofollow"),d(ot,"href","https://github.com/facebookresearch/fairscale/#optimizer-state-sharding-zero"),d(ot,"rel","nofollow"),d(rt,"href","main_classes/trainer#trainer-integrations"),d(_e,"id","naive-model-parallelism-vertical-and-pipeline-parallelism"),d(_e,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(_e,"href","#naive-model-parallelism-vertical-and-pipeline-parallelism"),d(ie,"class","relative group"),d(pt,"href","https://ai.googleblog.com/2019/03/introducing-gpipe-open-source-library.html"),d(pt,"rel","nofollow"),K(la.src,g1="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/parallelism-gpipe-bubble.png")||d(la,"src",g1),d(la,"alt","mp-pp"),d(ft,"href","https://github.com/pytorch/pytorch/pull/50693"),d(ft,"rel","nofollow"),d(ct,"href","https://pytorch.org/docs/stable/pipeline.html"),d(ct,"rel","nofollow"),d(ut,"href","https://github.com/pytorch/pytorch/blob/master/benchmarks/distributed/pipeline/pipe.py"),d(ut,"rel","nofollow"),d(mt,"href","https://fairscale.readthedocs.io/en/latest/tutorials/pipe.html"),d(mt,"rel","nofollow"),d(vt,"href","https://www.deepspeed.ai/tutorials/pipeline/"),d(vt,"rel","nofollow"),d(Pt,"href","https://github.com/NVIDIA/Megatron-LM"),d(Pt,"rel","nofollow"),d(wt,"href","https://github.com/microsoft/varuna"),d(wt,"rel","nofollow"),d(yt,"href","https://arxiv.org/abs/2111.05972"),d(yt,"rel","nofollow"),d(bt,"href","https://github.com/tunib-ai/oslo"),d(bt,"rel","nofollow"),d(_t,"href","https://docs.aws.amazon.com/sagemaker/latest/dg/model-parallel-core-features.html"),d(_t,"rel","nofollow"),K(Pa.src,L1="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/parallelism-sagemaker-interleaved-pipeline.png")||d(Pa,"src",L1),d(Pa,"alt","interleaved-pipeline-execution"),d(Ie,"id","tensor-parallelism"),d(Ie,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(Ie,"href","#tensor-parallelism"),d(re,"class","relative group"),d(gt,"href","https://github.com/NVIDIA/Megatron-LM"),d(gt,"rel","nofollow"),d(Lt,"href","https://arxiv.org/abs/2104.04473"),d(Lt,"rel","nofollow"),K(_a.src,D1="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/parallelism-tp-parallel_gemm.png")||d(_a,"src",D1),d(_a,"alt","Parallel GEMM"),K(Ea.src,G1="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/parallelism-tp-independent-gelu.png")||d(Ea,"src",G1),d(Ea,"alt","independent GeLU"),K(ga.src,U1="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/parallelism-tp-parallel_shard_processing.png")||d(ga,"src",U1),d(ga,"alt","parallel shard processing"),K(La.src,k1="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/parallelism-tp-parallel_self_attention.png")||d(La,"src",k1),d(La,"alt","parallel 
self-attention"),d(kt,"href","https://github.com/huggingface/transformers/issues/10321#issuecomment-783543530"),d(kt,"rel","nofollow"),d(Tt,"href","https://github.com/anton-l"),d(Tt,"rel","nofollow"),d(It,"href","https://www.deepspeed.ai/features/#model-parallelism"),d(It,"rel","nofollow"),d(St,"href","https://github.com/NVIDIA/Megatron-LM"),d(St,"rel","nofollow"),d(At,"href","https://github.com/tunib-ai/parallelformers"),d(At,"rel","nofollow"),d(Ot,"href","https://arxiv.org/abs/2111.05972"),d(Ot,"rel","nofollow"),d(Mt,"href","https://github.com/tunib-ai/oslo"),d(Mt,"rel","nofollow"),d(xt,"href","https://github.com/tunib-ai/parallelformers"),d(xt,"rel","nofollow"),d(zt,"href","https://www.deepspeed.ai/tutorials/inference-tutorial/"),d(zt,"rel","nofollow"),d(Se,"id","dppp"),d(Se,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(Se,"href","#dppp"),d(se,"class","relative group"),d(Rt,"href","https://www.deepspeed.ai/tutorials/pipeline/"),d(Rt,"rel","nofollow"),K(Na.src,T1="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/parallelism-zero-dp-pp.png")||d(Na,"src",T1),d(Na,"alt","dp-pp-2d"),d(Zt,"href","https://github.com/microsoft/DeepSpeed"),d(Zt,"rel","nofollow"),d(Ct,"href","https://github.com/NVIDIA/Megatron-LM"),d(Ct,"rel","nofollow"),d(jt,"href","https://github.com/microsoft/varuna"),d(jt,"rel","nofollow"),d(Ft,"href","https://arxiv.org/abs/2111.05972"),d(Ft,"rel","nofollow"),d(Bt,"href","https://github.com/tunib-ai/oslo"),d(Bt,"rel","nofollow"),d(Oe,"id","dppptp"),d(Oe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(Oe,"href","#dppptp"),d(ne,"class","relative group"),K(qa.src,I1="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/parallelism-deepspeed-3d.png")||d(qa,"src",I1),d(qa,"alt","dp-pp-tp-3d"),d(Wt,"href","https://www.microsoft.com/en-us/research/blog/deepspeed-extreme-scale-model-training-for-everyone/"),d(Wt,"rel","nofollow"),d(Vt,"href","https://github.com/microsoft/DeepSpeed"),d(Vt,"rel","nofollow"),d(Ht,"href","https://github.com/NVIDIA/Megatron-LM"),d(Ht,"rel","nofollow"),d(Xt,"href","https://github.com/microsoft/varuna"),d(Xt,"rel","nofollow"),d(Yt,"href","https://arxiv.org/abs/2111.05972"),d(Yt,"rel","nofollow"),d(Jt,"href","https://github.com/tunib-ai/oslo"),d(Jt,"rel","nofollow"),d($e,"id","zero-dppptp"),d($e,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d($e,"href","#zero-dppptp"),d(pe,"class","relative group"),d(Ya,"href","#zero-data-parallelism"),d(Qt,"href","https://github.com/microsoft/Megatron-DeepSpeed"),d(Qt,"rel","nofollow"),d(el,"href","https://github.com/bigscience-workshop/Megatron-DeepSpeed"),d(el,"rel","nofollow"),d(tl,"href","https://github.com/tunib-ai/oslo"),d(tl,"rel","nofollow"),d(ll,"href","https://arxiv.org/abs/2201.11990"),d(ll,"rel","nofollow"),d(Ze,"id","flexflow"),d(Ze,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(Ze,"href","#flexflow"),d(he,"class","relative 
group"),d(ol,"href","https://github.com/flexflow/FlexFlow"),d(ol,"rel","nofollow"),d(sl,"href","https://arxiv.org/abs/1807.05358"),d(sl,"rel","nofollow"),K(vi.src,S1="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/parallelism-flexflow.jpeg")||d(vi,"src",S1),d(vi,"alt","flex-flow-soap"),d(nl,"href","https://github.com/huggingface/transformers/blob/master/src/transformers/utils/fx.py"),d(nl,"rel","nofollow"),d(je,"id","which-strategy-to-use-when"),d(je,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(je,"href","#which-strategy-to-use-when"),d(fe,"class","relative group"),d(cl,"href","https://deepspeed.readthedocs.io/en/latest/zero3.html#memory-centric-tiling"),d(cl,"rel","nofollow")},m(e,n){l(document.head,$),p(e,El,n),p(e,U,n),l(U,M),l(M,Zi),w(Xe,Zi,null),l(U,Hh),l(U,Ci),l(Ci,Xh),p(e,ms,n),p(e,Q,n),l(Q,me),l(me,ji),w(Ye,ji,null),l(Q,Yh),l(Q,Fi),l(Fi,Jh),p(e,vs,n),p(e,gl,n),l(gl,Kh),p(e,Ps,n),p(e,ve,n),l(ve,Bi),l(Bi,Qh),l(ve,ef),l(ve,qi),l(qi,tf),p(e,ws,n),p(e,Ll,n),l(Ll,lf),p(e,ys,n),p(e,Dl,n),l(Dl,af),p(e,bs,n),p(e,ee,n),l(ee,Pe),l(Pe,Wi),w(Je,Wi,null),l(ee,of),l(ee,Vi),l(Vi,rf),p(e,_s,n),p(e,Gl,n),l(Gl,sf),p(e,Es,n),p(e,E,n),l(E,Hi),l(Hi,nf),l(E,pf),l(E,Xi),l(Xi,hf),l(E,ff),l(E,Yi),l(Yi,df),l(E,cf),l(E,Ji),l(Ji,uf),l(E,mf),l(E,Ki),l(Ki,vf),p(e,gs,n),p(e,te,n),l(te,we),l(we,Qi),w(Ke,Qi,null),l(te,Pf),l(te,eo),l(eo,wf),p(e,Ls,n),p(e,x,n),l(x,yf),l(x,to),l(to,bf),l(x,_f),l(x,lo),l(lo,Ef),l(x,gf),p(e,Ds,n),p(e,le,n),l(le,ye),l(ye,ao),w(Qe,ao,null),l(le,Lf),l(le,io),l(io,Df),p(e,Gs,n),p(e,ae,n),l(ae,Gf),l(ae,et),l(et,Uf),l(ae,kf),l(ae,Ul),p(e,Us,n),p(e,be,n),l(be,Tf),l(be,oo),l(oo,If),l(be,Sf),p(e,ks,n),p(e,kl,n),l(kl,Af),p(e,Ts,n),w(tt,e,n),p(e,Is,n),p(e,Tl,n),l(Tl,Of),p(e,Ss,n),p(e,Il,n),l(Il,Mf),p(e,As,n),w(lt,e,n),p(e,Os,n),p(e,Sl,n),l(Sl,$f),p(e,Ms,n),p(e,Al,n),l(Al,xf),p(e,$s,n),w(at,e,n),p(e,xs,n),p(e,Ol,n),l(Ol,zf),p(e,zs,n),p(e,Ml,n),l(Ml,Nf),p(e,Ns,n),p(e,$l,n),l($l,Rf),p(e,Rs,n),p(e,xl,n),l(xl,Zf),p(e,Zs,n),p(e,zl,n),l(zl,Cf),p(e,Cs,n),p(e,Nl,n),l(Nl,jf),p(e,js,n),p(e,Rl,n),l(Rl,Ff),p(e,Fs,n),p(e,Zl,n),l(Zl,Bf),p(e,Bs,n),p(e,Cl,n),l(Cl,qf),p(e,qs,n),p(e,z,n),l(z,ro),l(ro,Wf),l(z,Vf),l(z,so),l(so,Hf),l(z,Xf),l(z,no),l(no,Yf),p(e,Ws,n),p(e,jl,n),l(jl,Jf),p(e,Vs,n),p(e,Fl,n),l(Fl,Kf),p(e,Hs,n),p(e,Bl,n),l(Bl,Qf),p(e,Xs,n),p(e,ql,n),l(ql,ed),p(e,Ys,n),p(e,Wl,n),l(Wl,td),p(e,Js,n),p(e,N,n),l(N,Vl),l(Vl,it),l(it,ld),l(Vl,ad),l(N,id),l(N,Hl),l(Hl,ot),l(ot,od),l(Hl,rd),l(N,sd),l(N,po),l(po,rt),l(rt,ho),l(ho,nd),l(rt,pd),p(e,Ks,n),p(e,ie,n),l(ie,_e),l(_e,fo),w(st,fo,null),l(ie,hd),l(ie,co),l(co,fd),p(e,Qs,n),p(e,Ee,n),l(Ee,dd),l(Ee,uo),l(uo,cd),l(Ee,ud),p(e,en,n),p(e,Xl,n),l(Xl,md),p(e,tn,n),w(nt,e,n),p(e,ln,n),p(e,Yl,n),l(Yl,vd),p(e,an,n),p(e,Jl,n),l(Jl,Pd),p(e,on,n),p(e,Kl,n),l(Kl,wd),p(e,rn,n),p(e,Ql,n),l(Ql,yd),p(e,sn,n),p(e,ge,n),l(ge,mo),l(mo,bd),l(ge,_d),l(ge,vo),l(vo,Ed),p(e,nn,n),p(e,ea,n),l(ea,gd),p(e,pn,n),p(e,Le,n),l(Le,Ld),l(Le,pt),l(pt,Dd),l(Le,Gd),p(e,hn,n),p(e,ta,n),l(ta,la),p(e,fn,n),p(e,aa,n),l(aa,Ud),p(e,dn,n),p(e,ia,n),l(ia,kd),p(e,cn,n),p(e,R,n),l(R,Td),l(R,Po),l(Po,Id),l(R,Sd),l(R,wo),l(wo,Ad),l(R,Od),p(e,un,n),p(e,De,n),l(De,Md),l(De,yo),l(yo,$d),l(De,xd),p(e,mn,n),p(e,Ge,n),l(Ge,zd),l(Ge,bo),l(bo,Nd),l(Ge,Rd),p(e,vn,n),p(e,Z,n),l(Z,Zd),l(Z,_o),l(_o,Cd),l(Z,jd),l(Z,Eo),l(Eo,Fd),l(Z,Bd),p(e,Pn,n),p(e,oa,n),l(oa,qd),p(e,wn,n),p(e,C,n),l(C,Wd),l(C,go),l(go,Vd),l(C,Hd),l(C,Lo),l(Lo,Xd),l(C,Yd),p(e,yn,n),p(e,k,n),l(k,J
d),l(k,Do),l(Do,Kd),l(k,Qd),l(k,Go),l(Go,ec),l(k,tc),l(k,Uo),l(Uo,lc),l(k,ac),p(e,bn,n),p(e,ra,n),l(ra,ic),p(e,_n,n),p(e,sa,n),l(sa,oc),p(e,En,n),p(e,T,n),l(T,ko),l(ko,rc),l(T,sc),l(T,To),l(To,nc),l(T,pc),l(T,Io),l(Io,hc),l(T,fc),l(T,So),l(So,dc),p(e,gn,n),p(e,na,n),l(na,cc),p(e,Ln,n),p(e,Ue,n),l(Ue,Ao),l(Ao,uc),l(Ue,mc),l(Ue,Oo),l(Oo,vc),p(e,Dn,n),p(e,pa,n),l(pa,Pc),p(e,Gn,n),p(e,I,n),l(I,ht),l(ht,wc),l(ht,Mo),l(Mo,yc),l(ht,bc),l(I,_c),l(I,ha),l(ha,Ec),l(ha,ft),l(ft,gc),l(I,Lc),l(I,$o),l($o,Dc),l(I,Gc),l(I,xo),l(xo,Uc),p(e,Un,n),p(e,fa,n),l(fa,kc),p(e,kn,n),p(e,da,n),l(da,Tc),p(e,Tn,n),p(e,u,n),l(u,dt),l(dt,ct),l(ct,Ic),l(dt,Sc),l(dt,ut),l(ut,Ac),l(u,Oc),l(u,zo),l(zo,mt),l(mt,Mc),l(u,$c),l(u,No),l(No,vt),l(vt,xc),l(u,zc),l(u,ca),l(ca,Pt),l(Pt,Nc),l(ca,Rc),l(u,Zc),l(u,Ro),l(Ro,wt),l(wt,Cc),l(u,jc),l(u,ua),l(ua,yt),l(yt,Fc),l(ua,Bc),l(u,qc),l(u,ma),l(ma,bt),l(bt,Wc),l(ma,Vc),p(e,In,n),p(e,ke,n),l(ke,Hc),l(ke,Zo),l(Zo,Xc),l(ke,Yc),p(e,Sn,n),p(e,va,n),l(va,Jc),p(e,An,n),p(e,oe,n),l(oe,Kc),l(oe,_t),l(_t,Qc),l(oe,eu),l(oe,Pa),p(e,On,n),p(e,wa,n),l(wa,tu),p(e,Mn,n),p(e,ya,n),l(ya,lu),p(e,$n,n),p(e,Te,n),l(Te,au),l(Te,Co),l(Co,iu),l(Te,ou),p(e,xn,n),p(e,re,n),l(re,Ie),l(Ie,jo),w(Et,jo,null),l(re,ru),l(re,Fo),l(Fo,su),p(e,zn,n),p(e,ba,n),l(ba,nu),p(e,Nn,n),p(e,j,n),l(j,pu),l(j,gt),l(gt,hu),l(j,fu),l(j,Lt),l(Lt,du),l(j,cu),p(e,Rn,n),p(e,F,n),l(F,uu),l(F,Bo),l(Bo,mu),l(F,vu),l(F,qo),l(qo,Pu),l(F,wu),p(e,Zn,n),p(e,g,n),l(g,yu),l(g,Wo),l(Wo,bu),l(g,_u),l(g,Vo),l(Vo,Eu),l(g,gu),l(g,Ho),l(Ho,Lu),l(g,Du),l(g,Xo),l(Xo,Gu),l(g,Uu),p(e,Cn,n),p(e,Dt,n),l(Dt,ku),l(Dt,_a),p(e,jn,n),p(e,c,n),l(c,Tu),l(c,Yo),l(Yo,Iu),l(c,Su),l(c,Jo),l(Jo,Au),l(c,Ou),l(c,Ko),l(Ko,Mu),l(c,$u),l(c,Qo),l(Qo,xu),l(c,zu),l(c,er),l(er,Nu),l(c,Ru),l(c,tr),l(tr,Zu),l(c,Cu),l(c,lr),l(lr,ju),l(c,Fu),l(c,Ea),p(e,Fn,n),p(e,Gt,n),l(Gt,Bu),l(Gt,ga),p(e,Bn,n),p(e,Ut,n),l(Ut,qu),l(Ut,La),p(e,qn,n),p(e,Da,n),l(Da,Wu),p(e,Wn,n),p(e,B,n),l(B,Vu),l(B,kt),l(kt,Hu),l(B,Xu),l(B,Tt),l(Tt,Yu),l(B,Ju),p(e,Vn,n),p(e,Ga,n),l(Ga,Ku),p(e,Hn,n),p(e,Ua,n),l(Ua,Qu),p(e,Xn,n),p(e,ka,n),l(ka,Ta),l(Ta,em),l(Ta,It),l(It,tm),p(e,Yn,n),p(e,Ia,n),l(Ia,lm),p(e,Jn,n),p(e,S,n),l(S,Sa),l(Sa,St),l(St,am),l(Sa,im),l(S,om),l(S,Aa),l(Aa,At),l(At,rm),l(Aa,sm),l(S,nm),l(S,Oa),l(Oa,Ot),l(Ot,pm),l(Oa,hm),l(S,fm),l(S,Ma),l(Ma,Mt),l(Mt,dm),l(Ma,cm),p(e,Kn,n),p(e,$a,n),l($a,um),p(e,Qn,n),p(e,q,n),l(q,ar),l(ar,mm),l(q,vm),l(q,$t),l($t,Pm),l($t,xt),l(xt,wm),l($t,ym),l(q,bm),l(q,xa),l(xa,_m),l(xa,zt),l(zt,Em),p(e,ep,n),p(e,se,n),l(se,Se),l(Se,ir),w(Nt,ir,null),l(se,gm),l(se,or),l(or,Lm),p(e,tp,n),p(e,Ae,n),l(Ae,Dm),l(Ae,Rt),l(Rt,Gm),l(Ae,Um),p(e,lp,n),p(e,za,n),l(za,Na),p(e,ap,n),p(e,Ra,n),l(Ra,km),p(e,ip,n),p(e,Za,n),l(Za,Tm),p(e,op,n),p(e,Ca,n),l(Ca,Im),p(e,rp,n),p(e,L,n),l(L,rr),l(rr,Zt),l(Zt,Sm),l(L,Am),l(L,sr),l(sr,Ct),l(Ct,Om),l(L,Mm),l(L,nr),l(nr,jt),l(jt,$m),l(L,xm),l(L,pr),l(pr,Ft),l(Ft,zm),l(L,Nm),l(L,hr),l(hr,Bt),l(Bt,Rm),p(e,sp,n),p(e,ja,n),l(ja,Zm),p(e,np,n),p(e,ne,n),l(ne,Oe),l(Oe,fr),w(qt,fr,null),l(ne,Cm),l(ne,dr),l(dr,jm),p(e,pp,n),p(e,Fa,n),l(Fa,Fm),p(e,hp,n),p(e,Ba,n),l(Ba,qa),p(e,fp,n),p(e,Me,n),l(Me,Bm),l(Me,Wt),l(Wt,qm),l(Me,Wm),p(e,dp,n),p(e,Wa,n),l(Wa,Vm),p(e,cp,n),p(e,Va,n),l(Va,Hm),p(e,up,n),p(e,D,n),l(D,Ha),l(Ha,Vt),l(Vt,Xm),l(Ha,Ym),l(D,Jm),l(D,cr),l(cr,Ht),l(Ht,Km),l(D,Qm),l(D,ur),l(ur,Xt),l(Xt,ev),l(D,tv),l(D,mr),l(mr,Yt),l(Yt,lv),l(D,av),l(D,vr),l(vr,Jt),l(Jt,iv),p(e,mp,n),p(e,Xa,n),l(Xa,ov),p(e,vp,n),p(e,pe,n),l(pe,$e),l($e,Pr),w(Kt,Pr,null),l(pe,rv),l(pe,wr),l(wr,sv),p(e,Pp,n),p(e,xe,n),l(xe,nv),l(xe,Ya),l(Ya,pv),l(xe,hv),p(e,wp,n),p(e,Ja,n),l(Ja,fv),p(e,
yp,n),p(e,Ka,n),l(Ka,dv),p(e,bp,n),p(e,ze,n),l(ze,cv),l(ze,yr),l(yr,uv),l(ze,mv),p(e,_p,n),p(e,Qa,n),l(Qa,vv),p(e,Ep,n),p(e,ei,n),l(ei,Pv),p(e,gp,n),p(e,ti,n),l(ti,wv),p(e,Lp,n),p(e,Ne,n),l(Ne,Re),l(Re,Qt),l(Qt,yv),l(Re,bv),l(Re,el),l(el,_v),l(Re,Ev),l(Ne,gv),l(Ne,br),l(br,tl),l(tl,Lv),p(e,Dp,n),p(e,li,n),l(li,Dv),p(e,Gp,n),p(e,ai,n),l(ai,_r),l(_r,ll),l(ll,Gv),p(e,Up,n),p(e,ii,n),l(ii,Uv),p(e,kp,n),p(e,he,n),l(he,Ze),l(Ze,Er),w(al,Er,null),l(he,kv),l(he,gr),l(gr,Tv),p(e,Tp,n),p(e,il,n),l(il,ol),l(ol,Iv),l(il,Sv),p(e,Ip,n),p(e,rl,n),l(rl,Av),l(rl,sl),l(sl,Ov),p(e,Sp,n),p(e,oi,n),l(oi,Mv),p(e,Ap,n),p(e,A,n),l(A,Lr),l(Lr,$v),l(A,xv),l(A,Dr),l(Dr,zv),l(A,Nv),l(A,Gr),l(Gr,Rv),l(A,Zv),l(A,Ur),l(Ur,Cv),p(e,Op,n),p(e,ri,n),l(ri,jv),p(e,Mp,n),p(e,si,n),l(si,kr),l(kr,Fv),p(e,$p,n),p(e,ni,n),l(ni,Bv),p(e,xp,n),p(e,pi,n),l(pi,Tr),l(Tr,qv),p(e,zp,n),p(e,hi,n),l(hi,Wv),p(e,Np,n),p(e,fi,n),l(fi,Ir),l(Ir,Vv),p(e,Rp,n),p(e,di,n),l(di,Hv),p(e,Zp,n),p(e,ci,n),l(ci,Sr),l(Sr,Xv),p(e,Cp,n),p(e,ui,n),l(ui,Yv),p(e,jp,n),p(e,mi,n),l(mi,vi),p(e,Fp,n),p(e,Pi,n),l(Pi,Jv),p(e,Bp,n),p(e,wi,n),l(wi,Kv),p(e,qp,n),p(e,yi,n),l(yi,Qv),p(e,Wp,n),p(e,Ce,n),l(Ce,eP),l(Ce,nl),l(nl,tP),l(Ce,lP),p(e,Vp,n),p(e,fe,n),l(fe,je),l(je,Ar),w(pl,Ar,null),l(fe,aP),l(fe,Or),l(Or,iP),p(e,Hp,n),p(e,bi,n),l(bi,oP),p(e,Xp,n),p(e,_i,n),l(_i,Mr),l(Mr,rP),p(e,Yp,n),p(e,W,n),l(W,hl),l(hl,$r),l($r,sP),l(hl,nP),l(hl,xr),l(xr,zr),l(zr,pP),l(W,hP),l(W,fl),l(fl,Nr),l(Nr,fP),l(fl,dP),l(fl,dl),l(dl,Rr),l(Rr,cP),l(dl,uP),l(dl,Zr),l(Zr,mP),l(W,vP),l(W,Cr),l(Cr,jr),l(jr,PP),p(e,Jp,n),p(e,Ei,n),l(Ei,de),l(de,wP),l(de,cl),l(cl,yP),l(de,bP),l(de,Fr),l(Fr,_P),l(de,EP),p(e,Kp,n),p(e,gi,n),l(gi,Br),l(Br,gP),p(e,Qp,n),p(e,V,n),l(V,ul),l(ul,qr),l(qr,LP),l(ul,DP),l(ul,ml),l(ml,Wr),l(Wr,GP),l(ml,UP),l(ml,Vr),l(Vr,kP),l(V,TP),l(V,vl),l(vl,Hr),l(Hr,IP),l(vl,SP),l(vl,ce),l(ce,Xr),l(Xr,Yr),l(Yr,AP),l(ce,OP),l(ce,Jr),l(Jr,Kr),l(Kr,MP),l(ce,$P),l(ce,ue),l(ue,Qr),l(Qr,xP),l(ue,zP),l(ue,es),l(es,NP),l(ue,RP),l(ue,ts),l(ts,ZP),l(V,CP),l(V,Pl),l(Pl,ls),l(ls,jP),l(Pl,FP),l(Pl,wl),l(wl,as),l(as,BP),l(wl,qP),l(wl,is),l(is,WP),p(e,eh,n),p(e,Li,n),l(Li,os),l(os,VP),p(e,th,n),p(e,Fe,n),l(Fe,yl),l(yl,rs),l(rs,HP),l(yl,XP),l(yl,bl),l(bl,ss),l(ss,YP),l(bl,JP),l(bl,ns),l(ns,KP),l(Fe,QP),l(Fe,_l),l(_l,ps),l(ps,e1),l(_l,t1),l(_l,hs),l(hs,fs),l(fs,l1),lh=!0},p:F3,i(e){lh||(y(Xe.$$.fragment,e),y(Ye.$$.fragment,e),y(Je.$$.fragment,e),y(Ke.$$.fragment,e),y(Qe.$$.fragment,e),y(tt.$$.fragment,e),y(lt.$$.fragment,e),y(at.$$.fragment,e),y(st.$$.fragment,e),y(nt.$$.fragment,e),y(Et.$$.fragment,e),y(Nt.$$.fragment,e),y(qt.$$.fragment,e),y(Kt.$$.fragment,e),y(al.$$.fragment,e),y(pl.$$.fragment,e),lh=!0)},o(e){b(Xe.$$.fragment,e),b(Ye.$$.fragment,e),b(Je.$$.fragment,e),b(Ke.$$.fragment,e),b(Qe.$$.fragment,e),b(tt.$$.fragment,e),b(lt.$$.fragment,e),b(at.$$.fragment,e),b(st.$$.fragment,e),b(nt.$$.fragment,e),b(Et.$$.fragment,e),b(Nt.$$.fragment,e),b(qt.$$.fragment,e),b(Kt.$$.fragment,e),b(al.$$.fragment,e),b(pl.$$.fragment,e),lh=!1},d(e){t($),e&&t(El),e&&t(U),_(Xe),e&&t(ms),e&&t(Q),_(Ye),e&&t(vs),e&&t(gl),e&&t(Ps),e&&t(ve),e&&t(ws),e&&t(Ll),e&&t(ys),e&&t(Dl),e&&t(bs),e&&t(ee),_(Je),e&&t(_s),e&&t(Gl),e&&t(Es),e&&t(E),e&&t(gs),e&&t(te),_(Ke),e&&t(Ls),e&&t(x),e&&t(Ds),e&&t(le),_(Qe),e&&t(Gs),e&&t(ae),e&&t(Us),e&&t(be),e&&t(ks),e&&t(kl),e&&t(Ts),_(tt,e),e&&t(Is),e&&t(Tl),e&&t(Ss),e&&t(Il),e&&t(As),_(lt,e),e&&t(Os),e&&t(Sl),e&&t(Ms),e&&t(Al),e&&t($s),_(at,e),e&&t(xs),e&&t(Ol),e&&t(zs),e&&t(Ml),e&&t(Ns),e&&t($l),e&&t(Rs),e&&t(xl),e&&t(Zs),e&&t(zl),e&&t(Cs),e&&t(Nl),e&&t(js),e&&t(Rl),e&&t(Fs),e&&t(Zl),e&&t(Bs)
,e&&t(Cl),e&&t(qs),e&&t(z),e&&t(Ws),e&&t(jl),e&&t(Vs),e&&t(Fl),e&&t(Hs),e&&t(Bl),e&&t(Xs),e&&t(ql),e&&t(Ys),e&&t(Wl),e&&t(Js),e&&t(N),e&&t(Ks),e&&t(ie),_(st),e&&t(Qs),e&&t(Ee),e&&t(en),e&&t(Xl),e&&t(tn),_(nt,e),e&&t(ln),e&&t(Yl),e&&t(an),e&&t(Jl),e&&t(on),e&&t(Kl),e&&t(rn),e&&t(Ql),e&&t(sn),e&&t(ge),e&&t(nn),e&&t(ea),e&&t(pn),e&&t(Le),e&&t(hn),e&&t(ta),e&&t(fn),e&&t(aa),e&&t(dn),e&&t(ia),e&&t(cn),e&&t(R),e&&t(un),e&&t(De),e&&t(mn),e&&t(Ge),e&&t(vn),e&&t(Z),e&&t(Pn),e&&t(oa),e&&t(wn),e&&t(C),e&&t(yn),e&&t(k),e&&t(bn),e&&t(ra),e&&t(_n),e&&t(sa),e&&t(En),e&&t(T),e&&t(gn),e&&t(na),e&&t(Ln),e&&t(Ue),e&&t(Dn),e&&t(pa),e&&t(Gn),e&&t(I),e&&t(Un),e&&t(fa),e&&t(kn),e&&t(da),e&&t(Tn),e&&t(u),e&&t(In),e&&t(ke),e&&t(Sn),e&&t(va),e&&t(An),e&&t(oe),e&&t(On),e&&t(wa),e&&t(Mn),e&&t(ya),e&&t($n),e&&t(Te),e&&t(xn),e&&t(re),_(Et),e&&t(zn),e&&t(ba),e&&t(Nn),e&&t(j),e&&t(Rn),e&&t(F),e&&t(Zn),e&&t(g),e&&t(Cn),e&&t(Dt),e&&t(jn),e&&t(c),e&&t(Fn),e&&t(Gt),e&&t(Bn),e&&t(Ut),e&&t(qn),e&&t(Da),e&&t(Wn),e&&t(B),e&&t(Vn),e&&t(Ga),e&&t(Hn),e&&t(Ua),e&&t(Xn),e&&t(ka),e&&t(Yn),e&&t(Ia),e&&t(Jn),e&&t(S),e&&t(Kn),e&&t($a),e&&t(Qn),e&&t(q),e&&t(ep),e&&t(se),_(Nt),e&&t(tp),e&&t(Ae),e&&t(lp),e&&t(za),e&&t(ap),e&&t(Ra),e&&t(ip),e&&t(Za),e&&t(op),e&&t(Ca),e&&t(rp),e&&t(L),e&&t(sp),e&&t(ja),e&&t(np),e&&t(ne),_(qt),e&&t(pp),e&&t(Fa),e&&t(hp),e&&t(Ba),e&&t(fp),e&&t(Me),e&&t(dp),e&&t(Wa),e&&t(cp),e&&t(Va),e&&t(up),e&&t(D),e&&t(mp),e&&t(Xa),e&&t(vp),e&&t(pe),_(Kt),e&&t(Pp),e&&t(xe),e&&t(wp),e&&t(Ja),e&&t(yp),e&&t(Ka),e&&t(bp),e&&t(ze),e&&t(_p),e&&t(Qa),e&&t(Ep),e&&t(ei),e&&t(gp),e&&t(ti),e&&t(Lp),e&&t(Ne),e&&t(Dp),e&&t(li),e&&t(Gp),e&&t(ai),e&&t(Up),e&&t(ii),e&&t(kp),e&&t(he),_(al),e&&t(Tp),e&&t(il),e&&t(Ip),e&&t(rl),e&&t(Sp),e&&t(oi),e&&t(Ap),e&&t(A),e&&t(Op),e&&t(ri),e&&t(Mp),e&&t(si),e&&t($p),e&&t(ni),e&&t(xp),e&&t(pi),e&&t(zp),e&&t(hi),e&&t(Np),e&&t(fi),e&&t(Rp),e&&t(di),e&&t(Zp),e&&t(ci),e&&t(Cp),e&&t(ui),e&&t(jp),e&&t(mi),e&&t(Fp),e&&t(Pi),e&&t(Bp),e&&t(wi),e&&t(qp),e&&t(yi),e&&t(Wp),e&&t(Ce),e&&t(Vp),e&&t(fe),_(pl),e&&t(Hp),e&&t(bi),e&&t(Xp),e&&t(_i),e&&t(Yp),e&&t(W),e&&t(Jp),e&&t(Ei),e&&t(Kp),e&&t(gi),e&&t(Qp),e&&t(V),e&&t(eh),e&&t(Li),e&&t(th),e&&t(Fe)}}}const q3={local:"model-parallelism",sections:[{local:"parallelism-overview",title:"Parallelism overview"},{local:"concepts",title:"Concepts"},{local:"data-parallelism",title:"Data Parallelism"},{local:"zero-data-parallelism",title:"ZeRO Data Parallelism"},{local:"naive-model-parallelism-vertical-and-pipeline-parallelism",title:"Naive Model Parallelism (Vertical) and Pipeline Parallelism"},{local:"tensor-parallelism",title:"Tensor Parallelism"},{local:"dppp",title:"DP+PP"},{local:"dppptp",title:"DP+PP+TP"},{local:"zero-dppptp",title:"ZeRO DP+PP+TP"},{local:"flexflow",title:"FlexFlow"},{local:"which-strategy-to-use-when",title:"Which Strategy To Use When"}],title:"Model Parallelism"};function W3(Vh,$,El){let{fw:U}=$;return Vh.$$set=M=>{"fw"in M&&El(0,U=M.fw)},[U]}class J3 extends R3{constructor($){super();Z3(this,$,W3,B3,C3,{fw:0})}}export{J3 as default,q3 as metadata};
244
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages/contributing.mdx-24741e59.js
import{S as Gy,i as My,s as Uy,e as a,k as i,w as m,t as s,M as Yy,c as l,d as o,m as h,a as n,x as y,h as r,b as d,F as e,g as f,y as g,L as By,q as v,o as w,B as b}from"../chunks/vendor-4833417e.js";import{I as C}from"../chunks/IconCopyLink-4b81c553.js";import{C as $}from"../chunks/CodeBlock-6a3d1b46.js";import"../chunks/CopyButton-dacfbfaf.js";function zy(fn){let H,lo,x,W,Xo,Ye,dn,es,cn,va,no,pn,wa,io,mn,ba,be,yn,Be,gn,vn,$a,te,$e,ts,ze,wn,os,bn,_a,ho,$n,ka,A,ss,_n,kn,rs,En,Pn,as,In,Tn,ls,Sn,Ea,L,On,Je,Cn,xn,ns,An,Ln,is,Dn,Nn,Pa,uo,hs,qn,Ia,oe,_e,us,Ke,Rn,fs,Wn,Ta,fo,Fn,Sa,se,ke,ds,Qe,Hn,cs,jn,Oa,co,Gn,Ca,Ee,Mn,ps,Un,Yn,xa,po,Bn,Aa,j,S,zn,ms,Jn,Kn,ys,Qn,Zn,gs,Vn,Xn,vs,ei,ti,oi,ws,si,ri,Ze,ai,bs,li,ni,La,mo,ii,Da,Ve,Na,yo,hi,qa,Xe,Ra,re,Pe,$s,et,ui,_s,fi,Wa,go,di,Fa,G,ks,ci,pi,Es,mi,yi,Ps,gi,Ha,vo,vi,ja,M,wi,Is,bi,$i,tt,Ts,_i,ki,Ga,ae,Ie,Ss,ot,Ei,Os,Pi,Ma,wo,Ii,Ua,bo,Cs,Ti,Ya,U,xs,Si,Oi,As,Ci,xi,Ls,Ai,Ba,O,st,Li,Ds,Di,Ni,qi,rt,Ri,Ns,Wi,Fi,Hi,qs,ji,Gi,Rs,Mi,za,$o,Ui,Ja,Y,Yi,Ws,Bi,zi,at,Fs,Ji,Ki,Ka,le,Te,Hs,lt,Qi,js,Zi,Qa,_o,Vi,Za,P,Xi,Gs,eh,th,Ms,oh,sh,Us,rh,ah,nt,lh,nh,Va,ko,ih,Xa,_,Ys,it,hh,ht,uh,fh,dh,ut,Bs,ch,ph,ft,mh,ne,zs,yh,gh,dt,vh,Se,Js,wh,bh,Ks,$h,_h,kh,E,Qs,Eh,Ph,ct,Ih,ie,Th,Zs,Sh,Oh,Vs,Ch,xh,Ah,pt,Lh,Xs,Dh,Nh,qh,mt,Rh,he,Wh,er,Fh,Hh,tr,jh,Gh,Mh,c,or,Uh,Yh,sr,Bh,zh,yt,Jh,rr,Kh,Qh,gt,Zh,Eo,Vh,vt,Xh,eu,ue,tu,ar,ou,su,lr,ru,au,lu,wt,nu,nr,iu,hu,ir,uu,fu,bt,du,$t,cu,hr,pu,mu,yu,_t,gu,ur,vu,wu,kt,bu,Po,$u,Et,_u,ku,Pt,Eu,fr,Pu,Iu,Tu,It,Su,dr,Ou,Cu,Tt,xu,cr,Au,Lu,St,Du,Ot,Nu,pr,qu,Ru,Wu,fe,Fu,mr,Hu,ju,yr,Gu,Mu,Uu,Ct,Yu,xt,Bu,At,zu,Ju,Ku,gr,Qu,Zu,Lt,Vu,vr,Xu,ef,Dt,tf,wr,Nt,of,br,sf,rf,af,$r,_r,lf,el,de,Oe,kr,qt,nf,Er,hf,tl,k,Pr,uf,ff,Ir,df,cf,Rt,pf,Tr,mf,yf,gf,Sr,vf,wf,Io,bf,ce,Wt,$f,Or,_f,kf,Ef,pe,Pf,Cr,If,Tf,xr,Sf,Of,Cf,Ft,xf,Ar,Af,Lf,Df,Ht,Nf,Lr,qf,Rf,Wf,F,Ff,Dr,Hf,jf,jt,Nr,Gf,Mf,Gt,Uf,Yf,ol,Mt,Bf,To,zf,sl,me,Ce,qr,Ut,Jf,Rr,Kf,rl,B,Qf,Yt,Zf,Vf,Bt,Xf,ed,al,D,td,Wr,od,sd,Fr,rd,ad,Hr,ld,nd,ll,zt,nl,So,id,il,Jt,hl,N,hd,jr,ud,fd,Gr,dd,cd,Mr,pd,md,ul,Oo,yd,fl,z,gd,Ur,vd,wd,Yr,bd,$d,dl,Kt,cl,J,_d,Br,kd,Ed,zr,Pd,Id,pl,K,Td,Jr,Sd,Od,Kr,Cd,xd,ml,Q,Ad,Qr,Ld,Dd,Zr,Nd,qd,yl,Qt,gl,ye,xe,Vr,Zt,Rd,Xr,Wd,vl,Z,Fd,Vt,Hd,jd,Xt,Gd,Md,wl,ge,Ae,ea,eo,Ud,ta,Yd,bl,ve,Le,oa,to,Bd,sa,zd,$l,V,Jd,ra,Kd,Qd,aa,Zd,Vd,_l,Co,la,Xd,kl,xo,ec,El,q,Ao,oo,tc,oc,sc,na,rc,ac,De,lc,ia,nc,ic,ha,hc,uc,so,fc,ua,dc,cc,Pl,Ne,pc,fa,mc,yc,Il,we,qe,da,ro,gc,ca,vc,Tl,Lo,wc,Sl,Re,pa,bc,$c,ma,_c,Ol,ao,Cl;return Ye=new C({}),ze=new C({}),Ke=new C({}),Qe=new C({}),Ve=new $({props:{code:"transformers-cli env",highlighted:'transformers-cli <span class="hljs-built_in">env</span>'}}),Xe=new $({props:{code:"python src/transformers/commands/transformers_cli.py env",highlighted:'python src/transformers/commands/transformers_cli.py <span class="hljs-built_in">env</span>'}}),et=new C({}),ot=new C({}),lt=new C({}),ft=new $({props:{code:`$ git clone [email protected]:<your Github handle>/transformers.git $ cd transformers $ git remote add upstream https://github.com/huggingface/transformers.git`,highlighted:`$ git <span class="hljs-built_in">clone</span> [email protected]:&lt;your Github handle&gt;/transformers.git $ <span class="hljs-built_in">cd</span> transformers $ git remote add upstream https://github.com/huggingface/transformers.git`}}),dt=new $({props:{code:"$ git checkout -b a-descriptive-name-for-my-changes",highlighted:"$ git checkout -b a-descriptive-name-for-my-changes"}}),ct=new $({props:{code:'$ pip install -e ".[dev]"',highlighted:'$ pip install -e <span class="hljs-string">&quot;.[dev]&quot;</span>'}}),mt=new $({props:{code:`$ git 
clone https://github.com/huggingface/datasets $ cd datasets $ pip install -e .`,highlighted:`$ git <span class="hljs-built_in">clone</span> https://github.com/huggingface/datasets $ <span class="hljs-built_in">cd</span> datasets $ pip install -e .`}}),yt=new $({props:{code:"$ pytest tests/<TEST_TO_RUN>.py",highlighted:"$ pytest tests/&lt;TEST_TO_RUN&gt;.py"}}),gt=new $({props:{code:"$ make test",highlighted:'$ make <span class="hljs-built_in">test</span>'}}),wt=new $({props:{code:"$ make fixup",highlighted:"$ make fixup"}}),bt=new $({props:{code:"$ make style",highlighted:"$ make style"}}),_t=new $({props:{code:"$ make quality",highlighted:"$ make quality"}}),kt=new $({props:{code:"$ make repo-consistency",highlighted:"$ make repo-consistency"}}),It=new $({props:{code:"$ pip install git+https://github.com/huggingface/doc-builder",highlighted:"$ pip install git+https://github.com/huggingface/doc-builder"}}),Tt=new $({props:{code:'$ pip install ".[docs]"',highlighted:'$ pip install <span class="hljs-string">&quot;.[docs]&quot;</span>'}}),St=new $({props:{code:"$ doc-builder build transformers docs/source/ --build_dir ~/tmp/test-build",highlighted:"$ doc-builder build transformers docs/source/ --build_dir ~/tmp/test-build"}}),Ct=new $({props:{code:`$ git add modified_file.py $ git commit`,highlighted:`$ git add modified_file.py $ git commit`}}),Lt=new $({props:{code:`$ git fetch upstream $ git rebase upstream/master`,highlighted:`$ git fetch upstream $ git rebase upstream/master`}}),Dt=new $({props:{code:"$ git push -u origin a-descriptive-name-for-my-changes",highlighted:"$ git push -u origin a-descriptive-name-for-my-changes"}}),qt=new C({}),Ut=new C({}),zt=new $({props:{code:"$ python -m pytest -n auto --dist=loadfile -s -v ./tests/",highlighted:"$ python -m pytest -n auto --dist=loadfile -s -v ./tests/"}}),Jt=new $({props:{code:`$ pip install -r examples/xxx/requirements.txt # only needed the first time $ python -m pytest -n auto --dist=loadfile -s -v ./examples/`,highlighted:`$ pip install -r examples/xxx/requirements.txt <span class="hljs-comment"># only needed the first time</span> $ python -m pytest -n auto --dist=loadfile -s -v ./examples/`}}),Kt=new $({props:{code:`$ RUN_SLOW=yes python -m pytest -n auto --dist=loadfile -s -v ./tests/ $ RUN_SLOW=yes python -m pytest -n auto --dist=loadfile -s -v ./examples/`,highlighted:`$ RUN_SLOW=<span class="hljs-built_in">yes</span> python -m pytest -n auto --dist=loadfile -s -v ./tests/ $ RUN_SLOW=<span class="hljs-built_in">yes</span> python -m pytest -n auto --dist=loadfile -s -v ./examples/`}}),Qt=new $({props:{code:`$ python -m unittest discover -s tests -t . -v $ python -m unittest discover -s examples -t examples -v`,highlighted:`$ python -m unittest discover -s tests -t . 
-v $ python -m unittest discover -s examples -t examples -v`}}),Zt=new C({}),eo=new C({}),to=new C({}),ro=new C({}),ao=new $({props:{code:`$ git checkout -b your-branch-for-syncing $ git pull --squash --no-commit upstream master $ git commit -m '<your message without GitHub references>' $ git push --set-upstream origin your-branch-for-syncing`,highlighted:`<span class="hljs-symbol">$</span> git checkout -b your-branch-<span class="hljs-keyword">for</span>-syncing <span class="hljs-symbol">$</span> git pull --squash --<span class="hljs-keyword">no</span>-commit upstream master <span class="hljs-symbol">$</span> git commit -m <span class="hljs-string">&#x27;&lt;your message without GitHub references&gt;&#x27;</span> <span class="hljs-symbol">$</span> git push --<span class="hljs-keyword">set</span>-upstream <span class="hljs-comment">origin your-branch-for-syncing</span>`}}),{c(){H=a("meta"),lo=i(),x=a("h1"),W=a("a"),Xo=a("span"),m(Ye.$$.fragment),dn=i(),es=a("span"),cn=s("How to contribute to transformers?"),va=i(),no=a("p"),pn=s(`Everyone is welcome to contribute, and we value everybody\u2019s contribution. Code is thus not the only way to help the community. Answering questions, helping others, reaching out and improving the documentations are immensely valuable to the community.`),wa=i(),io=a("p"),mn=s(`It also helps us if you spread the word: reference the library from blog posts on the awesome projects it made possible, shout out on Twitter every time it has helped you, or simply star the repo to say \u201Cthank you\u201D.`),ba=i(),be=a("p"),yn=s(`Whichever way you choose to contribute, please be mindful to respect our `),Be=a("a"),gn=s("code of conduct"),vn=s("."),$a=i(),te=a("h2"),$e=a("a"),ts=a("span"),m(ze.$$.fragment),wn=i(),os=a("span"),bn=s("You can contribute in so many ways!"),_a=i(),ho=a("p"),$n=s("There are 4 ways you can contribute to transformers:"),ka=i(),A=a("ul"),ss=a("li"),_n=s("Fixing outstanding issues with the existing code;"),kn=i(),rs=a("li"),En=s("Implementing new models;"),Pn=i(),as=a("li"),In=s("Contributing to the examples or to the documentation;"),Tn=i(),ls=a("li"),Sn=s("Submitting issues related to bugs or desired new features."),Ea=i(),L=a("p"),On=s("In particular there is a special "),Je=a("a"),Cn=s(`Good First Issue`),xn=s(` listing. It will give you a list of open Issues that are open to anybody to work on. Just comment in the issue that you\u2019d like to work on it. In that same listing you will also find some Issues with `),ns=a("code"),An=s("Good Second Issue"),Ln=s(` label. These are typically slightly more complicated than the Issues with just `),is=a("code"),Dn=s("Good First Issue"),Nn=s(` label. But if you feel you know what you\u2019re doing, go for it.`),Pa=i(),uo=a("p"),hs=a("em"),qn=s("All are equally valuable to the community."),Ia=i(),oe=a("h2"),_e=a("a"),us=a("span"),m(Ke.$$.fragment),Rn=i(),fs=a("span"),Wn=s("Submitting a new issue or feature request"),Ta=i(),fo=a("p"),Fn=s(`Do your best to follow these guidelines when submitting an issue or a feature request. It will make it easier for us to come back to you quickly and with good feedback.`),Sa=i(),se=a("h3"),ke=a("a"),ds=a("span"),m(Qe.$$.fragment),Hn=i(),cs=a("span"),jn=s("Did you find a bug?"),Oa=i(),co=a("p"),Gn=s(`The \u{1F917} Transformers library is robust and reliable thanks to the users who notify us of the problems they encounter. 
So thank you for reporting an issue.`),Ca=i(),Ee=a("p"),Mn=s("First, we would really appreciate it if you could "),ps=a("strong"),Un=s(`make sure the bug was not already reported`),Yn=s(" (use the search bar on Github under Issues)."),xa=i(),po=a("p"),Bn=s("Did not find it? :( So we can act quickly on it, please follow these steps:"),Aa=i(),j=a("ul"),S=a("li"),zn=s("Include your "),ms=a("strong"),Jn=s("OS type and version"),Kn=s(", the versions of "),ys=a("strong"),Qn=s("Python"),Zn=s(", "),gs=a("strong"),Vn=s("PyTorch"),Xn=s(` and `),vs=a("strong"),ei=s("Tensorflow"),ti=s(" when applicable;"),oi=i(),ws=a("li"),si=s(`A short, self-contained, code snippet that allows us to reproduce the bug in less than 30s;`),ri=i(),Ze=a("li"),ai=s("Provide the "),bs=a("em"),li=s("full"),ni=s(" traceback if an exception is raised."),La=i(),mo=a("p"),ii=s("To get the OS and software versions automatically, you can run the following command:"),Da=i(),m(Ve.$$.fragment),Na=i(),yo=a("p"),hi=s("or from the root of the repository the following command:"),qa=i(),m(Xe.$$.fragment),Ra=i(),re=a("h3"),Pe=a("a"),$s=a("span"),m(et.$$.fragment),ui=i(),_s=a("span"),fi=s("Do you want to implement a new model?"),Wa=i(),go=a("p"),di=s("Awesome! Please provide the following information:"),Fa=i(),G=a("ul"),ks=a("li"),ci=s("Short description of the model and link to the paper;"),pi=i(),Es=a("li"),mi=s("Link to the implementation if it is open-source;"),yi=i(),Ps=a("li"),gi=s("Link to the model weights if they are available."),Ha=i(),vo=a("p"),vi=s(`If you are willing to contribute the model yourself, let us know so we can best guide you.`),ja=i(),M=a("p"),wi=s("We have added a "),Is=a("strong"),bi=s("detailed guide and templates"),$i=s(` to guide you in the process of adding a new model. You can find them in the `),tt=a("a"),Ts=a("code"),_i=s("templates"),ki=s(" folder."),Ga=i(),ae=a("h3"),Ie=a("a"),Ss=a("span"),m(ot.$$.fragment),Ei=i(),Os=a("span"),Pi=s("Do you want a new feature (that is not a model)?"),Ma=i(),wo=a("p"),Ii=s("A world-class feature request addresses the following points:"),Ua=i(),bo=a("ol"),Cs=a("li"),Ti=s("Motivation first:"),Ya=i(),U=a("ul"),xs=a("li"),Si=s(`Is it related to a problem/frustration with the library? If so, please explain why. Providing a code snippet that demonstrates the problem is best.`),Oi=i(),As=a("li"),Ci=s(`Is it related to something you would need for a project? We\u2019d love to hear about it!`),xi=i(),Ls=a("li"),Ai=s(`Is it something you worked on and think could benefit the community? Awesome! Tell us what problem it solved for you.`),Ba=i(),O=a("ol"),st=a("li"),Li=s("Write a "),Ds=a("em"),Di=s("full paragraph"),Ni=s(" describing the feature;"),qi=i(),rt=a("li"),Ri=s("Provide a "),Ns=a("strong"),Wi=s("code snippet"),Fi=s(" that demonstrates its future use;"),Hi=i(),qs=a("li"),ji=s("In case this is related to a paper, please attach a link;"),Gi=i(),Rs=a("li"),Mi=s("Attach any additional information (drawings, screenshots, etc.) you think may help."),za=i(),$o=a("p"),Ui=s(`If your issue is well written we\u2019re already 80% of the way there by the time you post it.`),Ja=i(),Y=a("p"),Yi=s("We have added "),Ws=a("strong"),Bi=s("templates"),zi=s(` to guide you in the process of adding a new example script for training or testing the models in the library. You can find them in the `),at=a("a"),Fs=a("code"),Ji=s("templates"),Ki=s(` folder.`),Ka=i(),le=a("h2"),Te=a("a"),Hs=a("span"),m(lt.$$.fragment),Qi=i(),js=a("span"),Zi=s("Start contributing! 
(Pull Requests)"),Qa=i(),_o=a("p"),Vi=s(`Before writing code, we strongly advise you to search through the existing PRs or issues to make sure that nobody is already working on the same thing. If you are unsure, it is always a good idea to open an issue to get some feedback.`),Za=i(),P=a("p"),Xi=s("You will need basic "),Gs=a("code"),eh=s("git"),th=s(` proficiency to be able to contribute to \u{1F917} Transformers. `),Ms=a("code"),oh=s("git"),sh=s(` is not the easiest tool to use but it has the greatest manual. Type `),Us=a("code"),rh=s("git --help"),ah=s(" in a shell and enjoy. If you prefer books, "),nt=a("a"),lh=s(`Pro Git`),nh=s(" is a very good reference."),Va=i(),ko=a("p"),ih=s("Follow these steps to start contributing:"),Xa=i(),_=a("ol"),Ys=a("li"),it=a("p"),hh=s("Fork the "),ht=a("a"),uh=s("repository"),fh=s(` by clicking on the \u2018Fork\u2019 button on the repository\u2019s page. This creates a copy of the code under your GitHub user account.`),dh=i(),ut=a("li"),Bs=a("p"),ch=s("Clone your fork to your local disk, and add the base repository as a remote:"),ph=i(),m(ft.$$.fragment),mh=i(),ne=a("li"),zs=a("p"),yh=s("Create a new branch to hold your development changes:"),gh=i(),m(dt.$$.fragment),vh=i(),Se=a("p"),Js=a("strong"),wh=s("Do not"),bh=s(" work on the "),Ks=a("code"),$h=s("master"),_h=s(" branch."),kh=i(),E=a("li"),Qs=a("p"),Eh=s("Set up a development environment by running the following command in a virtual environment:"),Ph=i(),m(ct.$$.fragment),Ih=i(),ie=a("p"),Th=s(`(If transformers was already installed in the virtual environment, remove it with `),Zs=a("code"),Sh=s("pip uninstall transformers"),Oh=s(` before reinstalling it in editable mode with the `),Vs=a("code"),Ch=s("-e"),xh=s(" flag.)"),Ah=i(),pt=a("p"),Lh=s("To run the full test suite, you might need the additional dependency on "),Xs=a("code"),Dh=s("datasets"),Nh=s(` which requires a separate source install:`),qh=i(),m(mt.$$.fragment),Rh=i(),he=a("p"),Wh=s("If you have already cloned that repo, you might need to "),er=a("code"),Fh=s("git pull"),Hh=s(" to get the most recent changes in the "),tr=a("code"),jh=s("datasets"),Gh=s(` library.`),Mh=i(),c=a("li"),or=a("p"),Uh=s("Develop the features on your branch."),Yh=i(),sr=a("p"),Bh=s(`As you work on the features, you should make sure that the test suite passes. You should run the tests impacted by your changes like this:`),zh=i(),m(yt.$$.fragment),Jh=i(),rr=a("p"),Kh=s(`You can also run the full suite with the following command, but it takes a beefy machine to produce a result in a decent amount of time now that Transformers has grown a lot. Here is the command for it:`),Qh=i(),m(gt.$$.fragment),Zh=i(),Eo=a("p"),Vh=s(`For more information about tests, check out the `),vt=a("a"),Xh=s("dedicated documentation"),eu=i(),ue=a("p"),tu=s("\u{1F917} Transformers relies on "),ar=a("code"),ou=s("black"),su=s(" and "),lr=a("code"),ru=s("isort"),au=s(` to format its source code consistently. After you make changes, apply automatic style corrections and code verifications that can\u2019t be automated in one go with:`),lu=i(),m(wt.$$.fragment),nu=i(),nr=a("p"),iu=s("This target is also optimized to only work with files modified by the PR you\u2019re working on."),hu=i(),ir=a("p"),uu=s(`If you prefer to run the checks one after the other, the following command apply the style corrections:`),fu=i(),m(bt.$$.fragment),du=i(),$t=a("p"),cu=s("\u{1F917} Transformers also uses "),hr=a("code"),pu=s("flake8"),mu=s(` and a few custom scripts to check for coding mistakes. 
Quality control runs in CI, however you can also run the same checks with:`),yu=i(),m(_t.$$.fragment),gu=i(),ur=a("p"),vu=s(`Finally we have a lot of scripts that check we didn\u2019t forget to update some files when adding a new model, that you can run with`),wu=i(),m(kt.$$.fragment),bu=i(),Po=a("p"),$u=s(`To learn more about those checks and how to fix any issue with them, check out the `),Et=a("a"),_u=s("documentation"),ku=i(),Pt=a("p"),Eu=s("If you\u2019re modifying documents under "),fr=a("code"),Pu=s("docs/source"),Iu=s(`, make sure to validate that they can still be built. This check also runs in CI. To run a local check make sure you have installed the documentation builder requirements. First you will need to clone the repository containing our tools to build the documentation:`),Tu=i(),m(It.$$.fragment),Su=i(),dr=a("p"),Ou=s("Then, make sure you have all the dependencies to be able to build the doc with:"),Cu=i(),m(Tt.$$.fragment),xu=i(),cr=a("p"),Au=s("Finally run the following command from the root of the repository:"),Lu=i(),m(St.$$.fragment),Du=i(),Ot=a("p"),Nu=s("This will build the documentation in the "),pr=a("code"),qu=s("~/tmp/test-build"),Ru=s(` folder where you can inspect the generated Markdown files with your favorite editor. You won\u2019t be able to see the final rendering on the website before your PR is merged, we are actively working on adding a tool for this.`),Wu=i(),fe=a("p"),Fu=s("Once you\u2019re happy with your changes, add changed files using "),mr=a("code"),Hu=s("git add"),ju=s(` and make a commit with `),yr=a("code"),Gu=s("git commit"),Mu=s(" to record your changes locally:"),Uu=i(),m(Ct.$$.fragment),Yu=i(),xt=a("p"),Bu=s("Please write "),At=a("a"),zu=s(`good commit messages`),Ju=s("."),Ku=i(),gr=a("p"),Qu=s(`It is a good idea to sync your copy of the code with the original repository regularly. This way you can quickly account for changes:`),Zu=i(),m(Lt.$$.fragment),Vu=i(),vr=a("p"),Xu=s("Push the changes to your account using:"),ef=i(),m(Dt.$$.fragment),tf=i(),wr=a("li"),Nt=a("p"),of=s("Once you are satisfied ("),br=a("strong"),sf=s("and the checklist below is happy too"),rf=s(`), go to the webpage of your fork on GitHub. Click on \u2018Pull request\u2019 to send your changes to the project maintainers for review.`),af=i(),$r=a("li"),_r=a("p"),lf=s(`It\u2019s ok if maintainers ask you for changes. It happens to core contributors too! So everyone can see the changes in the Pull request, work in your local branch and push the changes to your fork. They will automatically appear in the pull request.`),el=i(),de=a("h3"),Oe=a("a"),kr=a("span"),m(qt.$$.fragment),nf=i(),Er=a("span"),hf=s("Checklist"),tl=i(),k=a("ol"),Pr=a("li"),uf=s("The title of your pull request should be a summary of its contribution;"),ff=i(),Ir=a("li"),df=s(`If your pull request addresses an issue, please mention the issue number in the pull request description to make sure they are linked (and people consulting the issue know you are working on it);`),cf=i(),Rt=a("li"),pf=s("To indicate a work in progress please prefix the title with "),Tr=a("code"),mf=s("[WIP]"),yf=s(`. These are useful to avoid duplicated work, and to differentiate it from PRs ready to be merged;`),gf=i(),Sr=a("li"),vf=s("Make sure existing tests pass;"),wf=i(),Io=a("li"),bf=s("Add high-coverage tests. 
No quality testing = no merge."),ce=a("ul"),Wt=a("li"),$f=s(`If you are adding a new model, make sure that you use `),Or=a("code"),_f=s("ModelTester.all_model_classes = (MyModel, MyModelWithLMHead,...)"),kf=s(", which triggers the common tests."),Ef=i(),pe=a("li"),Pf=s("If you are adding new "),Cr=a("code"),If=s("@slow"),Tf=s(` tests, make sure they pass using `),xr=a("code"),Sf=s("RUN_SLOW=1 python -m pytest tests/test_my_new_model.py"),Of=s("."),Cf=i(),Ft=a("li"),xf=s(`If you are adding a new tokenizer, write tests, and make sure `),Ar=a("code"),Af=s("RUN_SLOW=1 python -m pytest tests/test_tokenization_{your_model_name}.py"),Lf=s(` passes. CircleCI does not run the slow tests, but github actions does every night!`),Df=i(),Ht=a("li"),Nf=s("All public methods must have informative docstrings that work nicely with sphinx. See "),Lr=a("code"),qf=s("modeling_bert.py"),Rf=s(` for an example.`),Wf=i(),F=a("li"),Ff=s("Due to the rapidly growing repository, it is important to make sure that no files that would significantly weigh down the repository are added. This includes images, videos and other non-text files. We prefer to leverage a hf.co hosted "),Dr=a("code"),Hf=s("dataset"),jf=s(` like the ones hosted on `),jt=a("a"),Nr=a("code"),Gf=s("hf-internal-testing"),Mf=s(` in which to place these files and reference them by URL. We recommend putting them in the following dataset: `),Gt=a("a"),Uf=s("huggingface/documentation-images"),Yf=s(`. If an external contribution, feel free to add the images to your PR and ask a Hugging Face member to migrate your images to this dataset.`),ol=i(),Mt=a("p"),Bf=s("See more about the checks run on a pull request in our "),To=a("a"),zf=s("PR guide"),sl=i(),me=a("h3"),Ce=a("a"),qr=a("span"),m(Ut.$$.fragment),Jf=i(),Rr=a("span"),Kf=s("Tests"),rl=i(),B=a("p"),Qf=s(`An extensive test suite is included to test the library behavior and several examples. Library tests can be found in the `),Yt=a("a"),Zf=s("tests folder"),Vf=s(` and examples tests in the `),Bt=a("a"),Xf=s("examples folder"),ed=s("."),al=i(),D=a("p"),td=s("We like "),Wr=a("code"),od=s("pytest"),sd=s(" and "),Fr=a("code"),rd=s("pytest-xdist"),ad=s(` because it\u2019s faster. From the root of the repository, here\u2019s how to run tests with `),Hr=a("code"),ld=s("pytest"),nd=s(" for the library:"),ll=i(),m(zt.$$.fragment),nl=i(),So=a("p"),id=s("and for the examples:"),il=i(),m(Jt.$$.fragment),hl=i(),N=a("p"),hd=s("In fact, that\u2019s how "),jr=a("code"),ud=s("make test"),fd=s(" and "),Gr=a("code"),dd=s("make test-examples"),cd=s(" are implemented (sans the "),Mr=a("code"),pd=s("pip install"),md=s(" line)!"),ul=i(),Oo=a("p"),yd=s(`You can specify a smaller set of tests in order to test only the feature you\u2019re working on.`),fl=i(),z=a("p"),gd=s("By default, slow tests are skipped. Set the "),Ur=a("code"),vd=s("RUN_SLOW"),wd=s(` environment variable to `),Yr=a("code"),bd=s("yes"),$d=s(` to run them. This will download many gigabytes of models \u2014 make sure you have enough disk space and a good Internet connection, or a lot of patience!`),dl=i(),m(Kt.$$.fragment),cl=i(),J=a("p"),_d=s("Likewise, set the "),Br=a("code"),kd=s("RUN_CUSTOM_TOKENIZERS"),Ed=s(" environment variable to "),zr=a("code"),Pd=s("yes"),Id=s(` to run tests for custom tokenizers, which don\u2019t run by default either.`),pl=i(),K=a("p"),Td=s("\u{1F917} Transformers uses "),Jr=a("code"),Sd=s("pytest"),Od=s(` as a test runner only. 
It doesn\u2019t use any `),Kr=a("code"),Cd=s("pytest"),xd=s("-specific features in the test suite itself."),ml=i(),Q=a("p"),Ad=s("This means "),Qr=a("code"),Ld=s("unittest"),Dd=s(` is fully supported. Here\u2019s how to run tests with `),Zr=a("code"),Nd=s("unittest"),qd=s(":"),yl=i(),m(Qt.$$.fragment),gl=i(),ye=a("h3"),xe=a("a"),Vr=a("span"),m(Zt.$$.fragment),Rd=i(),Xr=a("span"),Wd=s("Style guide"),vl=i(),Z=a("p"),Fd=s("For documentation strings, \u{1F917} Transformers follows the "),Vt=a("a"),Hd=s("google style"),jd=s(`. Check our `),Xt=a("a"),Gd=s("documentation writing guide"),Md=s(` for more information.`),wl=i(),ge=a("h4"),Ae=a("a"),ea=a("span"),m(eo.$$.fragment),Ud=i(),ta=a("span"),Yd=s("This guide was heavily inspired by the awesome [scikit-learn guide to contributing](https://github.com/scikit-learn/scikit-learn/blob/master/CONTRIBUTING.md)"),bl=i(),ve=a("h3"),Le=a("a"),oa=a("span"),m(to.$$.fragment),Bd=i(),sa=a("span"),zd=s("Develop on Windows"),$l=i(),V=a("p"),Jd=s("On windows, you need to configure git to transform Windows "),ra=a("code"),Kd=s("CRLF"),Qd=s(" line endings to Linux "),aa=a("code"),Zd=s("LF"),Vd=s(" line endings:"),_l=i(),Co=a("p"),la=a("code"),Xd=s("git config core.autocrlf input"),kl=i(),xo=a("p"),ec=s("One way one can run the make command on Window is to pass by MSYS2:"),El=i(),q=a("ol"),Ao=a("li"),oo=a("a"),tc=s("Download MSYS2"),oc=s(", we assume to have it installed in C:\\msys64"),sc=i(),na=a("li"),rc=s("Open the command line C:\\msys64\\msys2.exe (it should be available from the start menu)"),ac=i(),De=a("li"),lc=s("Run in the shell: "),ia=a("code"),nc=s("pacman -Syu"),ic=s(" and install make with "),ha=a("code"),hc=s("pacman -S make"),uc=i(),so=a("li"),fc=s("Add "),ua=a("code"),dc=s("C:\\msys64\\usr\\bin"),cc=s(" to your PATH environment variable."),Pl=i(),Ne=a("p"),pc=s("You can now use "),fa=a("code"),mc=s("make"),yc=s(" from any terminal (Powershell, cmd.exe, etc) \u{1F389}"),Il=i(),we=a("h3"),qe=a("a"),da=a("span"),m(ro.$$.fragment),gc=i(),ca=a("span"),vc=s("Syncing forked master with upstream (HuggingFace) master"),Tl=i(),Lo=a("p"),wc=s(`To avoid pinging the upstream repository which adds reference notes to each upstream PR and sends unnecessary notifications to the developers involved in these PRs, when syncing the master branch of a forked repository, please, follow these steps:`),Sl=i(),Re=a("ol"),pa=a("li"),bc=s("When possible, avoid syncing with the upstream using a branch and PR on the forked repository. Instead merge directly into the forked master."),$c=i(),ma=a("li"),_c=s("If a PR is absolutely necessary, use the following steps after checking out your branch:"),Ol=i(),m(ao.$$.fragment),this.h()},l(t){const u=Yy('[data-svelte="svelte-1phssyn"]',document.head);H=l(u,"META",{name:!0,content:!0}),u.forEach(o),lo=h(t),x=l(t,"H1",{class:!0});var xl=n(x);W=l(xl,"A",{id:!0,class:!0,href:!0});var Sc=n(W);Xo=l(Sc,"SPAN",{});var Oc=n(Xo);y(Ye.$$.fragment,Oc),Oc.forEach(o),Sc.forEach(o),dn=h(xl),es=l(xl,"SPAN",{});var Cc=n(es);cn=r(Cc,"How to contribute to transformers?"),Cc.forEach(o),xl.forEach(o),va=h(t),no=l(t,"P",{});var xc=n(no);pn=r(xc,`Everyone is welcome to contribute, and we value everybody\u2019s contribution. Code is thus not the only way to help the community. 
Answering questions, helping others, reaching out and improving the documentations are immensely valuable to the community.`),xc.forEach(o),wa=h(t),io=l(t,"P",{});var Ac=n(io);mn=r(Ac,`It also helps us if you spread the word: reference the library from blog posts on the awesome projects it made possible, shout out on Twitter every time it has helped you, or simply star the repo to say \u201Cthank you\u201D.`),Ac.forEach(o),ba=h(t),be=l(t,"P",{});var Al=n(be);yn=r(Al,`Whichever way you choose to contribute, please be mindful to respect our `),Be=l(Al,"A",{href:!0,rel:!0});var Lc=n(Be);gn=r(Lc,"code of conduct"),Lc.forEach(o),vn=r(Al,"."),Al.forEach(o),$a=h(t),te=l(t,"H2",{class:!0});var Ll=n(te);$e=l(Ll,"A",{id:!0,class:!0,href:!0});var Dc=n($e);ts=l(Dc,"SPAN",{});var Nc=n(ts);y(ze.$$.fragment,Nc),Nc.forEach(o),Dc.forEach(o),wn=h(Ll),os=l(Ll,"SPAN",{});var qc=n(os);bn=r(qc,"You can contribute in so many ways!"),qc.forEach(o),Ll.forEach(o),_a=h(t),ho=l(t,"P",{});var Rc=n(ho);$n=r(Rc,"There are 4 ways you can contribute to transformers:"),Rc.forEach(o),ka=h(t),A=l(t,"UL",{});var We=n(A);ss=l(We,"LI",{});var Wc=n(ss);_n=r(Wc,"Fixing outstanding issues with the existing code;"),Wc.forEach(o),kn=h(We),rs=l(We,"LI",{});var Fc=n(rs);En=r(Fc,"Implementing new models;"),Fc.forEach(o),Pn=h(We),as=l(We,"LI",{});var Hc=n(as);In=r(Hc,"Contributing to the examples or to the documentation;"),Hc.forEach(o),Tn=h(We),ls=l(We,"LI",{});var jc=n(ls);Sn=r(jc,"Submitting issues related to bugs or desired new features."),jc.forEach(o),We.forEach(o),Ea=h(t),L=l(t,"P",{});var Fe=n(L);On=r(Fe,"In particular there is a special "),Je=l(Fe,"A",{href:!0,rel:!0});var Gc=n(Je);Cn=r(Gc,`Good First Issue`),Gc.forEach(o),xn=r(Fe,` listing. It will give you a list of open Issues that are open to anybody to work on. Just comment in the issue that you\u2019d like to work on it. In that same listing you will also find some Issues with `),ns=l(Fe,"CODE",{});var Mc=n(ns);An=r(Mc,"Good Second Issue"),Mc.forEach(o),Ln=r(Fe,` label. These are typically slightly more complicated than the Issues with just `),is=l(Fe,"CODE",{});var Uc=n(is);Dn=r(Uc,"Good First Issue"),Uc.forEach(o),Nn=r(Fe,` label. But if you feel you know what you\u2019re doing, go for it.`),Fe.forEach(o),Pa=h(t),uo=l(t,"P",{});var Yc=n(uo);hs=l(Yc,"EM",{});var Bc=n(hs);qn=r(Bc,"All are equally valuable to the community."),Bc.forEach(o),Yc.forEach(o),Ia=h(t),oe=l(t,"H2",{class:!0});var Dl=n(oe);_e=l(Dl,"A",{id:!0,class:!0,href:!0});var zc=n(_e);us=l(zc,"SPAN",{});var Jc=n(us);y(Ke.$$.fragment,Jc),Jc.forEach(o),zc.forEach(o),Rn=h(Dl),fs=l(Dl,"SPAN",{});var Kc=n(fs);Wn=r(Kc,"Submitting a new issue or feature request"),Kc.forEach(o),Dl.forEach(o),Ta=h(t),fo=l(t,"P",{});var Qc=n(fo);Fn=r(Qc,`Do your best to follow these guidelines when submitting an issue or a feature request. It will make it easier for us to come back to you quickly and with good feedback.`),Qc.forEach(o),Sa=h(t),se=l(t,"H3",{class:!0});var Nl=n(se);ke=l(Nl,"A",{id:!0,class:!0,href:!0});var Zc=n(ke);ds=l(Zc,"SPAN",{});var Vc=n(ds);y(Qe.$$.fragment,Vc),Vc.forEach(o),Zc.forEach(o),Hn=h(Nl),cs=l(Nl,"SPAN",{});var Xc=n(cs);jn=r(Xc,"Did you find a bug?"),Xc.forEach(o),Nl.forEach(o),Oa=h(t),co=l(t,"P",{});var ep=n(co);Gn=r(ep,`The \u{1F917} Transformers library is robust and reliable thanks to the users who notify us of the problems they encounter. 
So thank you for reporting an issue.`),ep.forEach(o),Ca=h(t),Ee=l(t,"P",{});var ql=n(Ee);Mn=r(ql,"First, we would really appreciate it if you could "),ps=l(ql,"STRONG",{});var tp=n(ps);Un=r(tp,`make sure the bug was not already reported`),tp.forEach(o),Yn=r(ql," (use the search bar on Github under Issues)."),ql.forEach(o),xa=h(t),po=l(t,"P",{});var op=n(po);Bn=r(op,"Did not find it? :( So we can act quickly on it, please follow these steps:"),op.forEach(o),Aa=h(t),j=l(t,"UL",{});var Do=n(j);S=l(Do,"LI",{});var X=n(S);zn=r(X,"Include your "),ms=l(X,"STRONG",{});var sp=n(ms);Jn=r(sp,"OS type and version"),sp.forEach(o),Kn=r(X,", the versions of "),ys=l(X,"STRONG",{});var rp=n(ys);Qn=r(rp,"Python"),rp.forEach(o),Zn=r(X,", "),gs=l(X,"STRONG",{});var ap=n(gs);Vn=r(ap,"PyTorch"),ap.forEach(o),Xn=r(X,` and `),vs=l(X,"STRONG",{});var lp=n(vs);ei=r(lp,"Tensorflow"),lp.forEach(o),ti=r(X," when applicable;"),X.forEach(o),oi=h(Do),ws=l(Do,"LI",{});var np=n(ws);si=r(np,`A short, self-contained, code snippet that allows us to reproduce the bug in less than 30s;`),np.forEach(o),ri=h(Do),Ze=l(Do,"LI",{});var Rl=n(Ze);ai=r(Rl,"Provide the "),bs=l(Rl,"EM",{});var ip=n(bs);li=r(ip,"full"),ip.forEach(o),ni=r(Rl," traceback if an exception is raised."),Rl.forEach(o),Do.forEach(o),La=h(t),mo=l(t,"P",{});var hp=n(mo);ii=r(hp,"To get the OS and software versions automatically, you can run the following command:"),hp.forEach(o),Da=h(t),y(Ve.$$.fragment,t),Na=h(t),yo=l(t,"P",{});var up=n(yo);hi=r(up,"or from the root of the repository the following command:"),up.forEach(o),qa=h(t),y(Xe.$$.fragment,t),Ra=h(t),re=l(t,"H3",{class:!0});var Wl=n(re);Pe=l(Wl,"A",{id:!0,class:!0,href:!0});var fp=n(Pe);$s=l(fp,"SPAN",{});var dp=n($s);y(et.$$.fragment,dp),dp.forEach(o),fp.forEach(o),ui=h(Wl),_s=l(Wl,"SPAN",{});var cp=n(_s);fi=r(cp,"Do you want to implement a new model?"),cp.forEach(o),Wl.forEach(o),Wa=h(t),go=l(t,"P",{});var pp=n(go);di=r(pp,"Awesome! Please provide the following information:"),pp.forEach(o),Fa=h(t),G=l(t,"UL",{});var No=n(G);ks=l(No,"LI",{});var mp=n(ks);ci=r(mp,"Short description of the model and link to the paper;"),mp.forEach(o),pi=h(No),Es=l(No,"LI",{});var yp=n(Es);mi=r(yp,"Link to the implementation if it is open-source;"),yp.forEach(o),yi=h(No),Ps=l(No,"LI",{});var gp=n(Ps);gi=r(gp,"Link to the model weights if they are available."),gp.forEach(o),No.forEach(o),Ha=h(t),vo=l(t,"P",{});var vp=n(vo);vi=r(vp,`If you are willing to contribute the model yourself, let us know so we can best guide you.`),vp.forEach(o),ja=h(t),M=l(t,"P",{});var qo=n(M);wi=r(qo,"We have added a "),Is=l(qo,"STRONG",{});var wp=n(Is);bi=r(wp,"detailed guide and templates"),wp.forEach(o),$i=r(qo,` to guide you in the process of adding a new model. 
You can find them in the `),tt=l(qo,"A",{href:!0,rel:!0});var bp=n(tt);Ts=l(bp,"CODE",{});var $p=n(Ts);_i=r($p,"templates"),$p.forEach(o),bp.forEach(o),ki=r(qo," folder."),qo.forEach(o),Ga=h(t),ae=l(t,"H3",{class:!0});var Fl=n(ae);Ie=l(Fl,"A",{id:!0,class:!0,href:!0});var _p=n(Ie);Ss=l(_p,"SPAN",{});var kp=n(Ss);y(ot.$$.fragment,kp),kp.forEach(o),_p.forEach(o),Ei=h(Fl),Os=l(Fl,"SPAN",{});var Ep=n(Os);Pi=r(Ep,"Do you want a new feature (that is not a model)?"),Ep.forEach(o),Fl.forEach(o),Ma=h(t),wo=l(t,"P",{});var Pp=n(wo);Ii=r(Pp,"A world-class feature request addresses the following points:"),Pp.forEach(o),Ua=h(t),bo=l(t,"OL",{});var Ip=n(bo);Cs=l(Ip,"LI",{});var Tp=n(Cs);Ti=r(Tp,"Motivation first:"),Tp.forEach(o),Ip.forEach(o),Ya=h(t),U=l(t,"UL",{});var Ro=n(U);xs=l(Ro,"LI",{});var Sp=n(xs);Si=r(Sp,`Is it related to a problem/frustration with the library? If so, please explain why. Providing a code snippet that demonstrates the problem is best.`),Sp.forEach(o),Oi=h(Ro),As=l(Ro,"LI",{});var Op=n(As);Ci=r(Op,`Is it related to something you would need for a project? We\u2019d love to hear about it!`),Op.forEach(o),xi=h(Ro),Ls=l(Ro,"LI",{});var Cp=n(Ls);Ai=r(Cp,`Is it something you worked on and think could benefit the community? Awesome! Tell us what problem it solved for you.`),Cp.forEach(o),Ro.forEach(o),Ba=h(t),O=l(t,"OL",{start:!0});var He=n(O);st=l(He,"LI",{});var Hl=n(st);Li=r(Hl,"Write a "),Ds=l(Hl,"EM",{});var xp=n(Ds);Di=r(xp,"full paragraph"),xp.forEach(o),Ni=r(Hl," describing the feature;"),Hl.forEach(o),qi=h(He),rt=l(He,"LI",{});var jl=n(rt);Ri=r(jl,"Provide a "),Ns=l(jl,"STRONG",{});var Ap=n(Ns);Wi=r(Ap,"code snippet"),Ap.forEach(o),Fi=r(jl," that demonstrates its future use;"),jl.forEach(o),Hi=h(He),qs=l(He,"LI",{});var Lp=n(qs);ji=r(Lp,"In case this is related to a paper, please attach a link;"),Lp.forEach(o),Gi=h(He),Rs=l(He,"LI",{});var Dp=n(Rs);Mi=r(Dp,"Attach any additional information (drawings, screenshots, etc.) you think may help."),Dp.forEach(o),He.forEach(o),za=h(t),$o=l(t,"P",{});var Np=n($o);Ui=r(Np,`If your issue is well written we\u2019re already 80% of the way there by the time you post it.`),Np.forEach(o),Ja=h(t),Y=l(t,"P",{});var Wo=n(Y);Yi=r(Wo,"We have added "),Ws=l(Wo,"STRONG",{});var qp=n(Ws);Bi=r(qp,"templates"),qp.forEach(o),zi=r(Wo,` to guide you in the process of adding a new example script for training or testing the models in the library. You can find them in the `),at=l(Wo,"A",{href:!0,rel:!0});var Rp=n(at);Fs=l(Rp,"CODE",{});var Wp=n(Fs);Ji=r(Wp,"templates"),Wp.forEach(o),Rp.forEach(o),Ki=r(Wo,` folder.`),Wo.forEach(o),Ka=h(t),le=l(t,"H2",{class:!0});var Gl=n(le);Te=l(Gl,"A",{id:!0,class:!0,href:!0});var Fp=n(Te);Hs=l(Fp,"SPAN",{});var Hp=n(Hs);y(lt.$$.fragment,Hp),Hp.forEach(o),Fp.forEach(o),Qi=h(Gl),js=l(Gl,"SPAN",{});var jp=n(js);Zi=r(jp,"Start contributing! (Pull Requests)"),jp.forEach(o),Gl.forEach(o),Qa=h(t),_o=l(t,"P",{});var Gp=n(_o);Vi=r(Gp,`Before writing code, we strongly advise you to search through the existing PRs or issues to make sure that nobody is already working on the same thing. If you are unsure, it is always a good idea to open an issue to get some feedback.`),Gp.forEach(o),Za=h(t),P=l(t,"P",{});var ee=n(P);Xi=r(ee,"You will need basic "),Gs=l(ee,"CODE",{});var Mp=n(Gs);eh=r(Mp,"git"),Mp.forEach(o),th=r(ee,` proficiency to be able to contribute to \u{1F917} Transformers. `),Ms=l(ee,"CODE",{});var Up=n(Ms);oh=r(Up,"git"),Up.forEach(o),sh=r(ee,` is not the easiest tool to use but it has the greatest manual. 
Type `),Us=l(ee,"CODE",{});var Yp=n(Us);rh=r(Yp,"git --help"),Yp.forEach(o),ah=r(ee," in a shell and enjoy. If you prefer books, "),nt=l(ee,"A",{href:!0,rel:!0});var Bp=n(nt);lh=r(Bp,`Pro Git`),Bp.forEach(o),nh=r(ee," is a very good reference."),ee.forEach(o),Va=h(t),ko=l(t,"P",{});var zp=n(ko);ih=r(zp,"Follow these steps to start contributing:"),zp.forEach(o),Xa=h(t),_=l(t,"OL",{});var I=n(_);Ys=l(I,"LI",{});var Jp=n(Ys);it=l(Jp,"P",{});var Ml=n(it);hh=r(Ml,"Fork the "),ht=l(Ml,"A",{href:!0,rel:!0});var Kp=n(ht);uh=r(Kp,"repository"),Kp.forEach(o),fh=r(Ml,` by clicking on the \u2018Fork\u2019 button on the repository\u2019s page. This creates a copy of the code under your GitHub user account.`),Ml.forEach(o),Jp.forEach(o),dh=h(I),ut=l(I,"LI",{});var Ul=n(ut);Bs=l(Ul,"P",{});var Qp=n(Bs);ch=r(Qp,"Clone your fork to your local disk, and add the base repository as a remote:"),Qp.forEach(o),ph=h(Ul),y(ft.$$.fragment,Ul),Ul.forEach(o),mh=h(I),ne=l(I,"LI",{});var Fo=n(ne);zs=l(Fo,"P",{});var Zp=n(zs);yh=r(Zp,"Create a new branch to hold your development changes:"),Zp.forEach(o),gh=h(Fo),y(dt.$$.fragment,Fo),vh=h(Fo),Se=l(Fo,"P",{});var ya=n(Se);Js=l(ya,"STRONG",{});var Vp=n(Js);wh=r(Vp,"Do not"),Vp.forEach(o),bh=r(ya," work on the "),Ks=l(ya,"CODE",{});var Xp=n(Ks);$h=r(Xp,"master"),Xp.forEach(o),_h=r(ya," branch."),ya.forEach(o),Fo.forEach(o),kh=h(I),E=l(I,"LI",{});var R=n(E);Qs=l(R,"P",{});var em=n(Qs);Eh=r(em,"Set up a development environment by running the following command in a virtual environment:"),em.forEach(o),Ph=h(R),y(ct.$$.fragment,R),Ih=h(R),ie=l(R,"P",{});var Ho=n(ie);Th=r(Ho,`(If transformers was already installed in the virtual environment, remove it with `),Zs=l(Ho,"CODE",{});var tm=n(Zs);Sh=r(tm,"pip uninstall transformers"),tm.forEach(o),Oh=r(Ho,` before reinstalling it in editable mode with the `),Vs=l(Ho,"CODE",{});var om=n(Vs);Ch=r(om,"-e"),om.forEach(o),xh=r(Ho," flag.)"),Ho.forEach(o),Ah=h(R),pt=l(R,"P",{});var Yl=n(pt);Lh=r(Yl,"To run the full test suite, you might need the additional dependency on "),Xs=l(Yl,"CODE",{});var sm=n(Xs);Dh=r(sm,"datasets"),sm.forEach(o),Nh=r(Yl,` which requires a separate source install:`),Yl.forEach(o),qh=h(R),y(mt.$$.fragment,R),Rh=h(R),he=l(R,"P",{});var jo=n(he);Wh=r(jo,"If you have already cloned that repo, you might need to "),er=l(jo,"CODE",{});var rm=n(er);Fh=r(rm,"git pull"),rm.forEach(o),Hh=r(jo," to get the most recent changes in the "),tr=l(jo,"CODE",{});var am=n(tr);jh=r(am,"datasets"),am.forEach(o),Gh=r(jo,` library.`),jo.forEach(o),R.forEach(o),Mh=h(I),c=l(I,"LI",{});var p=n(c);or=l(p,"P",{});var lm=n(or);Uh=r(lm,"Develop the features on your branch."),lm.forEach(o),Yh=h(p),sr=l(p,"P",{});var nm=n(sr);Bh=r(nm,`As you work on the features, you should make sure that the test suite passes. You should run the tests impacted by your changes like this:`),nm.forEach(o),zh=h(p),y(yt.$$.fragment,p),Jh=h(p),rr=l(p,"P",{});var im=n(rr);Kh=r(im,`You can also run the full suite with the following command, but it takes a beefy machine to produce a result in a decent amount of time now that Transformers has grown a lot. 
Here is the command for it:`),im.forEach(o),Qh=h(p),y(gt.$$.fragment,p),Zh=h(p),Eo=l(p,"P",{});var kc=n(Eo);Vh=r(kc,`For more information about tests, check out the `),vt=l(kc,"A",{href:!0,rel:!0});var hm=n(vt);Xh=r(hm,"dedicated documentation"),hm.forEach(o),kc.forEach(o),eu=h(p),ue=l(p,"P",{});var Go=n(ue);tu=r(Go,"\u{1F917} Transformers relies on "),ar=l(Go,"CODE",{});var um=n(ar);ou=r(um,"black"),um.forEach(o),su=r(Go," and "),lr=l(Go,"CODE",{});var fm=n(lr);ru=r(fm,"isort"),fm.forEach(o),au=r(Go,` to format its source code consistently. After you make changes, apply automatic style corrections and code verifications that can\u2019t be automated in one go with:`),Go.forEach(o),lu=h(p),y(wt.$$.fragment,p),nu=h(p),nr=l(p,"P",{});var dm=n(nr);iu=r(dm,"This target is also optimized to only work with files modified by the PR you\u2019re working on."),dm.forEach(o),hu=h(p),ir=l(p,"P",{});var cm=n(ir);uu=r(cm,`If you prefer to run the checks one after the other, the following command apply the style corrections:`),cm.forEach(o),fu=h(p),y(bt.$$.fragment,p),du=h(p),$t=l(p,"P",{});var Bl=n($t);cu=r(Bl,"\u{1F917} Transformers also uses "),hr=l(Bl,"CODE",{});var pm=n(hr);pu=r(pm,"flake8"),pm.forEach(o),mu=r(Bl,` and a few custom scripts to check for coding mistakes. Quality control runs in CI, however you can also run the same checks with:`),Bl.forEach(o),yu=h(p),y(_t.$$.fragment,p),gu=h(p),ur=l(p,"P",{});var mm=n(ur);vu=r(mm,`Finally we have a lot of scripts that check we didn\u2019t forget to update some files when adding a new model, that you can run with`),mm.forEach(o),wu=h(p),y(kt.$$.fragment,p),bu=h(p),Po=l(p,"P",{});var Ec=n(Po);$u=r(Ec,`To learn more about those checks and how to fix any issue with them, check out the `),Et=l(Ec,"A",{href:!0,rel:!0});var ym=n(Et);_u=r(ym,"documentation"),ym.forEach(o),Ec.forEach(o),ku=h(p),Pt=l(p,"P",{});var zl=n(Pt);Eu=r(zl,"If you\u2019re modifying documents under "),fr=l(zl,"CODE",{});var gm=n(fr);Pu=r(gm,"docs/source"),gm.forEach(o),Iu=r(zl,`, make sure to validate that they can still be built. This check also runs in CI. To run a local check make sure you have installed the documentation builder requirements. First you will need to clone the repository containing our tools to build the documentation:`),zl.forEach(o),Tu=h(p),y(It.$$.fragment,p),Su=h(p),dr=l(p,"P",{});var vm=n(dr);Ou=r(vm,"Then, make sure you have all the dependencies to be able to build the doc with:"),vm.forEach(o),Cu=h(p),y(Tt.$$.fragment,p),xu=h(p),cr=l(p,"P",{});var wm=n(cr);Au=r(wm,"Finally run the following command from the root of the repository:"),wm.forEach(o),Lu=h(p),y(St.$$.fragment,p),Du=h(p),Ot=l(p,"P",{});var Jl=n(Ot);Nu=r(Jl,"This will build the documentation in the "),pr=l(Jl,"CODE",{});var bm=n(pr);qu=r(bm,"~/tmp/test-build"),bm.forEach(o),Ru=r(Jl,` folder where you can inspect the generated Markdown files with your favorite editor. 
You won\u2019t be able to see the final rendering on the website before your PR is merged, we are actively working on adding a tool for this.`),Jl.forEach(o),Wu=h(p),fe=l(p,"P",{});var Mo=n(fe);Fu=r(Mo,"Once you\u2019re happy with your changes, add changed files using "),mr=l(Mo,"CODE",{});var $m=n(mr);Hu=r($m,"git add"),$m.forEach(o),ju=r(Mo,` and make a commit with `),yr=l(Mo,"CODE",{});var _m=n(yr);Gu=r(_m,"git commit"),_m.forEach(o),Mu=r(Mo," to record your changes locally:"),Mo.forEach(o),Uu=h(p),y(Ct.$$.fragment,p),Yu=h(p),xt=l(p,"P",{});var Kl=n(xt);Bu=r(Kl,"Please write "),At=l(Kl,"A",{href:!0,rel:!0});var km=n(At);zu=r(km,`good commit messages`),km.forEach(o),Ju=r(Kl,"."),Kl.forEach(o),Ku=h(p),gr=l(p,"P",{});var Em=n(gr);Qu=r(Em,`It is a good idea to sync your copy of the code with the original repository regularly. This way you can quickly account for changes:`),Em.forEach(o),Zu=h(p),y(Lt.$$.fragment,p),Vu=h(p),vr=l(p,"P",{});var Pm=n(vr);Xu=r(Pm,"Push the changes to your account using:"),Pm.forEach(o),ef=h(p),y(Dt.$$.fragment,p),p.forEach(o),tf=h(I),wr=l(I,"LI",{});var Im=n(wr);Nt=l(Im,"P",{});var Ql=n(Nt);of=r(Ql,"Once you are satisfied ("),br=l(Ql,"STRONG",{});var Tm=n(br);sf=r(Tm,"and the checklist below is happy too"),Tm.forEach(o),rf=r(Ql,`), go to the webpage of your fork on GitHub. Click on \u2018Pull request\u2019 to send your changes to the project maintainers for review.`),Ql.forEach(o),Im.forEach(o),af=h(I),$r=l(I,"LI",{});var Sm=n($r);_r=l(Sm,"P",{});var Om=n(_r);lf=r(Om,`It\u2019s ok if maintainers ask you for changes. It happens to core contributors too! So everyone can see the changes in the Pull request, work in your local branch and push the changes to your fork. They will automatically appear in the pull request.`),Om.forEach(o),Sm.forEach(o),I.forEach(o),el=h(t),de=l(t,"H3",{class:!0});var Zl=n(de);Oe=l(Zl,"A",{id:!0,class:!0,href:!0});var Cm=n(Oe);kr=l(Cm,"SPAN",{});var xm=n(kr);y(qt.$$.fragment,xm),xm.forEach(o),Cm.forEach(o),nf=h(Zl),Er=l(Zl,"SPAN",{});var Am=n(Er);hf=r(Am,"Checklist"),Am.forEach(o),Zl.forEach(o),tl=h(t),k=l(t,"OL",{});var T=n(k);Pr=l(T,"LI",{});var Lm=n(Pr);uf=r(Lm,"The title of your pull request should be a summary of its contribution;"),Lm.forEach(o),ff=h(T),Ir=l(T,"LI",{});var Dm=n(Ir);df=r(Dm,`If your pull request addresses an issue, please mention the issue number in the pull request description to make sure they are linked (and people consulting the issue know you are working on it);`),Dm.forEach(o),cf=h(T),Rt=l(T,"LI",{});var Vl=n(Rt);pf=r(Vl,"To indicate a work in progress please prefix the title with "),Tr=l(Vl,"CODE",{});var Nm=n(Tr);mf=r(Nm,"[WIP]"),Nm.forEach(o),yf=r(Vl,`. These are useful to avoid duplicated work, and to differentiate it from PRs ready to be merged;`),Vl.forEach(o),gf=h(T),Sr=l(T,"LI",{});var qm=n(Sr);vf=r(qm,"Make sure existing tests pass;"),qm.forEach(o),wf=h(T),Io=l(T,"LI",{});var Pc=n(Io);bf=r(Pc,"Add high-coverage tests. 
No quality testing = no merge."),ce=l(Pc,"UL",{});var Uo=n(ce);Wt=l(Uo,"LI",{});var Xl=n(Wt);$f=r(Xl,`If you are adding a new model, make sure that you use `),Or=l(Xl,"CODE",{});var Rm=n(Or);_f=r(Rm,"ModelTester.all_model_classes = (MyModel, MyModelWithLMHead,...)"),Rm.forEach(o),kf=r(Xl,", which triggers the common tests."),Xl.forEach(o),Ef=h(Uo),pe=l(Uo,"LI",{});var Yo=n(pe);Pf=r(Yo,"If you are adding new "),Cr=l(Yo,"CODE",{});var Wm=n(Cr);If=r(Wm,"@slow"),Wm.forEach(o),Tf=r(Yo,` tests, make sure they pass using `),xr=l(Yo,"CODE",{});var Fm=n(xr);Sf=r(Fm,"RUN_SLOW=1 python -m pytest tests/test_my_new_model.py"),Fm.forEach(o),Of=r(Yo,"."),Yo.forEach(o),Cf=h(Uo),Ft=l(Uo,"LI",{});var en=n(Ft);xf=r(en,`If you are adding a new tokenizer, write tests, and make sure `),Ar=l(en,"CODE",{});var Hm=n(Ar);Af=r(Hm,"RUN_SLOW=1 python -m pytest tests/test_tokenization_{your_model_name}.py"),Hm.forEach(o),Lf=r(en,` passes. CircleCI does not run the slow tests, but github actions does every night!`),en.forEach(o),Uo.forEach(o),Pc.forEach(o),Df=h(T),Ht=l(T,"LI",{});var tn=n(Ht);Nf=r(tn,"All public methods must have informative docstrings that work nicely with sphinx. See "),Lr=l(tn,"CODE",{});var jm=n(Lr);qf=r(jm,"modeling_bert.py"),jm.forEach(o),Rf=r(tn,` for an example.`),tn.forEach(o),Wf=h(T),F=l(T,"LI",{});var je=n(F);Ff=r(je,"Due to the rapidly growing repository, it is important to make sure that no files that would significantly weigh down the repository are added. This includes images, videos and other non-text files. We prefer to leverage a hf.co hosted "),Dr=l(je,"CODE",{});var Gm=n(Dr);Hf=r(Gm,"dataset"),Gm.forEach(o),jf=r(je,` like the ones hosted on `),jt=l(je,"A",{href:!0,rel:!0});var Mm=n(jt);Nr=l(Mm,"CODE",{});var Um=n(Nr);Gf=r(Um,"hf-internal-testing"),Um.forEach(o),Mm.forEach(o),Mf=r(je,` in which to place these files and reference them by URL. We recommend putting them in the following dataset: `),Gt=l(je,"A",{href:!0,rel:!0});var Ym=n(Gt);Uf=r(Ym,"huggingface/documentation-images"),Ym.forEach(o),Yf=r(je,`. If an external contribution, feel free to add the images to your PR and ask a Hugging Face member to migrate your images to this dataset.`),je.forEach(o),T.forEach(o),ol=h(t),Mt=l(t,"P",{});var Ic=n(Mt);Bf=r(Ic,"See more about the checks run on a pull request in our "),To=l(Ic,"A",{href:!0});var Bm=n(To);zf=r(Bm,"PR guide"),Bm.forEach(o),Ic.forEach(o),sl=h(t),me=l(t,"H3",{class:!0});var on=n(me);Ce=l(on,"A",{id:!0,class:!0,href:!0});var zm=n(Ce);qr=l(zm,"SPAN",{});var Jm=n(qr);y(Ut.$$.fragment,Jm),Jm.forEach(o),zm.forEach(o),Jf=h(on),Rr=l(on,"SPAN",{});var Km=n(Rr);Kf=r(Km,"Tests"),Km.forEach(o),on.forEach(o),rl=h(t),B=l(t,"P",{});var Bo=n(B);Qf=r(Bo,`An extensive test suite is included to test the library behavior and several examples. Library tests can be found in the `),Yt=l(Bo,"A",{href:!0,rel:!0});var Qm=n(Yt);Zf=r(Qm,"tests folder"),Qm.forEach(o),Vf=r(Bo,` and examples tests in the `),Bt=l(Bo,"A",{href:!0,rel:!0});var Zm=n(Bt);Xf=r(Zm,"examples folder"),Zm.forEach(o),ed=r(Bo,"."),Bo.forEach(o),al=h(t),D=l(t,"P",{});var Ge=n(D);td=r(Ge,"We like "),Wr=l(Ge,"CODE",{});var Vm=n(Wr);od=r(Vm,"pytest"),Vm.forEach(o),sd=r(Ge," and "),Fr=l(Ge,"CODE",{});var Xm=n(Fr);rd=r(Xm,"pytest-xdist"),Xm.forEach(o),ad=r(Ge,` because it\u2019s faster. 
From the root of the repository, here\u2019s how to run tests with `),Hr=l(Ge,"CODE",{});var ey=n(Hr);ld=r(ey,"pytest"),ey.forEach(o),nd=r(Ge," for the library:"),Ge.forEach(o),ll=h(t),y(zt.$$.fragment,t),nl=h(t),So=l(t,"P",{});var ty=n(So);id=r(ty,"and for the examples:"),ty.forEach(o),il=h(t),y(Jt.$$.fragment,t),hl=h(t),N=l(t,"P",{});var Me=n(N);hd=r(Me,"In fact, that\u2019s how "),jr=l(Me,"CODE",{});var oy=n(jr);ud=r(oy,"make test"),oy.forEach(o),fd=r(Me," and "),Gr=l(Me,"CODE",{});var sy=n(Gr);dd=r(sy,"make test-examples"),sy.forEach(o),cd=r(Me," are implemented (sans the "),Mr=l(Me,"CODE",{});var ry=n(Mr);pd=r(ry,"pip install"),ry.forEach(o),md=r(Me," line)!"),Me.forEach(o),ul=h(t),Oo=l(t,"P",{});var ay=n(Oo);yd=r(ay,`You can specify a smaller set of tests in order to test only the feature you\u2019re working on.`),ay.forEach(o),fl=h(t),z=l(t,"P",{});var zo=n(z);gd=r(zo,"By default, slow tests are skipped. Set the "),Ur=l(zo,"CODE",{});var ly=n(Ur);vd=r(ly,"RUN_SLOW"),ly.forEach(o),wd=r(zo,` environment variable to `),Yr=l(zo,"CODE",{});var ny=n(Yr);bd=r(ny,"yes"),ny.forEach(o),$d=r(zo,` to run them. This will download many gigabytes of models \u2014 make sure you have enough disk space and a good Internet connection, or a lot of patience!`),zo.forEach(o),dl=h(t),y(Kt.$$.fragment,t),cl=h(t),J=l(t,"P",{});var Jo=n(J);_d=r(Jo,"Likewise, set the "),Br=l(Jo,"CODE",{});var iy=n(Br);kd=r(iy,"RUN_CUSTOM_TOKENIZERS"),iy.forEach(o),Ed=r(Jo," environment variable to "),zr=l(Jo,"CODE",{});var hy=n(zr);Pd=r(hy,"yes"),hy.forEach(o),Id=r(Jo,` to run tests for custom tokenizers, which don\u2019t run by default either.`),Jo.forEach(o),pl=h(t),K=l(t,"P",{});var Ko=n(K);Td=r(Ko,"\u{1F917} Transformers uses "),Jr=l(Ko,"CODE",{});var uy=n(Jr);Sd=r(uy,"pytest"),uy.forEach(o),Od=r(Ko,` as a test runner only. It doesn\u2019t use any `),Kr=l(Ko,"CODE",{});var fy=n(Kr);Cd=r(fy,"pytest"),fy.forEach(o),xd=r(Ko,"-specific features in the test suite itself."),Ko.forEach(o),ml=h(t),Q=l(t,"P",{});var Qo=n(Q);Ad=r(Qo,"This means "),Qr=l(Qo,"CODE",{});var dy=n(Qr);Ld=r(dy,"unittest"),dy.forEach(o),Dd=r(Qo,` is fully supported. Here\u2019s how to run tests with `),Zr=l(Qo,"CODE",{});var cy=n(Zr);Nd=r(cy,"unittest"),cy.forEach(o),qd=r(Qo,":"),Qo.forEach(o),yl=h(t),y(Qt.$$.fragment,t),gl=h(t),ye=l(t,"H3",{class:!0});var sn=n(ye);xe=l(sn,"A",{id:!0,class:!0,href:!0});var py=n(xe);Vr=l(py,"SPAN",{});var my=n(Vr);y(Zt.$$.fragment,my),my.forEach(o),py.forEach(o),Rd=h(sn),Xr=l(sn,"SPAN",{});var yy=n(Xr);Wd=r(yy,"Style guide"),yy.forEach(o),sn.forEach(o),vl=h(t),Z=l(t,"P",{});var Zo=n(Z);Fd=r(Zo,"For documentation strings, \u{1F917} Transformers follows the "),Vt=l(Zo,"A",{href:!0,rel:!0});var gy=n(Vt);Hd=r(gy,"google style"),gy.forEach(o),jd=r(Zo,`. 
Check our `),Xt=l(Zo,"A",{href:!0,rel:!0});var vy=n(Xt);Gd=r(vy,"documentation writing guide"),vy.forEach(o),Md=r(Zo,` for more information.`),Zo.forEach(o),wl=h(t),ge=l(t,"H4",{class:!0});var rn=n(ge);Ae=l(rn,"A",{id:!0,class:!0,href:!0});var wy=n(Ae);ea=l(wy,"SPAN",{});var by=n(ea);y(eo.$$.fragment,by),by.forEach(o),wy.forEach(o),Ud=h(rn),ta=l(rn,"SPAN",{});var $y=n(ta);Yd=r($y,"This guide was heavily inspired by the awesome [scikit-learn guide to contributing](https://github.com/scikit-learn/scikit-learn/blob/master/CONTRIBUTING.md)"),$y.forEach(o),rn.forEach(o),bl=h(t),ve=l(t,"H3",{class:!0});var an=n(ve);Le=l(an,"A",{id:!0,class:!0,href:!0});var _y=n(Le);oa=l(_y,"SPAN",{});var ky=n(oa);y(to.$$.fragment,ky),ky.forEach(o),_y.forEach(o),Bd=h(an),sa=l(an,"SPAN",{});var Ey=n(sa);zd=r(Ey,"Develop on Windows"),Ey.forEach(o),an.forEach(o),$l=h(t),V=l(t,"P",{});var Vo=n(V);Jd=r(Vo,"On windows, you need to configure git to transform Windows "),ra=l(Vo,"CODE",{});var Py=n(ra);Kd=r(Py,"CRLF"),Py.forEach(o),Qd=r(Vo," line endings to Linux "),aa=l(Vo,"CODE",{});var Iy=n(aa);Zd=r(Iy,"LF"),Iy.forEach(o),Vd=r(Vo," line endings:"),Vo.forEach(o),_l=h(t),Co=l(t,"P",{});var Ty=n(Co);la=l(Ty,"CODE",{});var Sy=n(la);Xd=r(Sy,"git config core.autocrlf input"),Sy.forEach(o),Ty.forEach(o),kl=h(t),xo=l(t,"P",{});var Oy=n(xo);ec=r(Oy,"One way one can run the make command on Window is to pass by MSYS2:"),Oy.forEach(o),El=h(t),q=l(t,"OL",{});var Ue=n(q);Ao=l(Ue,"LI",{});var Tc=n(Ao);oo=l(Tc,"A",{href:!0,rel:!0});var Cy=n(oo);tc=r(Cy,"Download MSYS2"),Cy.forEach(o),oc=r(Tc,", we assume to have it installed in C:\\msys64"),Tc.forEach(o),sc=h(Ue),na=l(Ue,"LI",{});var xy=n(na);rc=r(xy,"Open the command line C:\\msys64\\msys2.exe (it should be available from the start menu)"),xy.forEach(o),ac=h(Ue),De=l(Ue,"LI",{});var ga=n(De);lc=r(ga,"Run in the shell: "),ia=l(ga,"CODE",{});var Ay=n(ia);nc=r(Ay,"pacman -Syu"),Ay.forEach(o),ic=r(ga," and install make with "),ha=l(ga,"CODE",{});var Ly=n(ha);hc=r(Ly,"pacman -S make"),Ly.forEach(o),ga.forEach(o),uc=h(Ue),so=l(Ue,"LI",{});var ln=n(so);fc=r(ln,"Add "),ua=l(ln,"CODE",{});var Dy=n(ua);dc=r(Dy,"C:\\msys64\\usr\\bin"),Dy.forEach(o),cc=r(ln," to your PATH environment variable."),ln.forEach(o),Ue.forEach(o),Pl=h(t),Ne=l(t,"P",{});var nn=n(Ne);pc=r(nn,"You can now use "),fa=l(nn,"CODE",{});var Ny=n(fa);mc=r(Ny,"make"),Ny.forEach(o),yc=r(nn," from any terminal (Powershell, cmd.exe, etc) \u{1F389}"),nn.forEach(o),Il=h(t),we=l(t,"H3",{class:!0});var hn=n(we);qe=l(hn,"A",{id:!0,class:!0,href:!0});var qy=n(qe);da=l(qy,"SPAN",{});var Ry=n(da);y(ro.$$.fragment,Ry),Ry.forEach(o),qy.forEach(o),gc=h(hn),ca=l(hn,"SPAN",{});var Wy=n(ca);vc=r(Wy,"Syncing forked master with upstream (HuggingFace) master"),Wy.forEach(o),hn.forEach(o),Tl=h(t),Lo=l(t,"P",{});var Fy=n(Lo);wc=r(Fy,`To avoid pinging the upstream repository which adds reference notes to each upstream PR and sends unnecessary notifications to the developers involved in these PRs, when syncing the master branch of a forked repository, please, follow these steps:`),Fy.forEach(o),Sl=h(t),Re=l(t,"OL",{});var un=n(Re);pa=l(un,"LI",{});var Hy=n(pa);bc=r(Hy,"When possible, avoid syncing with the upstream using a branch and PR on the forked repository. 
Instead merge directly into the forked master."),Hy.forEach(o),$c=h(un),ma=l(un,"LI",{});var jy=n(ma);_c=r(jy,"If a PR is absolutely necessary, use the following steps after checking out your branch:"),jy.forEach(o),un.forEach(o),Ol=h(t),y(ao.$$.fragment,t),this.h()},h(){d(H,"name","hf:doc:metadata"),d(H,"content",JSON.stringify(Jy)),d(W,"id","how-to-contribute-to-transformers"),d(W,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(W,"href","#how-to-contribute-to-transformers"),d(x,"class","relative group"),d(Be,"href","https://github.com/huggingface/transformers/blob/master/CODE_OF_CONDUCT.md"),d(Be,"rel","nofollow"),d($e,"id","you-can-contribute-in-so-many-ways"),d($e,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d($e,"href","#you-can-contribute-in-so-many-ways"),d(te,"class","relative group"),d(Je,"href","https://github.com/huggingface/transformers/contribute"),d(Je,"rel","nofollow"),d(_e,"id","submitting-a-new-issue-or-feature-request"),d(_e,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(_e,"href","#submitting-a-new-issue-or-feature-request"),d(oe,"class","relative group"),d(ke,"id","did-you-find-a-bug"),d(ke,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(ke,"href","#did-you-find-a-bug"),d(se,"class","relative group"),d(Pe,"id","do-you-want-to-implement-a-new-model"),d(Pe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(Pe,"href","#do-you-want-to-implement-a-new-model"),d(re,"class","relative group"),d(tt,"href","https://github.com/huggingface/transformers/tree/master/templates"),d(tt,"rel","nofollow"),d(Ie,"id","do-you-want-a-new-feature-that-is-not-a-model"),d(Ie,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(Ie,"href","#do-you-want-a-new-feature-that-is-not-a-model"),d(ae,"class","relative group"),d(O,"start","2"),d(at,"href","https://github.com/huggingface/transformers/tree/master/templates"),d(at,"rel","nofollow"),d(Te,"id","start-contributing-pull-requests"),d(Te,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(Te,"href","#start-contributing-pull-requests"),d(le,"class","relative group"),d(nt,"href","https://git-scm.com/book/en/v2"),d(nt,"rel","nofollow"),d(ht,"href","https://github.com/huggingface/transformers"),d(ht,"rel","nofollow"),d(vt,"href","https://huggingface.co/docs/transformers/testing"),d(vt,"rel","nofollow"),d(Et,"href","https://huggingface.co/docs/transformers/pr_checks"),d(Et,"rel","nofollow"),d(At,"href","https://chris.beams.io/posts/git-commit/"),d(At,"rel","nofollow"),d(Oe,"id","checklist"),d(Oe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full"),d(Oe,"href","#checklist"),d(de,"class","relative group"),d(jt,"href","https://huggingface.co/hf-internal-testing"),d(jt,"rel","nofollow"),d(Gt,"href","https://huggingface.co/datasets/huggingface/documentation-images"),d(Gt,"rel","nofollow"),d(To,"href","pr_checks"),d(Ce,"id","tests"),d(Ce,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(Ce,"href","#tests"),d(me,"class","relative group"),d(Yt,"href","https://github.com/huggingface/transformers/tree/master/tests"),d(Yt,"rel","nofollow"),d(Bt,"href","https://github.com/huggingface/transformers/tree/master/examples"),d(Bt,"rel","nofollow"),d(xe,"id","style-guide"),d(xe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(xe,"href","#style-guide"),d(ye,"class","relative group"),d(Vt,"href","https://google.github.io/styleguide/pyguide.html"),d(Vt,"rel","nofollow"),d(Xt,"href","https://github.com/huggingface/transformers/tree/master/docs#writing-documentation---specification"),d(Xt,"rel","nofollow"),d(Ae,"id","this-guide-was-heavily-inspired-by-the-awesome-scikitlearn-guide-to-contributinghttpsgithubcomscikitlearnscikitlearnblobmastercontributingmd"),d(Ae,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(Ae,"href","#this-guide-was-heavily-inspired-by-the-awesome-scikitlearn-guide-to-contributinghttpsgithubcomscikitlearnscikitlearnblobmastercontributingmd"),d(ge,"class","relative group"),d(Le,"id","develop-on-windows"),d(Le,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(Le,"href","#develop-on-windows"),d(ve,"class","relative group"),d(oo,"href","https://www.msys2.org/"),d(oo,"rel","nofollow"),d(qe,"id","syncing-forked-master-with-upstream-huggingface-master"),d(qe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(qe,"href","#syncing-forked-master-with-upstream-huggingface-master"),d(we,"class","relative 
group")},m(t,u){e(document.head,H),f(t,lo,u),f(t,x,u),e(x,W),e(W,Xo),g(Ye,Xo,null),e(x,dn),e(x,es),e(es,cn),f(t,va,u),f(t,no,u),e(no,pn),f(t,wa,u),f(t,io,u),e(io,mn),f(t,ba,u),f(t,be,u),e(be,yn),e(be,Be),e(Be,gn),e(be,vn),f(t,$a,u),f(t,te,u),e(te,$e),e($e,ts),g(ze,ts,null),e(te,wn),e(te,os),e(os,bn),f(t,_a,u),f(t,ho,u),e(ho,$n),f(t,ka,u),f(t,A,u),e(A,ss),e(ss,_n),e(A,kn),e(A,rs),e(rs,En),e(A,Pn),e(A,as),e(as,In),e(A,Tn),e(A,ls),e(ls,Sn),f(t,Ea,u),f(t,L,u),e(L,On),e(L,Je),e(Je,Cn),e(L,xn),e(L,ns),e(ns,An),e(L,Ln),e(L,is),e(is,Dn),e(L,Nn),f(t,Pa,u),f(t,uo,u),e(uo,hs),e(hs,qn),f(t,Ia,u),f(t,oe,u),e(oe,_e),e(_e,us),g(Ke,us,null),e(oe,Rn),e(oe,fs),e(fs,Wn),f(t,Ta,u),f(t,fo,u),e(fo,Fn),f(t,Sa,u),f(t,se,u),e(se,ke),e(ke,ds),g(Qe,ds,null),e(se,Hn),e(se,cs),e(cs,jn),f(t,Oa,u),f(t,co,u),e(co,Gn),f(t,Ca,u),f(t,Ee,u),e(Ee,Mn),e(Ee,ps),e(ps,Un),e(Ee,Yn),f(t,xa,u),f(t,po,u),e(po,Bn),f(t,Aa,u),f(t,j,u),e(j,S),e(S,zn),e(S,ms),e(ms,Jn),e(S,Kn),e(S,ys),e(ys,Qn),e(S,Zn),e(S,gs),e(gs,Vn),e(S,Xn),e(S,vs),e(vs,ei),e(S,ti),e(j,oi),e(j,ws),e(ws,si),e(j,ri),e(j,Ze),e(Ze,ai),e(Ze,bs),e(bs,li),e(Ze,ni),f(t,La,u),f(t,mo,u),e(mo,ii),f(t,Da,u),g(Ve,t,u),f(t,Na,u),f(t,yo,u),e(yo,hi),f(t,qa,u),g(Xe,t,u),f(t,Ra,u),f(t,re,u),e(re,Pe),e(Pe,$s),g(et,$s,null),e(re,ui),e(re,_s),e(_s,fi),f(t,Wa,u),f(t,go,u),e(go,di),f(t,Fa,u),f(t,G,u),e(G,ks),e(ks,ci),e(G,pi),e(G,Es),e(Es,mi),e(G,yi),e(G,Ps),e(Ps,gi),f(t,Ha,u),f(t,vo,u),e(vo,vi),f(t,ja,u),f(t,M,u),e(M,wi),e(M,Is),e(Is,bi),e(M,$i),e(M,tt),e(tt,Ts),e(Ts,_i),e(M,ki),f(t,Ga,u),f(t,ae,u),e(ae,Ie),e(Ie,Ss),g(ot,Ss,null),e(ae,Ei),e(ae,Os),e(Os,Pi),f(t,Ma,u),f(t,wo,u),e(wo,Ii),f(t,Ua,u),f(t,bo,u),e(bo,Cs),e(Cs,Ti),f(t,Ya,u),f(t,U,u),e(U,xs),e(xs,Si),e(U,Oi),e(U,As),e(As,Ci),e(U,xi),e(U,Ls),e(Ls,Ai),f(t,Ba,u),f(t,O,u),e(O,st),e(st,Li),e(st,Ds),e(Ds,Di),e(st,Ni),e(O,qi),e(O,rt),e(rt,Ri),e(rt,Ns),e(Ns,Wi),e(rt,Fi),e(O,Hi),e(O,qs),e(qs,ji),e(O,Gi),e(O,Rs),e(Rs,Mi),f(t,za,u),f(t,$o,u),e($o,Ui),f(t,Ja,u),f(t,Y,u),e(Y,Yi),e(Y,Ws),e(Ws,Bi),e(Y,zi),e(Y,at),e(at,Fs),e(Fs,Ji),e(Y,Ki),f(t,Ka,u),f(t,le,u),e(le,Te),e(Te,Hs),g(lt,Hs,null),e(le,Qi),e(le,js),e(js,Zi),f(t,Qa,u),f(t,_o,u),e(_o,Vi),f(t,Za,u),f(t,P,u),e(P,Xi),e(P,Gs),e(Gs,eh),e(P,th),e(P,Ms),e(Ms,oh),e(P,sh),e(P,Us),e(Us,rh),e(P,ah),e(P,nt),e(nt,lh),e(P,nh),f(t,Va,u),f(t,ko,u),e(ko,ih),f(t,Xa,u),f(t,_,u),e(_,Ys),e(Ys,it),e(it,hh),e(it,ht),e(ht,uh),e(it,fh),e(_,dh),e(_,ut),e(ut,Bs),e(Bs,ch),e(ut,ph),g(ft,ut,null),e(_,mh),e(_,ne),e(ne,zs),e(zs,yh),e(ne,gh),g(dt,ne,null),e(ne,vh),e(ne,Se),e(Se,Js),e(Js,wh),e(Se,bh),e(Se,Ks),e(Ks,$h),e(Se,_h),e(_,kh),e(_,E),e(E,Qs),e(Qs,Eh),e(E,Ph),g(ct,E,null),e(E,Ih),e(E,ie),e(ie,Th),e(ie,Zs),e(Zs,Sh),e(ie,Oh),e(ie,Vs),e(Vs,Ch),e(ie,xh),e(E,Ah),e(E,pt),e(pt,Lh),e(pt,Xs),e(Xs,Dh),e(pt,Nh),e(E,qh),g(mt,E,null),e(E,Rh),e(E,he),e(he,Wh),e(he,er),e(er,Fh),e(he,Hh),e(he,tr),e(tr,jh),e(he,Gh),e(_,Mh),e(_,c),e(c,or),e(or,Uh),e(c,Yh),e(c,sr),e(sr,Bh),e(c,zh),g(yt,c,null),e(c,Jh),e(c,rr),e(rr,Kh),e(c,Qh),g(gt,c,null),e(c,Zh),e(c,Eo),e(Eo,Vh),e(Eo,vt),e(vt,Xh),e(c,eu),e(c,ue),e(ue,tu),e(ue,ar),e(ar,ou),e(ue,su),e(ue,lr),e(lr,ru),e(ue,au),e(c,lu),g(wt,c,null),e(c,nu),e(c,nr),e(nr,iu),e(c,hu),e(c,ir),e(ir,uu),e(c,fu),g(bt,c,null),e(c,du),e(c,$t),e($t,cu),e($t,hr),e(hr,pu),e($t,mu),e(c,yu),g(_t,c,null),e(c,gu),e(c,ur),e(ur,vu),e(c,wu),g(kt,c,null),e(c,bu),e(c,Po),e(Po,$u),e(Po,Et),e(Et,_u),e(c,ku),e(c,Pt),e(Pt,Eu),e(Pt,fr),e(fr,Pu),e(Pt,Iu),e(c,Tu),g(It,c,null),e(c,Su),e(c,dr),e(dr,Ou),e(c,Cu),g(Tt,c,null),e(c,xu),e(c,cr),e(cr,Au),e(c,Lu),g(St,c,null),e(c,Du),e(c,Ot),e(Ot,Nu),e(Ot,pr),e(pr,qu),e(Ot,Ru),e(c,Wu),e(c,fe),e(fe
,Fu),e(fe,mr),e(mr,Hu),e(fe,ju),e(fe,yr),e(yr,Gu),e(fe,Mu),e(c,Uu),g(Ct,c,null),e(c,Yu),e(c,xt),e(xt,Bu),e(xt,At),e(At,zu),e(xt,Ju),e(c,Ku),e(c,gr),e(gr,Qu),e(c,Zu),g(Lt,c,null),e(c,Vu),e(c,vr),e(vr,Xu),e(c,ef),g(Dt,c,null),e(_,tf),e(_,wr),e(wr,Nt),e(Nt,of),e(Nt,br),e(br,sf),e(Nt,rf),e(_,af),e(_,$r),e($r,_r),e(_r,lf),f(t,el,u),f(t,de,u),e(de,Oe),e(Oe,kr),g(qt,kr,null),e(de,nf),e(de,Er),e(Er,hf),f(t,tl,u),f(t,k,u),e(k,Pr),e(Pr,uf),e(k,ff),e(k,Ir),e(Ir,df),e(k,cf),e(k,Rt),e(Rt,pf),e(Rt,Tr),e(Tr,mf),e(Rt,yf),e(k,gf),e(k,Sr),e(Sr,vf),e(k,wf),e(k,Io),e(Io,bf),e(Io,ce),e(ce,Wt),e(Wt,$f),e(Wt,Or),e(Or,_f),e(Wt,kf),e(ce,Ef),e(ce,pe),e(pe,Pf),e(pe,Cr),e(Cr,If),e(pe,Tf),e(pe,xr),e(xr,Sf),e(pe,Of),e(ce,Cf),e(ce,Ft),e(Ft,xf),e(Ft,Ar),e(Ar,Af),e(Ft,Lf),e(k,Df),e(k,Ht),e(Ht,Nf),e(Ht,Lr),e(Lr,qf),e(Ht,Rf),e(k,Wf),e(k,F),e(F,Ff),e(F,Dr),e(Dr,Hf),e(F,jf),e(F,jt),e(jt,Nr),e(Nr,Gf),e(F,Mf),e(F,Gt),e(Gt,Uf),e(F,Yf),f(t,ol,u),f(t,Mt,u),e(Mt,Bf),e(Mt,To),e(To,zf),f(t,sl,u),f(t,me,u),e(me,Ce),e(Ce,qr),g(Ut,qr,null),e(me,Jf),e(me,Rr),e(Rr,Kf),f(t,rl,u),f(t,B,u),e(B,Qf),e(B,Yt),e(Yt,Zf),e(B,Vf),e(B,Bt),e(Bt,Xf),e(B,ed),f(t,al,u),f(t,D,u),e(D,td),e(D,Wr),e(Wr,od),e(D,sd),e(D,Fr),e(Fr,rd),e(D,ad),e(D,Hr),e(Hr,ld),e(D,nd),f(t,ll,u),g(zt,t,u),f(t,nl,u),f(t,So,u),e(So,id),f(t,il,u),g(Jt,t,u),f(t,hl,u),f(t,N,u),e(N,hd),e(N,jr),e(jr,ud),e(N,fd),e(N,Gr),e(Gr,dd),e(N,cd),e(N,Mr),e(Mr,pd),e(N,md),f(t,ul,u),f(t,Oo,u),e(Oo,yd),f(t,fl,u),f(t,z,u),e(z,gd),e(z,Ur),e(Ur,vd),e(z,wd),e(z,Yr),e(Yr,bd),e(z,$d),f(t,dl,u),g(Kt,t,u),f(t,cl,u),f(t,J,u),e(J,_d),e(J,Br),e(Br,kd),e(J,Ed),e(J,zr),e(zr,Pd),e(J,Id),f(t,pl,u),f(t,K,u),e(K,Td),e(K,Jr),e(Jr,Sd),e(K,Od),e(K,Kr),e(Kr,Cd),e(K,xd),f(t,ml,u),f(t,Q,u),e(Q,Ad),e(Q,Qr),e(Qr,Ld),e(Q,Dd),e(Q,Zr),e(Zr,Nd),e(Q,qd),f(t,yl,u),g(Qt,t,u),f(t,gl,u),f(t,ye,u),e(ye,xe),e(xe,Vr),g(Zt,Vr,null),e(ye,Rd),e(ye,Xr),e(Xr,Wd),f(t,vl,u),f(t,Z,u),e(Z,Fd),e(Z,Vt),e(Vt,Hd),e(Z,jd),e(Z,Xt),e(Xt,Gd),e(Z,Md),f(t,wl,u),f(t,ge,u),e(ge,Ae),e(Ae,ea),g(eo,ea,null),e(ge,Ud),e(ge,ta),e(ta,Yd),f(t,bl,u),f(t,ve,u),e(ve,Le),e(Le,oa),g(to,oa,null),e(ve,Bd),e(ve,sa),e(sa,zd),f(t,$l,u),f(t,V,u),e(V,Jd),e(V,ra),e(ra,Kd),e(V,Qd),e(V,aa),e(aa,Zd),e(V,Vd),f(t,_l,u),f(t,Co,u),e(Co,la),e(la,Xd),f(t,kl,u),f(t,xo,u),e(xo,ec),f(t,El,u),f(t,q,u),e(q,Ao),e(Ao,oo),e(oo,tc),e(Ao,oc),e(q,sc),e(q,na),e(na,rc),e(q,ac),e(q,De),e(De,lc),e(De,ia),e(ia,nc),e(De,ic),e(De,ha),e(ha,hc),e(q,uc),e(q,so),e(so,fc),e(so,ua),e(ua,dc),e(so,cc),f(t,Pl,u),f(t,Ne,u),e(Ne,pc),e(Ne,fa),e(fa,mc),e(Ne,yc),f(t,Il,u),f(t,we,u),e(we,qe),e(qe,da),g(ro,da,null),e(we,gc),e(we,ca),e(ca,vc),f(t,Tl,u),f(t,Lo,u),e(Lo,wc),f(t,Sl,u),f(t,Re,u),e(Re,pa),e(pa,bc),e(Re,$c),e(Re,ma),e(ma,_c),f(t,Ol,u),g(ao,t,u),Cl=!0},p:By,i(t){Cl||(v(Ye.$$.fragment,t),v(ze.$$.fragment,t),v(Ke.$$.fragment,t),v(Qe.$$.fragment,t),v(Ve.$$.fragment,t),v(Xe.$$.fragment,t),v(et.$$.fragment,t),v(ot.$$.fragment,t),v(lt.$$.fragment,t),v(ft.$$.fragment,t),v(dt.$$.fragment,t),v(ct.$$.fragment,t),v(mt.$$.fragment,t),v(yt.$$.fragment,t),v(gt.$$.fragment,t),v(wt.$$.fragment,t),v(bt.$$.fragment,t),v(_t.$$.fragment,t),v(kt.$$.fragment,t),v(It.$$.fragment,t),v(Tt.$$.fragment,t),v(St.$$.fragment,t),v(Ct.$$.fragment,t),v(Lt.$$.fragment,t),v(Dt.$$.fragment,t),v(qt.$$.fragment,t),v(Ut.$$.fragment,t),v(zt.$$.fragment,t),v(Jt.$$.fragment,t),v(Kt.$$.fragment,t),v(Qt.$$.fragment,t),v(Zt.$$.fragment,t),v(eo.$$.fragment,t),v(to.$$.fragment,t),v(ro.$$.fragment,t),v(ao.$$.fragment,t),Cl=!0)},o(t){w(Ye.$$.fragment,t),w(ze.$$.fragment,t),w(Ke.$$.fragment,t),w(Qe.$$.fragment,t),w(Ve.$$.fragment,t),w(Xe.$$.fragment,t),w(et.$$.fr
agment,t),w(ot.$$.fragment,t),w(lt.$$.fragment,t),w(ft.$$.fragment,t),w(dt.$$.fragment,t),w(ct.$$.fragment,t),w(mt.$$.fragment,t),w(yt.$$.fragment,t),w(gt.$$.fragment,t),w(wt.$$.fragment,t),w(bt.$$.fragment,t),w(_t.$$.fragment,t),w(kt.$$.fragment,t),w(It.$$.fragment,t),w(Tt.$$.fragment,t),w(St.$$.fragment,t),w(Ct.$$.fragment,t),w(Lt.$$.fragment,t),w(Dt.$$.fragment,t),w(qt.$$.fragment,t),w(Ut.$$.fragment,t),w(zt.$$.fragment,t),w(Jt.$$.fragment,t),w(Kt.$$.fragment,t),w(Qt.$$.fragment,t),w(Zt.$$.fragment,t),w(eo.$$.fragment,t),w(to.$$.fragment,t),w(ro.$$.fragment,t),w(ao.$$.fragment,t),Cl=!1},d(t){o(H),t&&o(lo),t&&o(x),b(Ye),t&&o(va),t&&o(no),t&&o(wa),t&&o(io),t&&o(ba),t&&o(be),t&&o($a),t&&o(te),b(ze),t&&o(_a),t&&o(ho),t&&o(ka),t&&o(A),t&&o(Ea),t&&o(L),t&&o(Pa),t&&o(uo),t&&o(Ia),t&&o(oe),b(Ke),t&&o(Ta),t&&o(fo),t&&o(Sa),t&&o(se),b(Qe),t&&o(Oa),t&&o(co),t&&o(Ca),t&&o(Ee),t&&o(xa),t&&o(po),t&&o(Aa),t&&o(j),t&&o(La),t&&o(mo),t&&o(Da),b(Ve,t),t&&o(Na),t&&o(yo),t&&o(qa),b(Xe,t),t&&o(Ra),t&&o(re),b(et),t&&o(Wa),t&&o(go),t&&o(Fa),t&&o(G),t&&o(Ha),t&&o(vo),t&&o(ja),t&&o(M),t&&o(Ga),t&&o(ae),b(ot),t&&o(Ma),t&&o(wo),t&&o(Ua),t&&o(bo),t&&o(Ya),t&&o(U),t&&o(Ba),t&&o(O),t&&o(za),t&&o($o),t&&o(Ja),t&&o(Y),t&&o(Ka),t&&o(le),b(lt),t&&o(Qa),t&&o(_o),t&&o(Za),t&&o(P),t&&o(Va),t&&o(ko),t&&o(Xa),t&&o(_),b(ft),b(dt),b(ct),b(mt),b(yt),b(gt),b(wt),b(bt),b(_t),b(kt),b(It),b(Tt),b(St),b(Ct),b(Lt),b(Dt),t&&o(el),t&&o(de),b(qt),t&&o(tl),t&&o(k),t&&o(ol),t&&o(Mt),t&&o(sl),t&&o(me),b(Ut),t&&o(rl),t&&o(B),t&&o(al),t&&o(D),t&&o(ll),b(zt,t),t&&o(nl),t&&o(So),t&&o(il),b(Jt,t),t&&o(hl),t&&o(N),t&&o(ul),t&&o(Oo),t&&o(fl),t&&o(z),t&&o(dl),b(Kt,t),t&&o(cl),t&&o(J),t&&o(pl),t&&o(K),t&&o(ml),t&&o(Q),t&&o(yl),b(Qt,t),t&&o(gl),t&&o(ye),b(Zt),t&&o(vl),t&&o(Z),t&&o(wl),t&&o(ge),b(eo),t&&o(bl),t&&o(ve),b(to),t&&o($l),t&&o(V),t&&o(_l),t&&o(Co),t&&o(kl),t&&o(xo),t&&o(El),t&&o(q),t&&o(Pl),t&&o(Ne),t&&o(Il),t&&o(we),b(ro),t&&o(Tl),t&&o(Lo),t&&o(Sl),t&&o(Re),t&&o(Ol),b(ao,t)}}}const Jy={local:"how-to-contribute-to-transformers",sections:[{local:"you-can-contribute-in-so-many-ways",title:"You can contribute in so many ways!"},{local:"submitting-a-new-issue-or-feature-request",sections:[{local:"did-you-find-a-bug",title:"Did you find a bug?"},{local:"do-you-want-to-implement-a-new-model",title:"Do you want to implement a new model?"},{local:"do-you-want-a-new-feature-that-is-not-a-model",title:"Do you want a new feature (that is not a model)?"}],title:"Submitting a new issue or feature request"},{local:"start-contributing-pull-requests",sections:[{local:"checklist",title:"Checklist"},{local:"tests",title:"Tests"},{local:"style-guide",sections:[{local:"this-guide-was-heavily-inspired-by-the-awesome-scikitlearn-guide-to-contributinghttpsgithubcomscikitlearnscikitlearnblobmastercontributingmd",title:"This guide was heavily inspired by the awesome [scikit-learn guide to contributing](https://github.com/scikit-learn/scikit-learn/blob/master/CONTRIBUTING.md)"}],title:"Style guide"},{local:"develop-on-windows",title:"Develop on Windows"},{local:"syncing-forked-master-with-upstream-huggingface-master",title:"Syncing forked master with upstream (HuggingFace) master"}],title:"Start contributing! (Pull Requests)"}],title:"How to contribute to transformers?"};function Ky(fn,H,lo){let{fw:x}=H;return fn.$$set=W=>{"fw"in W&&lo(0,x=W.fw)},[x]}class eg extends Gy{constructor(H){super();My(this,H,Ky,zy,Uy,{fw:0})}}export{eg as default,Jy as metadata};
245
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages/accelerate.mdx-8837c56a.js
import{S as Ba,i as La,s as Ya,e as o,k as h,w as f,t as c,M as Ja,c as l,d as t,m as d,a as s,x as u,h as p,b as n,F as a,g as i,y as m,L as Ka,q as v,o as _,B as g}from"../chunks/vendor-4833417e.js";import{I as ie}from"../chunks/IconCopyLink-4b81c553.js";import{C as I}from"../chunks/CodeBlock-6a3d1b46.js";import"../chunks/CopyButton-dacfbfaf.js";function Qa(wt){let b,ne,w,$,ve,M,$t,_e,bt,Fe,S,yt,U,kt,At,He,A,x,ge,W,Et,we,jt,Oe,ce,Pt,Ge,B,Ie,y,zt,L,$e,Tt,St,be,xt,Ct,Me,Y,Ue,E,C,ye,J,qt,ke,Nt,We,q,Dt,K,Ae,Ft,Ht,Be,Q,Le,j,N,Ee,R,Ot,je,Gt,Ye,k,It,Pe,Mt,Ut,V,ze,Wt,Bt,Je,X,Ke,pe,Lt,Qe,Z,Re,P,D,Te,ee,Yt,Se,Jt,Ve,he,Kt,Xe,z,F,xe,te,Qt,Ce,Rt,Ze,de,Vt,et,ae,tt,fe,Xt,at,re,rt,T,H,qe,oe,Zt,Ne,ea,ot,O,ta,De,aa,ra,lt,le,st,G,oa,se,la,sa,it;return M=new ie({}),W=new ie({}),B=new I({props:{code:"pip install accelerate",highlighted:"pip install accelerate"}}),Y=new I({props:{code:`from accelerate import Accelerator accelerator = Accelerator()`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> accelerate <span class="hljs-keyword">import</span> Accelerator <span class="hljs-meta">&gt;&gt;&gt; </span>accelerator = Accelerator()`}}),J=new ie({}),Q=new I({props:{code:`train_dataloader, eval_dataloader, model, optimizer = accelerator.prepare( train_dataloader, eval_dataloader, model, optimizer )`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>train_dataloader, eval_dataloader, model, optimizer = accelerator.prepare( <span class="hljs-meta">... </span> train_dataloader, eval_dataloader, model, optimizer <span class="hljs-meta">... </span>)`}}),R=new ie({}),X=new I({props:{code:`for epoch in range(num_epochs): for batch in train_dataloader: outputs = model(**batch) loss = outputs.loss accelerator.backward(loss) optimizer.step() lr_scheduler.step() optimizer.zero_grad() progress_bar.update(1)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">for</span> epoch <span class="hljs-keyword">in</span> <span class="hljs-built_in">range</span>(num_epochs): <span class="hljs-meta">... </span> <span class="hljs-keyword">for</span> batch <span class="hljs-keyword">in</span> train_dataloader: <span class="hljs-meta">... </span> outputs = model(**batch) <span class="hljs-meta">... </span> loss = outputs.loss <span class="hljs-meta">... </span> accelerator.backward(loss) <span class="hljs-meta">... </span> optimizer.step() <span class="hljs-meta">... </span> lr_scheduler.step() <span class="hljs-meta">... </span> optimizer.zero_grad() <span class="hljs-meta">... 
</span> progress_bar.update(<span class="hljs-number">1</span>)`}}),Z=new I({props:{code:`+ from accelerate import Accelerator from transformers import AdamW, AutoModelForSequenceClassification, get_scheduler + accelerator = Accelerator() model = AutoModelForSequenceClassification.from_pretrained(checkpoint, num_labels=2) optimizer = AdamW(model.parameters(), lr=3e-5) - device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu") - model.to(device) + train_dataloader, eval_dataloader, model, optimizer = accelerator.prepare( + train_dataloader, eval_dataloader, model, optimizer + ) num_epochs = 3 num_training_steps = num_epochs * len(train_dataloader) lr_scheduler = get_scheduler( "linear", optimizer=optimizer, num_warmup_steps=0, num_training_steps=num_training_steps ) progress_bar = tqdm(range(num_training_steps)) model.train() for epoch in range(num_epochs): for batch in train_dataloader: - batch = {k: v.to(device) for k, v in batch.items()} outputs = model(**batch) loss = outputs.loss - loss.backward() + accelerator.backward(loss) optimizer.step() lr_scheduler.step() optimizer.zero_grad() progress_bar.update(1)`,highlighted:`<span class="hljs-addition">+ from accelerate import Accelerator</span> from transformers import AdamW, AutoModelForSequenceClassification, get_scheduler <span class="hljs-addition">+ accelerator = Accelerator()</span> model = AutoModelForSequenceClassification.from_pretrained(checkpoint, num_labels=2) optimizer = AdamW(model.parameters(), lr=3e-5) <span class="hljs-deletion">- device = torch.device(&quot;cuda&quot;) if torch.cuda.is_available() else torch.device(&quot;cpu&quot;)</span> <span class="hljs-deletion">- model.to(device)</span> <span class="hljs-addition">+ train_dataloader, eval_dataloader, model, optimizer = accelerator.prepare(</span> <span class="hljs-addition">+ train_dataloader, eval_dataloader, model, optimizer</span> <span class="hljs-addition">+ )</span> num_epochs = 3 num_training_steps = num_epochs * len(train_dataloader) lr_scheduler = get_scheduler( &quot;linear&quot;, optimizer=optimizer, num_warmup_steps=0, num_training_steps=num_training_steps ) progress_bar = tqdm(range(num_training_steps)) model.train() for epoch in range(num_epochs): for batch in train_dataloader: <span class="hljs-deletion">- batch = {k: v.to(device) for k, v in batch.items()}</span> outputs = model(**batch) loss = outputs.loss <span class="hljs-deletion">- loss.backward()</span> <span class="hljs-addition">+ accelerator.backward(loss)</span> optimizer.step() lr_scheduler.step() optimizer.zero_grad() progress_bar.update(1)`}}),ee=new ie({}),te=new ie({}),ae=new I({props:{code:"accelerate config",highlighted:"accelerate config"}}),re=new I({props:{code:"accelerate launch train.py",highlighted:"accelerate launch train.py"}}),oe=new ie({}),le=new I({props:{code:`from accelerate import notebook_launcher notebook_launcher(training_function)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> accelerate <span class="hljs-keyword">import</span> notebook_launcher <span class="hljs-meta">&gt;&gt;&gt; </span>notebook_launcher(training_function)`}}),{c(){b=o("meta"),ne=h(),w=o("h1"),$=o("a"),ve=o("span"),f(M.$$.fragment),$t=h(),_e=o("span"),bt=c("Distributed training with \u{1F917} Accelerate"),Fe=h(),S=o("p"),yt=c("As models get bigger, parallelism has emerged as a strategy for training larger models on limited hardware and accelerating training speed by several orders of magnitude. 
At Hugging Face, we created the "),U=o("a"),kt=c("\u{1F917} Accelerate"),At=c(" library to help users easily train a \u{1F917} Transformers model on any type of distributed setup, whether it is multiple GPU\u2019s on one machine or multiple GPU\u2019s across several machines. In this tutorial, learn how to customize your native PyTorch training loop to enable training in a distributed environment."),He=h(),A=o("h2"),x=o("a"),ge=o("span"),f(W.$$.fragment),Et=h(),we=o("span"),jt=c("Setup"),Oe=h(),ce=o("p"),Pt=c("Get started by installing \u{1F917} Accelerate:"),Ge=h(),f(B.$$.fragment),Ie=h(),y=o("p"),zt=c("Then import and create an "),L=o("a"),$e=o("code"),Tt=c("Accelerator"),St=c(" object. "),be=o("code"),xt=c("Accelerator"),Ct=c(" will automatically detect your type of distributed setup and initialize all the necessary components for training. You don\u2019t need to explicitly place your model on a device."),Me=h(),f(Y.$$.fragment),Ue=h(),E=o("h2"),C=o("a"),ye=o("span"),f(J.$$.fragment),qt=h(),ke=o("span"),Nt=c("Prepare to accelerate"),We=h(),q=o("p"),Dt=c("The next step is to pass all the relevant training objects to the "),K=o("a"),Ae=o("code"),Ft=c("prepare"),Ht=c(" method. This includes your training and evaluation DataLoaders, a model and an optimizer:"),Be=h(),f(Q.$$.fragment),Le=h(),j=o("h2"),N=o("a"),Ee=o("span"),f(R.$$.fragment),Ot=h(),je=o("span"),Gt=c("Backward"),Ye=h(),k=o("p"),It=c("The last addition is to replace the typical "),Pe=o("code"),Mt=c("loss.backward()"),Ut=c(" in your training loop with \u{1F917} Accelerate\u2019s "),V=o("a"),ze=o("code"),Wt=c("backward"),Bt=c(" method:"),Je=h(),f(X.$$.fragment),Ke=h(),pe=o("p"),Lt=c("As you can see in the following code, you only need to add four additional lines of code to your training loop to enable distributed training!"),Qe=h(),f(Z.$$.fragment),Re=h(),P=o("h2"),D=o("a"),Te=o("span"),f(ee.$$.fragment),Yt=h(),Se=o("span"),Jt=c("Train"),Ve=h(),he=o("p"),Kt=c("Once you\u2019ve added the relevant lines of code, launch your training in a script or a notebook like Colaboratory."),Xe=h(),z=o("h3"),F=o("a"),xe=o("span"),f(te.$$.fragment),Qt=h(),Ce=o("span"),Rt=c("Train with a script"),Ze=h(),de=o("p"),Vt=c("If you are running your training from a script, run the following command to create and save a configuration file:"),et=h(),f(ae.$$.fragment),tt=h(),fe=o("p"),Xt=c("Then launch your training with:"),at=h(),f(re.$$.fragment),rt=h(),T=o("h3"),H=o("a"),qe=o("span"),f(oe.$$.fragment),Zt=h(),Ne=o("span"),ea=c("Train with a notebook"),ot=h(),O=o("p"),ta=c("\u{1F917} Accelerate can also run in a notebook if you\u2019re planning on using Colaboratory\u2019s TPUs. 
Wrap all the code responsible for training in a function, and pass it to "),De=o("code"),aa=c("notebook_launcher"),ra=c(":"),lt=h(),f(le.$$.fragment),st=h(),G=o("p"),oa=c("For more information about \u{1F917} Accelerate and it\u2019s rich features, refer to the "),se=o("a"),la=c("documentation"),sa=c("."),this.h()},l(e){const r=Ja('[data-svelte="svelte-1phssyn"]',document.head);b=l(r,"META",{name:!0,content:!0}),r.forEach(t),ne=d(e),w=l(e,"H1",{class:!0});var nt=s(w);$=l(nt,"A",{id:!0,class:!0,href:!0});var ia=s($);ve=l(ia,"SPAN",{});var na=s(ve);u(M.$$.fragment,na),na.forEach(t),ia.forEach(t),$t=d(nt),_e=l(nt,"SPAN",{});var ca=s(_e);bt=p(ca,"Distributed training with \u{1F917} Accelerate"),ca.forEach(t),nt.forEach(t),Fe=d(e),S=l(e,"P",{});var ct=s(S);yt=p(ct,"As models get bigger, parallelism has emerged as a strategy for training larger models on limited hardware and accelerating training speed by several orders of magnitude. At Hugging Face, we created the "),U=l(ct,"A",{href:!0,rel:!0});var pa=s(U);kt=p(pa,"\u{1F917} Accelerate"),pa.forEach(t),At=p(ct," library to help users easily train a \u{1F917} Transformers model on any type of distributed setup, whether it is multiple GPU\u2019s on one machine or multiple GPU\u2019s across several machines. In this tutorial, learn how to customize your native PyTorch training loop to enable training in a distributed environment."),ct.forEach(t),He=d(e),A=l(e,"H2",{class:!0});var pt=s(A);x=l(pt,"A",{id:!0,class:!0,href:!0});var ha=s(x);ge=l(ha,"SPAN",{});var da=s(ge);u(W.$$.fragment,da),da.forEach(t),ha.forEach(t),Et=d(pt),we=l(pt,"SPAN",{});var fa=s(we);jt=p(fa,"Setup"),fa.forEach(t),pt.forEach(t),Oe=d(e),ce=l(e,"P",{});var ua=s(ce);Pt=p(ua,"Get started by installing \u{1F917} Accelerate:"),ua.forEach(t),Ge=d(e),u(B.$$.fragment,e),Ie=d(e),y=l(e,"P",{});var ue=s(y);zt=p(ue,"Then import and create an "),L=l(ue,"A",{href:!0,rel:!0});var ma=s(L);$e=l(ma,"CODE",{});var va=s($e);Tt=p(va,"Accelerator"),va.forEach(t),ma.forEach(t),St=p(ue," object. "),be=l(ue,"CODE",{});var _a=s(be);xt=p(_a,"Accelerator"),_a.forEach(t),Ct=p(ue," will automatically detect your type of distributed setup and initialize all the necessary components for training. You don\u2019t need to explicitly place your model on a device."),ue.forEach(t),Me=d(e),u(Y.$$.fragment,e),Ue=d(e),E=l(e,"H2",{class:!0});var ht=s(E);C=l(ht,"A",{id:!0,class:!0,href:!0});var ga=s(C);ye=l(ga,"SPAN",{});var wa=s(ye);u(J.$$.fragment,wa),wa.forEach(t),ga.forEach(t),qt=d(ht),ke=l(ht,"SPAN",{});var $a=s(ke);Nt=p($a,"Prepare to accelerate"),$a.forEach(t),ht.forEach(t),We=d(e),q=l(e,"P",{});var dt=s(q);Dt=p(dt,"The next step is to pass all the relevant training objects to the "),K=l(dt,"A",{href:!0,rel:!0});var ba=s(K);Ae=l(ba,"CODE",{});var ya=s(Ae);Ft=p(ya,"prepare"),ya.forEach(t),ba.forEach(t),Ht=p(dt," method. 
This includes your training and evaluation DataLoaders, a model and an optimizer:"),dt.forEach(t),Be=d(e),u(Q.$$.fragment,e),Le=d(e),j=l(e,"H2",{class:!0});var ft=s(j);N=l(ft,"A",{id:!0,class:!0,href:!0});var ka=s(N);Ee=l(ka,"SPAN",{});var Aa=s(Ee);u(R.$$.fragment,Aa),Aa.forEach(t),ka.forEach(t),Ot=d(ft),je=l(ft,"SPAN",{});var Ea=s(je);Gt=p(Ea,"Backward"),Ea.forEach(t),ft.forEach(t),Ye=d(e),k=l(e,"P",{});var me=s(k);It=p(me,"The last addition is to replace the typical "),Pe=l(me,"CODE",{});var ja=s(Pe);Mt=p(ja,"loss.backward()"),ja.forEach(t),Ut=p(me," in your training loop with \u{1F917} Accelerate\u2019s "),V=l(me,"A",{href:!0,rel:!0});var Pa=s(V);ze=l(Pa,"CODE",{});var za=s(ze);Wt=p(za,"backward"),za.forEach(t),Pa.forEach(t),Bt=p(me," method:"),me.forEach(t),Je=d(e),u(X.$$.fragment,e),Ke=d(e),pe=l(e,"P",{});var Ta=s(pe);Lt=p(Ta,"As you can see in the following code, you only need to add four additional lines of code to your training loop to enable distributed training!"),Ta.forEach(t),Qe=d(e),u(Z.$$.fragment,e),Re=d(e),P=l(e,"H2",{class:!0});var ut=s(P);D=l(ut,"A",{id:!0,class:!0,href:!0});var Sa=s(D);Te=l(Sa,"SPAN",{});var xa=s(Te);u(ee.$$.fragment,xa),xa.forEach(t),Sa.forEach(t),Yt=d(ut),Se=l(ut,"SPAN",{});var Ca=s(Se);Jt=p(Ca,"Train"),Ca.forEach(t),ut.forEach(t),Ve=d(e),he=l(e,"P",{});var qa=s(he);Kt=p(qa,"Once you\u2019ve added the relevant lines of code, launch your training in a script or a notebook like Colaboratory."),qa.forEach(t),Xe=d(e),z=l(e,"H3",{class:!0});var mt=s(z);F=l(mt,"A",{id:!0,class:!0,href:!0});var Na=s(F);xe=l(Na,"SPAN",{});var Da=s(xe);u(te.$$.fragment,Da),Da.forEach(t),Na.forEach(t),Qt=d(mt),Ce=l(mt,"SPAN",{});var Fa=s(Ce);Rt=p(Fa,"Train with a script"),Fa.forEach(t),mt.forEach(t),Ze=d(e),de=l(e,"P",{});var Ha=s(de);Vt=p(Ha,"If you are running your training from a script, run the following command to create and save a configuration file:"),Ha.forEach(t),et=d(e),u(ae.$$.fragment,e),tt=d(e),fe=l(e,"P",{});var Oa=s(fe);Xt=p(Oa,"Then launch your training with:"),Oa.forEach(t),at=d(e),u(re.$$.fragment,e),rt=d(e),T=l(e,"H3",{class:!0});var vt=s(T);H=l(vt,"A",{id:!0,class:!0,href:!0});var Ga=s(H);qe=l(Ga,"SPAN",{});var Ia=s(qe);u(oe.$$.fragment,Ia),Ia.forEach(t),Ga.forEach(t),Zt=d(vt),Ne=l(vt,"SPAN",{});var Ma=s(Ne);ea=p(Ma,"Train with a notebook"),Ma.forEach(t),vt.forEach(t),ot=d(e),O=l(e,"P",{});var _t=s(O);ta=p(_t,"\u{1F917} Accelerate can also run in a notebook if you\u2019re planning on using Colaboratory\u2019s TPUs. 
Wrap all the code responsible for training in a function, and pass it to "),De=l(_t,"CODE",{});var Ua=s(De);aa=p(Ua,"notebook_launcher"),Ua.forEach(t),ra=p(_t,":"),_t.forEach(t),lt=d(e),u(le.$$.fragment,e),st=d(e),G=l(e,"P",{});var gt=s(G);oa=p(gt,"For more information about \u{1F917} Accelerate and it\u2019s rich features, refer to the "),se=l(gt,"A",{href:!0,rel:!0});var Wa=s(se);la=p(Wa,"documentation"),Wa.forEach(t),sa=p(gt,"."),gt.forEach(t),this.h()},h(){n(b,"name","hf:doc:metadata"),n(b,"content",JSON.stringify(Ra)),n($,"id","distributed-training-with-accelerate"),n($,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),n($,"href","#distributed-training-with-accelerate"),n(w,"class","relative group"),n(U,"href","https://huggingface.co/docs/accelerate/index.html"),n(U,"rel","nofollow"),n(x,"id","setup"),n(x,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),n(x,"href","#setup"),n(A,"class","relative group"),n(L,"href","https://huggingface.co/docs/accelerate/accelerator.html#accelerate.Accelerator"),n(L,"rel","nofollow"),n(C,"id","prepare-to-accelerate"),n(C,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),n(C,"href","#prepare-to-accelerate"),n(E,"class","relative group"),n(K,"href","https://huggingface.co/docs/accelerate/accelerator.html#accelerate.Accelerator.prepare"),n(K,"rel","nofollow"),n(N,"id","backward"),n(N,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),n(N,"href","#backward"),n(j,"class","relative group"),n(V,"href","https://huggingface.co/docs/accelerate/accelerator.html#accelerate.Accelerator.backward"),n(V,"rel","nofollow"),n(D,"id","train"),n(D,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),n(D,"href","#train"),n(P,"class","relative group"),n(F,"id","train-with-a-script"),n(F,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),n(F,"href","#train-with-a-script"),n(z,"class","relative group"),n(H,"id","train-with-a-notebook"),n(H,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),n(H,"href","#train-with-a-notebook"),n(T,"class","relative 
group"),n(se,"href","https://huggingface.co/docs/accelerate/index.html"),n(se,"rel","nofollow")},m(e,r){a(document.head,b),i(e,ne,r),i(e,w,r),a(w,$),a($,ve),m(M,ve,null),a(w,$t),a(w,_e),a(_e,bt),i(e,Fe,r),i(e,S,r),a(S,yt),a(S,U),a(U,kt),a(S,At),i(e,He,r),i(e,A,r),a(A,x),a(x,ge),m(W,ge,null),a(A,Et),a(A,we),a(we,jt),i(e,Oe,r),i(e,ce,r),a(ce,Pt),i(e,Ge,r),m(B,e,r),i(e,Ie,r),i(e,y,r),a(y,zt),a(y,L),a(L,$e),a($e,Tt),a(y,St),a(y,be),a(be,xt),a(y,Ct),i(e,Me,r),m(Y,e,r),i(e,Ue,r),i(e,E,r),a(E,C),a(C,ye),m(J,ye,null),a(E,qt),a(E,ke),a(ke,Nt),i(e,We,r),i(e,q,r),a(q,Dt),a(q,K),a(K,Ae),a(Ae,Ft),a(q,Ht),i(e,Be,r),m(Q,e,r),i(e,Le,r),i(e,j,r),a(j,N),a(N,Ee),m(R,Ee,null),a(j,Ot),a(j,je),a(je,Gt),i(e,Ye,r),i(e,k,r),a(k,It),a(k,Pe),a(Pe,Mt),a(k,Ut),a(k,V),a(V,ze),a(ze,Wt),a(k,Bt),i(e,Je,r),m(X,e,r),i(e,Ke,r),i(e,pe,r),a(pe,Lt),i(e,Qe,r),m(Z,e,r),i(e,Re,r),i(e,P,r),a(P,D),a(D,Te),m(ee,Te,null),a(P,Yt),a(P,Se),a(Se,Jt),i(e,Ve,r),i(e,he,r),a(he,Kt),i(e,Xe,r),i(e,z,r),a(z,F),a(F,xe),m(te,xe,null),a(z,Qt),a(z,Ce),a(Ce,Rt),i(e,Ze,r),i(e,de,r),a(de,Vt),i(e,et,r),m(ae,e,r),i(e,tt,r),i(e,fe,r),a(fe,Xt),i(e,at,r),m(re,e,r),i(e,rt,r),i(e,T,r),a(T,H),a(H,qe),m(oe,qe,null),a(T,Zt),a(T,Ne),a(Ne,ea),i(e,ot,r),i(e,O,r),a(O,ta),a(O,De),a(De,aa),a(O,ra),i(e,lt,r),m(le,e,r),i(e,st,r),i(e,G,r),a(G,oa),a(G,se),a(se,la),a(G,sa),it=!0},p:Ka,i(e){it||(v(M.$$.fragment,e),v(W.$$.fragment,e),v(B.$$.fragment,e),v(Y.$$.fragment,e),v(J.$$.fragment,e),v(Q.$$.fragment,e),v(R.$$.fragment,e),v(X.$$.fragment,e),v(Z.$$.fragment,e),v(ee.$$.fragment,e),v(te.$$.fragment,e),v(ae.$$.fragment,e),v(re.$$.fragment,e),v(oe.$$.fragment,e),v(le.$$.fragment,e),it=!0)},o(e){_(M.$$.fragment,e),_(W.$$.fragment,e),_(B.$$.fragment,e),_(Y.$$.fragment,e),_(J.$$.fragment,e),_(Q.$$.fragment,e),_(R.$$.fragment,e),_(X.$$.fragment,e),_(Z.$$.fragment,e),_(ee.$$.fragment,e),_(te.$$.fragment,e),_(ae.$$.fragment,e),_(re.$$.fragment,e),_(oe.$$.fragment,e),_(le.$$.fragment,e),it=!1},d(e){t(b),e&&t(ne),e&&t(w),g(M),e&&t(Fe),e&&t(S),e&&t(He),e&&t(A),g(W),e&&t(Oe),e&&t(ce),e&&t(Ge),g(B,e),e&&t(Ie),e&&t(y),e&&t(Me),g(Y,e),e&&t(Ue),e&&t(E),g(J),e&&t(We),e&&t(q),e&&t(Be),g(Q,e),e&&t(Le),e&&t(j),g(R),e&&t(Ye),e&&t(k),e&&t(Je),g(X,e),e&&t(Ke),e&&t(pe),e&&t(Qe),g(Z,e),e&&t(Re),e&&t(P),g(ee),e&&t(Ve),e&&t(he),e&&t(Xe),e&&t(z),g(te),e&&t(Ze),e&&t(de),e&&t(et),g(ae,e),e&&t(tt),e&&t(fe),e&&t(at),g(re,e),e&&t(rt),e&&t(T),g(oe),e&&t(ot),e&&t(O),e&&t(lt),g(le,e),e&&t(st),e&&t(G)}}}const Ra={local:"distributed-training-with-accelerate",sections:[{local:"setup",title:"Setup"},{local:"prepare-to-accelerate",title:"Prepare to accelerate"},{local:"backward",title:"Backward"},{local:"train",sections:[{local:"train-with-a-script",title:"Train with a script"},{local:"train-with-a-notebook",title:"Train with a notebook"}],title:"Train"}],title:"Distributed training with \u{1F917} Accelerate"};function Va(wt,b,ne){let{fw:w}=b;return wt.$$set=$=>{"fw"in $&&ne(0,w=$.fw)},[w]}class ar extends Ba{constructor(b){super();La(this,b,Va,Qa,Ya,{fw:0})}}export{ar as default,Ra as metadata};
246
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages/philosophy.mdx-9ffd6814.js
import{S as Va,i as Xa,s as Za,e as a,k as i,w as Wa,t as l,M as er,c as r,d as t,m as d,a as s,x as Da,h as n,b as h,F as e,g as f,y as Ga,L as tr,q as Ra,o as Ya,B as Ja}from"../chunks/vendor-4833417e.js";import{I as Qa}from"../chunks/IconCopyLink-4b81c553.js";function or(vt){let g,Q,u,m,pe,O,yt,me,wt,Ge,V,gt,Re,_,ve,_t,bt,ye,Et,kt,we,Pt,Ye,X,Lt,Je,T,B,ge,Tt,It,v,y,At,Z,zt,Mt,ee,xt,$t,te,Nt,St,qt,k,Ot,_e,Bt,Ct,C,Ft,Ut,jt,w,Ht,oe,Kt,Wt,ae,Dt,Gt,be,Rt,Yt,Jt,Ee,Qt,Vt,F,ke,Xt,Zt,U,Pe,eo,to,j,oo,Le,ao,ro,Qe,re,so,Ve,b,H,Te,lo,no,K,Ie,io,co,Ae,ho,fo,W,ze,uo,po,D,Me,mo,vo,xe,yo,wo,$e,Ne,go,Xe,P,I,Se,G,_o,qe,bo,Ze,se,Eo,et,E,p,Oe,ko,Po,le,Lo,To,R,Io,Ao,Y,zo,Mo,xo,A,Be,$o,No,ne,So,qo,Oo,z,Ce,Bo,Co,ie,Fo,Uo,tt,de,jo,ot,M,x,Fe,Ho,Ko,J,Wo,Do,Go,$,Ue,Ro,Yo,je,Jo,Qo,at;return O=new Qa({}),G=new Qa({}),{c(){g=a("meta"),Q=i(),u=a("h1"),m=a("a"),pe=a("span"),Wa(O.$$.fragment),yt=i(),me=a("span"),wt=l("Philosophy"),Ge=i(),V=a("p"),gt=l("\u{1F917} Transformers is an opinionated library built for:"),Re=i(),_=a("ul"),ve=a("li"),_t=l("NLP researchers and educators seeking to use/study/extend large-scale transformers models"),bt=i(),ye=a("li"),Et=l("hands-on practitioners who want to fine-tune those models and/or serve them in production"),kt=i(),we=a("li"),Pt=l("engineers who just want to download a pretrained model and use it to solve a given NLP task."),Ye=i(),X=a("p"),Lt=l("The library was designed with two strong goals in mind:"),Je=i(),T=a("ul"),B=a("li"),ge=a("p"),Tt=l("Be as easy and fast to use as possible:"),It=i(),v=a("ul"),y=a("li"),At=l(`We strongly limited the number of user-facing abstractions to learn, in fact, there are almost no abstractions, just three standard classes required to use each model: `),Z=a("a"),zt=l("configuration"),Mt=l(`, `),ee=a("a"),xt=l("models"),$t=l(" and "),te=a("a"),Nt=l("tokenizer"),St=l("."),qt=i(),k=a("li"),Ot=l(`All of these classes can be initialized in a simple and unified way from pretrained instances by using a common `),_e=a("code"),Bt=l("from_pretrained()"),Ct=l(` instantiation method which will take care of downloading (if needed), caching and loading the related class instance and associated data (configurations\u2019 hyper-parameters, tokenizers\u2019 vocabulary, and models\u2019 weights) from a pretrained checkpoint provided on `),C=a("a"),Ft=l("Hugging Face Hub"),Ut=l(" or your own saved checkpoint."),jt=i(),w=a("li"),Ht=l("On top of those three base classes, the library provides two APIs: "),oe=a("a"),Kt=l("pipeline()"),Wt=l(` for quickly using a model (plus its associated tokenizer and configuration) on a given task and `),ae=a("a"),Dt=l("Trainer"),Gt=l("/"),be=a("code"),Rt=l("Keras.fit"),Yt=l(" to quickly train or fine-tune a given model."),Jt=i(),Ee=a("li"),Qt=l(`As a consequence, this library is NOT a modular toolbox of building blocks for neural nets. 
If you want to extend/build-upon the library, just use regular Python/PyTorch/TensorFlow/Keras modules and inherit from the base classes of the library to reuse functionalities like model loading/saving.`),Vt=i(),F=a("li"),ke=a("p"),Xt=l("Provide state-of-the-art models with performances as close as possible to the original models:"),Zt=i(),U=a("ul"),Pe=a("li"),eo=l(`We provide at least one example for each architecture which reproduces a result provided by the official authors of said architecture.`),to=i(),j=a("li"),oo=l(`The code is usually as close to the original code base as possible which means some PyTorch code may be not as `),Le=a("em"),ao=l("pytorchic"),ro=l(" as it could be as a result of being converted TensorFlow code and vice versa."),Qe=i(),re=a("p"),so=l("A few other goals:"),Ve=i(),b=a("ul"),H=a("li"),Te=a("p"),lo=l("Expose the models\u2019 internals as consistently as possible:"),no=i(),K=a("ul"),Ie=a("li"),io=l("We give access, using a single API, to the full hidden-states and attention weights."),co=i(),Ae=a("li"),ho=l("Tokenizer and base model\u2019s API are standardized to easily switch between models."),fo=i(),W=a("li"),ze=a("p"),uo=l("Incorporate a subjective selection of promising tools for fine-tuning/investigating these models:"),po=i(),D=a("ul"),Me=a("li"),mo=l("A simple/consistent way to add new tokens to the vocabulary and embeddings for fine-tuning."),vo=i(),xe=a("li"),yo=l("Simple ways to mask and prune transformer heads."),wo=i(),$e=a("li"),Ne=a("p"),go=l("Switch easily between PyTorch and TensorFlow 2.0, allowing training using one framework and inference using another."),Xe=i(),P=a("h2"),I=a("a"),Se=a("span"),Wa(G.$$.fragment),_o=i(),qe=a("span"),bo=l("Main concepts"),Ze=i(),se=a("p"),Eo=l("The library is built around three types of classes for each model:"),et=i(),E=a("ul"),p=a("li"),Oe=a("strong"),ko=l("Model classes"),Po=l(" such as "),le=a("a"),Lo=l("BertModel"),To=l(", which are 30+ PyTorch models ("),R=a("a"),Io=l("torch.nn.Module"),Ao=l(") or Keras models ("),Y=a("a"),zo=l("tf.keras.Model"),Mo=l(`) that work with the pretrained weights provided in the library.`),xo=i(),A=a("li"),Be=a("strong"),$o=l("Configuration classes"),No=l(" such as "),ne=a("a"),So=l("BertConfig"),qo=l(`, which store all the parameters required to build a model. You don\u2019t always need to instantiate these yourself. 
In particular, if you are using a pretrained model without any modification, creating the model will automatically take care of instantiating the configuration (which is part of the model).`),Oo=i(),z=a("li"),Ce=a("strong"),Bo=l("Tokenizer classes"),Co=l(" such as "),ie=a("a"),Fo=l("BertTokenizer"),Uo=l(`, which store the vocabulary for each model and provide methods for encoding/decoding strings in a list of token embeddings indices to be fed to a model.`),tt=i(),de=a("p"),jo=l("All these classes can be instantiated from pretrained instances and saved locally using two methods:"),ot=i(),M=a("ul"),x=a("li"),Fe=a("code"),Ho=l("from_pretrained()"),Ko=l(` lets you instantiate a model/configuration/tokenizer from a pretrained version either provided by the library itself (the supported models can be found on the `),J=a("a"),Wo=l("Model Hub"),Do=l(`) or stored locally (or on a server) by the user,`),Go=i(),$=a("li"),Ue=a("code"),Ro=l("save_pretrained()"),Yo=l(` lets you save a model/configuration/tokenizer locally so that it can be reloaded using `),je=a("code"),Jo=l("from_pretrained()"),Qo=l("."),this.h()},l(o){const c=er('[data-svelte="svelte-1phssyn"]',document.head);g=r(c,"META",{name:!0,content:!0}),c.forEach(t),Q=d(o),u=r(o,"H1",{class:!0});var rt=s(u);m=r(rt,"A",{id:!0,class:!0,href:!0});var Vo=s(m);pe=r(Vo,"SPAN",{});var Xo=s(pe);Da(O.$$.fragment,Xo),Xo.forEach(t),Vo.forEach(t),yt=d(rt),me=r(rt,"SPAN",{});var Zo=s(me);wt=n(Zo,"Philosophy"),Zo.forEach(t),rt.forEach(t),Ge=d(o),V=r(o,"P",{});var ea=s(V);gt=n(ea,"\u{1F917} Transformers is an opinionated library built for:"),ea.forEach(t),Re=d(o),_=r(o,"UL",{});var ce=s(_);ve=r(ce,"LI",{});var ta=s(ve);_t=n(ta,"NLP researchers and educators seeking to use/study/extend large-scale transformers models"),ta.forEach(t),bt=d(ce),ye=r(ce,"LI",{});var oa=s(ye);Et=n(oa,"hands-on practitioners who want to fine-tune those models and/or serve them in production"),oa.forEach(t),kt=d(ce),we=r(ce,"LI",{});var aa=s(we);Pt=n(aa,"engineers who just want to download a pretrained model and use it to solve a given NLP task."),aa.forEach(t),ce.forEach(t),Ye=d(o),X=r(o,"P",{});var ra=s(X);Lt=n(ra,"The library was designed with two strong goals in mind:"),ra.forEach(t),Je=d(o),T=r(o,"UL",{});var st=s(T);B=r(st,"LI",{});var lt=s(B);ge=r(lt,"P",{});var sa=s(ge);Tt=n(sa,"Be as easy and fast to use as possible:"),sa.forEach(t),It=d(lt),v=r(lt,"UL",{});var N=s(v);y=r(N,"LI",{});var S=s(y);At=n(S,`We strongly limited the number of user-facing abstractions to learn, in fact, there are almost no abstractions, just three standard classes required to use each model: `),Z=r(S,"A",{href:!0});var la=s(Z);zt=n(la,"configuration"),la.forEach(t),Mt=n(S,`, `),ee=r(S,"A",{href:!0});var na=s(ee);xt=n(na,"models"),na.forEach(t),$t=n(S," and "),te=r(S,"A",{href:!0});var ia=s(te);Nt=n(ia,"tokenizer"),ia.forEach(t),St=n(S,"."),S.forEach(t),qt=d(N),k=r(N,"LI",{});var he=s(k);Ot=n(he,`All of these classes can be initialized in a simple and unified way from pretrained instances by using a common `),_e=r(he,"CODE",{});var da=s(_e);Bt=n(da,"from_pretrained()"),da.forEach(t),Ct=n(he,` instantiation method which will take care of downloading (if needed), caching and loading the related class instance and associated data (configurations\u2019 hyper-parameters, tokenizers\u2019 vocabulary, and models\u2019 weights) from a pretrained checkpoint provided on `),C=r(he,"A",{href:!0,rel:!0});var ca=s(C);Ft=n(ca,"Hugging Face Hub"),ca.forEach(t),Ut=n(he," or your own saved 
checkpoint."),he.forEach(t),jt=d(N),w=r(N,"LI",{});var q=s(w);Ht=n(q,"On top of those three base classes, the library provides two APIs: "),oe=r(q,"A",{href:!0});var ha=s(oe);Kt=n(ha,"pipeline()"),ha.forEach(t),Wt=n(q,` for quickly using a model (plus its associated tokenizer and configuration) on a given task and `),ae=r(q,"A",{href:!0});var fa=s(ae);Dt=n(fa,"Trainer"),fa.forEach(t),Gt=n(q,"/"),be=r(q,"CODE",{});var ua=s(be);Rt=n(ua,"Keras.fit"),ua.forEach(t),Yt=n(q," to quickly train or fine-tune a given model."),q.forEach(t),Jt=d(N),Ee=r(N,"LI",{});var pa=s(Ee);Qt=n(pa,`As a consequence, this library is NOT a modular toolbox of building blocks for neural nets. If you want to extend/build-upon the library, just use regular Python/PyTorch/TensorFlow/Keras modules and inherit from the base classes of the library to reuse functionalities like model loading/saving.`),pa.forEach(t),N.forEach(t),lt.forEach(t),Vt=d(st),F=r(st,"LI",{});var nt=s(F);ke=r(nt,"P",{});var ma=s(ke);Xt=n(ma,"Provide state-of-the-art models with performances as close as possible to the original models:"),ma.forEach(t),Zt=d(nt),U=r(nt,"UL",{});var it=s(U);Pe=r(it,"LI",{});var va=s(Pe);eo=n(va,`We provide at least one example for each architecture which reproduces a result provided by the official authors of said architecture.`),va.forEach(t),to=d(it),j=r(it,"LI",{});var dt=s(j);oo=n(dt,`The code is usually as close to the original code base as possible which means some PyTorch code may be not as `),Le=r(dt,"EM",{});var ya=s(Le);ao=n(ya,"pytorchic"),ya.forEach(t),ro=n(dt," as it could be as a result of being converted TensorFlow code and vice versa."),dt.forEach(t),it.forEach(t),nt.forEach(t),st.forEach(t),Qe=d(o),re=r(o,"P",{});var wa=s(re);so=n(wa,"A few other goals:"),wa.forEach(t),Ve=d(o),b=r(o,"UL",{});var fe=s(b);H=r(fe,"LI",{});var ct=s(H);Te=r(ct,"P",{});var ga=s(Te);lo=n(ga,"Expose the models\u2019 internals as consistently as possible:"),ga.forEach(t),no=d(ct),K=r(ct,"UL",{});var ht=s(K);Ie=r(ht,"LI",{});var _a=s(Ie);io=n(_a,"We give access, using a single API, to the full hidden-states and attention weights."),_a.forEach(t),co=d(ht),Ae=r(ht,"LI",{});var ba=s(Ae);ho=n(ba,"Tokenizer and base model\u2019s API are standardized to easily switch between models."),ba.forEach(t),ht.forEach(t),ct.forEach(t),fo=d(fe),W=r(fe,"LI",{});var ft=s(W);ze=r(ft,"P",{});var Ea=s(ze);uo=n(Ea,"Incorporate a subjective selection of promising tools for fine-tuning/investigating these models:"),Ea.forEach(t),po=d(ft),D=r(ft,"UL",{});var ut=s(D);Me=r(ut,"LI",{});var ka=s(Me);mo=n(ka,"A simple/consistent way to add new tokens to the vocabulary and embeddings for fine-tuning."),ka.forEach(t),vo=d(ut),xe=r(ut,"LI",{});var Pa=s(xe);yo=n(Pa,"Simple ways to mask and prune transformer heads."),Pa.forEach(t),ut.forEach(t),ft.forEach(t),wo=d(fe),$e=r(fe,"LI",{});var La=s($e);Ne=r(La,"P",{});var Ta=s(Ne);go=n(Ta,"Switch easily between PyTorch and TensorFlow 2.0, allowing training using one framework and inference using another."),Ta.forEach(t),La.forEach(t),fe.forEach(t),Xe=d(o),P=r(o,"H2",{class:!0});var pt=s(P);I=r(pt,"A",{id:!0,class:!0,href:!0});var Ia=s(I);Se=r(Ia,"SPAN",{});var Aa=s(Se);Da(G.$$.fragment,Aa),Aa.forEach(t),Ia.forEach(t),_o=d(pt),qe=r(pt,"SPAN",{});var za=s(qe);bo=n(za,"Main concepts"),za.forEach(t),pt.forEach(t),Ze=d(o),se=r(o,"P",{});var Ma=s(se);Eo=n(Ma,"The library is built around three types of classes for each model:"),Ma.forEach(t),et=d(o),E=r(o,"UL",{});var ue=s(E);p=r(ue,"LI",{});var L=s(p);Oe=r(L,"STRONG",{});var 
xa=s(Oe);ko=n(xa,"Model classes"),xa.forEach(t),Po=n(L," such as "),le=r(L,"A",{href:!0});var $a=s(le);Lo=n($a,"BertModel"),$a.forEach(t),To=n(L,", which are 30+ PyTorch models ("),R=r(L,"A",{href:!0,rel:!0});var Na=s(R);Io=n(Na,"torch.nn.Module"),Na.forEach(t),Ao=n(L,") or Keras models ("),Y=r(L,"A",{href:!0,rel:!0});var Sa=s(Y);zo=n(Sa,"tf.keras.Model"),Sa.forEach(t),Mo=n(L,`) that work with the pretrained weights provided in the library.`),L.forEach(t),xo=d(ue),A=r(ue,"LI",{});var He=s(A);Be=r(He,"STRONG",{});var qa=s(Be);$o=n(qa,"Configuration classes"),qa.forEach(t),No=n(He," such as "),ne=r(He,"A",{href:!0});var Oa=s(ne);So=n(Oa,"BertConfig"),Oa.forEach(t),qo=n(He,`, which store all the parameters required to build a model. You don\u2019t always need to instantiate these yourself. In particular, if you are using a pretrained model without any modification, creating the model will automatically take care of instantiating the configuration (which is part of the model).`),He.forEach(t),Oo=d(ue),z=r(ue,"LI",{});var Ke=s(z);Ce=r(Ke,"STRONG",{});var Ba=s(Ce);Bo=n(Ba,"Tokenizer classes"),Ba.forEach(t),Co=n(Ke," such as "),ie=r(Ke,"A",{href:!0});var Ca=s(ie);Fo=n(Ca,"BertTokenizer"),Ca.forEach(t),Uo=n(Ke,`, which store the vocabulary for each model and provide methods for encoding/decoding strings in a list of token embeddings indices to be fed to a model.`),Ke.forEach(t),ue.forEach(t),tt=d(o),de=r(o,"P",{});var Fa=s(de);jo=n(Fa,"All these classes can be instantiated from pretrained instances and saved locally using two methods:"),Fa.forEach(t),ot=d(o),M=r(o,"UL",{});var mt=s(M);x=r(mt,"LI",{});var We=s(x);Fe=r(We,"CODE",{});var Ua=s(Fe);Ho=n(Ua,"from_pretrained()"),Ua.forEach(t),Ko=n(We,` lets you instantiate a model/configuration/tokenizer from a pretrained version either provided by the library itself (the supported models can be found on the `),J=r(We,"A",{href:!0,rel:!0});var ja=s(J);Wo=n(ja,"Model Hub"),ja.forEach(t),Do=n(We,`) or stored locally (or on a server) by the user,`),We.forEach(t),Go=d(mt),$=r(mt,"LI",{});var De=s($);Ue=r(De,"CODE",{});var Ha=s(Ue);Ro=n(Ha,"save_pretrained()"),Ha.forEach(t),Yo=n(De,` lets you save a model/configuration/tokenizer locally so that it can be reloaded using `),je=r(De,"CODE",{});var Ka=s(je);Jo=n(Ka,"from_pretrained()"),Ka.forEach(t),Qo=n(De,"."),De.forEach(t),mt.forEach(t),this.h()},h(){h(g,"name","hf:doc:metadata"),h(g,"content",JSON.stringify(ar)),h(m,"id","philosophy"),h(m,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(m,"href","#philosophy"),h(u,"class","relative group"),h(Z,"href","main_classes/configuration"),h(ee,"href","main_classes/model"),h(te,"href","main_classes/tokenizer"),h(C,"href","https://huggingface.co/models"),h(C,"rel","nofollow"),h(oe,"href","/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline"),h(ae,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer"),h(I,"id","main-concepts"),h(I,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(I,"href","#main-concepts"),h(P,"class","relative 
group"),h(le,"href","/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertModel"),h(R,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),h(R,"rel","nofollow"),h(Y,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),h(Y,"rel","nofollow"),h(ne,"href","/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertConfig"),h(ie,"href","/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertTokenizer"),h(J,"href","https://huggingface.co/models"),h(J,"rel","nofollow")},m(o,c){e(document.head,g),f(o,Q,c),f(o,u,c),e(u,m),e(m,pe),Ga(O,pe,null),e(u,yt),e(u,me),e(me,wt),f(o,Ge,c),f(o,V,c),e(V,gt),f(o,Re,c),f(o,_,c),e(_,ve),e(ve,_t),e(_,bt),e(_,ye),e(ye,Et),e(_,kt),e(_,we),e(we,Pt),f(o,Ye,c),f(o,X,c),e(X,Lt),f(o,Je,c),f(o,T,c),e(T,B),e(B,ge),e(ge,Tt),e(B,It),e(B,v),e(v,y),e(y,At),e(y,Z),e(Z,zt),e(y,Mt),e(y,ee),e(ee,xt),e(y,$t),e(y,te),e(te,Nt),e(y,St),e(v,qt),e(v,k),e(k,Ot),e(k,_e),e(_e,Bt),e(k,Ct),e(k,C),e(C,Ft),e(k,Ut),e(v,jt),e(v,w),e(w,Ht),e(w,oe),e(oe,Kt),e(w,Wt),e(w,ae),e(ae,Dt),e(w,Gt),e(w,be),e(be,Rt),e(w,Yt),e(v,Jt),e(v,Ee),e(Ee,Qt),e(T,Vt),e(T,F),e(F,ke),e(ke,Xt),e(F,Zt),e(F,U),e(U,Pe),e(Pe,eo),e(U,to),e(U,j),e(j,oo),e(j,Le),e(Le,ao),e(j,ro),f(o,Qe,c),f(o,re,c),e(re,so),f(o,Ve,c),f(o,b,c),e(b,H),e(H,Te),e(Te,lo),e(H,no),e(H,K),e(K,Ie),e(Ie,io),e(K,co),e(K,Ae),e(Ae,ho),e(b,fo),e(b,W),e(W,ze),e(ze,uo),e(W,po),e(W,D),e(D,Me),e(Me,mo),e(D,vo),e(D,xe),e(xe,yo),e(b,wo),e(b,$e),e($e,Ne),e(Ne,go),f(o,Xe,c),f(o,P,c),e(P,I),e(I,Se),Ga(G,Se,null),e(P,_o),e(P,qe),e(qe,bo),f(o,Ze,c),f(o,se,c),e(se,Eo),f(o,et,c),f(o,E,c),e(E,p),e(p,Oe),e(Oe,ko),e(p,Po),e(p,le),e(le,Lo),e(p,To),e(p,R),e(R,Io),e(p,Ao),e(p,Y),e(Y,zo),e(p,Mo),e(E,xo),e(E,A),e(A,Be),e(Be,$o),e(A,No),e(A,ne),e(ne,So),e(A,qo),e(E,Oo),e(E,z),e(z,Ce),e(Ce,Bo),e(z,Co),e(z,ie),e(ie,Fo),e(z,Uo),f(o,tt,c),f(o,de,c),e(de,jo),f(o,ot,c),f(o,M,c),e(M,x),e(x,Fe),e(Fe,Ho),e(x,Ko),e(x,J),e(J,Wo),e(x,Do),e(M,Go),e(M,$),e($,Ue),e(Ue,Ro),e($,Yo),e($,je),e(je,Jo),e($,Qo),at=!0},p:tr,i(o){at||(Ra(O.$$.fragment,o),Ra(G.$$.fragment,o),at=!0)},o(o){Ya(O.$$.fragment,o),Ya(G.$$.fragment,o),at=!1},d(o){t(g),o&&t(Q),o&&t(u),Ja(O),o&&t(Ge),o&&t(V),o&&t(Re),o&&t(_),o&&t(Ye),o&&t(X),o&&t(Je),o&&t(T),o&&t(Qe),o&&t(re),o&&t(Ve),o&&t(b),o&&t(Xe),o&&t(P),Ja(G),o&&t(Ze),o&&t(se),o&&t(et),o&&t(E),o&&t(tt),o&&t(de),o&&t(ot),o&&t(M)}}}const ar={local:"philosophy",sections:[{local:"main-concepts",title:"Main concepts"}],title:"Philosophy"};function rr(vt,g,Q){let{fw:u}=g;return vt.$$set=m=>{"fw"in m&&Q(0,u=m.fw)},[u]}class nr extends Va{constructor(g){super();Xa(this,g,rr,or,Za,{fw:0})}}export{nr as default,ar as metadata};
247
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages/pipeline_tutorial.mdx-8f24abfb.js
import{S as $n,i as xn,s as yn,e as r,k as h,w as d,t as a,M as wn,c as l,d as t,m as c,a as i,x as g,h as n,b as f,N as jn,F as s,g as p,y as v,q as _,o as k,B as $}from"../chunks/vendor-4833417e.js";import{T as En}from"../chunks/Tip-fffd6df1.js";import{I as os}from"../chunks/IconCopyLink-4b81c553.js";import{C as F}from"../chunks/CodeBlock-6a3d1b46.js";import"../chunks/CopyButton-dacfbfaf.js";function bn(Be){let m,P,u,x,z;return{c(){m=r("p"),P=a("Take a look at the "),u=r("a"),x=a("pipeline()"),z=a(" documentation for a complete list of supported taska."),this.h()},l(j){m=l(j,"P",{});var T=i(m);P=n(T,"Take a look at the "),u=l(T,"A",{href:!0});var H=i(u);x=n(H,"pipeline()"),H.forEach(t),z=n(T," documentation for a complete list of supported taska."),T.forEach(t),this.h()},h(){f(u,"href","/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline")},m(j,T){p(j,m,T),s(m,P),s(m,u),s(u,x),s(m,z)},d(j){j&&t(m)}}}function An(Be){let m,P,u,x,z,j,T,H,Zs,is,E,et,we,st,tt,Z,at,nt,je,rt,lt,ps,q,ee,ot,Ee,it,pt,ft,Ge,ht,ct,se,mt,be,ut,dt,fs,I,hs,C,O,Qe,te,gt,Xe,vt,cs,b,_t,Ae,kt,$t,Pe,xt,yt,Te,wt,jt,ms,qe,ae,Et,Me,bt,At,us,ne,ds,re,le,Pt,Se,Tt,qt,gs,oe,vs,Fe,Mt,_s,ie,ks,y,St,ze,Ft,zt,Ye,Ct,Lt,Ce,Dt,Nt,Ze,Ht,It,$s,pe,xs,L,R,es,fe,Ot,ss,Rt,ys,w,Ut,Le,Wt,Jt,he,Kt,Vt,ts,Bt,Gt,De,Qt,Xt,ws,ce,js,U,Yt,Ne,Zt,ea,Es,me,bs,W,sa,He,ta,aa,As,ue,Ps,D,J,as,de,na,ns,ra,Ts,K,la,Ie,oa,ia,qs,A,pa,ge,fa,ha,ve,ca,ma,Oe,ua,da,Ms,_e,Ss,V,ga,Re,va,_a,Fs,ke,zs,N,B,rs,$e,ka,ls,$a,Cs,G,xa,Ue,ya,wa,Ls,We,ja,Ds,Je,Ke,Ea,Ns,xe,Hs;return j=new os({}),I=new En({props:{$$slots:{default:[bn]},$$scope:{ctx:Be}}}),te=new os({}),ne=new F({props:{code:`from transformers import pipeline generator = pipeline(task="text-generation")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> pipeline <span class="hljs-meta">&gt;&gt;&gt; </span>generator = pipeline(task=<span class="hljs-string">&quot;text-generation&quot;</span>)`}}),oe=new F({props:{code:'generator("Three Rings for the Elven-kings under the sky, Seven for the Dwarf-lords in their halls of stone")',highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>generator(<span class="hljs-string">&quot;Three Rings for the Elven-kings under the sky, Seven for the Dwarf-lords in their halls of stone&quot;</span>) [{<span class="hljs-string">&#x27;generated_text&#x27;</span>: <span class="hljs-string">&#x27;Three Rings for the Elven-kings under the sky, Seven for the Dwarf-lords in their halls of stone, Seven for the Iron-priests at the door to the east, and thirteen for the Lord Kings at the end of the mountain&#x27;</span>}]`}}),ie=new F({props:{code:`generator( [ "Three Rings for the Elven-kings under the sky, Seven for the Dwarf-lords in their halls of stone", "Nine for Mortal Men, doomed to die, One for the Dark Lord on his dark throne", ] )`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>generator( <span class="hljs-meta">... </span> [ <span class="hljs-meta">... </span> <span class="hljs-string">&quot;Three Rings for the Elven-kings under the sky, Seven for the Dwarf-lords in their halls of stone&quot;</span>, <span class="hljs-meta">... </span> <span class="hljs-string">&quot;Nine for Mortal Men, doomed to die, One for the Dark Lord on his dark throne&quot;</span>, <span class="hljs-meta">... </span> ] <span class="hljs-meta">... 
</span>)`}}),pe=new F({props:{code:`generator( "Three Rings for the Elven-kings under the sky, Seven for the Dwarf-lords in their halls of stone", num_return_sequences=2, )`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>generator( <span class="hljs-meta">... </span> <span class="hljs-string">&quot;Three Rings for the Elven-kings under the sky, Seven for the Dwarf-lords in their halls of stone&quot;</span>, <span class="hljs-meta">... </span> num_return_sequences=<span class="hljs-number">2</span>, <span class="hljs-meta">... </span>)`}}),fe=new os({}),ce=new F({props:{code:`from transformers import AutoTokenizer, AutoModelForCausalLM tokenizer = AutoTokenizer.from_pretrained("distilgpt2") model = AutoModelForCausalLM.from_pretrained("distilgpt2")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer, AutoModelForCausalLM <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;distilgpt2&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForCausalLM.from_pretrained(<span class="hljs-string">&quot;distilgpt2&quot;</span>)`}}),me=new F({props:{code:`from transformers import pipeline generator = pipeline(task="text-generation", model=model, tokenizer=tokenizer)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> pipeline <span class="hljs-meta">&gt;&gt;&gt; </span>generator = pipeline(task=<span class="hljs-string">&quot;text-generation&quot;</span>, model=model, tokenizer=tokenizer)`}}),ue=new F({props:{code:'generator("Three Rings for the Elven-kings under the sky, Seven for the Dwarf-lords in their halls of stone")',highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>generator(<span class="hljs-string">&quot;Three Rings for the Elven-kings under the sky, Seven for the Dwarf-lords in their halls of stone&quot;</span>) [{<span class="hljs-string">&#x27;generated_text&#x27;</span>: <span class="hljs-string">&#x27;Three Rings for the Elven-kings under the sky, Seven for the Dwarf-lords in their halls of stone, Seven for the Dragon-lords (for them to rule in a world ruled by their rulers, and all who live within the realm&#x27;</span>}]`}}),de=new os({}),_e=new F({props:{code:`from transformers import pipeline audio_classifier = pipeline( task="audio-classification", model="ehcalabres/wav2vec2-lg-xlsr-en-speech-emotion-recognition" )`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> pipeline <span class="hljs-meta">&gt;&gt;&gt; </span>audio_classifier = pipeline( <span class="hljs-meta">... </span> task=<span class="hljs-string">&quot;audio-classification&quot;</span>, model=<span class="hljs-string">&quot;ehcalabres/wav2vec2-lg-xlsr-en-speech-emotion-recognition&quot;</span> <span class="hljs-meta">... 
</span>)`}}),ke=new F({props:{code:'audio_classifier("jfk_moon_speech.wav")',highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>audio_classifier(<span class="hljs-string">&quot;jfk_moon_speech.wav&quot;</span>) [{<span class="hljs-string">&#x27;label&#x27;</span>: <span class="hljs-string">&#x27;calm&#x27;</span>, <span class="hljs-string">&#x27;score&#x27;</span>: <span class="hljs-number">0.13856211304664612</span>}, {<span class="hljs-string">&#x27;label&#x27;</span>: <span class="hljs-string">&#x27;disgust&#x27;</span>, <span class="hljs-string">&#x27;score&#x27;</span>: <span class="hljs-number">0.13148026168346405</span>}, {<span class="hljs-string">&#x27;label&#x27;</span>: <span class="hljs-string">&#x27;happy&#x27;</span>, <span class="hljs-string">&#x27;score&#x27;</span>: <span class="hljs-number">0.12635163962841034</span>}, {<span class="hljs-string">&#x27;label&#x27;</span>: <span class="hljs-string">&#x27;angry&#x27;</span>, <span class="hljs-string">&#x27;score&#x27;</span>: <span class="hljs-number">0.12439591437578201</span>}, {<span class="hljs-string">&#x27;label&#x27;</span>: <span class="hljs-string">&#x27;fearful&#x27;</span>, <span class="hljs-string">&#x27;score&#x27;</span>: <span class="hljs-number">0.12404385954141617</span>}]`}}),$e=new os({}),xe=new F({props:{code:`from transformers import pipeline vision_classifier = pipeline(task="image-classification") vision_classifier( images="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg" )`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> pipeline <span class="hljs-meta">&gt;&gt;&gt; </span>vision_classifier = pipeline(task=<span class="hljs-string">&quot;image-classification&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>vision_classifier( <span class="hljs-meta">... </span> images=<span class="hljs-string">&quot;https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg&quot;</span> <span class="hljs-meta">... 
</span>) [{<span class="hljs-string">&#x27;label&#x27;</span>: <span class="hljs-string">&#x27;lynx, catamount&#x27;</span>, <span class="hljs-string">&#x27;score&#x27;</span>: <span class="hljs-number">0.4403027892112732</span>}, {<span class="hljs-string">&#x27;label&#x27;</span>: <span class="hljs-string">&#x27;cougar, puma, catamount, mountain lion, painter, panther, Felis concolor&#x27;</span>, <span class="hljs-string">&#x27;score&#x27;</span>: <span class="hljs-number">0.03433405980467796</span>}, {<span class="hljs-string">&#x27;label&#x27;</span>: <span class="hljs-string">&#x27;snow leopard, ounce, Panthera uncia&#x27;</span>, <span class="hljs-string">&#x27;score&#x27;</span>: <span class="hljs-number">0.032148055732250214</span>}, {<span class="hljs-string">&#x27;label&#x27;</span>: <span class="hljs-string">&#x27;Egyptian cat&#x27;</span>, <span class="hljs-string">&#x27;score&#x27;</span>: <span class="hljs-number">0.02353910356760025</span>}, {<span class="hljs-string">&#x27;label&#x27;</span>: <span class="hljs-string">&#x27;tiger cat&#x27;</span>, <span class="hljs-string">&#x27;score&#x27;</span>: <span class="hljs-number">0.023034192621707916</span>}]`}}),{c(){m=r("meta"),P=h(),u=r("h1"),x=r("a"),z=r("span"),d(j.$$.fragment),T=h(),H=r("span"),Zs=a("Pipelines for inference"),is=h(),E=r("p"),et=a("The "),we=r("a"),st=a("pipeline()"),tt=a(" makes it simple to use any model from the "),Z=r("a"),at=a("Model Hub"),nt=a(" for inference on a variety of tasks such as text generation, image segmentation and audio classification. Even if you don\u2019t have experience with a specific modality or understand the code powering the models, you can still use them with the "),je=r("a"),rt=a("pipeline()"),lt=a("! This tutorial will teach you to:"),ps=h(),q=r("ul"),ee=r("li"),ot=a("Use a "),Ee=r("a"),it=a("pipeline()"),pt=a(" for inference."),ft=h(),Ge=r("li"),ht=a("Use a specific tokenizer or model."),ct=h(),se=r("li"),mt=a("Use a "),be=r("a"),ut=a("pipeline()"),dt=a(" for audio and vision tasks."),fs=h(),d(I.$$.fragment),hs=h(),C=r("h2"),O=r("a"),Qe=r("span"),d(te.$$.fragment),gt=h(),Xe=r("span"),vt=a("Pipeline usage"),cs=h(),b=r("p"),_t=a("While each task has an associated "),Ae=r("a"),kt=a("pipeline()"),$t=a(", it is simpler to use the general "),Pe=r("a"),xt=a("pipeline()"),yt=a(" abstraction which contains all the specific task pipelines. The "),Te=r("a"),wt=a("pipeline()"),jt=a(" automatically loads a default model and tokenizer capable of inference for your task."),ms=h(),qe=r("ol"),ae=r("li"),Et=a("Start by creating a "),Me=r("a"),bt=a("pipeline()"),At=a(" and specify an inference task:"),us=h(),d(ne.$$.fragment),ds=h(),re=r("ol"),le=r("li"),Pt=a("Pass your input text to the "),Se=r("a"),Tt=a("pipeline()"),qt=a(":"),gs=h(),d(oe.$$.fragment),vs=h(),Fe=r("p"),Mt=a("If you have more than one input, pass your input as a list:"),_s=h(),d(ie.$$.fragment),ks=h(),y=r("p"),St=a("Any additional parameters for your task can also be included in the "),ze=r("a"),Ft=a("pipeline()"),zt=a(". The "),Ye=r("code"),Ct=a("text-generation"),Lt=a(" task has a "),Ce=r("a"),Dt=a("generate()"),Nt=a(" method with several parameters for controlling the output. 
For example, if you want to generate more than one output, set the "),Ze=r("code"),Ht=a("num_return_sequences"),It=a(" parameter:"),$s=h(),d(pe.$$.fragment),xs=h(),L=r("h3"),R=r("a"),es=r("span"),d(fe.$$.fragment),Ot=h(),ss=r("span"),Rt=a("Choose a model and tokenizer"),ys=h(),w=r("p"),Ut=a("The "),Le=r("a"),Wt=a("pipeline()"),Jt=a(" accepts any model from the "),he=r("a"),Kt=a("Model Hub"),Vt=a(". There are tags on the Model Hub that allow you to filter for a model you\u2019d like to use for your task. Once you\u2019ve picked an appropriate model, load it with the corresponding "),ts=r("code"),Bt=a("AutoModelFor"),Gt=a(" and [`AutoTokenizer\u2019] class. For example, load the "),De=r("a"),Qt=a("AutoModelForCausalLM"),Xt=a(" class for a causal language modeling task:"),ws=h(),d(ce.$$.fragment),js=h(),U=r("p"),Yt=a("Create a "),Ne=r("a"),Zt=a("pipeline()"),ea=a(" for your task, and specify the model and tokenizer you\u2019ve loaded:"),Es=h(),d(me.$$.fragment),bs=h(),W=r("p"),sa=a("Pass your input text to the "),He=r("a"),ta=a("pipeline()"),aa=a(" to generate some text:"),As=h(),d(ue.$$.fragment),Ps=h(),D=r("h2"),J=r("a"),as=r("span"),d(de.$$.fragment),na=h(),ns=r("span"),ra=a("Audio pipeline"),Ts=h(),K=r("p"),la=a("The flexibility of the "),Ie=r("a"),oa=a("pipeline()"),ia=a(" means it can also be extended to audio tasks."),qs=h(),A=r("p"),pa=a("For example, let\u2019s classify the emotion from a short clip of John F. Kennedy\u2019s famous "),ge=r("a"),fa=a("\u201CWe choose to go to the Moon\u201D"),ha=a(" speech. Find an "),ve=r("a"),ca=a("audio classification"),ma=a(" model on the Model Hub for emotion recognition and load it in the "),Oe=r("a"),ua=a("pipeline()"),da=a(":"),Ms=h(),d(_e.$$.fragment),Ss=h(),V=r("p"),ga=a("Pass the audio file to the "),Re=r("a"),va=a("pipeline()"),_a=a(":"),Fs=h(),d(ke.$$.fragment),zs=h(),N=r("h2"),B=r("a"),rs=r("span"),d($e.$$.fragment),ka=h(),ls=r("span"),$a=a("Vision pipeline"),Cs=h(),G=r("p"),xa=a("Finally, using a "),Ue=r("a"),ya=a("pipeline()"),wa=a(" for vision tasks is practically identical."),Ls=h(),We=r("p"),ja=a("Specify your vision task and pass your image to the classifier. The imaage can be a link or a local path to the image. For example, what species of cat is shown below?"),Ds=h(),Je=r("p"),Ke=r("img"),Ns=h(),d(xe.$$.fragment),this.h()},l(e){const o=wn('[data-svelte="svelte-1phssyn"]',document.head);m=l(o,"META",{name:!0,content:!0}),o.forEach(t),P=c(e),u=l(e,"H1",{class:!0});var ye=i(u);x=l(ye,"A",{id:!0,class:!0,href:!0});var ba=i(x);z=l(ba,"SPAN",{});var Aa=i(z);g(j.$$.fragment,Aa),Aa.forEach(t),ba.forEach(t),T=c(ye),H=l(ye,"SPAN",{});var Pa=i(H);Zs=n(Pa,"Pipelines for inference"),Pa.forEach(t),ye.forEach(t),is=c(e),E=l(e,"P",{});var Q=i(E);et=n(Q,"The "),we=l(Q,"A",{href:!0});var Ta=i(we);st=n(Ta,"pipeline()"),Ta.forEach(t),tt=n(Q," makes it simple to use any model from the "),Z=l(Q,"A",{href:!0,rel:!0});var qa=i(Z);at=n(qa,"Model Hub"),qa.forEach(t),nt=n(Q," for inference on a variety of tasks such as text generation, image segmentation and audio classification. Even if you don\u2019t have experience with a specific modality or understand the code powering the models, you can still use them with the "),je=l(Q,"A",{href:!0});var Ma=i(je);rt=n(Ma,"pipeline()"),Ma.forEach(t),lt=n(Q,"! 
This tutorial will teach you to:"),Q.forEach(t),ps=c(e),q=l(e,"UL",{});var Ve=i(q);ee=l(Ve,"LI",{});var Is=i(ee);ot=n(Is,"Use a "),Ee=l(Is,"A",{href:!0});var Sa=i(Ee);it=n(Sa,"pipeline()"),Sa.forEach(t),pt=n(Is," for inference."),Is.forEach(t),ft=c(Ve),Ge=l(Ve,"LI",{});var Fa=i(Ge);ht=n(Fa,"Use a specific tokenizer or model."),Fa.forEach(t),ct=c(Ve),se=l(Ve,"LI",{});var Os=i(se);mt=n(Os,"Use a "),be=l(Os,"A",{href:!0});var za=i(be);ut=n(za,"pipeline()"),za.forEach(t),dt=n(Os," for audio and vision tasks."),Os.forEach(t),Ve.forEach(t),fs=c(e),g(I.$$.fragment,e),hs=c(e),C=l(e,"H2",{class:!0});var Rs=i(C);O=l(Rs,"A",{id:!0,class:!0,href:!0});var Ca=i(O);Qe=l(Ca,"SPAN",{});var La=i(Qe);g(te.$$.fragment,La),La.forEach(t),Ca.forEach(t),gt=c(Rs),Xe=l(Rs,"SPAN",{});var Da=i(Xe);vt=n(Da,"Pipeline usage"),Da.forEach(t),Rs.forEach(t),cs=c(e),b=l(e,"P",{});var X=i(b);_t=n(X,"While each task has an associated "),Ae=l(X,"A",{href:!0});var Na=i(Ae);kt=n(Na,"pipeline()"),Na.forEach(t),$t=n(X,", it is simpler to use the general "),Pe=l(X,"A",{href:!0});var Ha=i(Pe);xt=n(Ha,"pipeline()"),Ha.forEach(t),yt=n(X," abstraction which contains all the specific task pipelines. The "),Te=l(X,"A",{href:!0});var Ia=i(Te);wt=n(Ia,"pipeline()"),Ia.forEach(t),jt=n(X," automatically loads a default model and tokenizer capable of inference for your task."),X.forEach(t),ms=c(e),qe=l(e,"OL",{});var Oa=i(qe);ae=l(Oa,"LI",{});var Us=i(ae);Et=n(Us,"Start by creating a "),Me=l(Us,"A",{href:!0});var Ra=i(Me);bt=n(Ra,"pipeline()"),Ra.forEach(t),At=n(Us," and specify an inference task:"),Us.forEach(t),Oa.forEach(t),us=c(e),g(ne.$$.fragment,e),ds=c(e),re=l(e,"OL",{start:!0});var Ua=i(re);le=l(Ua,"LI",{});var Ws=i(le);Pt=n(Ws,"Pass your input text to the "),Se=l(Ws,"A",{href:!0});var Wa=i(Se);Tt=n(Wa,"pipeline()"),Wa.forEach(t),qt=n(Ws,":"),Ws.forEach(t),Ua.forEach(t),gs=c(e),g(oe.$$.fragment,e),vs=c(e),Fe=l(e,"P",{});var Ja=i(Fe);Mt=n(Ja,"If you have more than one input, pass your input as a list:"),Ja.forEach(t),_s=c(e),g(ie.$$.fragment,e),ks=c(e),y=l(e,"P",{});var M=i(y);St=n(M,"Any additional parameters for your task can also be included in the "),ze=l(M,"A",{href:!0});var Ka=i(ze);Ft=n(Ka,"pipeline()"),Ka.forEach(t),zt=n(M,". The "),Ye=l(M,"CODE",{});var Va=i(Ye);Ct=n(Va,"text-generation"),Va.forEach(t),Lt=n(M," task has a "),Ce=l(M,"A",{href:!0});var Ba=i(Ce);Dt=n(Ba,"generate()"),Ba.forEach(t),Nt=n(M," method with several parameters for controlling the output. For example, if you want to generate more than one output, set the "),Ze=l(M,"CODE",{});var Ga=i(Ze);Ht=n(Ga,"num_return_sequences"),Ga.forEach(t),It=n(M," parameter:"),M.forEach(t),$s=c(e),g(pe.$$.fragment,e),xs=c(e),L=l(e,"H3",{class:!0});var Js=i(L);R=l(Js,"A",{id:!0,class:!0,href:!0});var Qa=i(R);es=l(Qa,"SPAN",{});var Xa=i(es);g(fe.$$.fragment,Xa),Xa.forEach(t),Qa.forEach(t),Ot=c(Js),ss=l(Js,"SPAN",{});var Ya=i(ss);Rt=n(Ya,"Choose a model and tokenizer"),Ya.forEach(t),Js.forEach(t),ys=c(e),w=l(e,"P",{});var S=i(w);Ut=n(S,"The "),Le=l(S,"A",{href:!0});var Za=i(Le);Wt=n(Za,"pipeline()"),Za.forEach(t),Jt=n(S," accepts any model from the "),he=l(S,"A",{href:!0,rel:!0});var en=i(he);Kt=n(en,"Model Hub"),en.forEach(t),Vt=n(S,". There are tags on the Model Hub that allow you to filter for a model you\u2019d like to use for your task. Once you\u2019ve picked an appropriate model, load it with the corresponding "),ts=l(S,"CODE",{});var sn=i(ts);Bt=n(sn,"AutoModelFor"),sn.forEach(t),Gt=n(S," and [`AutoTokenizer\u2019] class. 
For example, load the "),De=l(S,"A",{href:!0});var tn=i(De);Qt=n(tn,"AutoModelForCausalLM"),tn.forEach(t),Xt=n(S," class for a causal language modeling task:"),S.forEach(t),ws=c(e),g(ce.$$.fragment,e),js=c(e),U=l(e,"P",{});var Ks=i(U);Yt=n(Ks,"Create a "),Ne=l(Ks,"A",{href:!0});var an=i(Ne);Zt=n(an,"pipeline()"),an.forEach(t),ea=n(Ks," for your task, and specify the model and tokenizer you\u2019ve loaded:"),Ks.forEach(t),Es=c(e),g(me.$$.fragment,e),bs=c(e),W=l(e,"P",{});var Vs=i(W);sa=n(Vs,"Pass your input text to the "),He=l(Vs,"A",{href:!0});var nn=i(He);ta=n(nn,"pipeline()"),nn.forEach(t),aa=n(Vs," to generate some text:"),Vs.forEach(t),As=c(e),g(ue.$$.fragment,e),Ps=c(e),D=l(e,"H2",{class:!0});var Bs=i(D);J=l(Bs,"A",{id:!0,class:!0,href:!0});var rn=i(J);as=l(rn,"SPAN",{});var ln=i(as);g(de.$$.fragment,ln),ln.forEach(t),rn.forEach(t),na=c(Bs),ns=l(Bs,"SPAN",{});var on=i(ns);ra=n(on,"Audio pipeline"),on.forEach(t),Bs.forEach(t),Ts=c(e),K=l(e,"P",{});var Gs=i(K);la=n(Gs,"The flexibility of the "),Ie=l(Gs,"A",{href:!0});var pn=i(Ie);oa=n(pn,"pipeline()"),pn.forEach(t),ia=n(Gs," means it can also be extended to audio tasks."),Gs.forEach(t),qs=c(e),A=l(e,"P",{});var Y=i(A);pa=n(Y,"For example, let\u2019s classify the emotion from a short clip of John F. Kennedy\u2019s famous "),ge=l(Y,"A",{href:!0,rel:!0});var fn=i(ge);fa=n(fn,"\u201CWe choose to go to the Moon\u201D"),fn.forEach(t),ha=n(Y," speech. Find an "),ve=l(Y,"A",{href:!0,rel:!0});var hn=i(ve);ca=n(hn,"audio classification"),hn.forEach(t),ma=n(Y," model on the Model Hub for emotion recognition and load it in the "),Oe=l(Y,"A",{href:!0});var cn=i(Oe);ua=n(cn,"pipeline()"),cn.forEach(t),da=n(Y,":"),Y.forEach(t),Ms=c(e),g(_e.$$.fragment,e),Ss=c(e),V=l(e,"P",{});var Qs=i(V);ga=n(Qs,"Pass the audio file to the "),Re=l(Qs,"A",{href:!0});var mn=i(Re);va=n(mn,"pipeline()"),mn.forEach(t),_a=n(Qs,":"),Qs.forEach(t),Fs=c(e),g(ke.$$.fragment,e),zs=c(e),N=l(e,"H2",{class:!0});var Xs=i(N);B=l(Xs,"A",{id:!0,class:!0,href:!0});var un=i(B);rs=l(un,"SPAN",{});var dn=i(rs);g($e.$$.fragment,dn),dn.forEach(t),un.forEach(t),ka=c(Xs),ls=l(Xs,"SPAN",{});var gn=i(ls);$a=n(gn,"Vision pipeline"),gn.forEach(t),Xs.forEach(t),Cs=c(e),G=l(e,"P",{});var Ys=i(G);xa=n(Ys,"Finally, using a "),Ue=l(Ys,"A",{href:!0});var vn=i(Ue);ya=n(vn,"pipeline()"),vn.forEach(t),wa=n(Ys," for vision tasks is practically identical."),Ys.forEach(t),Ls=c(e),We=l(e,"P",{});var _n=i(We);ja=n(_n,"Specify your vision task and pass your image to the classifier. The imaage can be a link or a local path to the image. 
For example, what species of cat is shown below?"),_n.forEach(t),Ds=c(e),Je=l(e,"P",{});var kn=i(Je);Ke=l(kn,"IMG",{src:!0,alt:!0}),kn.forEach(t),Ns=c(e),g(xe.$$.fragment,e),this.h()},h(){f(m,"name","hf:doc:metadata"),f(m,"content",JSON.stringify(Pn)),f(x,"id","pipelines-for-inference"),f(x,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(x,"href","#pipelines-for-inference"),f(u,"class","relative group"),f(we,"href","/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline"),f(Z,"href","https://huggingface.co/models"),f(Z,"rel","nofollow"),f(je,"href","/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline"),f(Ee,"href","/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline"),f(be,"href","/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline"),f(O,"id","pipeline-usage"),f(O,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(O,"href","#pipeline-usage"),f(C,"class","relative group"),f(Ae,"href","/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline"),f(Pe,"href","/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline"),f(Te,"href","/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline"),f(Me,"href","/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline"),f(Se,"href","/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline"),f(re,"start","2"),f(ze,"href","/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline"),f(Ce,"href","/docs/transformers/pr_16143/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.generate"),f(R,"id","choose-a-model-and-tokenizer"),f(R,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(R,"href","#choose-a-model-and-tokenizer"),f(L,"class","relative group"),f(Le,"href","/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline"),f(he,"href","https://huggingface.co/models"),f(he,"rel","nofollow"),f(De,"href","/docs/transformers/pr_16143/en/model_doc/auto#transformers.AutoModelForCausalLM"),f(Ne,"href","/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline"),f(He,"href","/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline"),f(J,"id","audio-pipeline"),f(J,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(J,"href","#audio-pipeline"),f(D,"class","relative group"),f(Ie,"href","/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline"),f(ge,"href","https://en.wikipedia.org/wiki/We_choose_to_go_to_the_Moon"),f(ge,"rel","nofollow"),f(ve,"href","https://huggingface.co/models?pipeline_tag=audio-classification"),f(ve,"rel","nofollow"),f(Oe,"href","/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline"),f(Re,"href","/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline"),f(B,"id","vision-pipeline"),f(B,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 
with-hover:group-hover:opacity-100 with-hover:right-full"),f(B,"href","#vision-pipeline"),f(N,"class","relative group"),f(Ue,"href","/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline"),jn(Ke.src,Ea="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg")||f(Ke,"src",Ea),f(Ke,"alt","pipeline-cat-chonk")},m(e,o){s(document.head,m),p(e,P,o),p(e,u,o),s(u,x),s(x,z),v(j,z,null),s(u,T),s(u,H),s(H,Zs),p(e,is,o),p(e,E,o),s(E,et),s(E,we),s(we,st),s(E,tt),s(E,Z),s(Z,at),s(E,nt),s(E,je),s(je,rt),s(E,lt),p(e,ps,o),p(e,q,o),s(q,ee),s(ee,ot),s(ee,Ee),s(Ee,it),s(ee,pt),s(q,ft),s(q,Ge),s(Ge,ht),s(q,ct),s(q,se),s(se,mt),s(se,be),s(be,ut),s(se,dt),p(e,fs,o),v(I,e,o),p(e,hs,o),p(e,C,o),s(C,O),s(O,Qe),v(te,Qe,null),s(C,gt),s(C,Xe),s(Xe,vt),p(e,cs,o),p(e,b,o),s(b,_t),s(b,Ae),s(Ae,kt),s(b,$t),s(b,Pe),s(Pe,xt),s(b,yt),s(b,Te),s(Te,wt),s(b,jt),p(e,ms,o),p(e,qe,o),s(qe,ae),s(ae,Et),s(ae,Me),s(Me,bt),s(ae,At),p(e,us,o),v(ne,e,o),p(e,ds,o),p(e,re,o),s(re,le),s(le,Pt),s(le,Se),s(Se,Tt),s(le,qt),p(e,gs,o),v(oe,e,o),p(e,vs,o),p(e,Fe,o),s(Fe,Mt),p(e,_s,o),v(ie,e,o),p(e,ks,o),p(e,y,o),s(y,St),s(y,ze),s(ze,Ft),s(y,zt),s(y,Ye),s(Ye,Ct),s(y,Lt),s(y,Ce),s(Ce,Dt),s(y,Nt),s(y,Ze),s(Ze,Ht),s(y,It),p(e,$s,o),v(pe,e,o),p(e,xs,o),p(e,L,o),s(L,R),s(R,es),v(fe,es,null),s(L,Ot),s(L,ss),s(ss,Rt),p(e,ys,o),p(e,w,o),s(w,Ut),s(w,Le),s(Le,Wt),s(w,Jt),s(w,he),s(he,Kt),s(w,Vt),s(w,ts),s(ts,Bt),s(w,Gt),s(w,De),s(De,Qt),s(w,Xt),p(e,ws,o),v(ce,e,o),p(e,js,o),p(e,U,o),s(U,Yt),s(U,Ne),s(Ne,Zt),s(U,ea),p(e,Es,o),v(me,e,o),p(e,bs,o),p(e,W,o),s(W,sa),s(W,He),s(He,ta),s(W,aa),p(e,As,o),v(ue,e,o),p(e,Ps,o),p(e,D,o),s(D,J),s(J,as),v(de,as,null),s(D,na),s(D,ns),s(ns,ra),p(e,Ts,o),p(e,K,o),s(K,la),s(K,Ie),s(Ie,oa),s(K,ia),p(e,qs,o),p(e,A,o),s(A,pa),s(A,ge),s(ge,fa),s(A,ha),s(A,ve),s(ve,ca),s(A,ma),s(A,Oe),s(Oe,ua),s(A,da),p(e,Ms,o),v(_e,e,o),p(e,Ss,o),p(e,V,o),s(V,ga),s(V,Re),s(Re,va),s(V,_a),p(e,Fs,o),v(ke,e,o),p(e,zs,o),p(e,N,o),s(N,B),s(B,rs),v($e,rs,null),s(N,ka),s(N,ls),s(ls,$a),p(e,Cs,o),p(e,G,o),s(G,xa),s(G,Ue),s(Ue,ya),s(G,wa),p(e,Ls,o),p(e,We,o),s(We,ja),p(e,Ds,o),p(e,Je,o),s(Je,Ke),p(e,Ns,o),v(xe,e,o),Hs=!0},p(e,[o]){const ye={};o&2&&(ye.$$scope={dirty:o,ctx:e}),I.$set(ye)},i(e){Hs||(_(j.$$.fragment,e),_(I.$$.fragment,e),_(te.$$.fragment,e),_(ne.$$.fragment,e),_(oe.$$.fragment,e),_(ie.$$.fragment,e),_(pe.$$.fragment,e),_(fe.$$.fragment,e),_(ce.$$.fragment,e),_(me.$$.fragment,e),_(ue.$$.fragment,e),_(de.$$.fragment,e),_(_e.$$.fragment,e),_(ke.$$.fragment,e),_($e.$$.fragment,e),_(xe.$$.fragment,e),Hs=!0)},o(e){k(j.$$.fragment,e),k(I.$$.fragment,e),k(te.$$.fragment,e),k(ne.$$.fragment,e),k(oe.$$.fragment,e),k(ie.$$.fragment,e),k(pe.$$.fragment,e),k(fe.$$.fragment,e),k(ce.$$.fragment,e),k(me.$$.fragment,e),k(ue.$$.fragment,e),k(de.$$.fragment,e),k(_e.$$.fragment,e),k(ke.$$.fragment,e),k($e.$$.fragment,e),k(xe.$$.fragment,e),Hs=!1},d(e){t(m),e&&t(P),e&&t(u),$(j),e&&t(is),e&&t(E),e&&t(ps),e&&t(q),e&&t(fs),$(I,e),e&&t(hs),e&&t(C),$(te),e&&t(cs),e&&t(b),e&&t(ms),e&&t(qe),e&&t(us),$(ne,e),e&&t(ds),e&&t(re),e&&t(gs),$(oe,e),e&&t(vs),e&&t(Fe),e&&t(_s),$(ie,e),e&&t(ks),e&&t(y),e&&t($s),$(pe,e),e&&t(xs),e&&t(L),$(fe),e&&t(ys),e&&t(w),e&&t(ws),$(ce,e),e&&t(js),e&&t(U),e&&t(Es),$(me,e),e&&t(bs),e&&t(W),e&&t(As),$(ue,e),e&&t(Ps),e&&t(D),$(de),e&&t(Ts),e&&t(K),e&&t(qs),e&&t(A),e&&t(Ms),$(_e,e),e&&t(Ss),e&&t(V),e&&t(Fs),$(ke,e),e&&t(zs),e&&t(N),$($e),e&&t(Cs),e&&t(G),e&&t(Ls),e&&t(We),e&&t(Ds),e&&t(Je),e&&t(Ns),$(xe,e)}}}const 
Pn={local:"pipelines-for-inference",sections:[{local:"pipeline-usage",sections:[{local:"choose-a-model-and-tokenizer",title:"Choose a model and tokenizer"}],title:"Pipeline usage"},{local:"audio-pipeline",title:"Audio pipeline"},{local:"vision-pipeline",title:"Vision pipeline"}],title:"Pipelines for inference"};function Tn(Be,m,P){let{fw:u}=m;return Be.$$set=x=>{"fw"in x&&P(0,u=x.fw)},[u]}class Cn extends $n{constructor(m){super();xn(this,m,Tn,An,yn,{fw:0})}}export{Cn as default,Pn as metadata};
248
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages/custom_models.mdx-12924744.js
import{S as Hi,i as Bi,s as Si,e as l,k as c,w as p,t as o,M as Wi,c as r,d as s,m as d,a as i,x as _,h as n,b as h,F as t,g as f,y as g,q as y,o as w,B as b}from"../chunks/vendor-4833417e.js";import{T as ar}from"../chunks/Tip-fffd6df1.js";import{I as Nt}from"../chunks/IconCopyLink-4b81c553.js";import{C as E}from"../chunks/CodeBlock-6a3d1b46.js";import"../chunks/CopyButton-dacfbfaf.js";function Li(z){let u,k;return{c(){u=l("p"),k=o("If your model is very similar to a model inside the library, you can re-use the same configuration as this model.")},l(m){u=r(m,"P",{});var v=i(u);k=n(v,"If your model is very similar to a model inside the library, you can re-use the same configuration as this model."),v.forEach(s)},m(m,v){f(m,u,v),t(u,k)},d(m){m&&s(u)}}}function Yi(z){let u,k;return{c(){u=l("p"),k=o("This API is experimental and may have some slight breaking changes in the next releases.")},l(m){u=r(m,"P",{});var v=i(u);k=n(v,"This API is experimental and may have some slight breaking changes in the next releases."),v.forEach(s)},m(m,v){f(m,u,v),t(u,k)},d(m){m&&s(u)}}}function zi(z){let u,k,m,v,G;return{c(){u=l("p"),k=o(`If copying a modeling files from the library, you will need to replace all the relative imports at the top of the file to import from the `),m=l("code"),v=o("transformers"),G=o(" package.")},l(P){u=r(P,"P",{});var I=i(u);k=n(I,`If copying a modeling files from the library, you will need to replace all the relative imports at the top of the file to import from the `),m=r(I,"CODE",{});var ee=i(m);v=n(ee,"transformers"),ee.forEach(s),G=n(I," package."),I.forEach(s)},m(P,I){f(P,u,I),t(u,k),t(u,m),t(m,v),t(u,G)},d(P){P&&s(u)}}}function Gi(z){let u,k,m,v,G,P,I,ee,sn,qs,Ke,on,Is,Ue,nn,Ns,N,an,be,ln,rn,Ve,fn,cn,xs,K,te,xt,ve,dn,Ot,hn,Os,se,un,Dt,mn,pn,Ds,Je,_n,Fs,$e,Hs,Qe,gn,Bs,x,ke,yn,Ft,wn,bn,vn,U,$n,Ht,kn,En,Bt,jn,Cn,Pn,V,An,St,Tn,Mn,Wt,Rn,qn,Ss,O,In,Lt,Nn,xn,Yt,On,Dn,Ws,D,Fn,zt,Hn,Bn,Gt,Sn,Wn,Ls,Xe,Ln,Ys,Ee,zs,A,Yn,Kt,zn,Gn,Ut,Kn,Un,Vt,Vn,Jn,Gs,je,Ks,F,Qn,Ze,Xn,Zn,et,ea,ta,Us,J,oe,Jt,Ce,sa,Qt,oa,Vs,H,na,tt,aa,la,Xt,ra,ia,Js,ne,fa,Zt,ca,da,Qs,Pe,Xs,st,ha,Zs,Ae,eo,C,ua,es,ma,pa,ts,_a,ga,ss,ya,wa,os,ba,va,to,ae,so,B,$a,ns,ka,Ea,ot,ja,Ca,oo,nt,Pa,no,Te,ao,T,Aa,at,Ta,Ma,lt,Ra,qa,rt,Ia,Na,lo,it,xa,ro,Me,io,S,Oa,ft,Da,Fa,ct,Ha,Ba,fo,Q,le,as,Re,Sa,ls,Wa,co,re,ho,$,La,rs,Ya,za,is,Ga,Ka,fs,Ua,Va,cs,Ja,Qa,ds,Xa,Za,hs,el,tl,us,sl,ol,uo,qe,mo,W,nl,ms,al,ll,ps,rl,il,po,ie,_o,dt,fl,go,ht,cl,yo,Ie,wo,fe,dl,_s,hl,ul,bo,Ne,vo,ce,ml,ut,pl,_l,$o,mt,gl,ko,xe,Eo,pt,yl,jo,Oe,Co,_t,wl,Po,De,Ao,gt,bl,To,Fe,Mo,M,vl,gs,$l,kl,ys,El,jl,He,Cl,Pl,Ro,de,Al,yt,Tl,Ml,qo,X,he,ws,Be,Rl,bs,ql,Io,R,Il,vs,Nl,xl,Se,Ol,Dl,$s,Fl,Hl,No,We,xo,ue,Bl,ks,Sl,Wl,Oo,Le,Do,wt,Ll,Fo,Z,me,Es,Ye,Yl,js,zl,Ho,bt,Gl,Bo,L,Kl,Cs,Ul,Vl,Ps,Jl,Ql,So,ze,Wo,q,Xl,vt,Zl,er,As,tr,sr,Ts,or,nr,Lo;return P=new Nt({}),ve=new Nt({}),$e=new E({props:{code:`from transformers import PretrainedConfig from typing import List class ResnetConfig(PretrainedConfig): model_type = "resnet" def __init__( self, block_type="bottleneck", layers: List[int] = [3, 4, 6, 3], num_classes: int = 1000, input_channels: int = 3, cardinality: int = 1, base_width: int = 64, stem_width: int = 64, stem_type: str = "", avg_down: bool = False, **kwargs, ): if block_type not in ["basic", "bottleneck"]: raise ValueError(f"\`block\` must be 'basic' or bottleneck', got {block}.") if stem_type not in ["", "deep", "deep-tiered"]: raise ValueError(f"\`stem_type\` must be '', 'deep' or 'deep-tiered', got {block}.") self.block_type = block_type self.layers = layers self.num_classes = 
num_classes self.input_channels = input_channels self.cardinality = cardinality self.base_width = base_width self.stem_width = stem_width self.stem_type = stem_type self.avg_down = avg_down super().__init__(**kwargs)`,highlighted:`<span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> PretrainedConfig <span class="hljs-keyword">from</span> typing <span class="hljs-keyword">import</span> <span class="hljs-type">List</span> <span class="hljs-keyword">class</span> <span class="hljs-title class_">ResnetConfig</span>(<span class="hljs-title class_ inherited__">PretrainedConfig</span>): model_type = <span class="hljs-string">&quot;resnet&quot;</span> <span class="hljs-keyword">def</span> <span class="hljs-title function_">__init__</span>(<span class="hljs-params"> self, block_type=<span class="hljs-string">&quot;bottleneck&quot;</span>, layers: <span class="hljs-type">List</span>[<span class="hljs-built_in">int</span>] = [<span class="hljs-number">3</span>, <span class="hljs-number">4</span>, <span class="hljs-number">6</span>, <span class="hljs-number">3</span>], num_classes: <span class="hljs-built_in">int</span> = <span class="hljs-number">1000</span>, input_channels: <span class="hljs-built_in">int</span> = <span class="hljs-number">3</span>, cardinality: <span class="hljs-built_in">int</span> = <span class="hljs-number">1</span>, base_width: <span class="hljs-built_in">int</span> = <span class="hljs-number">64</span>, stem_width: <span class="hljs-built_in">int</span> = <span class="hljs-number">64</span>, stem_type: <span class="hljs-built_in">str</span> = <span class="hljs-string">&quot;&quot;</span>, avg_down: <span class="hljs-built_in">bool</span> = <span class="hljs-literal">False</span>, **kwargs, </span>): <span class="hljs-keyword">if</span> block_type <span class="hljs-keyword">not</span> <span class="hljs-keyword">in</span> [<span class="hljs-string">&quot;basic&quot;</span>, <span class="hljs-string">&quot;bottleneck&quot;</span>]: <span class="hljs-keyword">raise</span> ValueError(<span class="hljs-string">f&quot;\`block\` must be &#x27;basic&#x27; or bottleneck&#x27;, got <span class="hljs-subst">{block}</span>.&quot;</span>) <span class="hljs-keyword">if</span> stem_type <span class="hljs-keyword">not</span> <span class="hljs-keyword">in</span> [<span class="hljs-string">&quot;&quot;</span>, <span class="hljs-string">&quot;deep&quot;</span>, <span class="hljs-string">&quot;deep-tiered&quot;</span>]: <span class="hljs-keyword">raise</span> ValueError(<span class="hljs-string">f&quot;\`stem_type\` must be &#x27;&#x27;, &#x27;deep&#x27; or &#x27;deep-tiered&#x27;, got <span class="hljs-subst">{block}</span>.&quot;</span>) self.block_type = block_type self.layers = layers self.num_classes = num_classes self.input_channels = input_channels self.cardinality = cardinality self.base_width = base_width self.stem_width = stem_width self.stem_type = stem_type self.avg_down = avg_down <span class="hljs-built_in">super</span>().__init__(**kwargs)`}}),Ee=new E({props:{code:`resnet50d_config = ResnetConfig(block_type="bottleneck", stem_width=32, stem_type="deep", avg_down=True) resnet50d_config.save_pretrained("custom-resnet")`,highlighted:`resnet50d_config = ResnetConfig(block_type=<span class="hljs-string">&quot;bottleneck&quot;</span>, stem_width=<span class="hljs-number">32</span>, stem_type=<span class="hljs-string">&quot;deep&quot;</span>, avg_down=<span class="hljs-literal">True</span>) resnet50d_config.save_pretrained(<span 
class="hljs-string">&quot;custom-resnet&quot;</span>)`}}),je=new E({props:{code:'resnet50d_config = ResnetConfig.from_pretrained("custom-resnet")',highlighted:'resnet50d_config = ResnetConfig.from_pretrained(<span class="hljs-string">&quot;custom-resnet&quot;</span>)'}}),Ce=new Nt({}),Pe=new E({props:{code:`from transformers import PreTrainedModel from timm.models.resnet import BasicBlock, Bottleneck, ResNet from .configuration_resnet import ResnetConfig BLOCK_MAPPING = {"basic": BasicBlock, "bottleneck": Bottleneck} class ResnetModel(PreTrainedModel): config_class = ResnetConfig def __init__(self, config): super().__init__(config) block_layer = BLOCK_MAPPING[config.block_type] self.model = ResNet( block_layer, config.layers, num_classes=config.num_classes, in_chans=config.input_channels, cardinality=config.cardinality, base_width=config.base_width, stem_width=config.stem_width, stem_type=config.stem_type, avg_down=config.avg_down, ) def forward(self, tensor): return self.model.forward_features(tensor)`,highlighted:`<span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> PreTrainedModel <span class="hljs-keyword">from</span> timm.models.resnet <span class="hljs-keyword">import</span> BasicBlock, Bottleneck, ResNet <span class="hljs-keyword">from</span> .configuration_resnet <span class="hljs-keyword">import</span> ResnetConfig BLOCK_MAPPING = {<span class="hljs-string">&quot;basic&quot;</span>: BasicBlock, <span class="hljs-string">&quot;bottleneck&quot;</span>: Bottleneck} <span class="hljs-keyword">class</span> <span class="hljs-title class_">ResnetModel</span>(<span class="hljs-title class_ inherited__">PreTrainedModel</span>): config_class = ResnetConfig <span class="hljs-keyword">def</span> <span class="hljs-title function_">__init__</span>(<span class="hljs-params">self, config</span>): <span class="hljs-built_in">super</span>().__init__(config) block_layer = BLOCK_MAPPING[config.block_type] self.model = ResNet( block_layer, config.layers, num_classes=config.num_classes, in_chans=config.input_channels, cardinality=config.cardinality, base_width=config.base_width, stem_width=config.stem_width, stem_type=config.stem_type, avg_down=config.avg_down, ) <span class="hljs-keyword">def</span> <span class="hljs-title function_">forward</span>(<span class="hljs-params">self, tensor</span>): <span class="hljs-keyword">return</span> self.model.forward_features(tensor)`}}),Ae=new E({props:{code:`class ResnetModelForImageClassification(PreTrainedModel): config_class = ResnetConfig def __init__(self, config): super().__init__(config) block_layer = BLOCK_MAPPING[config.block_type] self.model = ResNet( block_layer, config.layers, num_classes=config.num_classes, in_chans=config.input_channels, cardinality=config.cardinality, base_width=config.base_width, stem_width=config.stem_width, stem_type=config.stem_type, avg_down=config.avg_down, ) def forward(self, tensor, labels=None): logits = self.model(tensor) if labels is not None: loss = torch.nn.cross_entropy(logits, labels) return {"loss": loss, "logits": logits} return {"logits": logits}`,highlighted:`<span class="hljs-keyword">class</span> <span class="hljs-title class_">ResnetModelForImageClassification</span>(<span class="hljs-title class_ inherited__">PreTrainedModel</span>): config_class = ResnetConfig <span class="hljs-keyword">def</span> <span class="hljs-title function_">__init__</span>(<span class="hljs-params">self, config</span>): <span class="hljs-built_in">super</span>().__init__(config) block_layer = 
BLOCK_MAPPING[config.block_type] self.model = ResNet( block_layer, config.layers, num_classes=config.num_classes, in_chans=config.input_channels, cardinality=config.cardinality, base_width=config.base_width, stem_width=config.stem_width, stem_type=config.stem_type, avg_down=config.avg_down, ) <span class="hljs-keyword">def</span> <span class="hljs-title function_">forward</span>(<span class="hljs-params">self, tensor, labels=<span class="hljs-literal">None</span></span>): logits = self.model(tensor) <span class="hljs-keyword">if</span> labels <span class="hljs-keyword">is</span> <span class="hljs-keyword">not</span> <span class="hljs-literal">None</span>: loss = torch.nn.cross_entropy(logits, labels) <span class="hljs-keyword">return</span> {<span class="hljs-string">&quot;loss&quot;</span>: loss, <span class="hljs-string">&quot;logits&quot;</span>: logits} <span class="hljs-keyword">return</span> {<span class="hljs-string">&quot;logits&quot;</span>: logits}`}}),ae=new ar({props:{$$slots:{default:[Li]},$$scope:{ctx:z}}}),Te=new E({props:{code:"resnet50d = ResnetModelForImageClassification(resnet50d_config)",highlighted:"resnet50d = ResnetModelForImageClassification(resnet50d_config)"}}),Me=new E({props:{code:`import timm pretrained_model = timm.create_model("resnet50d", pretrained=True) resnet50d.model.load_state_dict(pretrained_model.state_dict())`,highlighted:`<span class="hljs-keyword">import</span> timm pretrained_model = timm.create_model(<span class="hljs-string">&quot;resnet50d&quot;</span>, pretrained=<span class="hljs-literal">True</span>) resnet50d.model.load_state_dict(pretrained_model.state_dict())`}}),Re=new Nt({}),re=new ar({props:{warning:"&lcub;true}",$$slots:{default:[Yi]},$$scope:{ctx:z}}}),qe=new E({props:{code:`. \u2514\u2500\u2500 resnet_model \u251C\u2500\u2500 __init__.py \u251C\u2500\u2500 configuration_resnet.py \u2514\u2500\u2500 modeling_resnet.py`,highlighted:`. 
\u2514\u2500\u2500 resnet_model \u251C\u2500\u2500 __init__.<span class="hljs-keyword">py</span> \u251C\u2500\u2500 configuration_resnet.<span class="hljs-keyword">py</span> \u2514\u2500\u2500 modeling_resnet.<span class="hljs-keyword">py</span>`}}),ie=new ar({props:{warning:"&lcub;true}",$$slots:{default:[zi]},$$scope:{ctx:z}}}),Ie=new E({props:{code:`from resnet_model.configuration_resnet import ResnetConfig from resnet_model.modeling_resnet import ResnetModel, ResnetModelForImageClassification`,highlighted:`<span class="hljs-keyword">from</span> resnet_model.configuration_resnet <span class="hljs-keyword">import</span> ResnetConfig <span class="hljs-keyword">from</span> resnet_model.modeling_resnet <span class="hljs-keyword">import</span> ResnetModel, ResnetModelForImageClassification`}}),Ne=new E({props:{code:`ResnetConfig.register_for_auto_class() ResnetModel.register_for_auto_class("AutoModel") ResnetModelForImageClassification.register_for_auto_class("AutoModelForImageClassification")`,highlighted:`ResnetConfig.register_for_auto_class() ResnetModel.register_for_auto_class(<span class="hljs-string">&quot;AutoModel&quot;</span>) ResnetModelForImageClassification.register_for_auto_class(<span class="hljs-string">&quot;AutoModelForImageClassification&quot;</span>)`}}),xe=new E({props:{code:`resnet50d_config = ResnetConfig(block_type="bottleneck", stem_width=32, stem_type="deep", avg_down=True) resnet50d = ResnetModelForImageClassification(resnet50d_config) pretrained_model = timm.create_model("resnet50d", pretrained=True) resnet50d.model.load_state_dict(pretrained_model.state_dict())`,highlighted:`resnet50d_config = ResnetConfig(block_type=<span class="hljs-string">&quot;bottleneck&quot;</span>, stem_width=<span class="hljs-number">32</span>, stem_type=<span class="hljs-string">&quot;deep&quot;</span>, avg_down=<span class="hljs-literal">True</span>) resnet50d = ResnetModelForImageClassification(resnet50d_config) pretrained_model = timm.create_model(<span class="hljs-string">&quot;resnet50d&quot;</span>, pretrained=<span class="hljs-literal">True</span>) resnet50d.model.load_state_dict(pretrained_model.state_dict())`}}),Oe=new E({props:{code:"huggingface-cli login",highlighted:"huggingface-cli login"}}),De=new E({props:{code:`from huggingface_hub import notebook_login notebook_login()`,highlighted:`<span class="hljs-keyword">from</span> huggingface_hub <span class="hljs-keyword">import</span> notebook_login notebook_login()`}}),Fe=new E({props:{code:'resnet50d.push_to_hub("custom-resnet50d")',highlighted:'resnet50d.push_to_hub(<span class="hljs-string">&quot;custom-resnet50d&quot;</span>)'}}),Be=new Nt({}),We=new E({props:{code:`from transformers import AutoModelForImageClassification model = AutoModelForImageClassification.from_pretrained("sgugger/custom-resnet50d", trust_remote_code=True)`,highlighted:`<span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModelForImageClassification model = AutoModelForImageClassification.from_pretrained(<span class="hljs-string">&quot;sgugger/custom-resnet50d&quot;</span>, trust_remote_code=<span class="hljs-literal">True</span>)`}}),Le=new E({props:{code:`commit_hash = "ed94a7c6247d8aedce4647f00f20de6875b5b292" model = AutoModelForImageClassification.from_pretrained( "sgugger/custom-resnet50d", trust_remote_code=True, revision=commit_hash )`,highlighted:`commit_hash = <span class="hljs-string">&quot;ed94a7c6247d8aedce4647f00f20de6875b5b292&quot;</span> model = 
AutoModelForImageClassification.from_pretrained( <span class="hljs-string">&quot;sgugger/custom-resnet50d&quot;</span>, trust_remote_code=<span class="hljs-literal">True</span>, revision=commit_hash )`}}),Ye=new Nt({}),ze=new E({props:{code:`from transformers import AutoConfig, AutoModel, AutoModelForImageClassification AutoConfig.register("resnet", ResnetConfig) AutoModel.register(ResnetConfig, ResnetModel) AutoModelForImageClassification.register(ResnetConfig, ResnetModelForImageClassification)`,highlighted:`<span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig, AutoModel, AutoModelForImageClassification AutoConfig.register(<span class="hljs-string">&quot;resnet&quot;</span>, ResnetConfig) AutoModel.register(ResnetConfig, ResnetModel) AutoModelForImageClassification.register(ResnetConfig, ResnetModelForImageClassification)`}}),{c(){u=l("meta"),k=c(),m=l("h1"),v=l("a"),G=l("span"),p(P.$$.fragment),I=c(),ee=l("span"),sn=o("Sharing custom models"),qs=c(),Ke=l("p"),on=o(`The \u{1F917} Transformers library is designed to be easily extensible. Every model is fully coded in a given subfolder of the repository with no abstraction, so you can easily copy a modeling file and tweak it to your needs.`),Is=c(),Ue=l("p"),nn=o(`If you are writing a brand new model, it might be easier to start from scratch. In this tutorial, we will show you how to write a custom model and its configuration so it can be used inside Transformers, and how you can share it with the community (with the code it relies on) so that anyone can use it, even if it\u2019s not present in the \u{1F917} Transformers library.`),Ns=c(),N=l("p"),an=o(`We will illustrate all of this on a ResNet model, by wrapping the ResNet class of the `),be=l("a"),ln=o("timm library"),rn=o(" into a "),Ve=l("a"),fn=o("PreTrainedModel"),cn=o("."),xs=c(),K=l("h2"),te=l("a"),xt=l("span"),p(ve.$$.fragment),dn=c(),Ot=l("span"),hn=o("Writing a custom configuration"),Os=c(),se=l("p"),un=o(`Before we dive into the model, let\u2019s first write its configuration. The configuration of a model is an object that will contain all the necessary information to build the model. As we will see in the next section, the model can only take a `),Dt=l("code"),mn=o("config"),pn=o(" to be initialized, so we really need that object to be as complete as possible."),Ds=c(),Je=l("p"),_n=o(`In our example, we will take a couple of arguments of the ResNet class that we might want to tweak. Different configurations will then give us the different types of ResNets that are possible. We then just store those arguments, after checking the validity of a few of them.`),Fs=c(),p($e.$$.fragment),Hs=c(),Qe=l("p"),gn=o("The three important things to remember when writing you own configuration are the following:"),Bs=c(),x=l("ul"),ke=l("li"),yn=o("you have to inherit from "),Ft=l("code"),wn=o("PretrainedConfig"),bn=o(","),vn=c(),U=l("li"),$n=o("the "),Ht=l("code"),kn=o("__init__"),En=o(" of your "),Bt=l("code"),jn=o("PretrainedConfig"),Cn=o(" must accept any kwargs,"),Pn=c(),V=l("li"),An=o("those "),St=l("code"),Tn=o("kwargs"),Mn=o(" need to be passed to the superclass "),Wt=l("code"),Rn=o("__init__"),qn=o("."),Ss=c(),O=l("p"),In=o(`The inheritance is to make sure you get all the functionality from the \u{1F917} Transformers library, while the two other constraints come from the fact a `),Lt=l("code"),Nn=o("PretrainedConfig"),xn=o(` has more fields than the ones you are setting. 
When reloading a config with the `),Yt=l("code"),On=o("from_pretrained"),Dn=o(` method, those fields need to be accepted by your config and then sent to the superclass.`),Ws=c(),D=l("p"),Fn=o("Defining a "),zt=l("code"),Hn=o("model_type"),Bn=o(" for your configuration (here "),Gt=l("code"),Sn=o('model_type="resnet"'),Wn=o(`) is not mandatory, unless you want to register your model with the auto classes (see last section).`),Ls=c(),Xe=l("p"),Ln=o(`With this done, you can easily create and save your configuration like you would do with any other model config of the library. Here is how we can create a resnet50d config and save it:`),Ys=c(),p(Ee.$$.fragment),zs=c(),A=l("p"),Yn=o("This will save a file named "),Kt=l("code"),zn=o("config.json"),Gn=o(" inside the folder "),Ut=l("code"),Kn=o("custom-resnet"),Un=o(`. You can then reload your config with the `),Vt=l("code"),Vn=o("from_pretrained"),Jn=o(" method:"),Gs=c(),p(je.$$.fragment),Ks=c(),F=l("p"),Qn=o("You can also use any other method of the "),Ze=l("a"),Xn=o("PretrainedConfig"),Zn=o(" class, like "),et=l("a"),ea=o("push_to_hub()"),ta=o(` to directly upload your config to the Hub.`),Us=c(),J=l("h2"),oe=l("a"),Jt=l("span"),p(Ce.$$.fragment),sa=c(),Qt=l("span"),oa=o("Writing a custom model"),Vs=c(),H=l("p"),na=o(`Now that we have our ResNet configuration, we can go on writing the model. We will actually write two: one that extracts the hidden features from a batch of images (like `),tt=l("a"),aa=o("BertModel"),la=o(`) and one that is suitable for image classification (like `),Xt=l("code"),ra=o("BertModelForSequenceClassification"),ia=o(")."),Js=c(),ne=l("p"),fa=o(`As we mentioned before, we\u2019ll only write a loose wrapper of the model to keep it simple for this example. The only thing we need to do before writing this class is a map between the block types and actual block classes. Then the model is defined from the configuration by passing everything to the `),Zt=l("code"),ca=o("ResNet"),da=o(" class:"),Qs=c(),p(Pe.$$.fragment),Xs=c(),st=l("p"),ha=o("For the model that will classify images, we just change the forward method:"),Zs=c(),p(Ae.$$.fragment),eo=c(),C=l("p"),ua=o("In both cases, notice how we inherit from "),es=l("code"),ma=o("PreTrainedModel"),pa=o(" and call the superclass initialization with the "),ts=l("code"),_a=o("config"),ga=o(` (a bit like when you write a regular `),ss=l("code"),ya=o("torch.nn.Module"),wa=o("). The line that sets the "),os=l("code"),ba=o("config_class"),va=o(` is not mandatory, unless you want to register your model with the auto classes (see last section).`),to=c(),p(ae.$$.fragment),so=c(),B=l("p"),$a=o(`You can have your model return anything you want, but returning a dictionary like we did for `),ns=l("code"),ka=o("ResnetModelForImageClassification"),Ea=o(`, with the loss included when labels are passed, will make your model directly usable inside the `),ot=l("a"),ja=o("Trainer"),Ca=o(` class. Using another output format is fine as long as you are planning on using your own training loop or another library for training.`),oo=c(),nt=l("p"),Pa=o("Now that we have our model class, let\u2019s create one:"),no=c(),p(Te.$$.fragment),ao=c(),T=l("p"),Aa=o("Again, you can use any of the methods of "),at=l("a"),Ta=o("PreTrainedModel"),Ma=o(", like "),lt=l("a"),Ra=o("save_pretrained()"),qa=o(` or `),rt=l("a"),Ia=o("push_to_hub()"),Na=o(`. We will use the second in the next section, and see how to push the model weights with the code of our model. 
But first, let\u2019s load some pretrained weights inside our model.`),lo=c(),it=l("p"),xa=o(`In your own use case, you will probably be training your custom model on your own data. To go fast for this tutorial, we will use the pretrained version of the resnet50d. Since our model is just a wrapper around it, it\u2019s going to be easy to transfer those weights:`),ro=c(),p(Me.$$.fragment),io=c(),S=l("p"),Oa=o("Now let\u2019s see how to make sure that when we do "),ft=l("a"),Da=o("save_pretrained()"),Fa=o(" or "),ct=l("a"),Ha=o("push_to_hub()"),Ba=o(`, the code of the model is saved.`),fo=c(),Q=l("h2"),le=l("a"),as=l("span"),p(Re.$$.fragment),Sa=c(),ls=l("span"),Wa=o("Sending the code to the Hub"),co=c(),p(re.$$.fragment),ho=c(),$=l("p"),La=o("First, make sure your model is fully defined in a "),rs=l("code"),Ya=o(".py"),za=o(` file. It can rely on relative imports to some other files as long as all the files are in the same directory (we don\u2019t support submodules for this feature yet). For our example, we\u2019ll define a `),is=l("code"),Ga=o("modeling_resnet.py"),Ka=o(" file and a "),fs=l("code"),Ua=o("configuration_resnet.py"),Va=o(` file in a folder of the current working directory named `),cs=l("code"),Ja=o("resnet_model"),Qa=o(". The configuration file contains the code for "),ds=l("code"),Xa=o("ResnetConfig"),Za=o(` and the modeling file contains the code of `),hs=l("code"),el=o("ResnetModel"),tl=o(" and "),us=l("code"),sl=o("ResnetModelForImageClassification"),ol=o("."),uo=c(),p(qe.$$.fragment),mo=c(),W=l("p"),nl=o("The "),ms=l("code"),al=o("__init__.py"),ll=o(" can be empty, it\u2019s just there so that Python detects "),ps=l("code"),rl=o("resnet_model"),il=o(" can be use as a module."),po=c(),p(ie.$$.fragment),_o=c(),dt=l("p"),fl=o("Note that you can re-use (or subclass) an existing configuration/model."),go=c(),ht=l("p"),cl=o(`To share your model with the community, follow those steps: first import the ResNet model and config from the newly created files:`),yo=c(),p(Ie.$$.fragment),wo=c(),fe=l("p"),dl=o("Then you have to tell the library you want to copy the code files of those objects when using the "),_s=l("code"),hl=o("save_pretrained"),ul=o(` method and properly register them with a given Auto class (especially for models), just run:`),bo=c(),p(Ne.$$.fragment),vo=c(),ce=l("p"),ml=o(`Note that there is no need to specify an auto class for the configuration (there is only one auto class for them, `),ut=l("a"),pl=o("AutoConfig"),_l=o(`) but it\u2019s different for models. Your custom model could be suitable for many different tasks, so you have to specify which one of the auto classes is the correct one for your model.`),$o=c(),mt=l("p"),gl=o("Next, let\u2019s create the config and models as we did before:"),ko=c(),p(xe.$$.fragment),Eo=c(),pt=l("p"),yl=o("Now to send the model to the Hub, make sure you are logged in. Either run in your terminal:"),jo=c(),p(Oe.$$.fragment),Co=c(),_t=l("p"),wl=o("or from a notebook:"),Po=c(),p(De.$$.fragment),Ao=c(),gt=l("p"),bl=o("You can then push to to your own namespace (or an organization you are a member of) like this:"),To=c(),p(Fe.$$.fragment),Mo=c(),M=l("p"),vl=o(`On top of the modeling weights and the configuration in json format, this also copied the modeling and configuration `),gs=l("code"),$l=o(".py"),kl=o(" files in the folder "),ys=l("code"),El=o("custom-resnet50d"),jl=o(` and uploaded the result to the Hub. 
You can check the result in this `),He=l("a"),Cl=o("model repo"),Pl=o("."),Ro=c(),de=l("p"),Al=o("See the "),yt=l("a"),Tl=o("sharing tutorial"),Ml=o(" for more information on the push to Hub method."),qo=c(),X=l("h2"),he=l("a"),ws=l("span"),p(Be.$$.fragment),Rl=c(),bs=l("span"),ql=o("Using a model with custom code"),Io=c(),R=l("p"),Il=o(`You can use any configuration, model or tokenizer with custom code files in its repository with the auto-classes and the `),vs=l("code"),Nl=o("from_pretrained"),xl=o(" method. All files and code uploaded to the Hub are scanned for malware (refer to the "),Se=l("a"),Ol=o("Hub security"),Dl=o(` documentation for more information), but you should still review the model code and author to avoid executing malicious code on your machine. Set `),$s=l("code"),Fl=o("trust_remote_code=True"),Hl=o(` to use a model with custom code:`),No=c(),p(We.$$.fragment),xo=c(),ue=l("p"),Bl=o("It is also strongly encouraged to pass a commit hash as a "),ks=l("code"),Sl=o("revision"),Wl=o(` to make sure the author of the models did not update the code with some malicious new lines (unless you fully trust the authors of the models).`),Oo=c(),p(Le.$$.fragment),Do=c(),wt=l("p"),Ll=o(`Note that when browsing the commit history of the model repo on the Hub, there is a button to easily copy the commit hash of any commit.`),Fo=c(),Z=l("h2"),me=l("a"),Es=l("span"),p(Ye.$$.fragment),Yl=c(),js=l("span"),zl=o("Registering a model with custom code to the auto classes"),Ho=c(),bt=l("p"),Gl=o(`If you are writing a library that extends \u{1F917} Transformers, you may want to extend the auto classes to include your own model. This is different from pushing the code to the Hub in the sense that users will need to import your library to get the custom models (contrarily to automatically downloading the model code from the Hub).`),Bo=c(),L=l("p"),Kl=o("As long as your config has a "),Cs=l("code"),Ul=o("model_type"),Vl=o(` attribute that is different from existing model types, and that your model classes have the right `),Ps=l("code"),Jl=o("config_class"),Ql=o(" attributes, you can just add them to the auto classes likes this:"),So=c(),p(ze.$$.fragment),Wo=c(),q=l("p"),Xl=o("Note that the first argument used when registering your custom config to "),vt=l("a"),Zl=o("AutoConfig"),er=o(" needs to match the "),As=l("code"),tr=o("model_type"),sr=o(` of your custom config, and the first argument used when registering your custom models to any auto model class needs to match the `),Ts=l("code"),or=o("config_class"),nr=o(" of those models."),this.h()},l(e){const a=Wi('[data-svelte="svelte-1phssyn"]',document.head);u=r(a,"META",{name:!0,content:!0}),a.forEach(s),k=d(e),m=r(e,"H1",{class:!0});var Ge=i(m);v=r(Ge,"A",{id:!0,class:!0,href:!0});var Ms=i(v);G=r(Ms,"SPAN",{});var Rs=i(G);_(P.$$.fragment,Rs),Rs.forEach(s),Ms.forEach(s),I=d(Ge),ee=r(Ge,"SPAN",{});var lr=i(ee);sn=n(lr,"Sharing custom models"),lr.forEach(s),Ge.forEach(s),qs=d(e),Ke=r(e,"P",{});var rr=i(Ke);on=n(rr,`The \u{1F917} Transformers library is designed to be easily extensible. Every model is fully coded in a given subfolder of the repository with no abstraction, so you can easily copy a modeling file and tweak it to your needs.`),rr.forEach(s),Is=d(e),Ue=r(e,"P",{});var ir=i(Ue);nn=n(ir,`If you are writing a brand new model, it might be easier to start from scratch. 
In this tutorial, we will show you how to write a custom model and its configuration so it can be used inside Transformers, and how you can share it with the community (with the code it relies on) so that anyone can use it, even if it\u2019s not present in the \u{1F917} Transformers library.`),ir.forEach(s),Ns=d(e),N=r(e,"P",{});var $t=i(N);an=n($t,`We will illustrate all of this on a ResNet model, by wrapping the ResNet class of the `),be=r($t,"A",{href:!0,rel:!0});var fr=i(be);ln=n(fr,"timm library"),fr.forEach(s),rn=n($t," into a "),Ve=r($t,"A",{href:!0});var cr=i(Ve);fn=n(cr,"PreTrainedModel"),cr.forEach(s),cn=n($t,"."),$t.forEach(s),xs=d(e),K=r(e,"H2",{class:!0});var Yo=i(K);te=r(Yo,"A",{id:!0,class:!0,href:!0});var dr=i(te);xt=r(dr,"SPAN",{});var hr=i(xt);_(ve.$$.fragment,hr),hr.forEach(s),dr.forEach(s),dn=d(Yo),Ot=r(Yo,"SPAN",{});var ur=i(Ot);hn=n(ur,"Writing a custom configuration"),ur.forEach(s),Yo.forEach(s),Os=d(e),se=r(e,"P",{});var zo=i(se);un=n(zo,`Before we dive into the model, let\u2019s first write its configuration. The configuration of a model is an object that will contain all the necessary information to build the model. As we will see in the next section, the model can only take a `),Dt=r(zo,"CODE",{});var mr=i(Dt);mn=n(mr,"config"),mr.forEach(s),pn=n(zo," to be initialized, so we really need that object to be as complete as possible."),zo.forEach(s),Ds=d(e),Je=r(e,"P",{});var pr=i(Je);_n=n(pr,`In our example, we will take a couple of arguments of the ResNet class that we might want to tweak. Different configurations will then give us the different types of ResNets that are possible. We then just store those arguments, after checking the validity of a few of them.`),pr.forEach(s),Fs=d(e),_($e.$$.fragment,e),Hs=d(e),Qe=r(e,"P",{});var _r=i(Qe);gn=n(_r,"The three important things to remember when writing you own configuration are the following:"),_r.forEach(s),Bs=d(e),x=r(e,"UL",{});var kt=i(x);ke=r(kt,"LI",{});var Go=i(ke);yn=n(Go,"you have to inherit from "),Ft=r(Go,"CODE",{});var gr=i(Ft);wn=n(gr,"PretrainedConfig"),gr.forEach(s),bn=n(Go,","),Go.forEach(s),vn=d(kt),U=r(kt,"LI",{});var Et=i(U);$n=n(Et,"the "),Ht=r(Et,"CODE",{});var yr=i(Ht);kn=n(yr,"__init__"),yr.forEach(s),En=n(Et," of your "),Bt=r(Et,"CODE",{});var wr=i(Bt);jn=n(wr,"PretrainedConfig"),wr.forEach(s),Cn=n(Et," must accept any kwargs,"),Et.forEach(s),Pn=d(kt),V=r(kt,"LI",{});var jt=i(V);An=n(jt,"those "),St=r(jt,"CODE",{});var br=i(St);Tn=n(br,"kwargs"),br.forEach(s),Mn=n(jt," need to be passed to the superclass "),Wt=r(jt,"CODE",{});var vr=i(Wt);Rn=n(vr,"__init__"),vr.forEach(s),qn=n(jt,"."),jt.forEach(s),kt.forEach(s),Ss=d(e),O=r(e,"P",{});var Ct=i(O);In=n(Ct,`The inheritance is to make sure you get all the functionality from the \u{1F917} Transformers library, while the two other constraints come from the fact a `),Lt=r(Ct,"CODE",{});var $r=i(Lt);Nn=n($r,"PretrainedConfig"),$r.forEach(s),xn=n(Ct,` has more fields than the ones you are setting. 
When reloading a config with the `),Yt=r(Ct,"CODE",{});var kr=i(Yt);On=n(kr,"from_pretrained"),kr.forEach(s),Dn=n(Ct,` method, those fields need to be accepted by your config and then sent to the superclass.`),Ct.forEach(s),Ws=d(e),D=r(e,"P",{});var Pt=i(D);Fn=n(Pt,"Defining a "),zt=r(Pt,"CODE",{});var Er=i(zt);Hn=n(Er,"model_type"),Er.forEach(s),Bn=n(Pt," for your configuration (here "),Gt=r(Pt,"CODE",{});var jr=i(Gt);Sn=n(jr,'model_type="resnet"'),jr.forEach(s),Wn=n(Pt,`) is not mandatory, unless you want to register your model with the auto classes (see last section).`),Pt.forEach(s),Ls=d(e),Xe=r(e,"P",{});var Cr=i(Xe);Ln=n(Cr,`With this done, you can easily create and save your configuration like you would do with any other model config of the library. Here is how we can create a resnet50d config and save it:`),Cr.forEach(s),Ys=d(e),_(Ee.$$.fragment,e),zs=d(e),A=r(e,"P",{});var pe=i(A);Yn=n(pe,"This will save a file named "),Kt=r(pe,"CODE",{});var Pr=i(Kt);zn=n(Pr,"config.json"),Pr.forEach(s),Gn=n(pe," inside the folder "),Ut=r(pe,"CODE",{});var Ar=i(Ut);Kn=n(Ar,"custom-resnet"),Ar.forEach(s),Un=n(pe,`. You can then reload your config with the `),Vt=r(pe,"CODE",{});var Tr=i(Vt);Vn=n(Tr,"from_pretrained"),Tr.forEach(s),Jn=n(pe," method:"),pe.forEach(s),Gs=d(e),_(je.$$.fragment,e),Ks=d(e),F=r(e,"P",{});var At=i(F);Qn=n(At,"You can also use any other method of the "),Ze=r(At,"A",{href:!0});var Mr=i(Ze);Xn=n(Mr,"PretrainedConfig"),Mr.forEach(s),Zn=n(At," class, like "),et=r(At,"A",{href:!0});var Rr=i(et);ea=n(Rr,"push_to_hub()"),Rr.forEach(s),ta=n(At,` to directly upload your config to the Hub.`),At.forEach(s),Us=d(e),J=r(e,"H2",{class:!0});var Ko=i(J);oe=r(Ko,"A",{id:!0,class:!0,href:!0});var qr=i(oe);Jt=r(qr,"SPAN",{});var Ir=i(Jt);_(Ce.$$.fragment,Ir),Ir.forEach(s),qr.forEach(s),sa=d(Ko),Qt=r(Ko,"SPAN",{});var Nr=i(Qt);oa=n(Nr,"Writing a custom model"),Nr.forEach(s),Ko.forEach(s),Vs=d(e),H=r(e,"P",{});var Tt=i(H);na=n(Tt,`Now that we have our ResNet configuration, we can go on writing the model. We will actually write two: one that extracts the hidden features from a batch of images (like `),tt=r(Tt,"A",{href:!0});var xr=i(tt);aa=n(xr,"BertModel"),xr.forEach(s),la=n(Tt,`) and one that is suitable for image classification (like `),Xt=r(Tt,"CODE",{});var Or=i(Xt);ra=n(Or,"BertModelForSequenceClassification"),Or.forEach(s),ia=n(Tt,")."),Tt.forEach(s),Js=d(e),ne=r(e,"P",{});var Uo=i(ne);fa=n(Uo,`As we mentioned before, we\u2019ll only write a loose wrapper of the model to keep it simple for this example. The only thing we need to do before writing this class is a map between the block types and actual block classes. Then the model is defined from the configuration by passing everything to the `),Zt=r(Uo,"CODE",{});var Dr=i(Zt);ca=n(Dr,"ResNet"),Dr.forEach(s),da=n(Uo," class:"),Uo.forEach(s),Qs=d(e),_(Pe.$$.fragment,e),Xs=d(e),st=r(e,"P",{});var Fr=i(st);ha=n(Fr,"For the model that will classify images, we just change the forward method:"),Fr.forEach(s),Zs=d(e),_(Ae.$$.fragment,e),eo=d(e),C=r(e,"P",{});var Y=i(C);ua=n(Y,"In both cases, notice how we inherit from "),es=r(Y,"CODE",{});var Hr=i(es);ma=n(Hr,"PreTrainedModel"),Hr.forEach(s),pa=n(Y," and call the superclass initialization with the "),ts=r(Y,"CODE",{});var Br=i(ts);_a=n(Br,"config"),Br.forEach(s),ga=n(Y,` (a bit like when you write a regular `),ss=r(Y,"CODE",{});var Sr=i(ss);ya=n(Sr,"torch.nn.Module"),Sr.forEach(s),wa=n(Y,"). 
The line that sets the "),os=r(Y,"CODE",{});var Wr=i(os);ba=n(Wr,"config_class"),Wr.forEach(s),va=n(Y,` is not mandatory, unless you want to register your model with the auto classes (see last section).`),Y.forEach(s),to=d(e),_(ae.$$.fragment,e),so=d(e),B=r(e,"P",{});var Mt=i(B);$a=n(Mt,`You can have your model return anything you want, but returning a dictionary like we did for `),ns=r(Mt,"CODE",{});var Lr=i(ns);ka=n(Lr,"ResnetModelForImageClassification"),Lr.forEach(s),Ea=n(Mt,`, with the loss included when labels are passed, will make your model directly usable inside the `),ot=r(Mt,"A",{href:!0});var Yr=i(ot);ja=n(Yr,"Trainer"),Yr.forEach(s),Ca=n(Mt,` class. Using another output format is fine as long as you are planning on using your own training loop or another library for training.`),Mt.forEach(s),oo=d(e),nt=r(e,"P",{});var zr=i(nt);Pa=n(zr,"Now that we have our model class, let\u2019s create one:"),zr.forEach(s),no=d(e),_(Te.$$.fragment,e),ao=d(e),T=r(e,"P",{});var _e=i(T);Aa=n(_e,"Again, you can use any of the methods of "),at=r(_e,"A",{href:!0});var Gr=i(at);Ta=n(Gr,"PreTrainedModel"),Gr.forEach(s),Ma=n(_e,", like "),lt=r(_e,"A",{href:!0});var Kr=i(lt);Ra=n(Kr,"save_pretrained()"),Kr.forEach(s),qa=n(_e,` or `),rt=r(_e,"A",{href:!0});var Ur=i(rt);Ia=n(Ur,"push_to_hub()"),Ur.forEach(s),Na=n(_e,`. We will use the second in the next section, and see how to push the model weights with the code of our model. But first, let\u2019s load some pretrained weights inside our model.`),_e.forEach(s),lo=d(e),it=r(e,"P",{});var Vr=i(it);xa=n(Vr,`In your own use case, you will probably be training your custom model on your own data. To go fast for this tutorial, we will use the pretrained version of the resnet50d. Since our model is just a wrapper around it, it\u2019s going to be easy to transfer those weights:`),Vr.forEach(s),ro=d(e),_(Me.$$.fragment,e),io=d(e),S=r(e,"P",{});var Rt=i(S);Oa=n(Rt,"Now let\u2019s see how to make sure that when we do "),ft=r(Rt,"A",{href:!0});var Jr=i(ft);Da=n(Jr,"save_pretrained()"),Jr.forEach(s),Fa=n(Rt," or "),ct=r(Rt,"A",{href:!0});var Qr=i(ct);Ha=n(Qr,"push_to_hub()"),Qr.forEach(s),Ba=n(Rt,`, the code of the model is saved.`),Rt.forEach(s),fo=d(e),Q=r(e,"H2",{class:!0});var Vo=i(Q);le=r(Vo,"A",{id:!0,class:!0,href:!0});var Xr=i(le);as=r(Xr,"SPAN",{});var Zr=i(as);_(Re.$$.fragment,Zr),Zr.forEach(s),Xr.forEach(s),Sa=d(Vo),ls=r(Vo,"SPAN",{});var ei=i(ls);Wa=n(ei,"Sending the code to the Hub"),ei.forEach(s),Vo.forEach(s),co=d(e),_(re.$$.fragment,e),ho=d(e),$=r(e,"P",{});var j=i($);La=n(j,"First, make sure your model is fully defined in a "),rs=r(j,"CODE",{});var ti=i(rs);Ya=n(ti,".py"),ti.forEach(s),za=n(j,` file. It can rely on relative imports to some other files as long as all the files are in the same directory (we don\u2019t support submodules for this feature yet). For our example, we\u2019ll define a `),is=r(j,"CODE",{});var si=i(is);Ga=n(si,"modeling_resnet.py"),si.forEach(s),Ka=n(j," file and a "),fs=r(j,"CODE",{});var oi=i(fs);Ua=n(oi,"configuration_resnet.py"),oi.forEach(s),Va=n(j,` file in a folder of the current working directory named `),cs=r(j,"CODE",{});var ni=i(cs);Ja=n(ni,"resnet_model"),ni.forEach(s),Qa=n(j,". 
The configuration file contains the code for "),ds=r(j,"CODE",{});var ai=i(ds);Xa=n(ai,"ResnetConfig"),ai.forEach(s),Za=n(j,` and the modeling file contains the code of `),hs=r(j,"CODE",{});var li=i(hs);el=n(li,"ResnetModel"),li.forEach(s),tl=n(j," and "),us=r(j,"CODE",{});var ri=i(us);sl=n(ri,"ResnetModelForImageClassification"),ri.forEach(s),ol=n(j,"."),j.forEach(s),uo=d(e),_(qe.$$.fragment,e),mo=d(e),W=r(e,"P",{});var qt=i(W);nl=n(qt,"The "),ms=r(qt,"CODE",{});var ii=i(ms);al=n(ii,"__init__.py"),ii.forEach(s),ll=n(qt," can be empty, it\u2019s just there so that Python detects "),ps=r(qt,"CODE",{});var fi=i(ps);rl=n(fi,"resnet_model"),fi.forEach(s),il=n(qt," can be use as a module."),qt.forEach(s),po=d(e),_(ie.$$.fragment,e),_o=d(e),dt=r(e,"P",{});var ci=i(dt);fl=n(ci,"Note that you can re-use (or subclass) an existing configuration/model."),ci.forEach(s),go=d(e),ht=r(e,"P",{});var di=i(ht);cl=n(di,`To share your model with the community, follow those steps: first import the ResNet model and config from the newly created files:`),di.forEach(s),yo=d(e),_(Ie.$$.fragment,e),wo=d(e),fe=r(e,"P",{});var Jo=i(fe);dl=n(Jo,"Then you have to tell the library you want to copy the code files of those objects when using the "),_s=r(Jo,"CODE",{});var hi=i(_s);hl=n(hi,"save_pretrained"),hi.forEach(s),ul=n(Jo,` method and properly register them with a given Auto class (especially for models), just run:`),Jo.forEach(s),bo=d(e),_(Ne.$$.fragment,e),vo=d(e),ce=r(e,"P",{});var Qo=i(ce);ml=n(Qo,`Note that there is no need to specify an auto class for the configuration (there is only one auto class for them, `),ut=r(Qo,"A",{href:!0});var ui=i(ut);pl=n(ui,"AutoConfig"),ui.forEach(s),_l=n(Qo,`) but it\u2019s different for models. Your custom model could be suitable for many different tasks, so you have to specify which one of the auto classes is the correct one for your model.`),Qo.forEach(s),$o=d(e),mt=r(e,"P",{});var mi=i(mt);gl=n(mi,"Next, let\u2019s create the config and models as we did before:"),mi.forEach(s),ko=d(e),_(xe.$$.fragment,e),Eo=d(e),pt=r(e,"P",{});var pi=i(pt);yl=n(pi,"Now to send the model to the Hub, make sure you are logged in. Either run in your terminal:"),pi.forEach(s),jo=d(e),_(Oe.$$.fragment,e),Co=d(e),_t=r(e,"P",{});var _i=i(_t);wl=n(_i,"or from a notebook:"),_i.forEach(s),Po=d(e),_(De.$$.fragment,e),Ao=d(e),gt=r(e,"P",{});var gi=i(gt);bl=n(gi,"You can then push to to your own namespace (or an organization you are a member of) like this:"),gi.forEach(s),To=d(e),_(Fe.$$.fragment,e),Mo=d(e),M=r(e,"P",{});var ge=i(M);vl=n(ge,`On top of the modeling weights and the configuration in json format, this also copied the modeling and configuration `),gs=r(ge,"CODE",{});var yi=i(gs);$l=n(yi,".py"),yi.forEach(s),kl=n(ge," files in the folder "),ys=r(ge,"CODE",{});var wi=i(ys);El=n(wi,"custom-resnet50d"),wi.forEach(s),jl=n(ge,` and uploaded the result to the Hub. 
You can check the result in this `),He=r(ge,"A",{href:!0,rel:!0});var bi=i(He);Cl=n(bi,"model repo"),bi.forEach(s),Pl=n(ge,"."),ge.forEach(s),Ro=d(e),de=r(e,"P",{});var Xo=i(de);Al=n(Xo,"See the "),yt=r(Xo,"A",{href:!0});var vi=i(yt);Tl=n(vi,"sharing tutorial"),vi.forEach(s),Ml=n(Xo," for more information on the push to Hub method."),Xo.forEach(s),qo=d(e),X=r(e,"H2",{class:!0});var Zo=i(X);he=r(Zo,"A",{id:!0,class:!0,href:!0});var $i=i(he);ws=r($i,"SPAN",{});var ki=i(ws);_(Be.$$.fragment,ki),ki.forEach(s),$i.forEach(s),Rl=d(Zo),bs=r(Zo,"SPAN",{});var Ei=i(bs);ql=n(Ei,"Using a model with custom code"),Ei.forEach(s),Zo.forEach(s),Io=d(e),R=r(e,"P",{});var ye=i(R);Il=n(ye,`You can use any configuration, model or tokenizer with custom code files in its repository with the auto-classes and the `),vs=r(ye,"CODE",{});var ji=i(vs);Nl=n(ji,"from_pretrained"),ji.forEach(s),xl=n(ye," method. All files and code uploaded to the Hub are scanned for malware (refer to the "),Se=r(ye,"A",{href:!0,rel:!0});var Ci=i(Se);Ol=n(Ci,"Hub security"),Ci.forEach(s),Dl=n(ye,` documentation for more information), but you should still review the model code and author to avoid executing malicious code on your machine. Set `),$s=r(ye,"CODE",{});var Pi=i($s);Fl=n(Pi,"trust_remote_code=True"),Pi.forEach(s),Hl=n(ye,` to use a model with custom code:`),ye.forEach(s),No=d(e),_(We.$$.fragment,e),xo=d(e),ue=r(e,"P",{});var en=i(ue);Bl=n(en,"It is also strongly encouraged to pass a commit hash as a "),ks=r(en,"CODE",{});var Ai=i(ks);Sl=n(Ai,"revision"),Ai.forEach(s),Wl=n(en,` to make sure the author of the models did not update the code with some malicious new lines (unless you fully trust the authors of the models).`),en.forEach(s),Oo=d(e),_(Le.$$.fragment,e),Do=d(e),wt=r(e,"P",{});var Ti=i(wt);Ll=n(Ti,`Note that when browsing the commit history of the model repo on the Hub, there is a button to easily copy the commit hash of any commit.`),Ti.forEach(s),Fo=d(e),Z=r(e,"H2",{class:!0});var tn=i(Z);me=r(tn,"A",{id:!0,class:!0,href:!0});var Mi=i(me);Es=r(Mi,"SPAN",{});var Ri=i(Es);_(Ye.$$.fragment,Ri),Ri.forEach(s),Mi.forEach(s),Yl=d(tn),js=r(tn,"SPAN",{});var qi=i(js);zl=n(qi,"Registering a model with custom code to the auto classes"),qi.forEach(s),tn.forEach(s),Ho=d(e),bt=r(e,"P",{});var Ii=i(bt);Gl=n(Ii,`If you are writing a library that extends \u{1F917} Transformers, you may want to extend the auto classes to include your own model. 
This is different from pushing the code to the Hub in the sense that users will need to import your library to get the custom models (contrarily to automatically downloading the model code from the Hub).`),Ii.forEach(s),Bo=d(e),L=r(e,"P",{});var It=i(L);Kl=n(It,"As long as your config has a "),Cs=r(It,"CODE",{});var Ni=i(Cs);Ul=n(Ni,"model_type"),Ni.forEach(s),Vl=n(It,` attribute that is different from existing model types, and that your model classes have the right `),Ps=r(It,"CODE",{});var xi=i(Ps);Jl=n(xi,"config_class"),xi.forEach(s),Ql=n(It," attributes, you can just add them to the auto classes likes this:"),It.forEach(s),So=d(e),_(ze.$$.fragment,e),Wo=d(e),q=r(e,"P",{});var we=i(q);Xl=n(we,"Note that the first argument used when registering your custom config to "),vt=r(we,"A",{href:!0});var Oi=i(vt);Zl=n(Oi,"AutoConfig"),Oi.forEach(s),er=n(we," needs to match the "),As=r(we,"CODE",{});var Di=i(As);tr=n(Di,"model_type"),Di.forEach(s),sr=n(we,` of your custom config, and the first argument used when registering your custom models to any auto model class needs to match the `),Ts=r(we,"CODE",{});var Fi=i(Ts);or=n(Fi,"config_class"),Fi.forEach(s),nr=n(we," of those models."),we.forEach(s),this.h()},h(){h(u,"name","hf:doc:metadata"),h(u,"content",JSON.stringify(Ki)),h(v,"id","sharing-custom-models"),h(v,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(v,"href","#sharing-custom-models"),h(m,"class","relative group"),h(be,"href","https://github.com/rwightman/pytorch-image-models/tree/master/timm"),h(be,"rel","nofollow"),h(Ve,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel"),h(te,"id","writing-a-custom-configuration"),h(te,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(te,"href","#writing-a-custom-configuration"),h(K,"class","relative group"),h(Ze,"href","/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig"),h(et,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.file_utils.PushToHubMixin.push_to_hub"),h(oe,"id","writing-a-custom-model"),h(oe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(oe,"href","#writing-a-custom-model"),h(J,"class","relative group"),h(tt,"href","/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertModel"),h(ot,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer"),h(at,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel"),h(lt,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.save_pretrained"),h(rt,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.file_utils.PushToHubMixin.push_to_hub"),h(ft,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.save_pretrained"),h(ct,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.file_utils.PushToHubMixin.push_to_hub"),h(le,"id","sending-the-code-to-the-hub"),h(le,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full"),h(le,"href","#sending-the-code-to-the-hub"),h(Q,"class","relative group"),h(ut,"href","/docs/transformers/pr_16143/en/model_doc/auto#transformers.AutoConfig"),h(He,"href","https://huggingface.co/sgugger/custom-resnet50d"),h(He,"rel","nofollow"),h(yt,"href","model_sharing"),h(he,"id","using-a-model-with-custom-code"),h(he,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(he,"href","#using-a-model-with-custom-code"),h(X,"class","relative group"),h(Se,"href","https://huggingface.co/docs/hub/security#malware-scanning"),h(Se,"rel","nofollow"),h(me,"id","registering-a-model-with-custom-code-to-the-auto-classes"),h(me,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(me,"href","#registering-a-model-with-custom-code-to-the-auto-classes"),h(Z,"class","relative group"),h(vt,"href","/docs/transformers/pr_16143/en/model_doc/auto#transformers.AutoConfig")},m(e,a){t(document.head,u),f(e,k,a),f(e,m,a),t(m,v),t(v,G),g(P,G,null),t(m,I),t(m,ee),t(ee,sn),f(e,qs,a),f(e,Ke,a),t(Ke,on),f(e,Is,a),f(e,Ue,a),t(Ue,nn),f(e,Ns,a),f(e,N,a),t(N,an),t(N,be),t(be,ln),t(N,rn),t(N,Ve),t(Ve,fn),t(N,cn),f(e,xs,a),f(e,K,a),t(K,te),t(te,xt),g(ve,xt,null),t(K,dn),t(K,Ot),t(Ot,hn),f(e,Os,a),f(e,se,a),t(se,un),t(se,Dt),t(Dt,mn),t(se,pn),f(e,Ds,a),f(e,Je,a),t(Je,_n),f(e,Fs,a),g($e,e,a),f(e,Hs,a),f(e,Qe,a),t(Qe,gn),f(e,Bs,a),f(e,x,a),t(x,ke),t(ke,yn),t(ke,Ft),t(Ft,wn),t(ke,bn),t(x,vn),t(x,U),t(U,$n),t(U,Ht),t(Ht,kn),t(U,En),t(U,Bt),t(Bt,jn),t(U,Cn),t(x,Pn),t(x,V),t(V,An),t(V,St),t(St,Tn),t(V,Mn),t(V,Wt),t(Wt,Rn),t(V,qn),f(e,Ss,a),f(e,O,a),t(O,In),t(O,Lt),t(Lt,Nn),t(O,xn),t(O,Yt),t(Yt,On),t(O,Dn),f(e,Ws,a),f(e,D,a),t(D,Fn),t(D,zt),t(zt,Hn),t(D,Bn),t(D,Gt),t(Gt,Sn),t(D,Wn),f(e,Ls,a),f(e,Xe,a),t(Xe,Ln),f(e,Ys,a),g(Ee,e,a),f(e,zs,a),f(e,A,a),t(A,Yn),t(A,Kt),t(Kt,zn),t(A,Gn),t(A,Ut),t(Ut,Kn),t(A,Un),t(A,Vt),t(Vt,Vn),t(A,Jn),f(e,Gs,a),g(je,e,a),f(e,Ks,a),f(e,F,a),t(F,Qn),t(F,Ze),t(Ze,Xn),t(F,Zn),t(F,et),t(et,ea),t(F,ta),f(e,Us,a),f(e,J,a),t(J,oe),t(oe,Jt),g(Ce,Jt,null),t(J,sa),t(J,Qt),t(Qt,oa),f(e,Vs,a),f(e,H,a),t(H,na),t(H,tt),t(tt,aa),t(H,la),t(H,Xt),t(Xt,ra),t(H,ia),f(e,Js,a),f(e,ne,a),t(ne,fa),t(ne,Zt),t(Zt,ca),t(ne,da),f(e,Qs,a),g(Pe,e,a),f(e,Xs,a),f(e,st,a),t(st,ha),f(e,Zs,a),g(Ae,e,a),f(e,eo,a),f(e,C,a),t(C,ua),t(C,es),t(es,ma),t(C,pa),t(C,ts),t(ts,_a),t(C,ga),t(C,ss),t(ss,ya),t(C,wa),t(C,os),t(os,ba),t(C,va),f(e,to,a),g(ae,e,a),f(e,so,a),f(e,B,a),t(B,$a),t(B,ns),t(ns,ka),t(B,Ea),t(B,ot),t(ot,ja),t(B,Ca),f(e,oo,a),f(e,nt,a),t(nt,Pa),f(e,no,a),g(Te,e,a),f(e,ao,a),f(e,T,a),t(T,Aa),t(T,at),t(at,Ta),t(T,Ma),t(T,lt),t(lt,Ra),t(T,qa),t(T,rt),t(rt,Ia),t(T,Na),f(e,lo,a),f(e,it,a),t(it,xa),f(e,ro,a),g(Me,e,a),f(e,io,a),f(e,S,a),t(S,Oa),t(S,ft),t(ft,Da),t(S,Fa),t(S,ct),t(ct,Ha),t(S,Ba),f(e,fo,a),f(e,Q,a),t(Q,le),t(le,as),g(Re,as,null),t(Q,Sa),t(Q,ls),t(ls,Wa),f(e,co,a),g(re,e,a),f(e,ho,a),f(e,$,a),t($,La),t($,rs),t(rs,Ya),t($,za),t($,is),t(is,Ga),t($,Ka),t($,fs),t(fs,Ua),t($,Va),t($,cs),t(cs,Ja),t($,Qa),t($,ds),t(ds,Xa),t($,Za),t($,hs),t(hs,el),t($,tl),t($,us),t(us,sl),t($,ol),f(e,uo,a),g(qe,e,a),f(e,mo,a),f(e,W,a),t(W,nl),t(W,ms),t(ms,al),t(W,ll),t(W,ps),t(ps,rl),t(W,il),f(e,po,a),g(ie,e,a),f(e,_o,a),f(e,dt,a),t(dt,fl),f(e,go,a),f(e,ht,a),t(ht,cl),f(e,yo,a),g(Ie,e,a),f(e,wo,a),f(e,fe,a),t(fe,dl),t(fe,_s),t(_s,hl),t(fe,ul),f(e,bo,a),g(Ne,e,a),f(e,vo,a),f(e,ce,a),t(ce,m
l),t(ce,ut),t(ut,pl),t(ce,_l),f(e,$o,a),f(e,mt,a),t(mt,gl),f(e,ko,a),g(xe,e,a),f(e,Eo,a),f(e,pt,a),t(pt,yl),f(e,jo,a),g(Oe,e,a),f(e,Co,a),f(e,_t,a),t(_t,wl),f(e,Po,a),g(De,e,a),f(e,Ao,a),f(e,gt,a),t(gt,bl),f(e,To,a),g(Fe,e,a),f(e,Mo,a),f(e,M,a),t(M,vl),t(M,gs),t(gs,$l),t(M,kl),t(M,ys),t(ys,El),t(M,jl),t(M,He),t(He,Cl),t(M,Pl),f(e,Ro,a),f(e,de,a),t(de,Al),t(de,yt),t(yt,Tl),t(de,Ml),f(e,qo,a),f(e,X,a),t(X,he),t(he,ws),g(Be,ws,null),t(X,Rl),t(X,bs),t(bs,ql),f(e,Io,a),f(e,R,a),t(R,Il),t(R,vs),t(vs,Nl),t(R,xl),t(R,Se),t(Se,Ol),t(R,Dl),t(R,$s),t($s,Fl),t(R,Hl),f(e,No,a),g(We,e,a),f(e,xo,a),f(e,ue,a),t(ue,Bl),t(ue,ks),t(ks,Sl),t(ue,Wl),f(e,Oo,a),g(Le,e,a),f(e,Do,a),f(e,wt,a),t(wt,Ll),f(e,Fo,a),f(e,Z,a),t(Z,me),t(me,Es),g(Ye,Es,null),t(Z,Yl),t(Z,js),t(js,zl),f(e,Ho,a),f(e,bt,a),t(bt,Gl),f(e,Bo,a),f(e,L,a),t(L,Kl),t(L,Cs),t(Cs,Ul),t(L,Vl),t(L,Ps),t(Ps,Jl),t(L,Ql),f(e,So,a),g(ze,e,a),f(e,Wo,a),f(e,q,a),t(q,Xl),t(q,vt),t(vt,Zl),t(q,er),t(q,As),t(As,tr),t(q,sr),t(q,Ts),t(Ts,or),t(q,nr),Lo=!0},p(e,[a]){const Ge={};a&2&&(Ge.$$scope={dirty:a,ctx:e}),ae.$set(Ge);const Ms={};a&2&&(Ms.$$scope={dirty:a,ctx:e}),re.$set(Ms);const Rs={};a&2&&(Rs.$$scope={dirty:a,ctx:e}),ie.$set(Rs)},i(e){Lo||(y(P.$$.fragment,e),y(ve.$$.fragment,e),y($e.$$.fragment,e),y(Ee.$$.fragment,e),y(je.$$.fragment,e),y(Ce.$$.fragment,e),y(Pe.$$.fragment,e),y(Ae.$$.fragment,e),y(ae.$$.fragment,e),y(Te.$$.fragment,e),y(Me.$$.fragment,e),y(Re.$$.fragment,e),y(re.$$.fragment,e),y(qe.$$.fragment,e),y(ie.$$.fragment,e),y(Ie.$$.fragment,e),y(Ne.$$.fragment,e),y(xe.$$.fragment,e),y(Oe.$$.fragment,e),y(De.$$.fragment,e),y(Fe.$$.fragment,e),y(Be.$$.fragment,e),y(We.$$.fragment,e),y(Le.$$.fragment,e),y(Ye.$$.fragment,e),y(ze.$$.fragment,e),Lo=!0)},o(e){w(P.$$.fragment,e),w(ve.$$.fragment,e),w($e.$$.fragment,e),w(Ee.$$.fragment,e),w(je.$$.fragment,e),w(Ce.$$.fragment,e),w(Pe.$$.fragment,e),w(Ae.$$.fragment,e),w(ae.$$.fragment,e),w(Te.$$.fragment,e),w(Me.$$.fragment,e),w(Re.$$.fragment,e),w(re.$$.fragment,e),w(qe.$$.fragment,e),w(ie.$$.fragment,e),w(Ie.$$.fragment,e),w(Ne.$$.fragment,e),w(xe.$$.fragment,e),w(Oe.$$.fragment,e),w(De.$$.fragment,e),w(Fe.$$.fragment,e),w(Be.$$.fragment,e),w(We.$$.fragment,e),w(Le.$$.fragment,e),w(Ye.$$.fragment,e),w(ze.$$.fragment,e),Lo=!1},d(e){s(u),e&&s(k),e&&s(m),b(P),e&&s(qs),e&&s(Ke),e&&s(Is),e&&s(Ue),e&&s(Ns),e&&s(N),e&&s(xs),e&&s(K),b(ve),e&&s(Os),e&&s(se),e&&s(Ds),e&&s(Je),e&&s(Fs),b($e,e),e&&s(Hs),e&&s(Qe),e&&s(Bs),e&&s(x),e&&s(Ss),e&&s(O),e&&s(Ws),e&&s(D),e&&s(Ls),e&&s(Xe),e&&s(Ys),b(Ee,e),e&&s(zs),e&&s(A),e&&s(Gs),b(je,e),e&&s(Ks),e&&s(F),e&&s(Us),e&&s(J),b(Ce),e&&s(Vs),e&&s(H),e&&s(Js),e&&s(ne),e&&s(Qs),b(Pe,e),e&&s(Xs),e&&s(st),e&&s(Zs),b(Ae,e),e&&s(eo),e&&s(C),e&&s(to),b(ae,e),e&&s(so),e&&s(B),e&&s(oo),e&&s(nt),e&&s(no),b(Te,e),e&&s(ao),e&&s(T),e&&s(lo),e&&s(it),e&&s(ro),b(Me,e),e&&s(io),e&&s(S),e&&s(fo),e&&s(Q),b(Re),e&&s(co),b(re,e),e&&s(ho),e&&s($),e&&s(uo),b(qe,e),e&&s(mo),e&&s(W),e&&s(po),b(ie,e),e&&s(_o),e&&s(dt),e&&s(go),e&&s(ht),e&&s(yo),b(Ie,e),e&&s(wo),e&&s(fe),e&&s(bo),b(Ne,e),e&&s(vo),e&&s(ce),e&&s($o),e&&s(mt),e&&s(ko),b(xe,e),e&&s(Eo),e&&s(pt),e&&s(jo),b(Oe,e),e&&s(Co),e&&s(_t),e&&s(Po),b(De,e),e&&s(Ao),e&&s(gt),e&&s(To),b(Fe,e),e&&s(Mo),e&&s(M),e&&s(Ro),e&&s(de),e&&s(qo),e&&s(X),b(Be),e&&s(Io),e&&s(R),e&&s(No),b(We,e),e&&s(xo),e&&s(ue),e&&s(Oo),b(Le,e),e&&s(Do),e&&s(wt),e&&s(Fo),e&&s(Z),b(Ye),e&&s(Ho),e&&s(bt),e&&s(Bo),e&&s(L),e&&s(So),b(ze,e),e&&s(Wo),e&&s(q)}}}const Ki={local:"sharing-custom-models",sections:[{local:"writing-a-custom-configuration",title:"Writing a custom 
configuration"},{local:"writing-a-custom-model",title:"Writing a custom model"},{local:"sending-the-code-to-the-hub",title:"Sending the code to the Hub"},{local:"using-a-model-with-custom-code",title:"Using a model with custom code"},{local:"registering-a-model-with-custom-code-to-the-auto-classes",title:"Registering a model with custom code to the auto classes"}],title:"Sharing custom models"};function Ui(z,u,k){let{fw:m}=u;return z.$$set=v=>{"fw"in v&&k(0,m=v.fw)},[m]}class ef extends Hi{constructor(u){super();Bi(this,u,Ui,Gi,Si,{fw:0})}}export{ef as default,Ki as metadata};
249
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages/benchmarks.mdx-680f04c0.js
import{S as At,i as Pt,s as Ct,e as l,k as h,w as N,t as n,M as Ft,c as t,d as a,m,a as o,x as M,h as r,b,F as s,g as c,y as I,q as O,o as S,B as D}from"../chunks/vendor-4833417e.js";import{T as $t}from"../chunks/Tip-fffd6df1.js";import{I as fa}from"../chunks/IconCopyLink-4b81c553.js";import{C as ga}from"../chunks/CodeBlockFw-27a176a0.js";import{D as Nt}from"../chunks/DocNotebookDropdown-ecff2a90.js";import"../chunks/CopyButton-dacfbfaf.js";function Mt(ls){let d,E;return{c(){d=l("p"),E=n(`Hugging Face\u2019s Benchmarking tools are deprecated and it is advised to use external Benchmarking libraries to measure the speed and memory complexity of Transformer models.`)},l(j){d=t(j,"P",{});var _=o(d);E=r(_,`Hugging Face\u2019s Benchmarking tools are deprecated and it is advised to use external Benchmarking libraries to measure the speed and memory complexity of Transformer models.`),_.forEach(a)},m(j,_){c(j,d,_),s(d,E)},d(j){j&&a(d)}}}function It(ls){let d,E,j,_,z,B,ts,U;return{c(){d=l("p"),E=n("Hereby, "),j=l("em"),_=n("inference"),z=n(" is defined by a single forward pass, and "),B=l("em"),ts=n("training"),U=n(` is defined by a single forward pass and backward pass.`)},l(H){d=t(H,"P",{});var $=o(d);E=r($,"Hereby, "),j=t($,"EM",{});var C=o(j);_=r(C,"inference"),C.forEach(a),z=r($," is defined by a single forward pass, and "),B=t($,"EM",{});var os=o(B);ts=r(os,"training"),os.forEach(a),U=r($,` is defined by a single forward pass and backward pass.`),$.forEach(a)},m(H,$){c(H,d,$),s(d,E),s(d,j),s(j,_),s(d,z),s(d,B),s(B,ts),s(d,U)},d(H){H&&a(d)}}}function Ot(ls){let d,E,j,_,z,B,ts,U,H,$,C,os,ps,qe,Es,ja,xe,W,_a,cs,ya,ka,Re,V,Y,Ps,hs,va,Cs,Ea,Le,k,wa,Fs,Ta,Ba,Ns,$a,Aa,Ms,Pa,Ca,Is,Fa,Na,Os,Ma,Ia,Ss,Oa,Sa,ze,J,Ue,y,Da,Ds,qa,xa,qs,Ra,La,xs,za,Ua,Rs,Ha,Va,Ls,Xa,Ga,zs,Wa,Ya,Us,Ja,Qa,He,ms,Ve,i,Ka,Hs,Za,sn,Vs,en,an,Xs,nn,rn,Gs,ln,tn,Ws,on,pn,is,cn,hn,Ys,mn,un,Js,bn,dn,Qs,fn,gn,Ks,jn,_n,Zs,yn,kn,se,vn,En,ee,wn,Tn,Xe,us,Ge,Q,Bn,ae,$n,An,We,bs,Ye,u,Pn,ne,Cn,Fn,re,Nn,Mn,le,In,On,te,Sn,Dn,oe,qn,xn,pe,Rn,Ln,ce,zn,Un,he,Hn,Vn,me,Xn,Gn,ie,Wn,Yn,ue,Jn,Qn,be,Kn,Zn,de,sr,er,Je,P,ar,fe,nr,rr,ge,lr,tr,je,or,pr,Qe,ds,Ke,w,cr,_e,hr,mr,ye,ir,ur,ke,br,dr,ve,fr,gr,Ze,X,K,Ee,fs,jr,we,_r,sa,ws,yr,ea,q,F,kr,Te,vr,Er,Be,wr,Tr,$e,Br,$r,Ar,A,Pr,Ae,Cr,Fr,Pe,Nr,Mr,Ce,Ir,Or,Fe,Sr,Dr,qr,Ne,xr,aa,G,Z,Me,gs,Rr,Ie,Lr,na,ss,zr,Oe,Ur,Hr,ra,x,Vr,js,Xr,Gr,_s,Wr,Yr,la,es,Jr,Se,Qr,Kr,ta,as,Ts,ys,Zr,sl,el,Bs,ks,al,nl,oa;return B=new fa({}),C=new $t({props:{warning:"&lcub;true}",$$slots:{default:[Mt]},$$scope:{ctx:ls}}}),ps=new Nt({props:{classNames:"absolute z-10 right-0 top-0",options:[{label:"Mixed",value:"https://colab.research.google.com/github/huggingface/notebooks/blob/master/transformers_doc/benchmarks.ipynb"},{label:"PyTorch",value:"https://colab.research.google.com/github/huggingface/notebooks/blob/master/transformers_doc/pytorch/benchmarks.ipynb"},{label:"TensorFlow",value:"https://colab.research.google.com/github/huggingface/notebooks/blob/master/transformers_doc/tensorflow/benchmarks.ipynb"},{label:"Mixed",value:"https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/transformers_doc/benchmarks.ipynb"},{label:"PyTorch",value:"https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/transformers_doc/pytorch/benchmarks.ipynb"},{label:"TensorFlow",value:"https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/transformers_doc/tensorflow/benchmarks.ipynb"}]}}),hs=new fa({}),J=new $t({props:{$$slots:{default:[It]},$$scope:{ctx:ls}}}),ms=new 
ga({props:{group1:{id:"pt",code:`from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments args = PyTorchBenchmarkArguments(models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[8, 32, 128, 512]) benchmark = PyTorchBenchmark(args)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> PyTorchBenchmark, PyTorchBenchmarkArguments <span class="hljs-meta">&gt;&gt;&gt; </span>args = PyTorchBenchmarkArguments(models=[<span class="hljs-string">&quot;bert-base-uncased&quot;</span>], batch_sizes=[<span class="hljs-number">8</span>], sequence_lengths=[<span class="hljs-number">8</span>, <span class="hljs-number">32</span>, <span class="hljs-number">128</span>, <span class="hljs-number">512</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>benchmark = PyTorchBenchmark(args)`},group2:{id:"tf",code:`from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments args = TensorFlowBenchmarkArguments( models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[8, 32, 128, 512] ) benchmark = TensorFlowBenchmark(args)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TensorFlowBenchmark, TensorFlowBenchmarkArguments <span class="hljs-meta">&gt;&gt;&gt; </span>args = TensorFlowBenchmarkArguments( <span class="hljs-meta">... </span> models=[<span class="hljs-string">&quot;bert-base-uncased&quot;</span>], batch_sizes=[<span class="hljs-number">8</span>], sequence_lengths=[<span class="hljs-number">8</span>, <span class="hljs-number">32</span>, <span class="hljs-number">128</span>, <span class="hljs-number">512</span>] <span class="hljs-meta">... 
</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>benchmark = TensorFlowBenchmark(args)`}}}),us=new ga({props:{group1:{id:"pt",code:"python examples/pytorch/benchmarking/run_benchmark.py --help",highlighted:'python examples/pytorch/benchmarking/run_benchmark.py --<span class="hljs-built_in">help</span>'},group2:{id:"tf",code:"python examples/tensorflow/benchmarking/run_benchmark_tf.py --help",highlighted:'python examples/tensorflow/benchmarking/run_benchmark_tf.py --<span class="hljs-built_in">help</span>'}}}),bs=new ga({props:{group1:{id:"pt",code:`results = benchmark.run() print(results) `,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>results = benchmark.run() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(results) ==================== INFERENCE - SPEED - RESULT ==================== -------------------------------------------------------------------------------- Model Name Batch Size Seq Length Time <span class="hljs-keyword">in</span> s -------------------------------------------------------------------------------- bert-base-uncased <span class="hljs-number">8</span> <span class="hljs-number">8</span> <span class="hljs-number">0.006</span> bert-base-uncased <span class="hljs-number">8</span> <span class="hljs-number">32</span> <span class="hljs-number">0.006</span> bert-base-uncased <span class="hljs-number">8</span> <span class="hljs-number">128</span> <span class="hljs-number">0.018</span> bert-base-uncased <span class="hljs-number">8</span> <span class="hljs-number">512</span> <span class="hljs-number">0.088</span> -------------------------------------------------------------------------------- ==================== INFERENCE - MEMORY - RESULT ==================== -------------------------------------------------------------------------------- Model Name Batch Size Seq Length Memory <span class="hljs-keyword">in</span> MB -------------------------------------------------------------------------------- bert-base-uncased <span class="hljs-number">8</span> <span class="hljs-number">8</span> <span class="hljs-number">1227</span> bert-base-uncased <span class="hljs-number">8</span> <span class="hljs-number">32</span> <span class="hljs-number">1281</span> bert-base-uncased <span class="hljs-number">8</span> <span class="hljs-number">128</span> <span class="hljs-number">1307</span> bert-base-uncased <span class="hljs-number">8</span> <span class="hljs-number">512</span> <span class="hljs-number">1539</span> -------------------------------------------------------------------------------- ==================== ENVIRONMENT INFORMATION ==================== - transformers_version: <span class="hljs-number">2.11</span><span class="hljs-number">.0</span> - framework: PyTorch - use_torchscript: <span class="hljs-literal">False</span> - framework_version: <span class="hljs-number">1.4</span><span class="hljs-number">.0</span> - python_version: <span class="hljs-number">3.6</span><span class="hljs-number">.10</span> - system: Linux - cpu: x86_64 - architecture: 64bit - date: <span class="hljs-number">2020</span>-06-<span class="hljs-number">29</span> - time: 08:<span class="hljs-number">58</span>:<span class="hljs-number">43.371351</span> - fp16: <span class="hljs-literal">False</span> - use_multiprocessing: <span class="hljs-literal">True</span> - only_pretrain_model: <span class="hljs-literal">False</span> - cpu_ram_mb: <span class="hljs-number">32088</span> - use_gpu: <span class="hljs-literal">True</span> - num_gpus: <span 
class="hljs-number">1</span> - gpu: TITAN RTX - gpu_ram_mb: <span class="hljs-number">24217</span> - gpu_power_watts: <span class="hljs-number">280.0</span> - gpu_performance_state: <span class="hljs-number">2</span> - use_tpu: <span class="hljs-literal">False</span>`},group2:{id:"tf",code:`results = benchmark.run() print(results) `,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>results = benchmark.run() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(results) ==================== INFERENCE - SPEED - RESULT ==================== -------------------------------------------------------------------------------- Model Name Batch Size Seq Length Time <span class="hljs-keyword">in</span> s -------------------------------------------------------------------------------- bert-base-uncased <span class="hljs-number">8</span> <span class="hljs-number">8</span> <span class="hljs-number">0.005</span> bert-base-uncased <span class="hljs-number">8</span> <span class="hljs-number">32</span> <span class="hljs-number">0.008</span> bert-base-uncased <span class="hljs-number">8</span> <span class="hljs-number">128</span> <span class="hljs-number">0.022</span> bert-base-uncased <span class="hljs-number">8</span> <span class="hljs-number">512</span> <span class="hljs-number">0.105</span> -------------------------------------------------------------------------------- ==================== INFERENCE - MEMORY - RESULT ==================== -------------------------------------------------------------------------------- Model Name Batch Size Seq Length Memory <span class="hljs-keyword">in</span> MB -------------------------------------------------------------------------------- bert-base-uncased <span class="hljs-number">8</span> <span class="hljs-number">8</span> <span class="hljs-number">1330</span> bert-base-uncased <span class="hljs-number">8</span> <span class="hljs-number">32</span> <span class="hljs-number">1330</span> bert-base-uncased <span class="hljs-number">8</span> <span class="hljs-number">128</span> <span class="hljs-number">1330</span> bert-base-uncased <span class="hljs-number">8</span> <span class="hljs-number">512</span> <span class="hljs-number">1770</span> -------------------------------------------------------------------------------- ==================== ENVIRONMENT INFORMATION ==================== - transformers_version: <span class="hljs-number">2.11</span><span class="hljs-number">.0</span> - framework: Tensorflow - use_xla: <span class="hljs-literal">False</span> - framework_version: <span class="hljs-number">2.2</span><span class="hljs-number">.0</span> - python_version: <span class="hljs-number">3.6</span><span class="hljs-number">.10</span> - system: Linux - cpu: x86_64 - architecture: 64bit - date: <span class="hljs-number">2020</span>-06-<span class="hljs-number">29</span> - time: 09:<span class="hljs-number">26</span>:<span class="hljs-number">35.617317</span> - fp16: <span class="hljs-literal">False</span> - use_multiprocessing: <span class="hljs-literal">True</span> - only_pretrain_model: <span class="hljs-literal">False</span> - cpu_ram_mb: <span class="hljs-number">32088</span> - use_gpu: <span class="hljs-literal">True</span> - num_gpus: <span class="hljs-number">1</span> - gpu: TITAN RTX - gpu_ram_mb: <span class="hljs-number">24217</span> - gpu_power_watts: <span class="hljs-number">280.0</span> - gpu_performance_state: <span class="hljs-number">2</span> - use_tpu: <span class="hljs-literal">False</span>`}}}),ds=new 
ga({props:{group1:{id:"pt",code:`from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments, BertConfig args = PyTorchBenchmarkArguments( models=["bert-base", "bert-384-hid", "bert-6-lay"], batch_sizes=[8], sequence_lengths=[8, 32, 128, 512] ) config_base = BertConfig() config_384_hid = BertConfig(hidden_size=384) config_6_lay = BertConfig(num_hidden_layers=6) benchmark = PyTorchBenchmark(args, configs=[config_base, config_384_hid, config_6_lay]) benchmark.run() `,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> PyTorchBenchmark, PyTorchBenchmarkArguments, BertConfig <span class="hljs-meta">&gt;&gt;&gt; </span>args = PyTorchBenchmarkArguments( <span class="hljs-meta">... </span> models=[<span class="hljs-string">&quot;bert-base&quot;</span>, <span class="hljs-string">&quot;bert-384-hid&quot;</span>, <span class="hljs-string">&quot;bert-6-lay&quot;</span>], batch_sizes=[<span class="hljs-number">8</span>], sequence_lengths=[<span class="hljs-number">8</span>, <span class="hljs-number">32</span>, <span class="hljs-number">128</span>, <span class="hljs-number">512</span>] <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>config_base = BertConfig() <span class="hljs-meta">&gt;&gt;&gt; </span>config_384_hid = BertConfig(hidden_size=<span class="hljs-number">384</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>config_6_lay = BertConfig(num_hidden_layers=<span class="hljs-number">6</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>benchmark = PyTorchBenchmark(args, configs=[config_base, config_384_hid, config_6_lay]) <span class="hljs-meta">&gt;&gt;&gt; </span>benchmark.run() ==================== INFERENCE - SPEED - RESULT ==================== -------------------------------------------------------------------------------- Model Name Batch Size Seq Length Time <span class="hljs-keyword">in</span> s -------------------------------------------------------------------------------- bert-base <span class="hljs-number">8</span> <span class="hljs-number">128</span> <span class="hljs-number">0.006</span> bert-base <span class="hljs-number">8</span> <span class="hljs-number">512</span> <span class="hljs-number">0.006</span> bert-base <span class="hljs-number">8</span> <span class="hljs-number">128</span> <span class="hljs-number">0.018</span> bert-base <span class="hljs-number">8</span> <span class="hljs-number">512</span> <span class="hljs-number">0.088</span> bert-<span class="hljs-number">384</span>-hid <span class="hljs-number">8</span> <span class="hljs-number">8</span> <span class="hljs-number">0.006</span> bert-<span class="hljs-number">384</span>-hid <span class="hljs-number">8</span> <span class="hljs-number">32</span> <span class="hljs-number">0.006</span> bert-<span class="hljs-number">384</span>-hid <span class="hljs-number">8</span> <span class="hljs-number">128</span> <span class="hljs-number">0.011</span> bert-<span class="hljs-number">384</span>-hid <span class="hljs-number">8</span> <span class="hljs-number">512</span> <span class="hljs-number">0.054</span> bert-<span class="hljs-number">6</span>-lay <span class="hljs-number">8</span> <span class="hljs-number">8</span> <span class="hljs-number">0.003</span> bert-<span class="hljs-number">6</span>-lay <span class="hljs-number">8</span> <span class="hljs-number">32</span> <span class="hljs-number">0.004</span> bert-<span class="hljs-number">6</span>-lay <span class="hljs-number">8</span> <span 
class="hljs-number">128</span> <span class="hljs-number">0.009</span> bert-<span class="hljs-number">6</span>-lay <span class="hljs-number">8</span> <span class="hljs-number">512</span> <span class="hljs-number">0.044</span> -------------------------------------------------------------------------------- ==================== INFERENCE - MEMORY - RESULT ==================== -------------------------------------------------------------------------------- Model Name Batch Size Seq Length Memory <span class="hljs-keyword">in</span> MB -------------------------------------------------------------------------------- bert-base <span class="hljs-number">8</span> <span class="hljs-number">8</span> <span class="hljs-number">1277</span> bert-base <span class="hljs-number">8</span> <span class="hljs-number">32</span> <span class="hljs-number">1281</span> bert-base <span class="hljs-number">8</span> <span class="hljs-number">128</span> <span class="hljs-number">1307</span> bert-base <span class="hljs-number">8</span> <span class="hljs-number">512</span> <span class="hljs-number">1539</span> bert-<span class="hljs-number">384</span>-hid <span class="hljs-number">8</span> <span class="hljs-number">8</span> <span class="hljs-number">1005</span> bert-<span class="hljs-number">384</span>-hid <span class="hljs-number">8</span> <span class="hljs-number">32</span> <span class="hljs-number">1027</span> bert-<span class="hljs-number">384</span>-hid <span class="hljs-number">8</span> <span class="hljs-number">128</span> <span class="hljs-number">1035</span> bert-<span class="hljs-number">384</span>-hid <span class="hljs-number">8</span> <span class="hljs-number">512</span> <span class="hljs-number">1255</span> bert-<span class="hljs-number">6</span>-lay <span class="hljs-number">8</span> <span class="hljs-number">8</span> <span class="hljs-number">1097</span> bert-<span class="hljs-number">6</span>-lay <span class="hljs-number">8</span> <span class="hljs-number">32</span> <span class="hljs-number">1101</span> bert-<span class="hljs-number">6</span>-lay <span class="hljs-number">8</span> <span class="hljs-number">128</span> <span class="hljs-number">1127</span> bert-<span class="hljs-number">6</span>-lay <span class="hljs-number">8</span> <span class="hljs-number">512</span> <span class="hljs-number">1359</span> -------------------------------------------------------------------------------- ==================== ENVIRONMENT INFORMATION ==================== - transformers_version: <span class="hljs-number">2.11</span><span class="hljs-number">.0</span> - framework: PyTorch - use_torchscript: <span class="hljs-literal">False</span> - framework_version: <span class="hljs-number">1.4</span><span class="hljs-number">.0</span> - python_version: <span class="hljs-number">3.6</span><span class="hljs-number">.10</span> - system: Linux - cpu: x86_64 - architecture: 64bit - date: <span class="hljs-number">2020</span>-06-<span class="hljs-number">29</span> - time: 09:<span class="hljs-number">35</span>:<span class="hljs-number">25.143267</span> - fp16: <span class="hljs-literal">False</span> - use_multiprocessing: <span class="hljs-literal">True</span> - only_pretrain_model: <span class="hljs-literal">False</span> - cpu_ram_mb: <span class="hljs-number">32088</span> - use_gpu: <span class="hljs-literal">True</span> - num_gpus: <span class="hljs-number">1</span> - gpu: TITAN RTX - gpu_ram_mb: <span class="hljs-number">24217</span> - gpu_power_watts: <span class="hljs-number">280.0</span> - gpu_performance_state: <span 
class="hljs-number">2</span> - use_tpu: <span class="hljs-literal">False</span>`},group2:{id:"tf",code:`from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments, BertConfig args = TensorFlowBenchmarkArguments( models=["bert-base", "bert-384-hid", "bert-6-lay"], batch_sizes=[8], sequence_lengths=[8, 32, 128, 512] ) config_base = BertConfig() config_384_hid = BertConfig(hidden_size=384) config_6_lay = BertConfig(num_hidden_layers=6) benchmark = TensorFlowBenchmark(args, configs=[config_base, config_384_hid, config_6_lay]) benchmark.run() `,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TensorFlowBenchmark, TensorFlowBenchmarkArguments, BertConfig <span class="hljs-meta">&gt;&gt;&gt; </span>args = TensorFlowBenchmarkArguments( <span class="hljs-meta">... </span> models=[<span class="hljs-string">&quot;bert-base&quot;</span>, <span class="hljs-string">&quot;bert-384-hid&quot;</span>, <span class="hljs-string">&quot;bert-6-lay&quot;</span>], batch_sizes=[<span class="hljs-number">8</span>], sequence_lengths=[<span class="hljs-number">8</span>, <span class="hljs-number">32</span>, <span class="hljs-number">128</span>, <span class="hljs-number">512</span>] <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>config_base = BertConfig() <span class="hljs-meta">&gt;&gt;&gt; </span>config_384_hid = BertConfig(hidden_size=<span class="hljs-number">384</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>config_6_lay = BertConfig(num_hidden_layers=<span class="hljs-number">6</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>benchmark = TensorFlowBenchmark(args, configs=[config_base, config_384_hid, config_6_lay]) <span class="hljs-meta">&gt;&gt;&gt; </span>benchmark.run() ==================== INFERENCE - SPEED - RESULT ==================== -------------------------------------------------------------------------------- Model Name Batch Size Seq Length Time <span class="hljs-keyword">in</span> s -------------------------------------------------------------------------------- bert-base <span class="hljs-number">8</span> <span class="hljs-number">8</span> <span class="hljs-number">0.005</span> bert-base <span class="hljs-number">8</span> <span class="hljs-number">32</span> <span class="hljs-number">0.008</span> bert-base <span class="hljs-number">8</span> <span class="hljs-number">128</span> <span class="hljs-number">0.022</span> bert-base <span class="hljs-number">8</span> <span class="hljs-number">512</span> <span class="hljs-number">0.106</span> bert-<span class="hljs-number">384</span>-hid <span class="hljs-number">8</span> <span class="hljs-number">8</span> <span class="hljs-number">0.005</span> bert-<span class="hljs-number">384</span>-hid <span class="hljs-number">8</span> <span class="hljs-number">32</span> <span class="hljs-number">0.007</span> bert-<span class="hljs-number">384</span>-hid <span class="hljs-number">8</span> <span class="hljs-number">128</span> <span class="hljs-number">0.018</span> bert-<span class="hljs-number">384</span>-hid <span class="hljs-number">8</span> <span class="hljs-number">512</span> <span class="hljs-number">0.064</span> bert-<span class="hljs-number">6</span>-lay <span class="hljs-number">8</span> <span class="hljs-number">8</span> <span class="hljs-number">0.002</span> bert-<span class="hljs-number">6</span>-lay <span class="hljs-number">8</span> <span class="hljs-number">32</span> <span 
class="hljs-number">0.003</span> bert-<span class="hljs-number">6</span>-lay <span class="hljs-number">8</span> <span class="hljs-number">128</span> <span class="hljs-number">0.0011</span> bert-<span class="hljs-number">6</span>-lay <span class="hljs-number">8</span> <span class="hljs-number">512</span> <span class="hljs-number">0.074</span> -------------------------------------------------------------------------------- ==================== INFERENCE - MEMORY - RESULT ==================== -------------------------------------------------------------------------------- Model Name Batch Size Seq Length Memory <span class="hljs-keyword">in</span> MB -------------------------------------------------------------------------------- bert-base <span class="hljs-number">8</span> <span class="hljs-number">8</span> <span class="hljs-number">1330</span> bert-base <span class="hljs-number">8</span> <span class="hljs-number">32</span> <span class="hljs-number">1330</span> bert-base <span class="hljs-number">8</span> <span class="hljs-number">128</span> <span class="hljs-number">1330</span> bert-base <span class="hljs-number">8</span> <span class="hljs-number">512</span> <span class="hljs-number">1770</span> bert-<span class="hljs-number">384</span>-hid <span class="hljs-number">8</span> <span class="hljs-number">8</span> <span class="hljs-number">1330</span> bert-<span class="hljs-number">384</span>-hid <span class="hljs-number">8</span> <span class="hljs-number">32</span> <span class="hljs-number">1330</span> bert-<span class="hljs-number">384</span>-hid <span class="hljs-number">8</span> <span class="hljs-number">128</span> <span class="hljs-number">1330</span> bert-<span class="hljs-number">384</span>-hid <span class="hljs-number">8</span> <span class="hljs-number">512</span> <span class="hljs-number">1540</span> bert-<span class="hljs-number">6</span>-lay <span class="hljs-number">8</span> <span class="hljs-number">8</span> <span class="hljs-number">1330</span> bert-<span class="hljs-number">6</span>-lay <span class="hljs-number">8</span> <span class="hljs-number">32</span> <span class="hljs-number">1330</span> bert-<span class="hljs-number">6</span>-lay <span class="hljs-number">8</span> <span class="hljs-number">128</span> <span class="hljs-number">1330</span> bert-<span class="hljs-number">6</span>-lay <span class="hljs-number">8</span> <span class="hljs-number">512</span> <span class="hljs-number">1540</span> -------------------------------------------------------------------------------- ==================== ENVIRONMENT INFORMATION ==================== - transformers_version: <span class="hljs-number">2.11</span><span class="hljs-number">.0</span> - framework: Tensorflow - use_xla: <span class="hljs-literal">False</span> - framework_version: <span class="hljs-number">2.2</span><span class="hljs-number">.0</span> - python_version: <span class="hljs-number">3.6</span><span class="hljs-number">.10</span> - system: Linux - cpu: x86_64 - architecture: 64bit - date: <span class="hljs-number">2020</span>-06-<span class="hljs-number">29</span> - time: 09:<span class="hljs-number">38</span>:<span class="hljs-number">15.487125</span> - fp16: <span class="hljs-literal">False</span> - use_multiprocessing: <span class="hljs-literal">True</span> - only_pretrain_model: <span class="hljs-literal">False</span> - cpu_ram_mb: <span class="hljs-number">32088</span> - use_gpu: <span class="hljs-literal">True</span> - num_gpus: <span class="hljs-number">1</span> - gpu: TITAN RTX - gpu_ram_mb: <span 
class="hljs-number">24217</span> - gpu_power_watts: <span class="hljs-number">280.0</span> - gpu_performance_state: <span class="hljs-number">2</span> - use_tpu: <span class="hljs-literal">False</span>`}}}),fs=new fa({}),gs=new fa({}),{c(){d=l("meta"),E=h(),j=l("h1"),_=l("a"),z=l("span"),N(B.$$.fragment),ts=h(),U=l("span"),H=n("Benchmarks"),$=h(),N(C.$$.fragment),os=h(),N(ps.$$.fragment),qe=h(),Es=l("p"),ja=n("Let\u2019s take a look at how \u{1F917} Transformers models can be benchmarked, best practices, and already available benchmarks."),xe=h(),W=l("p"),_a=n("A notebook explaining in more detail how to benchmark \u{1F917} Transformers models can be found "),cs=l("a"),ya=n("here"),ka=n("."),Re=h(),V=l("h2"),Y=l("a"),Ps=l("span"),N(hs.$$.fragment),va=h(),Cs=l("span"),Ea=n("How to benchmark \u{1F917} Transformers models"),Le=h(),k=l("p"),wa=n("The classes "),Fs=l("code"),Ta=n("PyTorchBenchmark"),Ba=n("and "),Ns=l("code"),$a=n("TensorFlowBenchmark"),Aa=n("allow to flexibly benchmark \u{1F917} Transformers models. The benchmark classes allow us to measure the "),Ms=l("em"),Pa=n("peak memory usage"),Ca=n(" and "),Is=l("em"),Fa=n("required time"),Na=n(" for both "),Os=l("em"),Ma=n("inference"),Ia=n(" and "),Ss=l("em"),Oa=n("training"),Sa=n("."),ze=h(),N(J.$$.fragment),Ue=h(),y=l("p"),Da=n("The benchmark classes "),Ds=l("code"),qa=n("PyTorchBenchmark"),xa=n("and "),qs=l("code"),Ra=n("TensorFlowBenchmark"),La=n("expect an object of type "),xs=l("code"),za=n("PyTorchBenchmarkArguments"),Ua=n(`and `),Rs=l("code"),Ha=n("TensorFlowBenchmarkArguments"),Va=n(" respectively, for instantiation. "),Ls=l("code"),Xa=n("PyTorchBenchmarkArguments"),Ga=n("and "),zs=l("code"),Wa=n("TensorFlowBenchmarkArguments"),Ya=n("are data classes and contain all relevant configurations for their corresponding benchmark class. In the following example, it is shown how a BERT model of type "),Us=l("em"),Ja=n("bert-base-cased"),Qa=n(" can be benchmarked."),He=h(),N(ms.$$.fragment),Ve=h(),i=l("p"),Ka=n("Here, three arguments are given to the benchmark argument data classes, namely "),Hs=l("code"),Za=n("models"),sn=n(", "),Vs=l("code"),en=n("batch_sizes"),an=n(`, and `),Xs=l("code"),nn=n("sequence_lengths"),rn=n(". The argument "),Gs=l("code"),ln=n("models"),tn=n(" is required and expects a "),Ws=l("code"),on=n("list"),pn=n(` of model identifiers from the `),is=l("a"),cn=n("model hub"),hn=n(" The "),Ys=l("code"),mn=n("list"),un=n(" arguments "),Js=l("code"),bn=n("batch_sizes"),dn=n(" and "),Qs=l("code"),fn=n("sequence_lengths"),gn=n(` define the size of the `),Ks=l("code"),jn=n("input_ids"),_n=n(` on which the model is benchmarked. There are many more parameters that can be configured via the benchmark argument data classes. For more detail on these one can either directly consult the files `),Zs=l("code"),yn=n("src/transformers/benchmark/benchmark_args_utils.py"),kn=n(", "),se=l("code"),vn=n("src/transformers/benchmark/benchmark_args.py"),En=n(` (for PyTorch) and `),ee=l("code"),wn=n("src/transformers/benchmark/benchmark_args_tf.py"),Tn=n(` (for Tensorflow). 
Alternatively, running the following shell commands from root will print out a descriptive list of all configurable parameters for PyTorch and Tensorflow respectively.`),Xe=h(),N(us.$$.fragment),Ge=h(),Q=l("p"),Bn=n("An instantiated benchmark object can then simply be run by calling "),ae=l("code"),$n=n("benchmark.run()"),An=n("."),We=h(),N(bs.$$.fragment),Ye=h(),u=l("p"),Pn=n("By default, the "),ne=l("em"),Cn=n("time"),Fn=n(" and the "),re=l("em"),Nn=n("required memory"),Mn=n(" for "),le=l("em"),In=n("inference"),On=n(` are benchmarked. In the example output above the first two sections show the result corresponding to `),te=l("em"),Sn=n("inference time"),Dn=n(" and "),oe=l("em"),qn=n("inference memory"),xn=n(`. In addition, all relevant information about the computing environment, `),pe=l("em"),Rn=n("e.g."),Ln=n(` the GPU type, the system, the library versions, etc\u2026 are printed out in the third section under `),ce=l("em"),zn=n("ENVIRONMENT INFORMATION"),Un=n(". This information can optionally be saved in a "),he=l("em"),Hn=n(".csv"),Vn=n(` file when adding the argument `),me=l("code"),Xn=n("save_to_csv=True"),Gn=n(" to "),ie=l("code"),Wn=n("PyTorchBenchmarkArguments"),Yn=n(`and `),ue=l("code"),Jn=n("TensorFlowBenchmarkArguments"),Qn=n(`respectively. In this case, every section is saved in a separate `),be=l("em"),Kn=n(".csv"),Zn=n(" file. The path to each "),de=l("em"),sr=n(".csv"),er=n(" file can optionally be defined via the argument data classes."),Je=h(),P=l("p"),ar=n("Instead of benchmarking pre-trained models via their model identifier, "),fe=l("em"),nr=n("e.g."),rr=h(),ge=l("code"),lr=n("bert-base-uncased"),tr=n(`, the user can alternatively benchmark an arbitrary configuration of any available model class. In this case, a `),je=l("code"),or=n("list"),pr=n(` of configurations must be inserted with the benchmark args as follows.`),Qe=h(),N(ds.$$.fragment),Ke=h(),w=l("p"),cr=n("Again, "),_e=l("em"),hr=n("inference time"),mr=n(" and "),ye=l("em"),ir=n("required memory"),ur=n(" for "),ke=l("em"),br=n("inference"),dr=n(` are measured, but this time for customized configurations of the `),ve=l("code"),fr=n("BertModel"),gr=n(` class. This feature can especially be helpful when deciding for which configuration the model should be trained.`),Ze=h(),X=l("h2"),K=l("a"),Ee=l("span"),N(fs.$$.fragment),jr=h(),we=l("span"),_r=n("Benchmark best practices"),sa=h(),ws=l("p"),yr=n("This section lists a couple of best practices one should be aware of when benchmarking a model."),ea=h(),q=l("ul"),F=l("li"),kr=n(`Currently, only single device benchmarking is supported. When benchmarking on GPU, it is recommended that the user specifies on which device the code should be run by setting the `),Te=l("code"),vr=n("CUDA_VISIBLE_DEVICES"),Er=n(` environment variable in the shell, `),Be=l("em"),wr=n("e.g."),Tr=h(),$e=l("code"),Br=n("export CUDA_VISIBLE_DEVICES=0"),$r=n(" before running the code."),Ar=h(),A=l("li"),Pr=n("The option "),Ae=l("code"),Cr=n("no_multi_processing"),Fr=n(" should only be set to "),Pe=l("code"),Nr=n("True"),Mr=n(` for testing and debugging. To ensure accurate memory measurement it is recommended to run each memory benchmark in a separate process by making sure `),Ce=l("code"),Ir=n("no_multi_processing"),Or=n(" is set to "),Fe=l("code"),Sr=n("True"),Dr=n("."),qr=h(),Ne=l("li"),xr=n(`One should always state the environment information when sharing the results of a model benchmark. 
Results can vary heavily between different GPU devices, library versions, etc., so that benchmark results on their own are not very useful for the community.`),aa=h(),G=l("h2"),Z=l("a"),Me=l("span"),N(gs.$$.fragment),Rr=h(),Ie=l("span"),Lr=n("Sharing your benchmark"),na=h(),ss=l("p"),zr=n("Previously all available core models (10 at the time) have been benchmarked for "),Oe=l("em"),Ur=n("inference time"),Hr=n(`, across many different settings: using PyTorch, with and without TorchScript, using TensorFlow, with and without XLA. All of those tests were done across CPUs (except for TensorFlow XLA) and GPUs.`),ra=h(),x=l("p"),Vr=n("The approach is detailed in the "),js=l("a"),Xr=n("following blogpost"),Gr=n(` and the results are available `),_s=l("a"),Wr=n("here"),Yr=n("."),la=h(),es=l("p"),Jr=n("With the new "),Se=l("em"),Qr=n("benchmark"),Kr=n(" tools, it is easier than ever to share your benchmark results with the community"),ta=h(),as=l("ul"),Ts=l("li"),ys=l("a"),Zr=n("PyTorch Benchmarking Results"),sl=n("."),el=h(),Bs=l("li"),ks=l("a"),al=n("TensorFlow Benchmarking Results"),nl=n("."),this.h()},l(e){const p=Ft('[data-svelte="svelte-1phssyn"]',document.head);d=t(p,"META",{name:!0,content:!0}),p.forEach(a),E=m(e),j=t(e,"H1",{class:!0});var vs=o(j);_=t(vs,"A",{id:!0,class:!0,href:!0});var De=o(_);z=t(De,"SPAN",{});var tl=o(z);M(B.$$.fragment,tl),tl.forEach(a),De.forEach(a),ts=m(vs),U=t(vs,"SPAN",{});var ol=o(U);H=r(ol,"Benchmarks"),ol.forEach(a),vs.forEach(a),$=m(e),M(C.$$.fragment,e),os=m(e),M(ps.$$.fragment,e),qe=m(e),Es=t(e,"P",{});var pl=o(Es);ja=r(pl,"Let\u2019s take a look at how \u{1F917} Transformers models can be benchmarked, best practices, and already available benchmarks."),pl.forEach(a),xe=m(e),W=t(e,"P",{});var pa=o(W);_a=r(pa,"A notebook explaining in more detail how to benchmark \u{1F917} Transformers models can be found "),cs=t(pa,"A",{href:!0,rel:!0});var cl=o(cs);ya=r(cl,"here"),cl.forEach(a),ka=r(pa,"."),pa.forEach(a),Re=m(e),V=t(e,"H2",{class:!0});var ca=o(V);Y=t(ca,"A",{id:!0,class:!0,href:!0});var hl=o(Y);Ps=t(hl,"SPAN",{});var ml=o(Ps);M(hs.$$.fragment,ml),ml.forEach(a),hl.forEach(a),va=m(ca),Cs=t(ca,"SPAN",{});var il=o(Cs);Ea=r(il,"How to benchmark \u{1F917} Transformers models"),il.forEach(a),ca.forEach(a),Le=m(e),k=t(e,"P",{});var T=o(k);wa=r(T,"The classes "),Fs=t(T,"CODE",{});var ul=o(Fs);Ta=r(ul,"PyTorchBenchmark"),ul.forEach(a),Ba=r(T,"and "),Ns=t(T,"CODE",{});var bl=o(Ns);$a=r(bl,"TensorFlowBenchmark"),bl.forEach(a),Aa=r(T,"allow to flexibly benchmark \u{1F917} Transformers models. The benchmark classes allow us to measure the "),Ms=t(T,"EM",{});var dl=o(Ms);Pa=r(dl,"peak memory usage"),dl.forEach(a),Ca=r(T," and "),Is=t(T,"EM",{});var fl=o(Is);Fa=r(fl,"required time"),fl.forEach(a),Na=r(T," for both "),Os=t(T,"EM",{});var gl=o(Os);Ma=r(gl,"inference"),gl.forEach(a),Ia=r(T," and "),Ss=t(T,"EM",{});var jl=o(Ss);Oa=r(jl,"training"),jl.forEach(a),Sa=r(T,"."),T.forEach(a),ze=m(e),M(J.$$.fragment,e),Ue=m(e),y=t(e,"P",{});var v=o(y);Da=r(v,"The benchmark classes "),Ds=t(v,"CODE",{});var _l=o(Ds);qa=r(_l,"PyTorchBenchmark"),_l.forEach(a),xa=r(v,"and "),qs=t(v,"CODE",{});var yl=o(qs);Ra=r(yl,"TensorFlowBenchmark"),yl.forEach(a),La=r(v,"expect an object of type "),xs=t(v,"CODE",{});var kl=o(xs);za=r(kl,"PyTorchBenchmarkArguments"),kl.forEach(a),Ua=r(v,`and `),Rs=t(v,"CODE",{});var vl=o(Rs);Ha=r(vl,"TensorFlowBenchmarkArguments"),vl.forEach(a),Va=r(v," respectively, for instantiation. 
"),Ls=t(v,"CODE",{});var El=o(Ls);Xa=r(El,"PyTorchBenchmarkArguments"),El.forEach(a),Ga=r(v,"and "),zs=t(v,"CODE",{});var wl=o(zs);Wa=r(wl,"TensorFlowBenchmarkArguments"),wl.forEach(a),Ya=r(v,"are data classes and contain all relevant configurations for their corresponding benchmark class. In the following example, it is shown how a BERT model of type "),Us=t(v,"EM",{});var Tl=o(Us);Ja=r(Tl,"bert-base-cased"),Tl.forEach(a),Qa=r(v," can be benchmarked."),v.forEach(a),He=m(e),M(ms.$$.fragment,e),Ve=m(e),i=t(e,"P",{});var f=o(i);Ka=r(f,"Here, three arguments are given to the benchmark argument data classes, namely "),Hs=t(f,"CODE",{});var Bl=o(Hs);Za=r(Bl,"models"),Bl.forEach(a),sn=r(f,", "),Vs=t(f,"CODE",{});var $l=o(Vs);en=r($l,"batch_sizes"),$l.forEach(a),an=r(f,`, and `),Xs=t(f,"CODE",{});var Al=o(Xs);nn=r(Al,"sequence_lengths"),Al.forEach(a),rn=r(f,". The argument "),Gs=t(f,"CODE",{});var Pl=o(Gs);ln=r(Pl,"models"),Pl.forEach(a),tn=r(f," is required and expects a "),Ws=t(f,"CODE",{});var Cl=o(Ws);on=r(Cl,"list"),Cl.forEach(a),pn=r(f,` of model identifiers from the `),is=t(f,"A",{href:!0,rel:!0});var Fl=o(is);cn=r(Fl,"model hub"),Fl.forEach(a),hn=r(f," The "),Ys=t(f,"CODE",{});var Nl=o(Ys);mn=r(Nl,"list"),Nl.forEach(a),un=r(f," arguments "),Js=t(f,"CODE",{});var Ml=o(Js);bn=r(Ml,"batch_sizes"),Ml.forEach(a),dn=r(f," and "),Qs=t(f,"CODE",{});var Il=o(Qs);fn=r(Il,"sequence_lengths"),Il.forEach(a),gn=r(f,` define the size of the `),Ks=t(f,"CODE",{});var Ol=o(Ks);jn=r(Ol,"input_ids"),Ol.forEach(a),_n=r(f,` on which the model is benchmarked. There are many more parameters that can be configured via the benchmark argument data classes. For more detail on these one can either directly consult the files `),Zs=t(f,"CODE",{});var Sl=o(Zs);yn=r(Sl,"src/transformers/benchmark/benchmark_args_utils.py"),Sl.forEach(a),kn=r(f,", "),se=t(f,"CODE",{});var Dl=o(se);vn=r(Dl,"src/transformers/benchmark/benchmark_args.py"),Dl.forEach(a),En=r(f,` (for PyTorch) and `),ee=t(f,"CODE",{});var ql=o(ee);wn=r(ql,"src/transformers/benchmark/benchmark_args_tf.py"),ql.forEach(a),Tn=r(f,` (for Tensorflow). Alternatively, running the following shell commands from root will print out a descriptive list of all configurable parameters for PyTorch and Tensorflow respectively.`),f.forEach(a),Xe=m(e),M(us.$$.fragment,e),Ge=m(e),Q=t(e,"P",{});var ha=o(Q);Bn=r(ha,"An instantiated benchmark object can then simply be run by calling "),ae=t(ha,"CODE",{});var xl=o(ae);$n=r(xl,"benchmark.run()"),xl.forEach(a),An=r(ha,"."),ha.forEach(a),We=m(e),M(bs.$$.fragment,e),Ye=m(e),u=t(e,"P",{});var g=o(u);Pn=r(g,"By default, the "),ne=t(g,"EM",{});var Rl=o(ne);Cn=r(Rl,"time"),Rl.forEach(a),Fn=r(g," and the "),re=t(g,"EM",{});var Ll=o(re);Nn=r(Ll,"required memory"),Ll.forEach(a),Mn=r(g," for "),le=t(g,"EM",{});var zl=o(le);In=r(zl,"inference"),zl.forEach(a),On=r(g,` are benchmarked. In the example output above the first two sections show the result corresponding to `),te=t(g,"EM",{});var Ul=o(te);Sn=r(Ul,"inference time"),Ul.forEach(a),Dn=r(g," and "),oe=t(g,"EM",{});var Hl=o(oe);qn=r(Hl,"inference memory"),Hl.forEach(a),xn=r(g,`. In addition, all relevant information about the computing environment, `),pe=t(g,"EM",{});var Vl=o(pe);Rn=r(Vl,"e.g."),Vl.forEach(a),Ln=r(g,` the GPU type, the system, the library versions, etc\u2026 are printed out in the third section under `),ce=t(g,"EM",{});var Xl=o(ce);zn=r(Xl,"ENVIRONMENT INFORMATION"),Xl.forEach(a),Un=r(g,". 
This information can optionally be saved in a "),he=t(g,"EM",{});var Gl=o(he);Hn=r(Gl,".csv"),Gl.forEach(a),Vn=r(g,` file when adding the argument `),me=t(g,"CODE",{});var Wl=o(me);Xn=r(Wl,"save_to_csv=True"),Wl.forEach(a),Gn=r(g," to "),ie=t(g,"CODE",{});var Yl=o(ie);Wn=r(Yl,"PyTorchBenchmarkArguments"),Yl.forEach(a),Yn=r(g,`and `),ue=t(g,"CODE",{});var Jl=o(ue);Jn=r(Jl,"TensorFlowBenchmarkArguments"),Jl.forEach(a),Qn=r(g,`respectively. In this case, every section is saved in a separate `),be=t(g,"EM",{});var Ql=o(be);Kn=r(Ql,".csv"),Ql.forEach(a),Zn=r(g," file. The path to each "),de=t(g,"EM",{});var Kl=o(de);sr=r(Kl,".csv"),Kl.forEach(a),er=r(g," file can optionally be defined via the argument data classes."),g.forEach(a),Je=m(e),P=t(e,"P",{});var ns=o(P);ar=r(ns,"Instead of benchmarking pre-trained models via their model identifier, "),fe=t(ns,"EM",{});var Zl=o(fe);nr=r(Zl,"e.g."),Zl.forEach(a),rr=m(ns),ge=t(ns,"CODE",{});var st=o(ge);lr=r(st,"bert-base-uncased"),st.forEach(a),tr=r(ns,`, the user can alternatively benchmark an arbitrary configuration of any available model class. In this case, a `),je=t(ns,"CODE",{});var et=o(je);or=r(et,"list"),et.forEach(a),pr=r(ns,` of configurations must be inserted with the benchmark args as follows.`),ns.forEach(a),Qe=m(e),M(ds.$$.fragment,e),Ke=m(e),w=t(e,"P",{});var R=o(w);cr=r(R,"Again, "),_e=t(R,"EM",{});var at=o(_e);hr=r(at,"inference time"),at.forEach(a),mr=r(R," and "),ye=t(R,"EM",{});var nt=o(ye);ir=r(nt,"required memory"),nt.forEach(a),ur=r(R," for "),ke=t(R,"EM",{});var rt=o(ke);br=r(rt,"inference"),rt.forEach(a),dr=r(R,` are measured, but this time for customized configurations of the `),ve=t(R,"CODE",{});var lt=o(ve);fr=r(lt,"BertModel"),lt.forEach(a),gr=r(R,` class. This feature can especially be helpful when deciding for which configuration the model should be trained.`),R.forEach(a),Ze=m(e),X=t(e,"H2",{class:!0});var ma=o(X);K=t(ma,"A",{id:!0,class:!0,href:!0});var tt=o(K);Ee=t(tt,"SPAN",{});var ot=o(Ee);M(fs.$$.fragment,ot),ot.forEach(a),tt.forEach(a),jr=m(ma),we=t(ma,"SPAN",{});var pt=o(we);_r=r(pt,"Benchmark best practices"),pt.forEach(a),ma.forEach(a),sa=m(e),ws=t(e,"P",{});var ct=o(ws);yr=r(ct,"This section lists a couple of best practices one should be aware of when benchmarking a model."),ct.forEach(a),ea=m(e),q=t(e,"UL",{});var $s=o(q);F=t($s,"LI",{});var rs=o(F);kr=r(rs,`Currently, only single device benchmarking is supported. When benchmarking on GPU, it is recommended that the user specifies on which device the code should be run by setting the `),Te=t(rs,"CODE",{});var ht=o(Te);vr=r(ht,"CUDA_VISIBLE_DEVICES"),ht.forEach(a),Er=r(rs,` environment variable in the shell, `),Be=t(rs,"EM",{});var mt=o(Be);wr=r(mt,"e.g."),mt.forEach(a),Tr=m(rs),$e=t(rs,"CODE",{});var it=o($e);Br=r(it,"export CUDA_VISIBLE_DEVICES=0"),it.forEach(a),$r=r(rs," before running the code."),rs.forEach(a),Ar=m($s),A=t($s,"LI",{});var L=o(A);Pr=r(L,"The option "),Ae=t(L,"CODE",{});var ut=o(Ae);Cr=r(ut,"no_multi_processing"),ut.forEach(a),Fr=r(L," should only be set to "),Pe=t(L,"CODE",{});var bt=o(Pe);Nr=r(bt,"True"),bt.forEach(a),Mr=r(L,` for testing and debugging. 
To ensure accurate memory measurement it is recommended to run each memory benchmark in a separate process by making sure `),Ce=t(L,"CODE",{});var dt=o(Ce);Ir=r(dt,"no_multi_processing"),dt.forEach(a),Or=r(L," is set to "),Fe=t(L,"CODE",{});var ft=o(Fe);Sr=r(ft,"True"),ft.forEach(a),Dr=r(L,"."),L.forEach(a),qr=m($s),Ne=t($s,"LI",{});var gt=o(Ne);xr=r(gt,`One should always state the environment information when sharing the results of a model benchmark. Results can vary heavily between different GPU devices, library versions, etc., so that benchmark results on their own are not very useful for the community.`),gt.forEach(a),$s.forEach(a),aa=m(e),G=t(e,"H2",{class:!0});var ia=o(G);Z=t(ia,"A",{id:!0,class:!0,href:!0});var jt=o(Z);Me=t(jt,"SPAN",{});var _t=o(Me);M(gs.$$.fragment,_t),_t.forEach(a),jt.forEach(a),Rr=m(ia),Ie=t(ia,"SPAN",{});var yt=o(Ie);Lr=r(yt,"Sharing your benchmark"),yt.forEach(a),ia.forEach(a),na=m(e),ss=t(e,"P",{});var ua=o(ss);zr=r(ua,"Previously all available core models (10 at the time) have been benchmarked for "),Oe=t(ua,"EM",{});var kt=o(Oe);Ur=r(kt,"inference time"),kt.forEach(a),Hr=r(ua,`, across many different settings: using PyTorch, with and without TorchScript, using TensorFlow, with and without XLA. All of those tests were done across CPUs (except for TensorFlow XLA) and GPUs.`),ua.forEach(a),ra=m(e),x=t(e,"P",{});var As=o(x);Vr=r(As,"The approach is detailed in the "),js=t(As,"A",{href:!0,rel:!0});var vt=o(js);Xr=r(vt,"following blogpost"),vt.forEach(a),Gr=r(As,` and the results are available `),_s=t(As,"A",{href:!0,rel:!0});var Et=o(_s);Wr=r(Et,"here"),Et.forEach(a),Yr=r(As,"."),As.forEach(a),la=m(e),es=t(e,"P",{});var ba=o(es);Jr=r(ba,"With the new "),Se=t(ba,"EM",{});var wt=o(Se);Qr=r(wt,"benchmark"),wt.forEach(a),Kr=r(ba," tools, it is easier than ever to share your benchmark results with the community"),ba.forEach(a),ta=m(e),as=t(e,"UL",{});var da=o(as);Ts=t(da,"LI",{});var rl=o(Ts);ys=t(rl,"A",{href:!0,rel:!0});var Tt=o(ys);Zr=r(Tt,"PyTorch Benchmarking Results"),Tt.forEach(a),sl=r(rl,"."),rl.forEach(a),el=m(da),Bs=t(da,"LI",{});var ll=o(Bs);ks=t(ll,"A",{href:!0,rel:!0});var Bt=o(ks);al=r(Bt,"TensorFlow Benchmarking Results"),Bt.forEach(a),nl=r(ll,"."),ll.forEach(a),da.forEach(a),this.h()},h(){b(d,"name","hf:doc:metadata"),b(d,"content",JSON.stringify(St)),b(_,"id","benchmarks"),b(_,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),b(_,"href","#benchmarks"),b(j,"class","relative group"),b(cs,"href","https://github.com/huggingface/notebooks/tree/master/examples/benchmark.ipynb"),b(cs,"rel","nofollow"),b(Y,"id","how-to-benchmark-transformers-models"),b(Y,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),b(Y,"href","#how-to-benchmark-transformers-models"),b(V,"class","relative group"),b(is,"href","https://huggingface.co/models"),b(is,"rel","nofollow"),b(K,"id","benchmark-best-practices"),b(K,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),b(K,"href","#benchmark-best-practices"),b(X,"class","relative group"),b(Z,"id","sharing-your-benchmark"),b(Z,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 
with-hover:group-hover:opacity-100 with-hover:right-full"),b(Z,"href","#sharing-your-benchmark"),b(G,"class","relative group"),b(js,"href","https://medium.com/huggingface/benchmarking-transformers-pytorch-and-tensorflow-e2917fb891c2"),b(js,"rel","nofollow"),b(_s,"href","https://docs.google.com/spreadsheets/d/1sryqufw2D0XlUH4sq3e9Wnxu5EAQkaohzrJbd5HdQ_w/edit?usp=sharing"),b(_s,"rel","nofollow"),b(ys,"href","https://github.com/huggingface/transformers/tree/master/examples/pytorch/benchmarking/README.md"),b(ys,"rel","nofollow"),b(ks,"href","https://github.com/huggingface/transformers/tree/master/examples/tensorflow/benchmarking/README.md"),b(ks,"rel","nofollow")},m(e,p){s(document.head,d),c(e,E,p),c(e,j,p),s(j,_),s(_,z),I(B,z,null),s(j,ts),s(j,U),s(U,H),c(e,$,p),I(C,e,p),c(e,os,p),I(ps,e,p),c(e,qe,p),c(e,Es,p),s(Es,ja),c(e,xe,p),c(e,W,p),s(W,_a),s(W,cs),s(cs,ya),s(W,ka),c(e,Re,p),c(e,V,p),s(V,Y),s(Y,Ps),I(hs,Ps,null),s(V,va),s(V,Cs),s(Cs,Ea),c(e,Le,p),c(e,k,p),s(k,wa),s(k,Fs),s(Fs,Ta),s(k,Ba),s(k,Ns),s(Ns,$a),s(k,Aa),s(k,Ms),s(Ms,Pa),s(k,Ca),s(k,Is),s(Is,Fa),s(k,Na),s(k,Os),s(Os,Ma),s(k,Ia),s(k,Ss),s(Ss,Oa),s(k,Sa),c(e,ze,p),I(J,e,p),c(e,Ue,p),c(e,y,p),s(y,Da),s(y,Ds),s(Ds,qa),s(y,xa),s(y,qs),s(qs,Ra),s(y,La),s(y,xs),s(xs,za),s(y,Ua),s(y,Rs),s(Rs,Ha),s(y,Va),s(y,Ls),s(Ls,Xa),s(y,Ga),s(y,zs),s(zs,Wa),s(y,Ya),s(y,Us),s(Us,Ja),s(y,Qa),c(e,He,p),I(ms,e,p),c(e,Ve,p),c(e,i,p),s(i,Ka),s(i,Hs),s(Hs,Za),s(i,sn),s(i,Vs),s(Vs,en),s(i,an),s(i,Xs),s(Xs,nn),s(i,rn),s(i,Gs),s(Gs,ln),s(i,tn),s(i,Ws),s(Ws,on),s(i,pn),s(i,is),s(is,cn),s(i,hn),s(i,Ys),s(Ys,mn),s(i,un),s(i,Js),s(Js,bn),s(i,dn),s(i,Qs),s(Qs,fn),s(i,gn),s(i,Ks),s(Ks,jn),s(i,_n),s(i,Zs),s(Zs,yn),s(i,kn),s(i,se),s(se,vn),s(i,En),s(i,ee),s(ee,wn),s(i,Tn),c(e,Xe,p),I(us,e,p),c(e,Ge,p),c(e,Q,p),s(Q,Bn),s(Q,ae),s(ae,$n),s(Q,An),c(e,We,p),I(bs,e,p),c(e,Ye,p),c(e,u,p),s(u,Pn),s(u,ne),s(ne,Cn),s(u,Fn),s(u,re),s(re,Nn),s(u,Mn),s(u,le),s(le,In),s(u,On),s(u,te),s(te,Sn),s(u,Dn),s(u,oe),s(oe,qn),s(u,xn),s(u,pe),s(pe,Rn),s(u,Ln),s(u,ce),s(ce,zn),s(u,Un),s(u,he),s(he,Hn),s(u,Vn),s(u,me),s(me,Xn),s(u,Gn),s(u,ie),s(ie,Wn),s(u,Yn),s(u,ue),s(ue,Jn),s(u,Qn),s(u,be),s(be,Kn),s(u,Zn),s(u,de),s(de,sr),s(u,er),c(e,Je,p),c(e,P,p),s(P,ar),s(P,fe),s(fe,nr),s(P,rr),s(P,ge),s(ge,lr),s(P,tr),s(P,je),s(je,or),s(P,pr),c(e,Qe,p),I(ds,e,p),c(e,Ke,p),c(e,w,p),s(w,cr),s(w,_e),s(_e,hr),s(w,mr),s(w,ye),s(ye,ir),s(w,ur),s(w,ke),s(ke,br),s(w,dr),s(w,ve),s(ve,fr),s(w,gr),c(e,Ze,p),c(e,X,p),s(X,K),s(K,Ee),I(fs,Ee,null),s(X,jr),s(X,we),s(we,_r),c(e,sa,p),c(e,ws,p),s(ws,yr),c(e,ea,p),c(e,q,p),s(q,F),s(F,kr),s(F,Te),s(Te,vr),s(F,Er),s(F,Be),s(Be,wr),s(F,Tr),s(F,$e),s($e,Br),s(F,$r),s(q,Ar),s(q,A),s(A,Pr),s(A,Ae),s(Ae,Cr),s(A,Fr),s(A,Pe),s(Pe,Nr),s(A,Mr),s(A,Ce),s(Ce,Ir),s(A,Or),s(A,Fe),s(Fe,Sr),s(A,Dr),s(q,qr),s(q,Ne),s(Ne,xr),c(e,aa,p),c(e,G,p),s(G,Z),s(Z,Me),I(gs,Me,null),s(G,Rr),s(G,Ie),s(Ie,Lr),c(e,na,p),c(e,ss,p),s(ss,zr),s(ss,Oe),s(Oe,Ur),s(ss,Hr),c(e,ra,p),c(e,x,p),s(x,Vr),s(x,js),s(js,Xr),s(x,Gr),s(x,_s),s(_s,Wr),s(x,Yr),c(e,la,p),c(e,es,p),s(es,Jr),s(es,Se),s(Se,Qr),s(es,Kr),c(e,ta,p),c(e,as,p),s(as,Ts),s(Ts,ys),s(ys,Zr),s(Ts,sl),s(as,el),s(as,Bs),s(Bs,ks),s(ks,al),s(Bs,nl),oa=!0},p(e,[p]){const vs={};p&2&&(vs.$$scope={dirty:p,ctx:e}),C.$set(vs);const 
De={};p&2&&(De.$$scope={dirty:p,ctx:e}),J.$set(De)},i(e){oa||(O(B.$$.fragment,e),O(C.$$.fragment,e),O(ps.$$.fragment,e),O(hs.$$.fragment,e),O(J.$$.fragment,e),O(ms.$$.fragment,e),O(us.$$.fragment,e),O(bs.$$.fragment,e),O(ds.$$.fragment,e),O(fs.$$.fragment,e),O(gs.$$.fragment,e),oa=!0)},o(e){S(B.$$.fragment,e),S(C.$$.fragment,e),S(ps.$$.fragment,e),S(hs.$$.fragment,e),S(J.$$.fragment,e),S(ms.$$.fragment,e),S(us.$$.fragment,e),S(bs.$$.fragment,e),S(ds.$$.fragment,e),S(fs.$$.fragment,e),S(gs.$$.fragment,e),oa=!1},d(e){a(d),e&&a(E),e&&a(j),D(B),e&&a($),D(C,e),e&&a(os),D(ps,e),e&&a(qe),e&&a(Es),e&&a(xe),e&&a(W),e&&a(Re),e&&a(V),D(hs),e&&a(Le),e&&a(k),e&&a(ze),D(J,e),e&&a(Ue),e&&a(y),e&&a(He),D(ms,e),e&&a(Ve),e&&a(i),e&&a(Xe),D(us,e),e&&a(Ge),e&&a(Q),e&&a(We),D(bs,e),e&&a(Ye),e&&a(u),e&&a(Je),e&&a(P),e&&a(Qe),D(ds,e),e&&a(Ke),e&&a(w),e&&a(Ze),e&&a(X),D(fs),e&&a(sa),e&&a(ws),e&&a(ea),e&&a(q),e&&a(aa),e&&a(G),D(gs),e&&a(na),e&&a(ss),e&&a(ra),e&&a(x),e&&a(la),e&&a(es),e&&a(ta),e&&a(as)}}}const St={local:"benchmarks",sections:[{local:"how-to-benchmark-transformers-models",title:"How to benchmark \u{1F917} Transformers models"},{local:"benchmark-best-practices",title:"Benchmark best practices"},{local:"sharing-your-benchmark",title:"Sharing your benchmark"}],title:"Benchmarks"};function Dt(ls,d,E){let{fw:j}=d;return ls.$$set=_=>{"fw"in _&&E(0,j=_.fw)},[j]}class Ht extends At{constructor(d){super();Pt(this,d,Dt,Ot,Ct,{fw:0})}}export{Ht as default,St as metadata};
250
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages/training.mdx-378a8a6f.js
import{S as Of,i as Bf,s as Hf,e as r,k as h,w as c,t as i,M as Yf,c as o,d as t,m,a as n,x as u,h as p,b as f,F as a,g as l,y as d,q as g,o as _,B as v}from"../chunks/vendor-4833417e.js";import{T as Bi}from"../chunks/Tip-fffd6df1.js";import{Y as Wo}from"../chunks/Youtube-27813aed.js";import{I as x}from"../chunks/IconCopyLink-4b81c553.js";import{C as y}from"../chunks/CodeBlock-6a3d1b46.js";import{D as Wf}from"../chunks/DocNotebookDropdown-ecff2a90.js";import"../chunks/CopyButton-dacfbfaf.js";function Rf(Y){let $,k;return{c(){$=r("p"),k=i(`You will see a warning about some of the pretrained weights not being used and some weights being randomly initialized. Don\u2019t worry, this is completely normal! The pretrained head of the BERT model is discarded, and replaced with a randomly initialized classification head. You will fine-tune this new model head on your sequence classification task, transferring the knowledge of the pretrained model to it.`)},l(w){$=o(w,"P",{});var b=n($);k=p(b,`You will see a warning about some of the pretrained weights not being used and some weights being randomly initialized. Don\u2019t worry, this is completely normal! The pretrained head of the BERT model is discarded, and replaced with a randomly initialized classification head. You will fine-tune this new model head on your sequence classification task, transferring the knowledge of the pretrained model to it.`),b.forEach(t)},m(w,b){l(w,$,b),a($,k)},d(w){w&&t($)}}}function Gf(Y){let $,k,w,b,T,j,F;return{c(){$=r("p"),k=r("a"),w=i("Trainer"),b=i(" uses "),T=r("a"),j=i("DataCollatorWithPadding"),F=i(" by default so you don\u2019t need to explicitly specify a data collator."),this.h()},l(P){$=o(P,"P",{});var E=n($);k=o(E,"A",{href:!0});var A=n(k);w=p(A,"Trainer"),A.forEach(t),b=p(E," uses "),T=o(E,"A",{href:!0});var z=n(T);j=p(z,"DataCollatorWithPadding"),z.forEach(t),F=p(E," by default so you don\u2019t need to explicitly specify a data collator."),E.forEach(t),this.h()},h(){f(k,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer"),f(T,"href","/docs/transformers/pr_16143/en/main_classes/data_collator#transformers.DataCollatorWithPadding")},m(P,E){l(P,$,E),a($,k),a(k,w),a($,b),a($,T),a(T,j),a($,F)},d(P){P&&t($)}}}function Kf(Y){let $,k,w,b,T,j,F,P;return{c(){$=r("p"),k=i("Get free access to a cloud GPU if you don\u2019t have one with a hosted notebook like "),w=r("a"),b=i("Colaboratory"),T=i(" or "),j=r("a"),F=i("SageMaker StudioLab"),P=i("."),this.h()},l(E){$=o(E,"P",{});var A=n($);k=p(A,"Get free access to a cloud GPU if you don\u2019t have one with a hosted notebook like "),w=o(A,"A",{href:!0,rel:!0});var z=n(w);b=p(z,"Colaboratory"),z.forEach(t),T=p(A," or "),j=o(A,"A",{href:!0,rel:!0});var Le=n(j);F=p(Le,"SageMaker StudioLab"),Le.forEach(t),P=p(A,"."),A.forEach(t),this.h()},h(){f(w,"href","https://colab.research.google.com/"),f(w,"rel","nofollow"),f(j,"href","https://studiolab.sagemaker.aws/"),f(j,"rel","nofollow")},m(E,A){l(E,$,A),a($,k),a($,w),a(w,b),a($,T),a($,j),a(j,F),a($,P)},d(E){E&&t($)}}}function Uf(Y){let 
$,k,w,b,T,j,F,P,E,A,z,Le,Ut,Ro,Fs,I,Me,Go,Jt,Ko,Uo,Jo,Ca,Xo,Zo,Da,Qo,Ss,Xt,Ns,W,re,Fa,Oe,Vo,Sa,en,Is,Be,Ls,Zt,tn,Ms,oe,an,He,sn,rn,Os,Ye,Bs,ne,on,We,Na,nn,ln,Hs,Re,Ys,Qt,pn,Ws,Ge,Rs,Vt,Gs,R,le,Ia,Ke,fn,ea,hn,La,mn,Ks,Ue,Us,L,cn,ta,un,dn,aa,gn,_n,Js,ie,vn,Je,$n,wn,Xs,Xe,Zs,pe,Qs,G,fe,Ma,Ze,yn,Oa,bn,Vs,M,kn,sa,jn,En,Qe,Tn,An,er,ra,Pn,tr,Ve,ar,K,he,Ba,et,qn,Ha,xn,sr,q,oa,zn,Cn,na,Dn,Fn,tt,Ya,Sn,Nn,Wa,In,Ln,at,Mn,On,rr,st,or,C,Bn,Ra,Hn,Yn,Ga,Wn,Rn,Ka,Gn,Kn,nr,rt,lr,me,Un,Ua,Jn,Xn,ir,ot,pr,U,ce,Ja,nt,Zn,Xa,Qn,fr,ue,Vn,la,el,tl,hr,lt,mr,de,al,ia,sl,rl,cr,it,ur,pa,dr,J,ge,Za,pt,ol,Qa,nl,gr,ft,_r,fa,ll,vr,X,_e,Va,ht,il,es,pl,$r,O,fl,ha,hl,ml,ts,cl,ul,wr,mt,yr,ve,br,D,dl,ct,as,gl,_l,ss,vl,$l,rs,wl,yl,kr,ut,jr,Z,$e,os,dt,bl,ns,kl,Er,ma,jl,Tr,gt,Ar,we,El,_t,ls,Tl,Al,Pr,vt,qr,ca,xr,Q,ye,is,$t,Pl,ps,ql,zr,wt,Cr,yt,ua,xl,zl,Dr,da,Cl,Fr,bt,Sr,be,Dl,fs,Fl,Sl,Nr,B,kt,jt,Nl,hs,Il,Ll,Ml,Et,Ol,Tt,S,Bl,ms,Hl,Yl,cs,Wl,Rl,us,Gl,Kl,Ul,At,Jl,Pt,ds,Xl,Zl,qt,Ir,ga,Ql,Lr,xt,Mr,V,ke,gs,zt,Vl,_s,ei,Or,je,ti,vs,ai,si,Br,Ct,Hr,_a,ri,Yr,Dt,Wr,ee,Ee,$s,Ft,oi,ws,ni,Rr,Te,li,St,ys,ii,pi,Gr,Nt,Kr,Ae,fi,va,hi,mi,Ur,It,Jr,Pe,ci,bs,ui,di,Xr,Lt,Zr,qe,Qr,$a,gi,Vr,te,xe,ks,Mt,_i,js,vi,eo,ze,$i,Ot,wi,yi,to,Bt,ao,ae,Ce,Es,Ht,bi,Ts,ki,so,H,ji,wa,Ei,Ti,Yt,As,Ai,Pi,ro,Wt,oo,ya,no,se,De,Ps,Rt,qi,qs,xi,lo,ba,zi,io,Fe,xs,ka,Gt,Ci,Di,Fi,zs,ja,Ea,Si,Ni,po;return j=new x({}),z=new Wf({props:{classNames:"absolute z-10 right-0 top-0",options:[{label:"Mixed",value:"https://colab.research.google.com/github/huggingface/notebooks/blob/master/transformers_doc/training.ipynb"},{label:"PyTorch",value:"https://colab.research.google.com/github/huggingface/notebooks/blob/master/transformers_doc/pytorch/training.ipynb"},{label:"TensorFlow",value:"https://colab.research.google.com/github/huggingface/notebooks/blob/master/transformers_doc/tensorflow/training.ipynb"},{label:"Mixed",value:"https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/transformers_doc/training.ipynb"},{label:"PyTorch",value:"https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/transformers_doc/pytorch/training.ipynb"},{label:"TensorFlow",value:"https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/transformers_doc/tensorflow/training.ipynb"}]}}),Oe=new x({}),Be=new Wo({props:{id:"_BZearw7f0w"}}),Ye=new y({props:{code:`from datasets import load_dataset dataset = load_dataset("yelp_review_full") dataset[100]`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span>dataset = load_dataset(<span class="hljs-string">&quot;yelp_review_full&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>dataset[<span class="hljs-number">100</span>] {<span class="hljs-string">&#x27;label&#x27;</span>: <span class="hljs-number">0</span>, <span class="hljs-string">&#x27;text&#x27;</span>: <span class="hljs-string">&#x27;My expectations for McDonalds are t rarely high. But for one to still fail so spectacularly...that takes something special!\\\\nThe cashier took my friends\\&#x27;s order, then promptly ignored me. I had to force myself in front of a cashier who opened his register to wait on the person BEHIND me. I waited over five minutes for a gigantic order that included precisely one kid\\&#x27;s meal. After watching two people who ordered after me be handed their food, I asked where mine was. 
The manager started yelling at the cashiers for \\\\&quot;serving off their orders\\\\&quot; when they didn\\&#x27;t have their food. But neither cashier was anywhere near those controls, and the manager was the one serving food to customers and clearing the boards.\\\\nThe manager was rude when giving me my order. She didn\\&#x27;t make sure that I had everything ON MY RECEIPT, and never even had the decency to apologize that I felt I was getting poor service.\\\\nI\\&#x27;ve eaten at various McDonalds restaurants for over 30 years. I\\&#x27;ve worked at more than one location. I expect bad days, bad moods, and the occasional mistake. But I have yet to have a decent experience at this store. It will remain a place I avoid unless someone in my party needs to avoid illness from low blood sugar. Perhaps I should go back to the racially biased service of Steak n Shake instead!&#x27;</span>}`}}),Re=new y({props:{code:`from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") def tokenize_function(examples): return tokenizer(examples["text"], padding="max_length", truncation=True) tokenized_datasets = dataset.map(tokenize_function, batched=True)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;bert-base-cased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">def</span> <span class="hljs-title function_">tokenize_function</span>(<span class="hljs-params">examples</span>): <span class="hljs-meta">... </span> <span class="hljs-keyword">return</span> tokenizer(examples[<span class="hljs-string">&quot;text&quot;</span>], padding=<span class="hljs-string">&quot;max_length&quot;</span>, truncation=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenized_datasets = dataset.<span class="hljs-built_in">map</span>(tokenize_function, batched=<span class="hljs-literal">True</span>)`}}),Ge=new y({props:{code:`small_train_dataset = tokenized_datasets["train"].shuffle(seed=42).select(range(1000)) small_eval_dataset = tokenized_datasets["test"].shuffle(seed=42).select(range(1000))`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>small_train_dataset = tokenized_datasets[<span class="hljs-string">&quot;train&quot;</span>].shuffle(seed=<span class="hljs-number">42</span>).select(<span class="hljs-built_in">range</span>(<span class="hljs-number">1000</span>)) <span class="hljs-meta">&gt;&gt;&gt; </span>small_eval_dataset = tokenized_datasets[<span class="hljs-string">&quot;test&quot;</span>].shuffle(seed=<span class="hljs-number">42</span>).select(<span class="hljs-built_in">range</span>(<span class="hljs-number">1000</span>))`}}),Ke=new x({}),Ue=new Wo({props:{id:"nvBXf7s7vTI"}}),Xe=new y({props:{code:`from transformers import AutoModelForSequenceClassification model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", num_labels=5)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModelForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;bert-base-cased&quot;</span>, num_labels=<span class="hljs-number">5</span>)`}}),pe=new 
Bi({props:{$$slots:{default:[Rf]},$$scope:{ctx:Y}}}),Ze=new x({}),Ve=new y({props:{code:`from transformers import TrainingArguments training_args = TrainingArguments(output_dir="test_trainer")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TrainingArguments <span class="hljs-meta">&gt;&gt;&gt; </span>training_args = TrainingArguments(output_dir=<span class="hljs-string">&quot;test_trainer&quot;</span>)`}}),et=new x({}),st=new y({props:{code:`import numpy as np from datasets import load_metric metric = load_metric("accuracy")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> numpy <span class="hljs-keyword">as</span> np <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_metric <span class="hljs-meta">&gt;&gt;&gt; </span>metric = load_metric(<span class="hljs-string">&quot;accuracy&quot;</span>)`}}),rt=new y({props:{code:`def compute_metrics(eval_pred): logits, labels = eval_pred predictions = np.argmax(logits, axis=-1) return metric.compute(predictions=predictions, references=labels)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">def</span> <span class="hljs-title function_">compute_metrics</span>(<span class="hljs-params">eval_pred</span>): <span class="hljs-meta">... </span> logits, labels = eval_pred <span class="hljs-meta">... </span> predictions = np.argmax(logits, axis=-<span class="hljs-number">1</span>) <span class="hljs-meta">... </span> <span class="hljs-keyword">return</span> metric.compute(predictions=predictions, references=labels)`}}),ot=new y({props:{code:`from transformers import TrainingArguments training_args = TrainingArguments(output_dir="test_trainer", evaluation_strategy="epoch")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TrainingArguments <span class="hljs-meta">&gt;&gt;&gt; </span>training_args = TrainingArguments(output_dir=<span class="hljs-string">&quot;test_trainer&quot;</span>, evaluation_strategy=<span class="hljs-string">&quot;epoch&quot;</span>)`}}),nt=new x({}),lt=new y({props:{code:`trainer = Trainer( model=model, args=training_args, train_dataset=small_train_dataset, eval_dataset=small_eval_dataset, compute_metrics=compute_metrics, )`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>trainer = Trainer( <span class="hljs-meta">... </span> model=model, <span class="hljs-meta">... </span> args=training_args, <span class="hljs-meta">... </span> train_dataset=small_train_dataset, <span class="hljs-meta">... </span> eval_dataset=small_eval_dataset, <span class="hljs-meta">... </span> compute_metrics=compute_metrics, <span class="hljs-meta">... 
</span>)`}}),it=new y({props:{code:"trainer.train()",highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>trainer.train()'}}),pt=new x({}),ft=new Wo({props:{id:"rnTGBy2ax1c"}}),ht=new x({}),mt=new y({props:{code:`from transformers import DefaultDataCollator data_collator = DefaultDataCollator(return_tensors="tf")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DefaultDataCollator <span class="hljs-meta">&gt;&gt;&gt; </span>data_collator = DefaultDataCollator(return_tensors=<span class="hljs-string">&quot;tf&quot;</span>)`}}),ve=new Bi({props:{$$slots:{default:[Gf]},$$scope:{ctx:Y}}}),ut=new y({props:{code:`tf_train_dataset = small_train_dataset.to_tf_dataset( columns=["attention_mask", "input_ids", "token_type_ids"], label_cols=["labels"], shuffle=True, collate_fn=data_collator, batch_size=8, ) tf_validation_dataset = small_eval_dataset.to_tf_dataset( columns=["attention_mask", "input_ids", "token_type_ids"], label_cols=["labels"], shuffle=False, collate_fn=data_collator, batch_size=8, )`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>tf_train_dataset = small_train_dataset.to_tf_dataset( <span class="hljs-meta">... </span> columns=[<span class="hljs-string">&quot;attention_mask&quot;</span>, <span class="hljs-string">&quot;input_ids&quot;</span>, <span class="hljs-string">&quot;token_type_ids&quot;</span>], <span class="hljs-meta">... </span> label_cols=[<span class="hljs-string">&quot;labels&quot;</span>], <span class="hljs-meta">... </span> shuffle=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> collate_fn=data_collator, <span class="hljs-meta">... </span> batch_size=<span class="hljs-number">8</span>, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tf_validation_dataset = small_eval_dataset.to_tf_dataset( <span class="hljs-meta">... </span> columns=[<span class="hljs-string">&quot;attention_mask&quot;</span>, <span class="hljs-string">&quot;input_ids&quot;</span>, <span class="hljs-string">&quot;token_type_ids&quot;</span>], <span class="hljs-meta">... </span> label_cols=[<span class="hljs-string">&quot;labels&quot;</span>], <span class="hljs-meta">... </span> shuffle=<span class="hljs-literal">False</span>, <span class="hljs-meta">... </span> collate_fn=data_collator, <span class="hljs-meta">... </span> batch_size=<span class="hljs-number">8</span>, <span class="hljs-meta">... 
</span>)`}}),dt=new x({}),gt=new y({props:{code:`import tensorflow as tf from transformers import TFAutoModelForSequenceClassification model = TFAutoModelForSequenceClassification.from_pretrained("bert-base-cased", num_labels=5)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TFAutoModelForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModelForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;bert-base-cased&quot;</span>, num_labels=<span class="hljs-number">5</span>)`}}),vt=new y({props:{code:`model.compile( optimizer=tf.keras.optimizers.Adam(learning_rate=5e-5), loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=tf.metrics.SparseCategoricalAccuracy(), ) model.fit(tf_train_dataset, validation_data=tf_validation_dataset, epochs=3)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>model.<span class="hljs-built_in">compile</span>( <span class="hljs-meta">... </span> optimizer=tf.keras.optimizers.Adam(learning_rate=<span class="hljs-number">5e-5</span>), <span class="hljs-meta">... </span> loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=<span class="hljs-literal">True</span>), <span class="hljs-meta">... </span> metrics=tf.metrics.SparseCategoricalAccuracy(), <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model.fit(tf_train_dataset, validation_data=tf_validation_dataset, epochs=<span class="hljs-number">3</span>)`}}),$t=new x({}),wt=new Wo({props:{id:"Dh9CL8fyG80"}}),bt=new y({props:{code:`del model del pytorch_model del trainer torch.cuda.empty_cache()`,highlighted:`<span class="hljs-keyword">del</span> model <span class="hljs-keyword">del</span> pytorch_model <span class="hljs-keyword">del</span> trainer torch.cuda.empty_cache()`}}),Et=new y({props:{code:'tokenized_datasets = tokenized_datasets.remove_columns(["text"])',highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>tokenized_datasets = tokenized_datasets.remove_columns([<span class="hljs-string">&quot;text&quot;</span>])'}}),At=new y({props:{code:'tokenized_datasets = tokenized_datasets.rename_column("label", "labels")',highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>tokenized_datasets = tokenized_datasets.rename_column(<span class="hljs-string">&quot;label&quot;</span>, <span class="hljs-string">&quot;labels&quot;</span>)'}}),qt=new y({props:{code:'tokenized_datasets.set_format("torch")',highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>tokenized_datasets.set_format(<span class="hljs-string">&quot;torch&quot;</span>)'}}),xt=new y({props:{code:`small_train_dataset = tokenized_datasets["train"].shuffle(seed=42).select(range(1000)) small_eval_dataset = tokenized_datasets["test"].shuffle(seed=42).select(range(1000))`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>small_train_dataset = tokenized_datasets[<span class="hljs-string">&quot;train&quot;</span>].shuffle(seed=<span class="hljs-number">42</span>).select(<span class="hljs-built_in">range</span>(<span class="hljs-number">1000</span>)) <span class="hljs-meta">&gt;&gt;&gt; </span>small_eval_dataset = tokenized_datasets[<span class="hljs-string">&quot;test&quot;</span>].shuffle(seed=<span class="hljs-number">42</span>).select(<span class="hljs-built_in">range</span>(<span 
class="hljs-number">1000</span>))`}}),zt=new x({}),Ct=new y({props:{code:`from torch.utils.data import DataLoader train_dataloader = DataLoader(small_train_dataset, shuffle=True, batch_size=8) eval_dataloader = DataLoader(small_eval_dataset, batch_size=8)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> torch.utils.data <span class="hljs-keyword">import</span> DataLoader <span class="hljs-meta">&gt;&gt;&gt; </span>train_dataloader = DataLoader(small_train_dataset, shuffle=<span class="hljs-literal">True</span>, batch_size=<span class="hljs-number">8</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>eval_dataloader = DataLoader(small_eval_dataset, batch_size=<span class="hljs-number">8</span>)`}}),Dt=new y({props:{code:`from transformers import AutoModelForSequenceClassification model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", num_labels=5)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModelForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;bert-base-cased&quot;</span>, num_labels=<span class="hljs-number">5</span>)`}}),Ft=new x({}),Nt=new y({props:{code:`from torch.optim import AdamW optimizer = AdamW(model.parameters(), lr=5e-5)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> torch.optim <span class="hljs-keyword">import</span> AdamW <span class="hljs-meta">&gt;&gt;&gt; </span>optimizer = AdamW(model.parameters(), lr=<span class="hljs-number">5e-5</span>)`}}),It=new y({props:{code:`from transformers import get_scheduler num_epochs = 3 num_training_steps = num_epochs * len(train_dataloader) lr_scheduler = get_scheduler( name="linear", optimizer=optimizer, num_warmup_steps=0, num_training_steps=num_training_steps )`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> get_scheduler <span class="hljs-meta">&gt;&gt;&gt; </span>num_epochs = <span class="hljs-number">3</span> <span class="hljs-meta">&gt;&gt;&gt; </span>num_training_steps = num_epochs * <span class="hljs-built_in">len</span>(train_dataloader) <span class="hljs-meta">&gt;&gt;&gt; </span>lr_scheduler = get_scheduler( <span class="hljs-meta">... </span> name=<span class="hljs-string">&quot;linear&quot;</span>, optimizer=optimizer, num_warmup_steps=<span class="hljs-number">0</span>, num_training_steps=num_training_steps <span class="hljs-meta">... 
</span>)`}}),Lt=new y({props:{code:`import torch device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu") model.to(device)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>device = torch.device(<span class="hljs-string">&quot;cuda&quot;</span>) <span class="hljs-keyword">if</span> torch.cuda.is_available() <span class="hljs-keyword">else</span> torch.device(<span class="hljs-string">&quot;cpu&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model.to(device)`}}),qe=new Bi({props:{$$slots:{default:[Kf]},$$scope:{ctx:Y}}}),Mt=new x({}),Bt=new y({props:{code:`from tqdm.auto import tqdm progress_bar = tqdm(range(num_training_steps)) model.train() for epoch in range(num_epochs): for batch in train_dataloader: batch = {k: v.to(device) for k, v in batch.items()} outputs = model(**batch) loss = outputs.loss loss.backward() optimizer.step() lr_scheduler.step() optimizer.zero_grad() progress_bar.update(1)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> tqdm.auto <span class="hljs-keyword">import</span> tqdm <span class="hljs-meta">&gt;&gt;&gt; </span>progress_bar = tqdm(<span class="hljs-built_in">range</span>(num_training_steps)) <span class="hljs-meta">&gt;&gt;&gt; </span>model.train() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">for</span> epoch <span class="hljs-keyword">in</span> <span class="hljs-built_in">range</span>(num_epochs): <span class="hljs-meta">... </span> <span class="hljs-keyword">for</span> batch <span class="hljs-keyword">in</span> train_dataloader: <span class="hljs-meta">... </span> batch = {k: v.to(device) <span class="hljs-keyword">for</span> k, v <span class="hljs-keyword">in</span> batch.items()} <span class="hljs-meta">... </span> outputs = model(**batch) <span class="hljs-meta">... </span> loss = outputs.loss <span class="hljs-meta">... </span> loss.backward() <span class="hljs-meta">... </span> optimizer.step() <span class="hljs-meta">... </span> lr_scheduler.step() <span class="hljs-meta">... </span> optimizer.zero_grad() <span class="hljs-meta">... </span> progress_bar.update(<span class="hljs-number">1</span>)`}}),Ht=new x({}),Wt=new y({props:{code:`metric = load_metric("accuracy") model.eval() for batch in eval_dataloader: batch = {k: v.to(device) for k, v in batch.items()} with torch.no_grad(): outputs = model(**batch) logits = outputs.logits predictions = torch.argmax(logits, dim=-1) metric.add_batch(predictions=predictions, references=batch["labels"]) metric.compute()`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>metric = load_metric(<span class="hljs-string">&quot;accuracy&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model.<span class="hljs-built_in">eval</span>() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">for</span> batch <span class="hljs-keyword">in</span> eval_dataloader: <span class="hljs-meta">... </span> batch = {k: v.to(device) <span class="hljs-keyword">for</span> k, v <span class="hljs-keyword">in</span> batch.items()} <span class="hljs-meta">... </span> <span class="hljs-keyword">with</span> torch.no_grad(): <span class="hljs-meta">... </span> outputs = model(**batch) <span class="hljs-meta">... </span> logits = outputs.logits <span class="hljs-meta">... </span> predictions = torch.argmax(logits, dim=-<span class="hljs-number">1</span>) <span class="hljs-meta">... 
</span> metric.add_batch(predictions=predictions, references=batch[<span class="hljs-string">&quot;labels&quot;</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>metric.compute()`}}),Rt=new x({}),{c(){$=r("meta"),k=h(),w=r("h1"),b=r("a"),T=r("span"),c(j.$$.fragment),F=h(),P=r("span"),E=i("Fine-tune a pretrained model"),A=h(),c(z.$$.fragment),Le=h(),Ut=r("p"),Ro=i("There are significant benefits to using a pretrained model. It reduces computation costs, your carbon footprint, and allows you to use state-of-the-art models without having to train one from scratch. \u{1F917} Transformers provides access to thousands of pretrained models for a wide range of tasks. When you use a pretrained model, you train it on a dataset specific to your task. This is known as fine-tuning, an incredibly powerful training technique. In this tutorial, you will fine-tune a pretrained model with a deep learning framework of your choice:"),Fs=h(),I=r("ul"),Me=r("li"),Go=i("Fine-tune a pretrained model with \u{1F917} Transformers "),Jt=r("a"),Ko=i("Trainer"),Uo=i("."),Jo=h(),Ca=r("li"),Xo=i("Fine-tune a pretrained model in TensorFlow with Keras."),Zo=h(),Da=r("li"),Qo=i("Fine-tune a pretrained model in native PyTorch."),Ss=h(),Xt=r("a"),Ns=h(),W=r("h2"),re=r("a"),Fa=r("span"),c(Oe.$$.fragment),Vo=h(),Sa=r("span"),en=i("Prepare a dataset"),Is=h(),c(Be.$$.fragment),Ls=h(),Zt=r("p"),tn=i("Before you can fine-tune a pretrained model, download a dataset and prepare it for training. The previous tutorial showed you how to process data for training, and now you get an opportunity to put those skills to the test!"),Ms=h(),oe=r("p"),an=i("Begin by loading the "),He=r("a"),sn=i("Yelp Reviews"),rn=i(" dataset:"),Os=h(),c(Ye.$$.fragment),Bs=h(),ne=r("p"),on=i("As you now know, you need a tokenizer to process the text and include a padding and truncation strategy to handle any variable sequence lengths. To process your dataset in one step, use \u{1F917} Datasets "),We=r("a"),Na=r("code"),nn=i("map"),ln=i(" method to apply a preprocessing function over the entire dataset:"),Hs=h(),c(Re.$$.fragment),Ys=h(),Qt=r("p"),pn=i("If you like, you can create a smaller subset of the full dataset to fine-tune on to reduce the time it takes:"),Ws=h(),c(Ge.$$.fragment),Rs=h(),Vt=r("a"),Gs=h(),R=r("h2"),le=r("a"),Ia=r("span"),c(Ke.$$.fragment),fn=h(),ea=r("span"),hn=i("Fine-tune with "),La=r("code"),mn=i("Trainer"),Ks=h(),c(Ue.$$.fragment),Us=h(),L=r("p"),cn=i("\u{1F917} Transformers provides a "),ta=r("a"),un=i("Trainer"),dn=i(" class optimized for training \u{1F917} Transformers models, making it easier to start training without manually writing your own training loop. The "),aa=r("a"),gn=i("Trainer"),_n=i(" API supports a wide range of training options and features such as logging, gradient accumulation, and mixed precision."),Js=h(),ie=r("p"),vn=i("Start by loading your model and specify the number of expected labels. From the Yelp Review "),Je=r("a"),$n=i("dataset card"),wn=i(", you know there are five labels:"),Xs=h(),c(Xe.$$.fragment),Zs=h(),c(pe.$$.fragment),Qs=h(),G=r("h3"),fe=r("a"),Ma=r("span"),c(Ze.$$.fragment),yn=h(),Oa=r("span"),bn=i("Training hyperparameters"),Vs=h(),M=r("p"),kn=i("Next, create a "),sa=r("a"),jn=i("TrainingArguments"),En=i(" class which contains all the hyperparameters you can tune as well as flags for activating different training options. 
For this tutorial you can start with the default training "),Qe=r("a"),Tn=i("hyperparameters"),An=i(", but feel free to experiment with these to find your optimal settings."),er=h(),ra=r("p"),Pn=i("Specify where to save the checkpoints from your training:"),tr=h(),c(Ve.$$.fragment),ar=h(),K=r("h3"),he=r("a"),Ba=r("span"),c(et.$$.fragment),qn=h(),Ha=r("span"),xn=i("Metrics"),sr=h(),q=r("p"),oa=r("a"),zn=i("Trainer"),Cn=i(" does not automatically evaluate model performance during training. You will need to pass "),na=r("a"),Dn=i("Trainer"),Fn=i(" a function to compute and report metrics. The \u{1F917} Datasets library provides a simple "),tt=r("a"),Ya=r("code"),Sn=i("accuracy"),Nn=i(" function you can load with the "),Wa=r("code"),In=i("load_metric"),Ln=i(" (see this "),at=r("a"),Mn=i("tutorial"),On=i(" for more information) function:"),rr=h(),c(st.$$.fragment),or=h(),C=r("p"),Bn=i("Call "),Ra=r("code"),Hn=i("compute"),Yn=i(" on "),Ga=r("code"),Wn=i("metric"),Rn=i(" to calculate the accuracy of your predictions. Before passing your predictions to "),Ka=r("code"),Gn=i("compute"),Kn=i(", you need to convert the predictions to logits (remember all \u{1F917} Transformers models return logits):"),nr=h(),c(rt.$$.fragment),lr=h(),me=r("p"),Un=i("If you\u2019d like to monitor your evaluation metrics during fine-tuning, specify the "),Ua=r("code"),Jn=i("evaluation_strategy"),Xn=i(" parameter in your training arguments to report the evaluation metric at the end of each epoch:"),ir=h(),c(ot.$$.fragment),pr=h(),U=r("h3"),ce=r("a"),Ja=r("span"),c(nt.$$.fragment),Zn=h(),Xa=r("span"),Qn=i("Trainer"),fr=h(),ue=r("p"),Vn=i("Create a "),la=r("a"),el=i("Trainer"),tl=i(" object with your model, training arguments, training and test datasets, and evaluation function:"),hr=h(),c(lt.$$.fragment),mr=h(),de=r("p"),al=i("Then fine-tune your model by calling "),ia=r("a"),sl=i("train()"),rl=i(":"),cr=h(),c(it.$$.fragment),ur=h(),pa=r("a"),dr=h(),J=r("h2"),ge=r("a"),Za=r("span"),c(pt.$$.fragment),ol=h(),Qa=r("span"),nl=i("Fine-tune with Keras"),gr=h(),c(ft.$$.fragment),_r=h(),fa=r("p"),ll=i("\u{1F917} Transformers models also supports training in TensorFlow with the Keras API. You only need to make a few changes before you can fine-tune."),vr=h(),X=r("h3"),_e=r("a"),Va=r("span"),c(ht.$$.fragment),il=h(),es=r("span"),pl=i("Convert dataset to TensorFlow format"),$r=h(),O=r("p"),fl=i("The "),ha=r("a"),hl=i("DefaultDataCollator"),ml=i(" assembles tensors into a batch for the model to train on. Make sure you specify "),ts=r("code"),cl=i("return_tensors"),ul=i(" to return TensorFlow tensors:"),wr=h(),c(mt.$$.fragment),yr=h(),c(ve.$$.fragment),br=h(),D=r("p"),dl=i("Next, convert the tokenized datasets to TensorFlow datasets with the "),ct=r("a"),as=r("code"),gl=i("to_tf_dataset"),_l=i(" method. 
Specify your inputs in "),ss=r("code"),vl=i("columns"),$l=i(", and your label in "),rs=r("code"),wl=i("label_cols"),yl=i(":"),kr=h(),c(ut.$$.fragment),jr=h(),Z=r("h3"),$e=r("a"),os=r("span"),c(dt.$$.fragment),bl=h(),ns=r("span"),kl=i("Compile and fit"),Er=h(),ma=r("p"),jl=i("Load a TensorFlow model with the expected number of labels:"),Tr=h(),c(gt.$$.fragment),Ar=h(),we=r("p"),El=i("Then compile and fine-tune your model with "),_t=r("a"),ls=r("code"),Tl=i("fit"),Al=i(" as you would with any other Keras model:"),Pr=h(),c(vt.$$.fragment),qr=h(),ca=r("a"),xr=h(),Q=r("h2"),ye=r("a"),is=r("span"),c($t.$$.fragment),Pl=h(),ps=r("span"),ql=i("Fine-tune in native PyTorch"),zr=h(),c(wt.$$.fragment),Cr=h(),yt=r("p"),ua=r("a"),xl=i("Trainer"),zl=i(" takes care of the training loop and allows you to fine-tune a model in a single line of code. For users who prefer to write their own training loop, you can also fine-tune a \u{1F917} Transformers model in native PyTorch."),Dr=h(),da=r("p"),Cl=i("At this point, you may need to restart your notebook or execute the following code to free some memory:"),Fr=h(),c(bt.$$.fragment),Sr=h(),be=r("p"),Dl=i("Next, manually postprocess "),fs=r("code"),Fl=i("tokenized_dataset"),Sl=i(" to prepare it for training."),Nr=h(),B=r("ol"),kt=r("li"),jt=r("p"),Nl=i("Remove the "),hs=r("code"),Il=i("text"),Ll=i(" column because the model does not accept raw text as an input:"),Ml=h(),c(Et.$$.fragment),Ol=h(),Tt=r("li"),S=r("p"),Bl=i("Rename the "),ms=r("code"),Hl=i("label"),Yl=i(" column to "),cs=r("code"),Wl=i("labels"),Rl=i(" because the model expects the argument to be named "),us=r("code"),Gl=i("labels"),Kl=i(":"),Ul=h(),c(At.$$.fragment),Jl=h(),Pt=r("li"),ds=r("p"),Xl=i("Set the format of the dataset to return PyTorch tensors instead of lists:"),Zl=h(),c(qt.$$.fragment),Ir=h(),ga=r("p"),Ql=i("Then create a smaller subset of the dataset as previously shown to speed up the fine-tuning:"),Lr=h(),c(xt.$$.fragment),Mr=h(),V=r("h3"),ke=r("a"),gs=r("span"),c(zt.$$.fragment),Vl=h(),_s=r("span"),ei=i("DataLoader"),Or=h(),je=r("p"),ti=i("Create a "),vs=r("code"),ai=i("DataLoader"),si=i(" for your training and test datasets so you can iterate over batches of data:"),Br=h(),c(Ct.$$.fragment),Hr=h(),_a=r("p"),ri=i("Load your model with the number of expected labels:"),Yr=h(),c(Dt.$$.fragment),Wr=h(),ee=r("h3"),Ee=r("a"),$s=r("span"),c(Ft.$$.fragment),oi=h(),ws=r("span"),ni=i("Optimizer and learning rate scheduler"),Rr=h(),Te=r("p"),li=i("Create an optimizer and learning rate scheduler to fine-tune the model. Let\u2019s use the "),St=r("a"),ys=r("code"),ii=i("AdamW"),pi=i(" optimizer from PyTorch:"),Gr=h(),c(Nt.$$.fragment),Kr=h(),Ae=r("p"),fi=i("Create the default learning rate scheduler from "),va=r("a"),hi=i("Trainer"),mi=i(":"),Ur=h(),c(It.$$.fragment),Jr=h(),Pe=r("p"),ci=i("Lastly, specify "),bs=r("code"),ui=i("device"),di=i(" to use a GPU if you have access to one. Otherwise, training on a CPU may take several hours instead of a couple of minutes."),Xr=h(),c(Lt.$$.fragment),Zr=h(),c(qe.$$.fragment),Qr=h(),$a=r("p"),gi=i("Great, now you are ready to train! 
\u{1F973}"),Vr=h(),te=r("h3"),xe=r("a"),ks=r("span"),c(Mt.$$.fragment),_i=h(),js=r("span"),vi=i("Training loop"),eo=h(),ze=r("p"),$i=i("To keep track of your training progress, use the "),Ot=r("a"),wi=i("tqdm"),yi=i(" library to add a progress bar over the number of training steps:"),to=h(),c(Bt.$$.fragment),ao=h(),ae=r("h3"),Ce=r("a"),Es=r("span"),c(Ht.$$.fragment),bi=h(),Ts=r("span"),ki=i("Metrics"),so=h(),H=r("p"),ji=i("Just like how you need to add an evaluation function to "),wa=r("a"),Ei=i("Trainer"),Ti=i(", you need to do the same when you write your own training loop. But instead of calculating and reporting the metric at the end of each epoch, this time you will accumulate all the batches with "),Yt=r("a"),As=r("code"),Ai=i("add_batch"),Pi=i(" and calculate the metric at the very end."),ro=h(),c(Wt.$$.fragment),oo=h(),ya=r("a"),no=h(),se=r("h2"),De=r("a"),Ps=r("span"),c(Rt.$$.fragment),qi=h(),qs=r("span"),xi=i("Additional resources"),lo=h(),ba=r("p"),zi=i("For more fine-tuning examples, refer to:"),io=h(),Fe=r("ul"),xs=r("li"),ka=r("p"),Gt=r("a"),Ci=i("\u{1F917} Transformers Examples"),Di=i(` includes scripts to train common NLP tasks in PyTorch and TensorFlow.`),Fi=h(),zs=r("li"),ja=r("p"),Ea=r("a"),Si=i("\u{1F917} Transformers Notebooks"),Ni=i(" contains various notebooks on how to fine-tune a model for specific tasks in PyTorch and TensorFlow."),this.h()},l(e){const s=Yf('[data-svelte="svelte-1phssyn"]',document.head);$=o(s,"META",{name:!0,content:!0}),s.forEach(t),k=m(e),w=o(e,"H1",{class:!0});var Kt=n(w);b=o(Kt,"A",{id:!0,class:!0,href:!0});var Cs=n(b);T=o(Cs,"SPAN",{});var Ds=n(T);u(j.$$.fragment,Ds),Ds.forEach(t),Cs.forEach(t),F=m(Kt),P=o(Kt,"SPAN",{});var Hi=n(P);E=p(Hi,"Fine-tune a pretrained model"),Hi.forEach(t),Kt.forEach(t),A=m(e),u(z.$$.fragment,e),Le=m(e),Ut=o(e,"P",{});var Yi=n(Ut);Ro=p(Yi,"There are significant benefits to using a pretrained model. It reduces computation costs, your carbon footprint, and allows you to use state-of-the-art models without having to train one from scratch. \u{1F917} Transformers provides access to thousands of pretrained models for a wide range of tasks. When you use a pretrained model, you train it on a dataset specific to your task. This is known as fine-tuning, an incredibly powerful training technique. In this tutorial, you will fine-tune a pretrained model with a deep learning framework of your choice:"),Yi.forEach(t),Fs=m(e),I=o(e,"UL",{});var Ta=n(I);Me=o(Ta,"LI",{});var fo=n(Me);Go=p(fo,"Fine-tune a pretrained model with \u{1F917} Transformers "),Jt=o(fo,"A",{href:!0});var Wi=n(Jt);Ko=p(Wi,"Trainer"),Wi.forEach(t),Uo=p(fo,"."),fo.forEach(t),Jo=m(Ta),Ca=o(Ta,"LI",{});var Ri=n(Ca);Xo=p(Ri,"Fine-tune a pretrained model in TensorFlow with Keras."),Ri.forEach(t),Zo=m(Ta),Da=o(Ta,"LI",{});var Gi=n(Da);Qo=p(Gi,"Fine-tune a pretrained model in native PyTorch."),Gi.forEach(t),Ta.forEach(t),Ss=m(e),Xt=o(e,"A",{id:!0}),n(Xt).forEach(t),Ns=m(e),W=o(e,"H2",{class:!0});var ho=n(W);re=o(ho,"A",{id:!0,class:!0,href:!0});var Ki=n(re);Fa=o(Ki,"SPAN",{});var Ui=n(Fa);u(Oe.$$.fragment,Ui),Ui.forEach(t),Ki.forEach(t),Vo=m(ho),Sa=o(ho,"SPAN",{});var Ji=n(Sa);en=p(Ji,"Prepare a dataset"),Ji.forEach(t),ho.forEach(t),Is=m(e),u(Be.$$.fragment,e),Ls=m(e),Zt=o(e,"P",{});var Xi=n(Zt);tn=p(Xi,"Before you can fine-tune a pretrained model, download a dataset and prepare it for training. 
The previous tutorial showed you how to process data for training, and now you get an opportunity to put those skills to the test!"),Xi.forEach(t),Ms=m(e),oe=o(e,"P",{});var mo=n(oe);an=p(mo,"Begin by loading the "),He=o(mo,"A",{href:!0,rel:!0});var Zi=n(He);sn=p(Zi,"Yelp Reviews"),Zi.forEach(t),rn=p(mo," dataset:"),mo.forEach(t),Os=m(e),u(Ye.$$.fragment,e),Bs=m(e),ne=o(e,"P",{});var co=n(ne);on=p(co,"As you now know, you need a tokenizer to process the text and include a padding and truncation strategy to handle any variable sequence lengths. To process your dataset in one step, use \u{1F917} Datasets "),We=o(co,"A",{href:!0,rel:!0});var Qi=n(We);Na=o(Qi,"CODE",{});var Vi=n(Na);nn=p(Vi,"map"),Vi.forEach(t),Qi.forEach(t),ln=p(co," method to apply a preprocessing function over the entire dataset:"),co.forEach(t),Hs=m(e),u(Re.$$.fragment,e),Ys=m(e),Qt=o(e,"P",{});var ep=n(Qt);pn=p(ep,"If you like, you can create a smaller subset of the full dataset to fine-tune on to reduce the time it takes:"),ep.forEach(t),Ws=m(e),u(Ge.$$.fragment,e),Rs=m(e),Vt=o(e,"A",{id:!0}),n(Vt).forEach(t),Gs=m(e),R=o(e,"H2",{class:!0});var uo=n(R);le=o(uo,"A",{id:!0,class:!0,href:!0});var tp=n(le);Ia=o(tp,"SPAN",{});var ap=n(Ia);u(Ke.$$.fragment,ap),ap.forEach(t),tp.forEach(t),fn=m(uo),ea=o(uo,"SPAN",{});var Ii=n(ea);hn=p(Ii,"Fine-tune with "),La=o(Ii,"CODE",{});var sp=n(La);mn=p(sp,"Trainer"),sp.forEach(t),Ii.forEach(t),uo.forEach(t),Ks=m(e),u(Ue.$$.fragment,e),Us=m(e),L=o(e,"P",{});var Aa=n(L);cn=p(Aa,"\u{1F917} Transformers provides a "),ta=o(Aa,"A",{href:!0});var rp=n(ta);un=p(rp,"Trainer"),rp.forEach(t),dn=p(Aa," class optimized for training \u{1F917} Transformers models, making it easier to start training without manually writing your own training loop. The "),aa=o(Aa,"A",{href:!0});var op=n(aa);gn=p(op,"Trainer"),op.forEach(t),_n=p(Aa," API supports a wide range of training options and features such as logging, gradient accumulation, and mixed precision."),Aa.forEach(t),Js=m(e),ie=o(e,"P",{});var go=n(ie);vn=p(go,"Start by loading your model and specify the number of expected labels. From the Yelp Review "),Je=o(go,"A",{href:!0,rel:!0});var np=n(Je);$n=p(np,"dataset card"),np.forEach(t),wn=p(go,", you know there are five labels:"),go.forEach(t),Xs=m(e),u(Xe.$$.fragment,e),Zs=m(e),u(pe.$$.fragment,e),Qs=m(e),G=o(e,"H3",{class:!0});var _o=n(G);fe=o(_o,"A",{id:!0,class:!0,href:!0});var lp=n(fe);Ma=o(lp,"SPAN",{});var ip=n(Ma);u(Ze.$$.fragment,ip),ip.forEach(t),lp.forEach(t),yn=m(_o),Oa=o(_o,"SPAN",{});var pp=n(Oa);bn=p(pp,"Training hyperparameters"),pp.forEach(t),_o.forEach(t),Vs=m(e),M=o(e,"P",{});var Pa=n(M);kn=p(Pa,"Next, create a "),sa=o(Pa,"A",{href:!0});var fp=n(sa);jn=p(fp,"TrainingArguments"),fp.forEach(t),En=p(Pa," class which contains all the hyperparameters you can tune as well as flags for activating different training options. 
For this tutorial you can start with the default training "),Qe=o(Pa,"A",{href:!0,rel:!0});var hp=n(Qe);Tn=p(hp,"hyperparameters"),hp.forEach(t),An=p(Pa,", but feel free to experiment with these to find your optimal settings."),Pa.forEach(t),er=m(e),ra=o(e,"P",{});var mp=n(ra);Pn=p(mp,"Specify where to save the checkpoints from your training:"),mp.forEach(t),tr=m(e),u(Ve.$$.fragment,e),ar=m(e),K=o(e,"H3",{class:!0});var vo=n(K);he=o(vo,"A",{id:!0,class:!0,href:!0});var cp=n(he);Ba=o(cp,"SPAN",{});var up=n(Ba);u(et.$$.fragment,up),up.forEach(t),cp.forEach(t),qn=m(vo),Ha=o(vo,"SPAN",{});var dp=n(Ha);xn=p(dp,"Metrics"),dp.forEach(t),vo.forEach(t),sr=m(e),q=o(e,"P",{});var N=n(q);oa=o(N,"A",{href:!0});var gp=n(oa);zn=p(gp,"Trainer"),gp.forEach(t),Cn=p(N," does not automatically evaluate model performance during training. You will need to pass "),na=o(N,"A",{href:!0});var _p=n(na);Dn=p(_p,"Trainer"),_p.forEach(t),Fn=p(N," a function to compute and report metrics. The \u{1F917} Datasets library provides a simple "),tt=o(N,"A",{href:!0,rel:!0});var vp=n(tt);Ya=o(vp,"CODE",{});var $p=n(Ya);Sn=p($p,"accuracy"),$p.forEach(t),vp.forEach(t),Nn=p(N," function you can load with the "),Wa=o(N,"CODE",{});var wp=n(Wa);In=p(wp,"load_metric"),wp.forEach(t),Ln=p(N," (see this "),at=o(N,"A",{href:!0,rel:!0});var yp=n(at);Mn=p(yp,"tutorial"),yp.forEach(t),On=p(N," for more information) function:"),N.forEach(t),rr=m(e),u(st.$$.fragment,e),or=m(e),C=o(e,"P",{});var Se=n(C);Bn=p(Se,"Call "),Ra=o(Se,"CODE",{});var bp=n(Ra);Hn=p(bp,"compute"),bp.forEach(t),Yn=p(Se," on "),Ga=o(Se,"CODE",{});var kp=n(Ga);Wn=p(kp,"metric"),kp.forEach(t),Rn=p(Se," to calculate the accuracy of your predictions. Before passing your predictions to "),Ka=o(Se,"CODE",{});var jp=n(Ka);Gn=p(jp,"compute"),jp.forEach(t),Kn=p(Se,", you need to convert the predictions to logits (remember all \u{1F917} Transformers models return logits):"),Se.forEach(t),nr=m(e),u(rt.$$.fragment,e),lr=m(e),me=o(e,"P",{});var $o=n(me);Un=p($o,"If you\u2019d like to monitor your evaluation metrics during fine-tuning, specify the "),Ua=o($o,"CODE",{});var Ep=n(Ua);Jn=p(Ep,"evaluation_strategy"),Ep.forEach(t),Xn=p($o," parameter in your training arguments to report the evaluation metric at the end of each epoch:"),$o.forEach(t),ir=m(e),u(ot.$$.fragment,e),pr=m(e),U=o(e,"H3",{class:!0});var wo=n(U);ce=o(wo,"A",{id:!0,class:!0,href:!0});var Tp=n(ce);Ja=o(Tp,"SPAN",{});var Ap=n(Ja);u(nt.$$.fragment,Ap),Ap.forEach(t),Tp.forEach(t),Zn=m(wo),Xa=o(wo,"SPAN",{});var Pp=n(Xa);Qn=p(Pp,"Trainer"),Pp.forEach(t),wo.forEach(t),fr=m(e),ue=o(e,"P",{});var yo=n(ue);Vn=p(yo,"Create a "),la=o(yo,"A",{href:!0});var qp=n(la);el=p(qp,"Trainer"),qp.forEach(t),tl=p(yo," object with your model, training arguments, training and test datasets, and evaluation function:"),yo.forEach(t),hr=m(e),u(lt.$$.fragment,e),mr=m(e),de=o(e,"P",{});var bo=n(de);al=p(bo,"Then fine-tune your model by calling "),ia=o(bo,"A",{href:!0});var xp=n(ia);sl=p(xp,"train()"),xp.forEach(t),rl=p(bo,":"),bo.forEach(t),cr=m(e),u(it.$$.fragment,e),ur=m(e),pa=o(e,"A",{id:!0}),n(pa).forEach(t),dr=m(e),J=o(e,"H2",{class:!0});var ko=n(J);ge=o(ko,"A",{id:!0,class:!0,href:!0});var zp=n(ge);Za=o(zp,"SPAN",{});var Cp=n(Za);u(pt.$$.fragment,Cp),Cp.forEach(t),zp.forEach(t),ol=m(ko),Qa=o(ko,"SPAN",{});var Dp=n(Qa);nl=p(Dp,"Fine-tune with Keras"),Dp.forEach(t),ko.forEach(t),gr=m(e),u(ft.$$.fragment,e),_r=m(e),fa=o(e,"P",{});var Fp=n(fa);ll=p(Fp,"\u{1F917} Transformers models also supports training in TensorFlow with the Keras API. 
You only need to make a few changes before you can fine-tune."),Fp.forEach(t),vr=m(e),X=o(e,"H3",{class:!0});var jo=n(X);_e=o(jo,"A",{id:!0,class:!0,href:!0});var Sp=n(_e);Va=o(Sp,"SPAN",{});var Np=n(Va);u(ht.$$.fragment,Np),Np.forEach(t),Sp.forEach(t),il=m(jo),es=o(jo,"SPAN",{});var Ip=n(es);pl=p(Ip,"Convert dataset to TensorFlow format"),Ip.forEach(t),jo.forEach(t),$r=m(e),O=o(e,"P",{});var qa=n(O);fl=p(qa,"The "),ha=o(qa,"A",{href:!0});var Lp=n(ha);hl=p(Lp,"DefaultDataCollator"),Lp.forEach(t),ml=p(qa," assembles tensors into a batch for the model to train on. Make sure you specify "),ts=o(qa,"CODE",{});var Mp=n(ts);cl=p(Mp,"return_tensors"),Mp.forEach(t),ul=p(qa," to return TensorFlow tensors:"),qa.forEach(t),wr=m(e),u(mt.$$.fragment,e),yr=m(e),u(ve.$$.fragment,e),br=m(e),D=o(e,"P",{});var Ne=n(D);dl=p(Ne,"Next, convert the tokenized datasets to TensorFlow datasets with the "),ct=o(Ne,"A",{href:!0,rel:!0});var Op=n(ct);as=o(Op,"CODE",{});var Bp=n(as);gl=p(Bp,"to_tf_dataset"),Bp.forEach(t),Op.forEach(t),_l=p(Ne," method. Specify your inputs in "),ss=o(Ne,"CODE",{});var Hp=n(ss);vl=p(Hp,"columns"),Hp.forEach(t),$l=p(Ne,", and your label in "),rs=o(Ne,"CODE",{});var Yp=n(rs);wl=p(Yp,"label_cols"),Yp.forEach(t),yl=p(Ne,":"),Ne.forEach(t),kr=m(e),u(ut.$$.fragment,e),jr=m(e),Z=o(e,"H3",{class:!0});var Eo=n(Z);$e=o(Eo,"A",{id:!0,class:!0,href:!0});var Wp=n($e);os=o(Wp,"SPAN",{});var Rp=n(os);u(dt.$$.fragment,Rp),Rp.forEach(t),Wp.forEach(t),bl=m(Eo),ns=o(Eo,"SPAN",{});var Gp=n(ns);kl=p(Gp,"Compile and fit"),Gp.forEach(t),Eo.forEach(t),Er=m(e),ma=o(e,"P",{});var Kp=n(ma);jl=p(Kp,"Load a TensorFlow model with the expected number of labels:"),Kp.forEach(t),Tr=m(e),u(gt.$$.fragment,e),Ar=m(e),we=o(e,"P",{});var To=n(we);El=p(To,"Then compile and fine-tune your model with "),_t=o(To,"A",{href:!0,rel:!0});var Up=n(_t);ls=o(Up,"CODE",{});var Jp=n(ls);Tl=p(Jp,"fit"),Jp.forEach(t),Up.forEach(t),Al=p(To," as you would with any other Keras model:"),To.forEach(t),Pr=m(e),u(vt.$$.fragment,e),qr=m(e),ca=o(e,"A",{id:!0}),n(ca).forEach(t),xr=m(e),Q=o(e,"H2",{class:!0});var Ao=n(Q);ye=o(Ao,"A",{id:!0,class:!0,href:!0});var Xp=n(ye);is=o(Xp,"SPAN",{});var Zp=n(is);u($t.$$.fragment,Zp),Zp.forEach(t),Xp.forEach(t),Pl=m(Ao),ps=o(Ao,"SPAN",{});var Qp=n(ps);ql=p(Qp,"Fine-tune in native PyTorch"),Qp.forEach(t),Ao.forEach(t),zr=m(e),u(wt.$$.fragment,e),Cr=m(e),yt=o(e,"P",{});var Li=n(yt);ua=o(Li,"A",{href:!0});var Vp=n(ua);xl=p(Vp,"Trainer"),Vp.forEach(t),zl=p(Li," takes care of the training loop and allows you to fine-tune a model in a single line of code. 
For users who prefer to write their own training loop, you can also fine-tune a \u{1F917} Transformers model in native PyTorch."),Li.forEach(t),Dr=m(e),da=o(e,"P",{});var ef=n(da);Cl=p(ef,"At this point, you may need to restart your notebook or execute the following code to free some memory:"),ef.forEach(t),Fr=m(e),u(bt.$$.fragment,e),Sr=m(e),be=o(e,"P",{});var Po=n(be);Dl=p(Po,"Next, manually postprocess "),fs=o(Po,"CODE",{});var tf=n(fs);Fl=p(tf,"tokenized_dataset"),tf.forEach(t),Sl=p(Po," to prepare it for training."),Po.forEach(t),Nr=m(e),B=o(e,"OL",{});var xa=n(B);kt=o(xa,"LI",{});var qo=n(kt);jt=o(qo,"P",{});var xo=n(jt);Nl=p(xo,"Remove the "),hs=o(xo,"CODE",{});var af=n(hs);Il=p(af,"text"),af.forEach(t),Ll=p(xo," column because the model does not accept raw text as an input:"),xo.forEach(t),Ml=m(qo),u(Et.$$.fragment,qo),qo.forEach(t),Ol=m(xa),Tt=o(xa,"LI",{});var zo=n(Tt);S=o(zo,"P",{});var Ie=n(S);Bl=p(Ie,"Rename the "),ms=o(Ie,"CODE",{});var sf=n(ms);Hl=p(sf,"label"),sf.forEach(t),Yl=p(Ie," column to "),cs=o(Ie,"CODE",{});var rf=n(cs);Wl=p(rf,"labels"),rf.forEach(t),Rl=p(Ie," because the model expects the argument to be named "),us=o(Ie,"CODE",{});var of=n(us);Gl=p(of,"labels"),of.forEach(t),Kl=p(Ie,":"),Ie.forEach(t),Ul=m(zo),u(At.$$.fragment,zo),zo.forEach(t),Jl=m(xa),Pt=o(xa,"LI",{});var Co=n(Pt);ds=o(Co,"P",{});var nf=n(ds);Xl=p(nf,"Set the format of the dataset to return PyTorch tensors instead of lists:"),nf.forEach(t),Zl=m(Co),u(qt.$$.fragment,Co),Co.forEach(t),xa.forEach(t),Ir=m(e),ga=o(e,"P",{});var lf=n(ga);Ql=p(lf,"Then create a smaller subset of the dataset as previously shown to speed up the fine-tuning:"),lf.forEach(t),Lr=m(e),u(xt.$$.fragment,e),Mr=m(e),V=o(e,"H3",{class:!0});var Do=n(V);ke=o(Do,"A",{id:!0,class:!0,href:!0});var pf=n(ke);gs=o(pf,"SPAN",{});var ff=n(gs);u(zt.$$.fragment,ff),ff.forEach(t),pf.forEach(t),Vl=m(Do),_s=o(Do,"SPAN",{});var hf=n(_s);ei=p(hf,"DataLoader"),hf.forEach(t),Do.forEach(t),Or=m(e),je=o(e,"P",{});var Fo=n(je);ti=p(Fo,"Create a "),vs=o(Fo,"CODE",{});var mf=n(vs);ai=p(mf,"DataLoader"),mf.forEach(t),si=p(Fo," for your training and test datasets so you can iterate over batches of data:"),Fo.forEach(t),Br=m(e),u(Ct.$$.fragment,e),Hr=m(e),_a=o(e,"P",{});var cf=n(_a);ri=p(cf,"Load your model with the number of expected labels:"),cf.forEach(t),Yr=m(e),u(Dt.$$.fragment,e),Wr=m(e),ee=o(e,"H3",{class:!0});var So=n(ee);Ee=o(So,"A",{id:!0,class:!0,href:!0});var uf=n(Ee);$s=o(uf,"SPAN",{});var df=n($s);u(Ft.$$.fragment,df),df.forEach(t),uf.forEach(t),oi=m(So),ws=o(So,"SPAN",{});var gf=n(ws);ni=p(gf,"Optimizer and learning rate scheduler"),gf.forEach(t),So.forEach(t),Rr=m(e),Te=o(e,"P",{});var No=n(Te);li=p(No,"Create an optimizer and learning rate scheduler to fine-tune the model. Let\u2019s use the "),St=o(No,"A",{href:!0,rel:!0});var _f=n(St);ys=o(_f,"CODE",{});var vf=n(ys);ii=p(vf,"AdamW"),vf.forEach(t),_f.forEach(t),pi=p(No," optimizer from PyTorch:"),No.forEach(t),Gr=m(e),u(Nt.$$.fragment,e),Kr=m(e),Ae=o(e,"P",{});var Io=n(Ae);fi=p(Io,"Create the default learning rate scheduler from "),va=o(Io,"A",{href:!0});var $f=n(va);hi=p($f,"Trainer"),$f.forEach(t),mi=p(Io,":"),Io.forEach(t),Ur=m(e),u(It.$$.fragment,e),Jr=m(e),Pe=o(e,"P",{});var Lo=n(Pe);ci=p(Lo,"Lastly, specify "),bs=o(Lo,"CODE",{});var wf=n(bs);ui=p(wf,"device"),wf.forEach(t),di=p(Lo," to use a GPU if you have access to one. 
Otherwise, training on a CPU may take several hours instead of a couple of minutes."),Lo.forEach(t),Xr=m(e),u(Lt.$$.fragment,e),Zr=m(e),u(qe.$$.fragment,e),Qr=m(e),$a=o(e,"P",{});var yf=n($a);gi=p(yf,"Great, now you are ready to train! \u{1F973}"),yf.forEach(t),Vr=m(e),te=o(e,"H3",{class:!0});var Mo=n(te);xe=o(Mo,"A",{id:!0,class:!0,href:!0});var bf=n(xe);ks=o(bf,"SPAN",{});var kf=n(ks);u(Mt.$$.fragment,kf),kf.forEach(t),bf.forEach(t),_i=m(Mo),js=o(Mo,"SPAN",{});var jf=n(js);vi=p(jf,"Training loop"),jf.forEach(t),Mo.forEach(t),eo=m(e),ze=o(e,"P",{});var Oo=n(ze);$i=p(Oo,"To keep track of your training progress, use the "),Ot=o(Oo,"A",{href:!0,rel:!0});var Ef=n(Ot);wi=p(Ef,"tqdm"),Ef.forEach(t),yi=p(Oo," library to add a progress bar over the number of training steps:"),Oo.forEach(t),to=m(e),u(Bt.$$.fragment,e),ao=m(e),ae=o(e,"H3",{class:!0});var Bo=n(ae);Ce=o(Bo,"A",{id:!0,class:!0,href:!0});var Tf=n(Ce);Es=o(Tf,"SPAN",{});var Af=n(Es);u(Ht.$$.fragment,Af),Af.forEach(t),Tf.forEach(t),bi=m(Bo),Ts=o(Bo,"SPAN",{});var Pf=n(Ts);ki=p(Pf,"Metrics"),Pf.forEach(t),Bo.forEach(t),so=m(e),H=o(e,"P",{});var za=n(H);ji=p(za,"Just like how you need to add an evaluation function to "),wa=o(za,"A",{href:!0});var qf=n(wa);Ei=p(qf,"Trainer"),qf.forEach(t),Ti=p(za,", you need to do the same when you write your own training loop. But instead of calculating and reporting the metric at the end of each epoch, this time you will accumulate all the batches with "),Yt=o(za,"A",{href:!0,rel:!0});var xf=n(Yt);As=o(xf,"CODE",{});var zf=n(As);Ai=p(zf,"add_batch"),zf.forEach(t),xf.forEach(t),Pi=p(za," and calculate the metric at the very end."),za.forEach(t),ro=m(e),u(Wt.$$.fragment,e),oo=m(e),ya=o(e,"A",{id:!0}),n(ya).forEach(t),no=m(e),se=o(e,"H2",{class:!0});var Ho=n(se);De=o(Ho,"A",{id:!0,class:!0,href:!0});var Cf=n(De);Ps=o(Cf,"SPAN",{});var Df=n(Ps);u(Rt.$$.fragment,Df),Df.forEach(t),Cf.forEach(t),qi=m(Ho),qs=o(Ho,"SPAN",{});var Ff=n(qs);xi=p(Ff,"Additional resources"),Ff.forEach(t),Ho.forEach(t),lo=m(e),ba=o(e,"P",{});var Sf=n(ba);zi=p(Sf,"For more fine-tuning examples, refer to:"),Sf.forEach(t),io=m(e),Fe=o(e,"UL",{});var Yo=n(Fe);xs=o(Yo,"LI",{});var Nf=n(xs);ka=o(Nf,"P",{});var Mi=n(ka);Gt=o(Mi,"A",{href:!0,rel:!0});var If=n(Gt);Ci=p(If,"\u{1F917} Transformers Examples"),If.forEach(t),Di=p(Mi,` includes scripts to train common NLP tasks in PyTorch and TensorFlow.`),Mi.forEach(t),Nf.forEach(t),Fi=m(Yo),zs=o(Yo,"LI",{});var Lf=n(zs);ja=o(Lf,"P",{});var Oi=n(ja);Ea=o(Oi,"A",{href:!0});var Mf=n(Ea);Si=p(Mf,"\u{1F917} Transformers Notebooks"),Mf.forEach(t),Ni=p(Oi," contains various notebooks on how to fine-tune a model for specific tasks in PyTorch and TensorFlow."),Oi.forEach(t),Lf.forEach(t),Yo.forEach(t),this.h()},h(){f($,"name","hf:doc:metadata"),f($,"content",JSON.stringify(Jf)),f(b,"id","finetune-a-pretrained-model"),f(b,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(b,"href","#finetune-a-pretrained-model"),f(w,"class","relative group"),f(Jt,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer"),f(Xt,"id","data-processing"),f(re,"id","prepare-a-dataset"),f(re,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(re,"href","#prepare-a-dataset"),f(W,"class","relative 
group"),f(He,"href","https://huggingface.co/datasets/yelp_review_full"),f(He,"rel","nofollow"),f(We,"href","https://huggingface.co/docs/datasets/process.html#map"),f(We,"rel","nofollow"),f(Vt,"id","trainer"),f(le,"id","finetune-with-trainer"),f(le,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(le,"href","#finetune-with-trainer"),f(R,"class","relative group"),f(ta,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer"),f(aa,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer"),f(Je,"href","https://huggingface.co/datasets/yelp_review_full#data-fields"),f(Je,"rel","nofollow"),f(fe,"id","training-hyperparameters"),f(fe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(fe,"href","#training-hyperparameters"),f(G,"class","relative group"),f(sa,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.TrainingArguments"),f(Qe,"href","https://huggingface.co/docs/transformers/main_classes/trainer#transformers.TrainingArguments"),f(Qe,"rel","nofollow"),f(he,"id","metrics"),f(he,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(he,"href","#metrics"),f(K,"class","relative group"),f(oa,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer"),f(na,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer"),f(tt,"href","https://huggingface.co/metrics/accuracy"),f(tt,"rel","nofollow"),f(at,"href","https://huggingface.co/docs/datasets/metrics.html"),f(at,"rel","nofollow"),f(ce,"id","trainer"),f(ce,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(ce,"href","#trainer"),f(U,"class","relative group"),f(la,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer"),f(ia,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer.train"),f(pa,"id","keras"),f(ge,"id","finetune-with-keras"),f(ge,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(ge,"href","#finetune-with-keras"),f(J,"class","relative group"),f(_e,"id","convert-dataset-to-tensorflow-format"),f(_e,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(_e,"href","#convert-dataset-to-tensorflow-format"),f(X,"class","relative group"),f(ha,"href","/docs/transformers/pr_16143/en/main_classes/data_collator#transformers.DefaultDataCollator"),f(ct,"href","https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.to_tf_dataset"),f(ct,"rel","nofollow"),f($e,"id","compile-and-fit"),f($e,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f($e,"href","#compile-and-fit"),f(Z,"class","relative 
group"),f(_t,"href","https://keras.io/api/models/model_training_apis/"),f(_t,"rel","nofollow"),f(ca,"id","pytorch_native"),f(ye,"id","finetune-in-native-pytorch"),f(ye,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(ye,"href","#finetune-in-native-pytorch"),f(Q,"class","relative group"),f(ua,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer"),f(ke,"id","dataloader"),f(ke,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(ke,"href","#dataloader"),f(V,"class","relative group"),f(Ee,"id","optimizer-and-learning-rate-scheduler"),f(Ee,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(Ee,"href","#optimizer-and-learning-rate-scheduler"),f(ee,"class","relative group"),f(St,"href","https://pytorch.org/docs/stable/generated/torch.optim.AdamW.html"),f(St,"rel","nofollow"),f(va,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer"),f(xe,"id","training-loop"),f(xe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(xe,"href","#training-loop"),f(te,"class","relative group"),f(Ot,"href","https://tqdm.github.io/"),f(Ot,"rel","nofollow"),f(Ce,"id","metrics"),f(Ce,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(Ce,"href","#metrics"),f(ae,"class","relative group"),f(wa,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer"),f(Yt,"href","https://huggingface.co/docs/datasets/package_reference/main_classes.html?highlight=add_batch#datasets.Metric.add_batch"),f(Yt,"rel","nofollow"),f(ya,"id","additional-resources"),f(De,"id","additional-resources"),f(De,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(De,"href","#additional-resources"),f(se,"class","relative 
group"),f(Gt,"href","https://github.com/huggingface/transformers/tree/master/examples"),f(Gt,"rel","nofollow"),f(Ea,"href","notebooks")},m(e,s){a(document.head,$),l(e,k,s),l(e,w,s),a(w,b),a(b,T),d(j,T,null),a(w,F),a(w,P),a(P,E),l(e,A,s),d(z,e,s),l(e,Le,s),l(e,Ut,s),a(Ut,Ro),l(e,Fs,s),l(e,I,s),a(I,Me),a(Me,Go),a(Me,Jt),a(Jt,Ko),a(Me,Uo),a(I,Jo),a(I,Ca),a(Ca,Xo),a(I,Zo),a(I,Da),a(Da,Qo),l(e,Ss,s),l(e,Xt,s),l(e,Ns,s),l(e,W,s),a(W,re),a(re,Fa),d(Oe,Fa,null),a(W,Vo),a(W,Sa),a(Sa,en),l(e,Is,s),d(Be,e,s),l(e,Ls,s),l(e,Zt,s),a(Zt,tn),l(e,Ms,s),l(e,oe,s),a(oe,an),a(oe,He),a(He,sn),a(oe,rn),l(e,Os,s),d(Ye,e,s),l(e,Bs,s),l(e,ne,s),a(ne,on),a(ne,We),a(We,Na),a(Na,nn),a(ne,ln),l(e,Hs,s),d(Re,e,s),l(e,Ys,s),l(e,Qt,s),a(Qt,pn),l(e,Ws,s),d(Ge,e,s),l(e,Rs,s),l(e,Vt,s),l(e,Gs,s),l(e,R,s),a(R,le),a(le,Ia),d(Ke,Ia,null),a(R,fn),a(R,ea),a(ea,hn),a(ea,La),a(La,mn),l(e,Ks,s),d(Ue,e,s),l(e,Us,s),l(e,L,s),a(L,cn),a(L,ta),a(ta,un),a(L,dn),a(L,aa),a(aa,gn),a(L,_n),l(e,Js,s),l(e,ie,s),a(ie,vn),a(ie,Je),a(Je,$n),a(ie,wn),l(e,Xs,s),d(Xe,e,s),l(e,Zs,s),d(pe,e,s),l(e,Qs,s),l(e,G,s),a(G,fe),a(fe,Ma),d(Ze,Ma,null),a(G,yn),a(G,Oa),a(Oa,bn),l(e,Vs,s),l(e,M,s),a(M,kn),a(M,sa),a(sa,jn),a(M,En),a(M,Qe),a(Qe,Tn),a(M,An),l(e,er,s),l(e,ra,s),a(ra,Pn),l(e,tr,s),d(Ve,e,s),l(e,ar,s),l(e,K,s),a(K,he),a(he,Ba),d(et,Ba,null),a(K,qn),a(K,Ha),a(Ha,xn),l(e,sr,s),l(e,q,s),a(q,oa),a(oa,zn),a(q,Cn),a(q,na),a(na,Dn),a(q,Fn),a(q,tt),a(tt,Ya),a(Ya,Sn),a(q,Nn),a(q,Wa),a(Wa,In),a(q,Ln),a(q,at),a(at,Mn),a(q,On),l(e,rr,s),d(st,e,s),l(e,or,s),l(e,C,s),a(C,Bn),a(C,Ra),a(Ra,Hn),a(C,Yn),a(C,Ga),a(Ga,Wn),a(C,Rn),a(C,Ka),a(Ka,Gn),a(C,Kn),l(e,nr,s),d(rt,e,s),l(e,lr,s),l(e,me,s),a(me,Un),a(me,Ua),a(Ua,Jn),a(me,Xn),l(e,ir,s),d(ot,e,s),l(e,pr,s),l(e,U,s),a(U,ce),a(ce,Ja),d(nt,Ja,null),a(U,Zn),a(U,Xa),a(Xa,Qn),l(e,fr,s),l(e,ue,s),a(ue,Vn),a(ue,la),a(la,el),a(ue,tl),l(e,hr,s),d(lt,e,s),l(e,mr,s),l(e,de,s),a(de,al),a(de,ia),a(ia,sl),a(de,rl),l(e,cr,s),d(it,e,s),l(e,ur,s),l(e,pa,s),l(e,dr,s),l(e,J,s),a(J,ge),a(ge,Za),d(pt,Za,null),a(J,ol),a(J,Qa),a(Qa,nl),l(e,gr,s),d(ft,e,s),l(e,_r,s),l(e,fa,s),a(fa,ll),l(e,vr,s),l(e,X,s),a(X,_e),a(_e,Va),d(ht,Va,null),a(X,il),a(X,es),a(es,pl),l(e,$r,s),l(e,O,s),a(O,fl),a(O,ha),a(ha,hl),a(O,ml),a(O,ts),a(ts,cl),a(O,ul),l(e,wr,s),d(mt,e,s),l(e,yr,s),d(ve,e,s),l(e,br,s),l(e,D,s),a(D,dl),a(D,ct),a(ct,as),a(as,gl),a(D,_l),a(D,ss),a(ss,vl),a(D,$l),a(D,rs),a(rs,wl),a(D,yl),l(e,kr,s),d(ut,e,s),l(e,jr,s),l(e,Z,s),a(Z,$e),a($e,os),d(dt,os,null),a(Z,bl),a(Z,ns),a(ns,kl),l(e,Er,s),l(e,ma,s),a(ma,jl),l(e,Tr,s),d(gt,e,s),l(e,Ar,s),l(e,we,s),a(we,El),a(we,_t),a(_t,ls),a(ls,Tl),a(we,Al),l(e,Pr,s),d(vt,e,s),l(e,qr,s),l(e,ca,s),l(e,xr,s),l(e,Q,s),a(Q,ye),a(ye,is),d($t,is,null),a(Q,Pl),a(Q,ps),a(ps,ql),l(e,zr,s),d(wt,e,s),l(e,Cr,s),l(e,yt,s),a(yt,ua),a(ua,xl),a(yt,zl),l(e,Dr,s),l(e,da,s),a(da,Cl),l(e,Fr,s),d(bt,e,s),l(e,Sr,s),l(e,be,s),a(be,Dl),a(be,fs),a(fs,Fl),a(be,Sl),l(e,Nr,s),l(e,B,s),a(B,kt),a(kt,jt),a(jt,Nl),a(jt,hs),a(hs,Il),a(jt,Ll),a(kt,Ml),d(Et,kt,null),a(B,Ol),a(B,Tt),a(Tt,S),a(S,Bl),a(S,ms),a(ms,Hl),a(S,Yl),a(S,cs),a(cs,Wl),a(S,Rl),a(S,us),a(us,Gl),a(S,Kl),a(Tt,Ul),d(At,Tt,null),a(B,Jl),a(B,Pt),a(Pt,ds),a(ds,Xl),a(Pt,Zl),d(qt,Pt,null),l(e,Ir,s),l(e,ga,s),a(ga,Ql),l(e,Lr,s),d(xt,e,s),l(e,Mr,s),l(e,V,s),a(V,ke),a(ke,gs),d(zt,gs,null),a(V,Vl),a(V,_s),a(_s,ei),l(e,Or,s),l(e,je,s),a(je,ti),a(je,vs),a(vs,ai),a(je,si),l(e,Br,s),d(Ct,e,s),l(e,Hr,s),l(e,_a,s),a(_a,ri),l(e,Yr,s),d(Dt,e,s),l(e,Wr,s),l(e,ee,s),a(ee,Ee),a(Ee,$s),d(Ft,$s,null),a(ee,oi),a(ee,ws),a(ws,ni),l(e,Rr,s),l(e,Te,s),a(Te,li),a(Te,St),a(St,ys),a(ys,ii),a(Te,pi),l(e,Gr,s),d(Nt,
e,s),l(e,Kr,s),l(e,Ae,s),a(Ae,fi),a(Ae,va),a(va,hi),a(Ae,mi),l(e,Ur,s),d(It,e,s),l(e,Jr,s),l(e,Pe,s),a(Pe,ci),a(Pe,bs),a(bs,ui),a(Pe,di),l(e,Xr,s),d(Lt,e,s),l(e,Zr,s),d(qe,e,s),l(e,Qr,s),l(e,$a,s),a($a,gi),l(e,Vr,s),l(e,te,s),a(te,xe),a(xe,ks),d(Mt,ks,null),a(te,_i),a(te,js),a(js,vi),l(e,eo,s),l(e,ze,s),a(ze,$i),a(ze,Ot),a(Ot,wi),a(ze,yi),l(e,to,s),d(Bt,e,s),l(e,ao,s),l(e,ae,s),a(ae,Ce),a(Ce,Es),d(Ht,Es,null),a(ae,bi),a(ae,Ts),a(Ts,ki),l(e,so,s),l(e,H,s),a(H,ji),a(H,wa),a(wa,Ei),a(H,Ti),a(H,Yt),a(Yt,As),a(As,Ai),a(H,Pi),l(e,ro,s),d(Wt,e,s),l(e,oo,s),l(e,ya,s),l(e,no,s),l(e,se,s),a(se,De),a(De,Ps),d(Rt,Ps,null),a(se,qi),a(se,qs),a(qs,xi),l(e,lo,s),l(e,ba,s),a(ba,zi),l(e,io,s),l(e,Fe,s),a(Fe,xs),a(xs,ka),a(ka,Gt),a(Gt,Ci),a(ka,Di),a(Fe,Fi),a(Fe,zs),a(zs,ja),a(ja,Ea),a(Ea,Si),a(ja,Ni),po=!0},p(e,[s]){const Kt={};s&2&&(Kt.$$scope={dirty:s,ctx:e}),pe.$set(Kt);const Cs={};s&2&&(Cs.$$scope={dirty:s,ctx:e}),ve.$set(Cs);const Ds={};s&2&&(Ds.$$scope={dirty:s,ctx:e}),qe.$set(Ds)},i(e){po||(g(j.$$.fragment,e),g(z.$$.fragment,e),g(Oe.$$.fragment,e),g(Be.$$.fragment,e),g(Ye.$$.fragment,e),g(Re.$$.fragment,e),g(Ge.$$.fragment,e),g(Ke.$$.fragment,e),g(Ue.$$.fragment,e),g(Xe.$$.fragment,e),g(pe.$$.fragment,e),g(Ze.$$.fragment,e),g(Ve.$$.fragment,e),g(et.$$.fragment,e),g(st.$$.fragment,e),g(rt.$$.fragment,e),g(ot.$$.fragment,e),g(nt.$$.fragment,e),g(lt.$$.fragment,e),g(it.$$.fragment,e),g(pt.$$.fragment,e),g(ft.$$.fragment,e),g(ht.$$.fragment,e),g(mt.$$.fragment,e),g(ve.$$.fragment,e),g(ut.$$.fragment,e),g(dt.$$.fragment,e),g(gt.$$.fragment,e),g(vt.$$.fragment,e),g($t.$$.fragment,e),g(wt.$$.fragment,e),g(bt.$$.fragment,e),g(Et.$$.fragment,e),g(At.$$.fragment,e),g(qt.$$.fragment,e),g(xt.$$.fragment,e),g(zt.$$.fragment,e),g(Ct.$$.fragment,e),g(Dt.$$.fragment,e),g(Ft.$$.fragment,e),g(Nt.$$.fragment,e),g(It.$$.fragment,e),g(Lt.$$.fragment,e),g(qe.$$.fragment,e),g(Mt.$$.fragment,e),g(Bt.$$.fragment,e),g(Ht.$$.fragment,e),g(Wt.$$.fragment,e),g(Rt.$$.fragment,e),po=!0)},o(e){_(j.$$.fragment,e),_(z.$$.fragment,e),_(Oe.$$.fragment,e),_(Be.$$.fragment,e),_(Ye.$$.fragment,e),_(Re.$$.fragment,e),_(Ge.$$.fragment,e),_(Ke.$$.fragment,e),_(Ue.$$.fragment,e),_(Xe.$$.fragment,e),_(pe.$$.fragment,e),_(Ze.$$.fragment,e),_(Ve.$$.fragment,e),_(et.$$.fragment,e),_(st.$$.fragment,e),_(rt.$$.fragment,e),_(ot.$$.fragment,e),_(nt.$$.fragment,e),_(lt.$$.fragment,e),_(it.$$.fragment,e),_(pt.$$.fragment,e),_(ft.$$.fragment,e),_(ht.$$.fragment,e),_(mt.$$.fragment,e),_(ve.$$.fragment,e),_(ut.$$.fragment,e),_(dt.$$.fragment,e),_(gt.$$.fragment,e),_(vt.$$.fragment,e),_($t.$$.fragment,e),_(wt.$$.fragment,e),_(bt.$$.fragment,e),_(Et.$$.fragment,e),_(At.$$.fragment,e),_(qt.$$.fragment,e),_(xt.$$.fragment,e),_(zt.$$.fragment,e),_(Ct.$$.fragment,e),_(Dt.$$.fragment,e),_(Ft.$$.fragment,e),_(Nt.$$.fragment,e),_(It.$$.fragment,e),_(Lt.$$.fragment,e),_(qe.$$.fragment,e),_(Mt.$$.fragment,e),_(Bt.$$.fragment,e),_(Ht.$$.fragment,e),_(Wt.$$.fragment,e),_(Rt.$$.fragment,e),po=!1},d(e){t($),e&&t(k),e&&t(w),v(j),e&&t(A),v(z,e),e&&t(Le),e&&t(Ut),e&&t(Fs),e&&t(I),e&&t(Ss),e&&t(Xt),e&&t(Ns),e&&t(W),v(Oe),e&&t(Is),v(Be,e),e&&t(Ls),e&&t(Zt),e&&t(Ms),e&&t(oe),e&&t(Os),v(Ye,e),e&&t(Bs),e&&t(ne),e&&t(Hs),v(Re,e),e&&t(Ys),e&&t(Qt),e&&t(Ws),v(Ge,e),e&&t(Rs),e&&t(Vt),e&&t(Gs),e&&t(R),v(Ke),e&&t(Ks),v(Ue,e),e&&t(Us),e&&t(L),e&&t(Js),e&&t(ie),e&&t(Xs),v(Xe,e),e&&t(Zs),v(pe,e),e&&t(Qs),e&&t(G),v(Ze),e&&t(Vs),e&&t(M),e&&t(er),e&&t(ra),e&&t(tr),v(Ve,e),e&&t(ar),e&&t(K),v(et),e&&t(sr),e&&t(q),e&&t(rr),v(st,e),e&&t(or),e&&t(C),e&&t(nr),v(rt,e),e&&t(lr),e&&t(me),e&&t(ir),v(o
t,e),e&&t(pr),e&&t(U),v(nt),e&&t(fr),e&&t(ue),e&&t(hr),v(lt,e),e&&t(mr),e&&t(de),e&&t(cr),v(it,e),e&&t(ur),e&&t(pa),e&&t(dr),e&&t(J),v(pt),e&&t(gr),v(ft,e),e&&t(_r),e&&t(fa),e&&t(vr),e&&t(X),v(ht),e&&t($r),e&&t(O),e&&t(wr),v(mt,e),e&&t(yr),v(ve,e),e&&t(br),e&&t(D),e&&t(kr),v(ut,e),e&&t(jr),e&&t(Z),v(dt),e&&t(Er),e&&t(ma),e&&t(Tr),v(gt,e),e&&t(Ar),e&&t(we),e&&t(Pr),v(vt,e),e&&t(qr),e&&t(ca),e&&t(xr),e&&t(Q),v($t),e&&t(zr),v(wt,e),e&&t(Cr),e&&t(yt),e&&t(Dr),e&&t(da),e&&t(Fr),v(bt,e),e&&t(Sr),e&&t(be),e&&t(Nr),e&&t(B),v(Et),v(At),v(qt),e&&t(Ir),e&&t(ga),e&&t(Lr),v(xt,e),e&&t(Mr),e&&t(V),v(zt),e&&t(Or),e&&t(je),e&&t(Br),v(Ct,e),e&&t(Hr),e&&t(_a),e&&t(Yr),v(Dt,e),e&&t(Wr),e&&t(ee),v(Ft),e&&t(Rr),e&&t(Te),e&&t(Gr),v(Nt,e),e&&t(Kr),e&&t(Ae),e&&t(Ur),v(It,e),e&&t(Jr),e&&t(Pe),e&&t(Xr),v(Lt,e),e&&t(Zr),v(qe,e),e&&t(Qr),e&&t($a),e&&t(Vr),e&&t(te),v(Mt),e&&t(eo),e&&t(ze),e&&t(to),v(Bt,e),e&&t(ao),e&&t(ae),v(Ht),e&&t(so),e&&t(H),e&&t(ro),v(Wt,e),e&&t(oo),e&&t(ya),e&&t(no),e&&t(se),v(Rt),e&&t(lo),e&&t(ba),e&&t(io),e&&t(Fe)}}}const Jf={local:"finetune-a-pretrained-model",sections:[{local:"prepare-a-dataset",title:"Prepare a dataset"},{local:"finetune-with-trainer",sections:[{local:"training-hyperparameters",title:"Training hyperparameters"},{local:"metrics",title:"Metrics"},{local:"trainer",title:"Trainer"}],title:"Fine-tune with `Trainer`"},{local:"finetune-with-keras",sections:[{local:"convert-dataset-to-tensorflow-format",title:"Convert dataset to TensorFlow format"},{local:"compile-and-fit",title:"Compile and fit"}],title:"Fine-tune with Keras"},{local:"finetune-in-native-pytorch",sections:[{local:"dataloader",title:"DataLoader"},{local:"optimizer-and-learning-rate-scheduler",title:"Optimizer and learning rate scheduler"},{local:"training-loop",title:"Training loop"},{local:"metrics",title:"Metrics"}],title:"Fine-tune in native PyTorch"},{local:"additional-resources",title:"Additional resources"}],title:"Fine-tune a pretrained model"};function Xf(Y,$,k){let{fw:w}=$;return Y.$$set=b=>{"fw"in b&&k(0,w=b.fw)},[w]}class rh extends Of{constructor($){super();Bf(this,$,Xf,Uf,Hf,{fw:0})}}export{rh as default,Jf as metadata};
251
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages/debugging.mdx-9163dbf4.js
import{S as Vr,i as Xr,s as Zr,e as r,k as u,w as m,t,M as eo,c as o,d as s,m as c,a as i,x as b,h as n,b as y,F as a,g as p,y as w,q as j,o as _,B as v}from"../chunks/vendor-4833417e.js";import{T as Al}from"../chunks/Tip-fffd6df1.js";import{I as it}from"../chunks/IconCopyLink-4b81c553.js";import{C as P}from"../chunks/CodeBlock-6a3d1b46.js";import"../chunks/CopyButton-dacfbfaf.js";function so(I){let h,D;return{c(){h=r("p"),D=t("This feature is currently available for PyTorch-only.")},l(d){h=o(d,"P",{});var f=i(h);D=n(f,"This feature is currently available for PyTorch-only."),f.forEach(s)},m(d,f){p(d,h,f),a(h,D)},d(d){d&&s(h)}}}function ao(I){let h,D,d,f,C;return{c(){h=r("p"),D=t("For multi-GPU training it requires DDP ("),d=r("code"),f=t("torch.distributed.launch"),C=t(").")},l($){h=o($,"P",{});var E=i(h);D=n(E,"For multi-GPU training it requires DDP ("),d=o(E,"CODE",{});var F=i(d);f=n(F,"torch.distributed.launch"),F.forEach(s),C=n(E,")."),E.forEach(s)},m($,E){p($,h,E),a(h,D),a(h,d),a(d,f),a(h,C)},d($){$&&s(h)}}}function to(I){let h,D,d,f,C;return{c(){h=r("p"),D=t("This feature can be used with any "),d=r("code"),f=t("nn.Module"),C=t("-based model.")},l($){h=o($,"P",{});var E=i(h);D=n(E,"This feature can be used with any "),d=o(E,"CODE",{});var F=i(d);f=n(F,"nn.Module"),F.forEach(s),C=n(E,"-based model."),E.forEach(s)},m($,E){p($,h,E),a(h,D),a(h,d),a(d,f),a(h,C)},d($){$&&s(h)}}}function no(I){let h,D,d,f,C,$,E,F,ut,zs,M,B,Xe,ce,ct,Ze,ht,Ws,K,dt,es,ft,mt,Js,he,Qs,xe,bt,Vs,de,Xs,Te,wt,Zs,Le,jt,ea,Ae,_t,sa,z,vt,ss,yt,gt,aa,fe,ta,Ne,$t,na,q,W,as,me,Dt,ts,Et,la,J,ra,Q,oa,V,pa,T,kt,ns,Ot,Pt,ls,Ct,xt,rs,Tt,Lt,ia,X,At,Ge,Nt,Gt,ua,be,ca,S,Ut,os,Ft,St,Ue,Rt,It,ha,Fe,Mt,da,we,fa,x,Se,qt,Ht,ps,Yt,Bt,is,Kt,zt,us,Wt,Jt,ma,je,ba,Re,Qt,wa,g,Vt,cs,Xt,Zt,hs,en,sn,ds,an,tn,fs,nn,ln,ms,rn,on,bs,pn,un,ws,cn,hn,ja,Z,dn,js,fn,mn,_a,Ie,bn,va,_e,ya,L,wn,_s,jn,_n,vs,vn,yn,ys,gn,$n,ga,Me,Dn,$a,ve,Da,k,En,gs,kn,On,$s,Pn,Cn,Ds,xn,Tn,Es,Ln,An,ks,Nn,Gn,Ea,A,Un,Os,Fn,Sn,Ps,Rn,In,Cs,Mn,qn,ka,qe,Hn,Oa,ee,Yn,xs,Bn,Kn,Pa,ye,Ca,se,zn,Ts,Wn,Jn,xa,ae,Qn,Ls,Vn,Xn,Ta,N,Zn,As,el,sl,Ns,al,tl,Gs,nl,ll,La,ge,Aa,R,rl,Us,ol,pl,Fs,il,ul,Na,$e,Ga,G,cl,Ss,hl,dl,Rs,fl,ml,Is,bl,wl,Ua,te,jl,Ms,_l,vl,Fa,He,yl,Sa,De,Ra,H,ne,qs,Ee,gl,Hs,$l,Ia,Ye,Dl,Ma,le,El,Ys,kl,Ol,qa,ke,Ha,Be,Pl,Ya,Ke,Cl,Ba,ze,xl,Ka,Oe,za,We,Tl,Wa,Je,Ll,Ja,Pe,Qa;return $=new it({}),ce=new it({}),he=new P({props:{code:"wget https://raw.githubusercontent.com/huggingface/transformers/master/scripts/distributed/torch-distributed-gpu-test.py",highlighted:"wget https://raw.githubusercontent.com/huggingface/transformers/master/scripts/distributed/torch-distributed-gpu-test.py"}}),de=new P({props:{code:"python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py",highlighted:"python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py"}}),fe=new P({props:{code:"NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py",highlighted:"NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py"}}),me=new it({}),J=new Al({props:{$$slots:{default:[so]},$$scope:{ctx:I}}}),Q=new Al({props:{$$slots:{default:[ao]},$$scope:{ctx:I}}}),V=new Al({props:{$$slots:{default:[to]},$$scope:{ctx:I}}}),be=new P({props:{code:"--debug underflow_overflow",highlighted:"--debug underflow_overflow"}}),we=new P({props:{code:`from .debug_utils import DebugUnderflowOverflow debug_overflow = DebugUnderflowOverflow(model)`,highlighted:`<span 
class="hljs-keyword">from</span> .debug_utils <span class="hljs-keyword">import</span> DebugUnderflowOverflow debug_overflow = DebugUnderflowOverflow(model)`}}),je=new P({props:{code:`Detected inf/nan during batch_number=0 Last 21 forward frames: abs min abs max metadata encoder.block.1.layer.1.DenseReluDense.dropout Dropout 0.00e+00 2.57e+02 input[0] 0.00e+00 2.85e+02 output [...] encoder.block.2.layer.0 T5LayerSelfAttention 6.78e-04 3.15e+03 input[0] 2.65e-04 3.42e+03 output[0] None output[1] 2.25e-01 1.00e+04 output[2] encoder.block.2.layer.1.layer_norm T5LayerNorm 8.69e-02 4.18e-01 weight 2.65e-04 3.42e+03 input[0] 1.79e-06 4.65e+00 output encoder.block.2.layer.1.DenseReluDense.wi_0 Linear 2.17e-07 4.50e+00 weight 1.79e-06 4.65e+00 input[0] 2.68e-06 3.70e+01 output encoder.block.2.layer.1.DenseReluDense.wi_1 Linear 8.08e-07 2.66e+01 weight 1.79e-06 4.65e+00 input[0] 1.27e-04 2.37e+02 output encoder.block.2.layer.1.DenseReluDense.dropout Dropout 0.00e+00 8.76e+03 input[0] 0.00e+00 9.74e+03 output encoder.block.2.layer.1.DenseReluDense.wo Linear 1.01e-06 6.44e+00 weight 0.00e+00 9.74e+03 input[0] 3.18e-04 6.27e+04 output encoder.block.2.layer.1.DenseReluDense T5DenseGatedGeluDense 1.79e-06 4.65e+00 input[0] 3.18e-04 6.27e+04 output encoder.block.2.layer.1.dropout Dropout 3.18e-04 6.27e+04 input[0] 0.00e+00 inf output`,highlighted:`<span class="hljs-attribute">Detected</span> inf/nan during batch_number=<span class="hljs-number">0</span> <span class="hljs-attribute">Last</span> <span class="hljs-number">21</span> forward frames: <span class="hljs-attribute">abs</span> min abs max metadata <span class="hljs-attribute">encoder</span>.block.<span class="hljs-number">1</span>.layer.<span class="hljs-number">1</span>.DenseReluDense.dropout Dropout <span class="hljs-attribute">0</span>.<span class="hljs-number">00</span>e+<span class="hljs-number">00</span> <span class="hljs-number">2</span>.<span class="hljs-number">57</span>e+<span class="hljs-number">02</span> input[<span class="hljs-number">0</span>] <span class="hljs-attribute">0</span>.<span class="hljs-number">00</span>e+<span class="hljs-number">00</span> <span class="hljs-number">2</span>.<span class="hljs-number">85</span>e+<span class="hljs-number">02</span> output<span class="hljs-meta"> [...]</span> <span class="hljs-attribute">encoder</span>.block.<span class="hljs-number">2</span>.layer.<span class="hljs-number">0</span> T5LayerSelfAttention <span class="hljs-attribute">6</span>.<span class="hljs-number">78</span>e-<span class="hljs-number">04</span> <span class="hljs-number">3</span>.<span class="hljs-number">15</span>e+<span class="hljs-number">03</span> input[<span class="hljs-number">0</span>] <span class="hljs-attribute">2</span>.<span class="hljs-number">65</span>e-<span class="hljs-number">04</span> <span class="hljs-number">3</span>.<span class="hljs-number">42</span>e+<span class="hljs-number">03</span> output[<span class="hljs-number">0</span>] <span class="hljs-attribute">None</span> output[<span class="hljs-number">1</span>] <span class="hljs-attribute">2</span>.<span class="hljs-number">25</span>e-<span class="hljs-number">01</span> <span class="hljs-number">1</span>.<span class="hljs-number">00</span>e+<span class="hljs-number">04</span> output[<span class="hljs-number">2</span>] <span class="hljs-attribute">encoder</span>.block.<span class="hljs-number">2</span>.layer.<span class="hljs-number">1</span>.layer_norm T5LayerNorm <span class="hljs-attribute">8</span>.<span class="hljs-number">69</span>e-<span 
class="hljs-number">02</span> <span class="hljs-number">4</span>.<span class="hljs-number">18</span>e-<span class="hljs-number">01</span> weight <span class="hljs-attribute">2</span>.<span class="hljs-number">65</span>e-<span class="hljs-number">04</span> <span class="hljs-number">3</span>.<span class="hljs-number">42</span>e+<span class="hljs-number">03</span> input[<span class="hljs-number">0</span>] <span class="hljs-attribute">1</span>.<span class="hljs-number">79</span>e-<span class="hljs-number">06</span> <span class="hljs-number">4</span>.<span class="hljs-number">65</span>e+<span class="hljs-number">00</span> output <span class="hljs-attribute">encoder</span>.block.<span class="hljs-number">2</span>.layer.<span class="hljs-number">1</span>.DenseReluDense.wi_0 Linear <span class="hljs-attribute">2</span>.<span class="hljs-number">17</span>e-<span class="hljs-number">07</span> <span class="hljs-number">4</span>.<span class="hljs-number">50</span>e+<span class="hljs-number">00</span> weight <span class="hljs-attribute">1</span>.<span class="hljs-number">79</span>e-<span class="hljs-number">06</span> <span class="hljs-number">4</span>.<span class="hljs-number">65</span>e+<span class="hljs-number">00</span> input[<span class="hljs-number">0</span>] <span class="hljs-attribute">2</span>.<span class="hljs-number">68</span>e-<span class="hljs-number">06</span> <span class="hljs-number">3</span>.<span class="hljs-number">70</span>e+<span class="hljs-number">01</span> output <span class="hljs-attribute">encoder</span>.block.<span class="hljs-number">2</span>.layer.<span class="hljs-number">1</span>.DenseReluDense.wi_1 Linear <span class="hljs-attribute">8</span>.<span class="hljs-number">08</span>e-<span class="hljs-number">07</span> <span class="hljs-number">2</span>.<span class="hljs-number">66</span>e+<span class="hljs-number">01</span> weight <span class="hljs-attribute">1</span>.<span class="hljs-number">79</span>e-<span class="hljs-number">06</span> <span class="hljs-number">4</span>.<span class="hljs-number">65</span>e+<span class="hljs-number">00</span> input[<span class="hljs-number">0</span>] <span class="hljs-attribute">1</span>.<span class="hljs-number">27</span>e-<span class="hljs-number">04</span> <span class="hljs-number">2</span>.<span class="hljs-number">37</span>e+<span class="hljs-number">02</span> output <span class="hljs-attribute">encoder</span>.block.<span class="hljs-number">2</span>.layer.<span class="hljs-number">1</span>.DenseReluDense.dropout Dropout <span class="hljs-attribute">0</span>.<span class="hljs-number">00</span>e+<span class="hljs-number">00</span> <span class="hljs-number">8</span>.<span class="hljs-number">76</span>e+<span class="hljs-number">03</span> input[<span class="hljs-number">0</span>] <span class="hljs-attribute">0</span>.<span class="hljs-number">00</span>e+<span class="hljs-number">00</span> <span class="hljs-number">9</span>.<span class="hljs-number">74</span>e+<span class="hljs-number">03</span> output <span class="hljs-attribute">encoder</span>.block.<span class="hljs-number">2</span>.layer.<span class="hljs-number">1</span>.DenseReluDense.wo Linear <span class="hljs-attribute">1</span>.<span class="hljs-number">01</span>e-<span class="hljs-number">06</span> <span class="hljs-number">6</span>.<span class="hljs-number">44</span>e+<span class="hljs-number">00</span> weight <span class="hljs-attribute">0</span>.<span class="hljs-number">00</span>e+<span class="hljs-number">00</span> <span class="hljs-number">9</span>.<span 
class="hljs-number">74</span>e+<span class="hljs-number">03</span> input[<span class="hljs-number">0</span>] <span class="hljs-attribute">3</span>.<span class="hljs-number">18</span>e-<span class="hljs-number">04</span> <span class="hljs-number">6</span>.<span class="hljs-number">27</span>e+<span class="hljs-number">04</span> output <span class="hljs-attribute">encoder</span>.block.<span class="hljs-number">2</span>.layer.<span class="hljs-number">1</span>.DenseReluDense T5DenseGatedGeluDense <span class="hljs-attribute">1</span>.<span class="hljs-number">79</span>e-<span class="hljs-number">06</span> <span class="hljs-number">4</span>.<span class="hljs-number">65</span>e+<span class="hljs-number">00</span> input[<span class="hljs-number">0</span>] <span class="hljs-attribute">3</span>.<span class="hljs-number">18</span>e-<span class="hljs-number">04</span> <span class="hljs-number">6</span>.<span class="hljs-number">27</span>e+<span class="hljs-number">04</span> output <span class="hljs-attribute">encoder</span>.block.<span class="hljs-number">2</span>.layer.<span class="hljs-number">1</span>.dropout Dropout <span class="hljs-attribute">3</span>.<span class="hljs-number">18</span>e-<span class="hljs-number">04</span> <span class="hljs-number">6</span>.<span class="hljs-number">27</span>e+<span class="hljs-number">04</span> input[<span class="hljs-number">0</span>] <span class="hljs-attribute">0</span>.<span class="hljs-number">00</span>e+<span class="hljs-number">00</span> inf output`}}),_e=new P({props:{code:` encoder.block.2.layer.1.layer_norm T5LayerNorm 8.69e-02 4.18e-01 weight 2.65e-04 3.42e+03 input[0] 1.79e-06 4.65e+00 output`,highlighted:` <span class="hljs-attribute">encoder</span>.block.<span class="hljs-number">2</span>.layer.<span class="hljs-number">1</span>.layer_norm T5LayerNorm <span class="hljs-attribute">8</span>.<span class="hljs-number">69</span>e-<span class="hljs-number">02</span> <span class="hljs-number">4</span>.<span class="hljs-number">18</span>e-<span class="hljs-number">01</span> weight <span class="hljs-attribute">2</span>.<span class="hljs-number">65</span>e-<span class="hljs-number">04</span> <span class="hljs-number">3</span>.<span class="hljs-number">42</span>e+<span class="hljs-number">03</span> input[<span class="hljs-number">0</span>] <span class="hljs-attribute">1</span>.<span class="hljs-number">79</span>e-<span class="hljs-number">06</span> <span class="hljs-number">4</span>.<span class="hljs-number">65</span>e+<span class="hljs-number">00</span> output`}}),ve=new P({props:{code:`Detected inf/nan during batch_number=0 Last 21 forward frames: abs min abs max metadata [...] 
encoder.block.2.layer.1.DenseReluDense.wi_0 Linear 2.17e-07 4.50e+00 weight 1.79e-06 4.65e+00 input[0] 2.68e-06 3.70e+01 output encoder.block.2.layer.1.DenseReluDense.wi_1 Linear 8.08e-07 2.66e+01 weight 1.79e-06 4.65e+00 input[0] 1.27e-04 2.37e+02 output encoder.block.2.layer.1.DenseReluDense.wo Linear 1.01e-06 6.44e+00 weight 0.00e+00 9.74e+03 input[0] 3.18e-04 6.27e+04 output encoder.block.2.layer.1.DenseReluDense T5DenseGatedGeluDense 1.79e-06 4.65e+00 input[0] 3.18e-04 6.27e+04 output encoder.block.2.layer.1.dropout Dropout 3.18e-04 6.27e+04 input[0] 0.00e+00 inf output`,highlighted:`<span class="hljs-attribute">Detected</span> inf/nan during batch_number=<span class="hljs-number">0</span> <span class="hljs-attribute">Last</span> <span class="hljs-number">21</span> forward frames: <span class="hljs-attribute">abs</span> min abs max metadata<span class="hljs-meta"> [...]</span> <span class="hljs-attribute">encoder</span>.block.<span class="hljs-number">2</span>.layer.<span class="hljs-number">1</span>.DenseReluDense.wi_0 Linear <span class="hljs-attribute">2</span>.<span class="hljs-number">17</span>e-<span class="hljs-number">07</span> <span class="hljs-number">4</span>.<span class="hljs-number">50</span>e+<span class="hljs-number">00</span> weight <span class="hljs-attribute">1</span>.<span class="hljs-number">79</span>e-<span class="hljs-number">06</span> <span class="hljs-number">4</span>.<span class="hljs-number">65</span>e+<span class="hljs-number">00</span> input[<span class="hljs-number">0</span>] <span class="hljs-attribute">2</span>.<span class="hljs-number">68</span>e-<span class="hljs-number">06</span> <span class="hljs-number">3</span>.<span class="hljs-number">70</span>e+<span class="hljs-number">01</span> output <span class="hljs-attribute">encoder</span>.block.<span class="hljs-number">2</span>.layer.<span class="hljs-number">1</span>.DenseReluDense.wi_1 Linear <span class="hljs-attribute">8</span>.<span class="hljs-number">08</span>e-<span class="hljs-number">07</span> <span class="hljs-number">2</span>.<span class="hljs-number">66</span>e+<span class="hljs-number">01</span> weight <span class="hljs-attribute">1</span>.<span class="hljs-number">79</span>e-<span class="hljs-number">06</span> <span class="hljs-number">4</span>.<span class="hljs-number">65</span>e+<span class="hljs-number">00</span> input[<span class="hljs-number">0</span>] <span class="hljs-attribute">1</span>.<span class="hljs-number">27</span>e-<span class="hljs-number">04</span> <span class="hljs-number">2</span>.<span class="hljs-number">37</span>e+<span class="hljs-number">02</span> output <span class="hljs-attribute">encoder</span>.block.<span class="hljs-number">2</span>.layer.<span class="hljs-number">1</span>.DenseReluDense.wo Linear <span class="hljs-attribute">1</span>.<span class="hljs-number">01</span>e-<span class="hljs-number">06</span> <span class="hljs-number">6</span>.<span class="hljs-number">44</span>e+<span class="hljs-number">00</span> weight <span class="hljs-attribute">0</span>.<span class="hljs-number">00</span>e+<span class="hljs-number">00</span> <span class="hljs-number">9</span>.<span class="hljs-number">74</span>e+<span class="hljs-number">03</span> input[<span class="hljs-number">0</span>] <span class="hljs-attribute">3</span>.<span class="hljs-number">18</span>e-<span class="hljs-number">04</span> <span class="hljs-number">6</span>.<span class="hljs-number">27</span>e+<span class="hljs-number">04</span> output <span class="hljs-attribute">encoder</span>.block.<span 
class="hljs-number">2</span>.layer.<span class="hljs-number">1</span>.DenseReluDense T5DenseGatedGeluDense <span class="hljs-attribute">1</span>.<span class="hljs-number">79</span>e-<span class="hljs-number">06</span> <span class="hljs-number">4</span>.<span class="hljs-number">65</span>e+<span class="hljs-number">00</span> input[<span class="hljs-number">0</span>] <span class="hljs-attribute">3</span>.<span class="hljs-number">18</span>e-<span class="hljs-number">04</span> <span class="hljs-number">6</span>.<span class="hljs-number">27</span>e+<span class="hljs-number">04</span> output <span class="hljs-attribute">encoder</span>.block.<span class="hljs-number">2</span>.layer.<span class="hljs-number">1</span>.dropout Dropout <span class="hljs-attribute">3</span>.<span class="hljs-number">18</span>e-<span class="hljs-number">04</span> <span class="hljs-number">6</span>.<span class="hljs-number">27</span>e+<span class="hljs-number">04</span> input[<span class="hljs-number">0</span>] <span class="hljs-attribute">0</span>.<span class="hljs-number">00</span>e+<span class="hljs-number">00</span> inf output`}}),ye=new P({props:{code:`class T5DenseGatedGeluDense(nn.Module): def __init__(self, config): super().__init__() self.wi_0 = nn.Linear(config.d_model, config.d_ff, bias=False) self.wi_1 = nn.Linear(config.d_model, config.d_ff, bias=False) self.wo = nn.Linear(config.d_ff, config.d_model, bias=False) self.dropout = nn.Dropout(config.dropout_rate) self.gelu_act = ACT2FN["gelu_new"] def forward(self, hidden_states): hidden_gelu = self.gelu_act(self.wi_0(hidden_states)) hidden_linear = self.wi_1(hidden_states) hidden_states = hidden_gelu * hidden_linear hidden_states = self.dropout(hidden_states) hidden_states = self.wo(hidden_states) return hidden_states`,highlighted:`<span class="hljs-keyword">class</span> <span class="hljs-title class_">T5DenseGatedGeluDense</span>(nn.Module): <span class="hljs-keyword">def</span> <span class="hljs-title function_">__init__</span>(<span class="hljs-params">self, config</span>): <span class="hljs-built_in">super</span>().__init__() self.wi_0 = nn.Linear(config.d_model, config.d_ff, bias=<span class="hljs-literal">False</span>) self.wi_1 = nn.Linear(config.d_model, config.d_ff, bias=<span class="hljs-literal">False</span>) self.wo = nn.Linear(config.d_ff, config.d_model, bias=<span class="hljs-literal">False</span>) self.dropout = nn.Dropout(config.dropout_rate) self.gelu_act = ACT2FN[<span class="hljs-string">&quot;gelu_new&quot;</span>] <span class="hljs-keyword">def</span> <span class="hljs-title function_">forward</span>(<span class="hljs-params">self, hidden_states</span>): hidden_gelu = self.gelu_act(self.wi_0(hidden_states)) hidden_linear = self.wi_1(hidden_states) hidden_states = hidden_gelu * hidden_linear hidden_states = self.dropout(hidden_states) hidden_states = self.wo(hidden_states) <span class="hljs-keyword">return</span> hidden_states`}}),ge=new P({props:{code:`def _forward(self, hidden_states): hidden_gelu = self.gelu_act(self.wi_0(hidden_states)) hidden_linear = self.wi_1(hidden_states) hidden_states = hidden_gelu * hidden_linear hidden_states = self.dropout(hidden_states) hidden_states = self.wo(hidden_states) return hidden_states import torch def forward(self, hidden_states): if torch.is_autocast_enabled(): with torch.cuda.amp.autocast(enabled=False): return self._forward(hidden_states) else: return self._forward(hidden_states)`,highlighted:`<span class="hljs-keyword">def</span> <span class="hljs-title function_">_forward</span>(<span 
class="hljs-params">self, hidden_states</span>): hidden_gelu = self.gelu_act(self.wi_0(hidden_states)) hidden_linear = self.wi_1(hidden_states) hidden_states = hidden_gelu * hidden_linear hidden_states = self.dropout(hidden_states) hidden_states = self.wo(hidden_states) <span class="hljs-keyword">return</span> hidden_states <span class="hljs-keyword">import</span> torch <span class="hljs-keyword">def</span> <span class="hljs-title function_">forward</span>(<span class="hljs-params">self, hidden_states</span>): <span class="hljs-keyword">if</span> torch.is_autocast_enabled(): <span class="hljs-keyword">with</span> torch.cuda.amp.autocast(enabled=<span class="hljs-literal">False</span>): <span class="hljs-keyword">return</span> self._forward(hidden_states) <span class="hljs-keyword">else</span>: <span class="hljs-keyword">return</span> self._forward(hidden_states)`}}),$e=new P({props:{code:`from debug_utils import detect_overflow class T5LayerFF(nn.Module): [...] def forward(self, hidden_states): forwarded_states = self.layer_norm(hidden_states) detect_overflow(forwarded_states, "after layer_norm") forwarded_states = self.DenseReluDense(forwarded_states) detect_overflow(forwarded_states, "after DenseReluDense") return hidden_states + self.dropout(forwarded_states)`,highlighted:`<span class="hljs-keyword">from</span> debug_utils <span class="hljs-keyword">import</span> detect_overflow <span class="hljs-keyword">class</span> <span class="hljs-title class_">T5LayerFF</span>(nn.Module): [...] <span class="hljs-keyword">def</span> <span class="hljs-title function_">forward</span>(<span class="hljs-params">self, hidden_states</span>): forwarded_states = self.layer_norm(hidden_states) detect_overflow(forwarded_states, <span class="hljs-string">&quot;after layer_norm&quot;</span>) forwarded_states = self.DenseReluDense(forwarded_states) detect_overflow(forwarded_states, <span class="hljs-string">&quot;after DenseReluDense&quot;</span>) <span class="hljs-keyword">return</span> hidden_states + self.dropout(forwarded_states)`}}),De=new P({props:{code:`from .debug_utils import DebugUnderflowOverflow debug_overflow = DebugUnderflowOverflow(model, max_frames_to_save=100)`,highlighted:`<span class="hljs-keyword">from</span> .debug_utils <span class="hljs-keyword">import</span> DebugUnderflowOverflow debug_overflow = DebugUnderflowOverflow(model, max_frames_to_save=<span class="hljs-number">100</span>)`}}),Ee=new it({}),ke=new P({props:{code:"debug_overflow = DebugUnderflowOverflow(model, trace_batch_nums=[1, 3])",highlighted:'debug_overflow = DebugUnderflowOverflow(model, trace_batch_nums=[<span class="hljs-number">1</span>, <span class="hljs-number">3</span>])'}}),Oe=new P({props:{code:` *** Starting batch number=1 *** abs min abs max metadata shared Embedding 1.01e-06 7.92e+02 weight 0.00e+00 2.47e+04 input[0] 5.36e-05 7.92e+02 output [...] 
decoder.dropout Dropout 1.60e-07 2.27e+01 input[0] 0.00e+00 2.52e+01 output decoder T5Stack not a tensor output lm_head Linear 1.01e-06 7.92e+02 weight 0.00e+00 1.11e+00 input[0] 6.06e-02 8.39e+01 output T5ForConditionalGeneration not a tensor output *** Starting batch number=3 *** abs min abs max metadata shared Embedding 1.01e-06 7.92e+02 weight 0.00e+00 2.78e+04 input[0] 5.36e-05 7.92e+02 output [...]`,highlighted:` *** Starting batch number=1 *** abs min abs max metadata shared Embedding 1.01e<span class="hljs-string">-06</span> 7.92e<span class="hljs-string">+02</span> weight 0.00e<span class="hljs-string">+00</span> 2.47e<span class="hljs-string">+04</span> input[0] 5.36e<span class="hljs-string">-05</span> 7.92e<span class="hljs-string">+02</span> output [...] decoder.dropout Dropout 1.60e<span class="hljs-string">-07</span> 2.27e<span class="hljs-string">+01</span> input[0] 0.00e<span class="hljs-string">+00</span> 2.52e<span class="hljs-string">+01</span> output decoder T5Stack not a tensor output lm_head Linear 1.01e<span class="hljs-string">-06</span> 7.92e<span class="hljs-string">+02</span> weight 0.00e<span class="hljs-string">+00</span> 1.11e<span class="hljs-string">+00</span> input[0] 6.06e<span class="hljs-string">-02</span> 8.39e<span class="hljs-string">+01</span> output T5ForConditionalGeneration not a tensor output *** Starting batch number=3 *** abs min abs max metadata shared Embedding 1.01e<span class="hljs-string">-06</span> 7.92e<span class="hljs-string">+02</span> weight 0.00e<span class="hljs-string">+00</span> 2.78e<span class="hljs-string">+04</span> input[0] 5.36e<span class="hljs-string">-05</span> 7.92e<span class="hljs-string">+02</span> output [...]`}}),Pe=new P({props:{code:"debug_overflow = DebugUnderflowOverflow(model, trace_batch_nums=[1, 3], abort_after_batch_num=3)",highlighted:'debug_overflow = DebugUnderflowOverflow(model, trace_batch_nums=[<span class="hljs-number">1</span>, <span class="hljs-number">3</span>], abort_after_batch_num=<span class="hljs-number">3</span>)'}}),{c(){h=r("meta"),D=u(),d=r("h1"),f=r("a"),C=r("span"),m($.$$.fragment),E=u(),F=r("span"),ut=t("Debugging"),zs=u(),M=r("h2"),B=r("a"),Xe=r("span"),m(ce.$$.fragment),ct=u(),Ze=r("span"),ht=t("Multi-GPU Network Issues Debug"),Ws=u(),K=r("p"),dt=t("When training or inferencing with "),es=r("code"),ft=t("DistributedDataParallel"),mt=t(" and multiple GPU, if you run into issue of inter-communication between processes and/or nodes, you can use the following script to diagnose network issues."),Js=u(),m(he.$$.fragment),Qs=u(),xe=r("p"),bt=t("For example to test how 2 GPUs interact do:"),Vs=u(),m(de.$$.fragment),Xs=u(),Te=r("p"),wt=t("If both processes can talk to each and allocate GPU memory each will print an OK status."),Zs=u(),Le=r("p"),jt=t("For more GPUs or nodes adjust the arguments in the script."),ea=u(),Ae=r("p"),_t=t("You will find a lot more details inside the diagnostics script and even a recipe to how you could run it in a SLURM environment."),sa=u(),z=r("p"),vt=t("An additional level of debug is to add "),ss=r("code"),yt=t("NCCL_DEBUG=INFO"),gt=t(" environment variable as follows:"),aa=u(),m(fe.$$.fragment),ta=u(),Ne=r("p"),$t=t("This will dump a lot of NCCL-related debug information, which you can then search online if you find that some problems are reported. 
Or if you\u2019re not sure how to interpret the output you can share the log file in an Issue."),na=u(),q=r("h2"),W=r("a"),as=r("span"),m(me.$$.fragment),Dt=u(),ts=r("span"),Et=t("Underflow and Overflow Detection"),la=u(),m(J.$$.fragment),ra=u(),m(Q.$$.fragment),oa=u(),m(V.$$.fragment),pa=u(),T=r("p"),kt=t("If you start getting "),ns=r("code"),Ot=t("loss=NaN"),Pt=t(" or the model inhibits some other abnormal behavior due to "),ls=r("code"),Ct=t("inf"),xt=t(" or "),rs=r("code"),Tt=t("nan"),Lt=t(` in activations or weights one needs to discover where the first underflow or overflow happens and what led to it. Luckily you can accomplish that easily by activating a special module that will do the detection automatically.`),ia=u(),X=r("p"),At=t("If you\u2019re using "),Ge=r("a"),Nt=t("Trainer"),Gt=t(", you just need to add:"),ua=u(),m(be.$$.fragment),ca=u(),S=r("p"),Ut=t("to the normal command line arguments, or pass "),os=r("code"),Ft=t('debug="underflow_overflow"'),St=t(` when creating the `),Ue=r("a"),Rt=t("TrainingArguments"),It=t(" object."),ha=u(),Fe=r("p"),Mt=t("If you\u2019re using your own training loop or another Trainer you can accomplish the same with:"),da=u(),m(we.$$.fragment),fa=u(),x=r("p"),Se=r("a"),qt=t("DebugUnderflowOverflow"),Ht=t(` inserts hooks into the model that immediately after each forward call will test input and output variables and also the corresponding module\u2019s weights. As soon as `),ps=r("code"),Yt=t("inf"),Bt=t(` or `),is=r("code"),Kt=t("nan"),zt=t(` is detected in at least one element of the activations or weights, the program will assert and print a report like this (this was caught with `),us=r("code"),Wt=t("google/mt5-small"),Jt=t(" under fp16 mixed precision):"),ma=u(),m(je.$$.fragment),ba=u(),Re=r("p"),Qt=t("The example output has been trimmed in the middle for brevity."),wa=u(),g=r("p"),Vt=t(`The second column shows the value of the absolute largest element, so if you have a closer look at the last few frames, the inputs and outputs were in the range of `),cs=r("code"),Xt=t("1e4"),Zt=t(`. So when this training was done under fp16 mixed precision the very last step overflowed (since under `),hs=r("code"),en=t("fp16"),sn=t(" the largest number before "),ds=r("code"),an=t("inf"),tn=t(" is "),fs=r("code"),nn=t("64e3"),ln=t(`). To avoid overflows under `),ms=r("code"),rn=t("fp16"),on=t(" the activations must remain way below "),bs=r("code"),pn=t("1e4"),un=t(", because "),ws=r("code"),cn=t("1e4 * 1e4 = 1e8"),hn=t(` so any matrix multiplication with large activations is going to lead to a numerical overflow condition.`),ja=u(),Z=r("p"),dn=t("At the very start of the trace you can discover at which batch number the problem occurred (here "),js=r("code"),fn=t("Detected inf/nan during batch_number=0"),mn=t(" means the problem occurred on the first batch)."),_a=u(),Ie=r("p"),bn=t(`Each reported frame starts by declaring the fully qualified entry for the corresponding module this frame is reporting for. If we look just at this frame:`),va=u(),m(_e.$$.fragment),ya=u(),L=r("p"),wn=t("Here, "),_s=r("code"),jn=t("encoder.block.2.layer.1.layer_norm"),_n=t(` indicates that it was a layer norm for the first layer, of the second block of the encoder. 
And the specific calls of the `),vs=r("code"),vn=t("forward"),yn=t(" is "),ys=r("code"),gn=t("T5LayerNorm"),$n=t("."),ga=u(),Me=r("p"),Dn=t("Let\u2019s look at the last few frames of that report:"),$a=u(),m(ve.$$.fragment),Da=u(),k=r("p"),En=t("The last frame reports for "),gs=r("code"),kn=t("Dropout.forward"),On=t(` function with the first entry for the only input and the second for the only output. You can see that it was called from an attribute `),$s=r("code"),Pn=t("dropout"),Cn=t(" inside "),Ds=r("code"),xn=t("DenseReluDense"),Tn=t(` class. We can see that it happened during the first layer, of the 2nd block, during the very first batch. Finally, the absolute largest input elements was `),Es=r("code"),Ln=t("6.27e+04"),An=t(" and same for the output was "),ks=r("code"),Nn=t("inf"),Gn=t("."),Ea=u(),A=r("p"),Un=t("You can see here, that "),Os=r("code"),Fn=t("T5DenseGatedGeluDense.forward"),Sn=t(` resulted in output activations, whose absolute max value was around 62.7K, which is very close to fp16\u2019s top limit of 64K. In the next frame we have `),Ps=r("code"),Rn=t("Dropout"),In=t(` which renormalizes the weights, after it zeroed some of the elements, which pushes the absolute max value to more than 64K, and we get an overflow (`),Cs=r("code"),Mn=t("inf"),qn=t(")."),ka=u(),qe=r("p"),Hn=t(`As you can see it\u2019s the previous frames that we need to look into when the numbers start going into very large for fp16 numbers.`),Oa=u(),ee=r("p"),Yn=t("Let\u2019s match the report to the code from "),xs=r("code"),Bn=t("models/t5/modeling_t5.py"),Kn=t(":"),Pa=u(),m(ye.$$.fragment),Ca=u(),se=r("p"),zn=t("Now it\u2019s easy to see the "),Ts=r("code"),Wn=t("dropout"),Jn=t(" call, and all the previous calls as well."),xa=u(),ae=r("p"),Qn=t("Since the detection is happening in a forward hook, these reports are printed immediately after each "),Ls=r("code"),Vn=t("forward"),Xn=t(` returns.`),Ta=u(),N=r("p"),Zn=t(`Going back to the full report, to act on it and to fix the problem, we need to go a few frames up where the numbers started to go up and most likely switch to the `),As=r("code"),el=t("fp32"),sl=t(` mode here, so that the numbers don\u2019t overflow when multiplied or summed up. Of course, there might be other solutions. For example, we could turn off `),Ns=r("code"),al=t("amp"),tl=t(` temporarily if it\u2019s enabled, after moving the original `),Gs=r("code"),nl=t("forward"),ll=t(" into a helper wrapper, like so:"),La=u(),m(ge.$$.fragment),Aa=u(),R=r("p"),rl=t(`Since the automatic detector only reports on inputs and outputs of full frames, once you know where to look, you may want to analyse the intermediary stages of any specific `),Us=r("code"),ol=t("forward"),pl=t(` function as well. 
In such a case you can use the `),Fs=r("code"),il=t("detect_overflow"),ul=t(" helper function to inject the detector where you want it, for example:"),Na=u(),m($e.$$.fragment),Ga=u(),G=r("p"),cl=t("You can see that we added 2 of these and now we track if "),Ss=r("code"),hl=t("inf"),dl=t(" or "),Rs=r("code"),fl=t("nan"),ml=t(" for "),Is=r("code"),bl=t("forwarded_states"),wl=t(` was detected somewhere in between.`),Ua=u(),te=r("p"),jl=t("Actually, the detector already reports these because each of the calls in the example above is a "),Ms=r("code"),_l=t("nn.Module"),vl=t(`, but let\u2019s say if you had some local direct calculations this is how you\u2019d do that.`),Fa=u(),He=r("p"),yl=t(`Additionally, if you\u2019re instantiating the debugger in your own code, you can adjust the number of frames printed from its default, e.g.:`),Sa=u(),m(De.$$.fragment),Ra=u(),H=r("h3"),ne=r("a"),qs=r("span"),m(Ee.$$.fragment),gl=u(),Hs=r("span"),$l=t("Specific batch absolute mix and max value tracing"),Ia=u(),Ye=r("p"),Dl=t("The same debugging class can be used for per-batch tracing with the underflow/overflow detection feature turned off."),Ma=u(),le=r("p"),El=t("Let\u2019s say you want to watch the absolute min and max values for all the ingredients of each "),Ys=r("code"),kl=t("forward"),Ol=t(` call of a given batch, and only do that for batches 1 and 3. Then you instantiate this class as:`),qa=u(),m(ke.$$.fragment),Ha=u(),Be=r("p"),Pl=t("And now full batches 1 and 3 will be traced using the same format as the underflow/overflow detector does."),Ya=u(),Ke=r("p"),Cl=t("Batches are 0-indexed."),Ba=u(),ze=r("p"),xl=t(`This is helpful if you know that the program starts misbehaving after a certain batch number, so you can fast-forward right to that area. Here is a sample truncated output for such configuration:`),Ka=u(),m(Oe.$$.fragment),za=u(),We=r("p"),Tl=t(`Here you will get a huge number of frames dumped - as many as there were forward calls in your model, so it may or may not what you want, but sometimes it can be easier to use for debugging purposes than a normal debugger. For example, if a problem starts happening at batch number 150. 
So you can dump traces for batches 149 and 150 and compare where numbers started to diverge.`),Wa=u(),Je=r("p"),Ll=t("You can also specify the batch number after which to stop the training, with:"),Ja=u(),m(Pe.$$.fragment),this.h()},l(e){const l=eo('[data-svelte="svelte-1phssyn"]',document.head);h=o(l,"META",{name:!0,content:!0}),l.forEach(s),D=c(e),d=o(e,"H1",{class:!0});var Ce=i(d);f=o(Ce,"A",{id:!0,class:!0,href:!0});var Bs=i(f);C=o(Bs,"SPAN",{});var Ks=i(C);b($.$$.fragment,Ks),Ks.forEach(s),Bs.forEach(s),E=c(Ce),F=o(Ce,"SPAN",{});var Nl=i(F);ut=n(Nl,"Debugging"),Nl.forEach(s),Ce.forEach(s),zs=c(e),M=o(e,"H2",{class:!0});var Va=i(M);B=o(Va,"A",{id:!0,class:!0,href:!0});var Gl=i(B);Xe=o(Gl,"SPAN",{});var Ul=i(Xe);b(ce.$$.fragment,Ul),Ul.forEach(s),Gl.forEach(s),ct=c(Va),Ze=o(Va,"SPAN",{});var Fl=i(Ze);ht=n(Fl,"Multi-GPU Network Issues Debug"),Fl.forEach(s),Va.forEach(s),Ws=c(e),K=o(e,"P",{});var Xa=i(K);dt=n(Xa,"When training or inferencing with "),es=o(Xa,"CODE",{});var Sl=i(es);ft=n(Sl,"DistributedDataParallel"),Sl.forEach(s),mt=n(Xa," and multiple GPU, if you run into issue of inter-communication between processes and/or nodes, you can use the following script to diagnose network issues."),Xa.forEach(s),Js=c(e),b(he.$$.fragment,e),Qs=c(e),xe=o(e,"P",{});var Rl=i(xe);bt=n(Rl,"For example to test how 2 GPUs interact do:"),Rl.forEach(s),Vs=c(e),b(de.$$.fragment,e),Xs=c(e),Te=o(e,"P",{});var Il=i(Te);wt=n(Il,"If both processes can talk to each and allocate GPU memory each will print an OK status."),Il.forEach(s),Zs=c(e),Le=o(e,"P",{});var Ml=i(Le);jt=n(Ml,"For more GPUs or nodes adjust the arguments in the script."),Ml.forEach(s),ea=c(e),Ae=o(e,"P",{});var ql=i(Ae);_t=n(ql,"You will find a lot more details inside the diagnostics script and even a recipe to how you could run it in a SLURM environment."),ql.forEach(s),sa=c(e),z=o(e,"P",{});var Za=i(z);vt=n(Za,"An additional level of debug is to add "),ss=o(Za,"CODE",{});var Hl=i(ss);yt=n(Hl,"NCCL_DEBUG=INFO"),Hl.forEach(s),gt=n(Za," environment variable as follows:"),Za.forEach(s),aa=c(e),b(fe.$$.fragment,e),ta=c(e),Ne=o(e,"P",{});var Yl=i(Ne);$t=n(Yl,"This will dump a lot of NCCL-related debug information, which you can then search online if you find that some problems are reported. Or if you\u2019re not sure how to interpret the output you can share the log file in an Issue."),Yl.forEach(s),na=c(e),q=o(e,"H2",{class:!0});var et=i(q);W=o(et,"A",{id:!0,class:!0,href:!0});var Bl=i(W);as=o(Bl,"SPAN",{});var Kl=i(as);b(me.$$.fragment,Kl),Kl.forEach(s),Bl.forEach(s),Dt=c(et),ts=o(et,"SPAN",{});var zl=i(ts);Et=n(zl,"Underflow and Overflow Detection"),zl.forEach(s),et.forEach(s),la=c(e),b(J.$$.fragment,e),ra=c(e),b(Q.$$.fragment,e),oa=c(e),b(V.$$.fragment,e),pa=c(e),T=o(e,"P",{});var re=i(T);kt=n(re,"If you start getting "),ns=o(re,"CODE",{});var Wl=i(ns);Ot=n(Wl,"loss=NaN"),Wl.forEach(s),Pt=n(re," or the model inhibits some other abnormal behavior due to "),ls=o(re,"CODE",{});var Jl=i(ls);Ct=n(Jl,"inf"),Jl.forEach(s),xt=n(re," or "),rs=o(re,"CODE",{});var Ql=i(rs);Tt=n(Ql,"nan"),Ql.forEach(s),Lt=n(re,` in activations or weights one needs to discover where the first underflow or overflow happens and what led to it. 
Luckily you can accomplish that easily by activating a special module that will do the detection automatically.`),re.forEach(s),ia=c(e),X=o(e,"P",{});var st=i(X);At=n(st,"If you\u2019re using "),Ge=o(st,"A",{href:!0});var Vl=i(Ge);Nt=n(Vl,"Trainer"),Vl.forEach(s),Gt=n(st,", you just need to add:"),st.forEach(s),ua=c(e),b(be.$$.fragment,e),ca=c(e),S=o(e,"P",{});var Qe=i(S);Ut=n(Qe,"to the normal command line arguments, or pass "),os=o(Qe,"CODE",{});var Xl=i(os);Ft=n(Xl,'debug="underflow_overflow"'),Xl.forEach(s),St=n(Qe,` when creating the `),Ue=o(Qe,"A",{href:!0});var Zl=i(Ue);Rt=n(Zl,"TrainingArguments"),Zl.forEach(s),It=n(Qe," object."),Qe.forEach(s),ha=c(e),Fe=o(e,"P",{});var er=i(Fe);Mt=n(er,"If you\u2019re using your own training loop or another Trainer you can accomplish the same with:"),er.forEach(s),da=c(e),b(we.$$.fragment,e),fa=c(e),x=o(e,"P",{});var Y=i(x);Se=o(Y,"A",{href:!0});var sr=i(Se);qt=n(sr,"DebugUnderflowOverflow"),sr.forEach(s),Ht=n(Y,` inserts hooks into the model that immediately after each forward call will test input and output variables and also the corresponding module\u2019s weights. As soon as `),ps=o(Y,"CODE",{});var ar=i(ps);Yt=n(ar,"inf"),ar.forEach(s),Bt=n(Y,` or `),is=o(Y,"CODE",{});var tr=i(is);Kt=n(tr,"nan"),tr.forEach(s),zt=n(Y,` is detected in at least one element of the activations or weights, the program will assert and print a report like this (this was caught with `),us=o(Y,"CODE",{});var nr=i(us);Wt=n(nr,"google/mt5-small"),nr.forEach(s),Jt=n(Y," under fp16 mixed precision):"),Y.forEach(s),ma=c(e),b(je.$$.fragment,e),ba=c(e),Re=o(e,"P",{});var lr=i(Re);Qt=n(lr,"The example output has been trimmed in the middle for brevity."),lr.forEach(s),wa=c(e),g=o(e,"P",{});var O=i(g);Vt=n(O,`The second column shows the value of the absolute largest element, so if you have a closer look at the last few frames, the inputs and outputs were in the range of `),cs=o(O,"CODE",{});var rr=i(cs);Xt=n(rr,"1e4"),rr.forEach(s),Zt=n(O,`. So when this training was done under fp16 mixed precision the very last step overflowed (since under `),hs=o(O,"CODE",{});var or=i(hs);en=n(or,"fp16"),or.forEach(s),sn=n(O," the largest number before "),ds=o(O,"CODE",{});var pr=i(ds);an=n(pr,"inf"),pr.forEach(s),tn=n(O," is "),fs=o(O,"CODE",{});var ir=i(fs);nn=n(ir,"64e3"),ir.forEach(s),ln=n(O,`). To avoid overflows under `),ms=o(O,"CODE",{});var ur=i(ms);rn=n(ur,"fp16"),ur.forEach(s),on=n(O," the activations must remain way below "),bs=o(O,"CODE",{});var cr=i(bs);pn=n(cr,"1e4"),cr.forEach(s),un=n(O,", because "),ws=o(O,"CODE",{});var hr=i(ws);cn=n(hr,"1e4 * 1e4 = 1e8"),hr.forEach(s),hn=n(O,` so any matrix multiplication with large activations is going to lead to a numerical overflow condition.`),O.forEach(s),ja=c(e),Z=o(e,"P",{});var at=i(Z);dn=n(at,"At the very start of the trace you can discover at which batch number the problem occurred (here "),js=o(at,"CODE",{});var dr=i(js);fn=n(dr,"Detected inf/nan during batch_number=0"),dr.forEach(s),mn=n(at," means the problem occurred on the first batch)."),at.forEach(s),_a=c(e),Ie=o(e,"P",{});var fr=i(Ie);bn=n(fr,`Each reported frame starts by declaring the fully qualified entry for the corresponding module this frame is reporting for. 
If we look just at this frame:`),fr.forEach(s),va=c(e),b(_e.$$.fragment,e),ya=c(e),L=o(e,"P",{});var oe=i(L);wn=n(oe,"Here, "),_s=o(oe,"CODE",{});var mr=i(_s);jn=n(mr,"encoder.block.2.layer.1.layer_norm"),mr.forEach(s),_n=n(oe,` indicates that it was a layer norm for the first layer, of the second block of the encoder. And the specific calls of the `),vs=o(oe,"CODE",{});var br=i(vs);vn=n(br,"forward"),br.forEach(s),yn=n(oe," is "),ys=o(oe,"CODE",{});var wr=i(ys);gn=n(wr,"T5LayerNorm"),wr.forEach(s),$n=n(oe,"."),oe.forEach(s),ga=c(e),Me=o(e,"P",{});var jr=i(Me);Dn=n(jr,"Let\u2019s look at the last few frames of that report:"),jr.forEach(s),$a=c(e),b(ve.$$.fragment,e),Da=c(e),k=o(e,"P",{});var U=i(k);En=n(U,"The last frame reports for "),gs=o(U,"CODE",{});var _r=i(gs);kn=n(_r,"Dropout.forward"),_r.forEach(s),On=n(U,` function with the first entry for the only input and the second for the only output. You can see that it was called from an attribute `),$s=o(U,"CODE",{});var vr=i($s);Pn=n(vr,"dropout"),vr.forEach(s),Cn=n(U," inside "),Ds=o(U,"CODE",{});var yr=i(Ds);xn=n(yr,"DenseReluDense"),yr.forEach(s),Tn=n(U,` class. We can see that it happened during the first layer, of the 2nd block, during the very first batch. Finally, the absolute largest input elements was `),Es=o(U,"CODE",{});var gr=i(Es);Ln=n(gr,"6.27e+04"),gr.forEach(s),An=n(U," and same for the output was "),ks=o(U,"CODE",{});var $r=i(ks);Nn=n($r,"inf"),$r.forEach(s),Gn=n(U,"."),U.forEach(s),Ea=c(e),A=o(e,"P",{});var pe=i(A);Un=n(pe,"You can see here, that "),Os=o(pe,"CODE",{});var Dr=i(Os);Fn=n(Dr,"T5DenseGatedGeluDense.forward"),Dr.forEach(s),Sn=n(pe,` resulted in output activations, whose absolute max value was around 62.7K, which is very close to fp16\u2019s top limit of 64K. In the next frame we have `),Ps=o(pe,"CODE",{});var Er=i(Ps);Rn=n(Er,"Dropout"),Er.forEach(s),In=n(pe,` which renormalizes the weights, after it zeroed some of the elements, which pushes the absolute max value to more than 64K, and we get an overflow (`),Cs=o(pe,"CODE",{});var kr=i(Cs);Mn=n(kr,"inf"),kr.forEach(s),qn=n(pe,")."),pe.forEach(s),ka=c(e),qe=o(e,"P",{});var Or=i(qe);Hn=n(Or,`As you can see it\u2019s the previous frames that we need to look into when the numbers start going into very large for fp16 numbers.`),Or.forEach(s),Oa=c(e),ee=o(e,"P",{});var tt=i(ee);Yn=n(tt,"Let\u2019s match the report to the code from "),xs=o(tt,"CODE",{});var Pr=i(xs);Bn=n(Pr,"models/t5/modeling_t5.py"),Pr.forEach(s),Kn=n(tt,":"),tt.forEach(s),Pa=c(e),b(ye.$$.fragment,e),Ca=c(e),se=o(e,"P",{});var nt=i(se);zn=n(nt,"Now it\u2019s easy to see the "),Ts=o(nt,"CODE",{});var Cr=i(Ts);Wn=n(Cr,"dropout"),Cr.forEach(s),Jn=n(nt," call, and all the previous calls as well."),nt.forEach(s),xa=c(e),ae=o(e,"P",{});var lt=i(ae);Qn=n(lt,"Since the detection is happening in a forward hook, these reports are printed immediately after each "),Ls=o(lt,"CODE",{});var xr=i(Ls);Vn=n(xr,"forward"),xr.forEach(s),Xn=n(lt,` returns.`),lt.forEach(s),Ta=c(e),N=o(e,"P",{});var ie=i(N);Zn=n(ie,`Going back to the full report, to act on it and to fix the problem, we need to go a few frames up where the numbers started to go up and most likely switch to the `),As=o(ie,"CODE",{});var Tr=i(As);el=n(Tr,"fp32"),Tr.forEach(s),sl=n(ie,` mode here, so that the numbers don\u2019t overflow when multiplied or summed up. Of course, there might be other solutions. 
For example, we could turn off `),Ns=o(ie,"CODE",{});var Lr=i(Ns);al=n(Lr,"amp"),Lr.forEach(s),tl=n(ie,` temporarily if it\u2019s enabled, after moving the original `),Gs=o(ie,"CODE",{});var Ar=i(Gs);nl=n(Ar,"forward"),Ar.forEach(s),ll=n(ie," into a helper wrapper, like so:"),ie.forEach(s),La=c(e),b(ge.$$.fragment,e),Aa=c(e),R=o(e,"P",{});var Ve=i(R);rl=n(Ve,`Since the automatic detector only reports on inputs and outputs of full frames, once you know where to look, you may want to analyse the intermediary stages of any specific `),Us=o(Ve,"CODE",{});var Nr=i(Us);ol=n(Nr,"forward"),Nr.forEach(s),pl=n(Ve,` function as well. In such a case you can use the `),Fs=o(Ve,"CODE",{});var Gr=i(Fs);il=n(Gr,"detect_overflow"),Gr.forEach(s),ul=n(Ve," helper function to inject the detector where you want it, for example:"),Ve.forEach(s),Na=c(e),b($e.$$.fragment,e),Ga=c(e),G=o(e,"P",{});var ue=i(G);cl=n(ue,"You can see that we added 2 of these and now we track if "),Ss=o(ue,"CODE",{});var Ur=i(Ss);hl=n(Ur,"inf"),Ur.forEach(s),dl=n(ue," or "),Rs=o(ue,"CODE",{});var Fr=i(Rs);fl=n(Fr,"nan"),Fr.forEach(s),ml=n(ue," for "),Is=o(ue,"CODE",{});var Sr=i(Is);bl=n(Sr,"forwarded_states"),Sr.forEach(s),wl=n(ue,` was detected somewhere in between.`),ue.forEach(s),Ua=c(e),te=o(e,"P",{});var rt=i(te);jl=n(rt,"Actually, the detector already reports these because each of the calls in the example above is a "),Ms=o(rt,"CODE",{});var Rr=i(Ms);_l=n(Rr,"nn.Module"),Rr.forEach(s),vl=n(rt,`, but let\u2019s say if you had some local direct calculations this is how you\u2019d do that.`),rt.forEach(s),Fa=c(e),He=o(e,"P",{});var Ir=i(He);yl=n(Ir,`Additionally, if you\u2019re instantiating the debugger in your own code, you can adjust the number of frames printed from its default, e.g.:`),Ir.forEach(s),Sa=c(e),b(De.$$.fragment,e),Ra=c(e),H=o(e,"H3",{class:!0});var ot=i(H);ne=o(ot,"A",{id:!0,class:!0,href:!0});var Mr=i(ne);qs=o(Mr,"SPAN",{});var qr=i(qs);b(Ee.$$.fragment,qr),qr.forEach(s),Mr.forEach(s),gl=c(ot),Hs=o(ot,"SPAN",{});var Hr=i(Hs);$l=n(Hr,"Specific batch absolute mix and max value tracing"),Hr.forEach(s),ot.forEach(s),Ia=c(e),Ye=o(e,"P",{});var Yr=i(Ye);Dl=n(Yr,"The same debugging class can be used for per-batch tracing with the underflow/overflow detection feature turned off."),Yr.forEach(s),Ma=c(e),le=o(e,"P",{});var pt=i(le);El=n(pt,"Let\u2019s say you want to watch the absolute min and max values for all the ingredients of each "),Ys=o(pt,"CODE",{});var Br=i(Ys);kl=n(Br,"forward"),Br.forEach(s),Ol=n(pt,` call of a given batch, and only do that for batches 1 and 3. Then you instantiate this class as:`),pt.forEach(s),qa=c(e),b(ke.$$.fragment,e),Ha=c(e),Be=o(e,"P",{});var Kr=i(Be);Pl=n(Kr,"And now full batches 1 and 3 will be traced using the same format as the underflow/overflow detector does."),Kr.forEach(s),Ya=c(e),Ke=o(e,"P",{});var zr=i(Ke);Cl=n(zr,"Batches are 0-indexed."),zr.forEach(s),Ba=c(e),ze=o(e,"P",{});var Wr=i(ze);xl=n(Wr,`This is helpful if you know that the program starts misbehaving after a certain batch number, so you can fast-forward right to that area. Here is a sample truncated output for such configuration:`),Wr.forEach(s),Ka=c(e),b(Oe.$$.fragment,e),za=c(e),We=o(e,"P",{});var Jr=i(We);Tl=n(Jr,`Here you will get a huge number of frames dumped - as many as there were forward calls in your model, so it may or may not what you want, but sometimes it can be easier to use for debugging purposes than a normal debugger. For example, if a problem starts happening at batch number 150. 
So you can dump traces for batches 149 and 150 and compare where numbers started to diverge.`),Jr.forEach(s),Wa=c(e),Je=o(e,"P",{});var Qr=i(Je);Ll=n(Qr,"You can also specify the batch number after which to stop the training, with:"),Qr.forEach(s),Ja=c(e),b(Pe.$$.fragment,e),this.h()},h(){y(h,"name","hf:doc:metadata"),y(h,"content",JSON.stringify(lo)),y(f,"id","debugging"),y(f,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),y(f,"href","#debugging"),y(d,"class","relative group"),y(B,"id","multigpu-network-issues-debug"),y(B,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),y(B,"href","#multigpu-network-issues-debug"),y(M,"class","relative group"),y(W,"id","underflow-and-overflow-detection"),y(W,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),y(W,"href","#underflow-and-overflow-detection"),y(q,"class","relative group"),y(Ge,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer"),y(Ue,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.TrainingArguments"),y(Se,"href","/docs/transformers/pr_16143/en/internal/trainer_utils#transformers.debug_utils.DebugUnderflowOverflow"),y(ne,"id","specific-batch-absolute-mix-and-max-value-tracing"),y(ne,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),y(ne,"href","#specific-batch-absolute-mix-and-max-value-tracing"),y(H,"class","relative 
group")},m(e,l){a(document.head,h),p(e,D,l),p(e,d,l),a(d,f),a(f,C),w($,C,null),a(d,E),a(d,F),a(F,ut),p(e,zs,l),p(e,M,l),a(M,B),a(B,Xe),w(ce,Xe,null),a(M,ct),a(M,Ze),a(Ze,ht),p(e,Ws,l),p(e,K,l),a(K,dt),a(K,es),a(es,ft),a(K,mt),p(e,Js,l),w(he,e,l),p(e,Qs,l),p(e,xe,l),a(xe,bt),p(e,Vs,l),w(de,e,l),p(e,Xs,l),p(e,Te,l),a(Te,wt),p(e,Zs,l),p(e,Le,l),a(Le,jt),p(e,ea,l),p(e,Ae,l),a(Ae,_t),p(e,sa,l),p(e,z,l),a(z,vt),a(z,ss),a(ss,yt),a(z,gt),p(e,aa,l),w(fe,e,l),p(e,ta,l),p(e,Ne,l),a(Ne,$t),p(e,na,l),p(e,q,l),a(q,W),a(W,as),w(me,as,null),a(q,Dt),a(q,ts),a(ts,Et),p(e,la,l),w(J,e,l),p(e,ra,l),w(Q,e,l),p(e,oa,l),w(V,e,l),p(e,pa,l),p(e,T,l),a(T,kt),a(T,ns),a(ns,Ot),a(T,Pt),a(T,ls),a(ls,Ct),a(T,xt),a(T,rs),a(rs,Tt),a(T,Lt),p(e,ia,l),p(e,X,l),a(X,At),a(X,Ge),a(Ge,Nt),a(X,Gt),p(e,ua,l),w(be,e,l),p(e,ca,l),p(e,S,l),a(S,Ut),a(S,os),a(os,Ft),a(S,St),a(S,Ue),a(Ue,Rt),a(S,It),p(e,ha,l),p(e,Fe,l),a(Fe,Mt),p(e,da,l),w(we,e,l),p(e,fa,l),p(e,x,l),a(x,Se),a(Se,qt),a(x,Ht),a(x,ps),a(ps,Yt),a(x,Bt),a(x,is),a(is,Kt),a(x,zt),a(x,us),a(us,Wt),a(x,Jt),p(e,ma,l),w(je,e,l),p(e,ba,l),p(e,Re,l),a(Re,Qt),p(e,wa,l),p(e,g,l),a(g,Vt),a(g,cs),a(cs,Xt),a(g,Zt),a(g,hs),a(hs,en),a(g,sn),a(g,ds),a(ds,an),a(g,tn),a(g,fs),a(fs,nn),a(g,ln),a(g,ms),a(ms,rn),a(g,on),a(g,bs),a(bs,pn),a(g,un),a(g,ws),a(ws,cn),a(g,hn),p(e,ja,l),p(e,Z,l),a(Z,dn),a(Z,js),a(js,fn),a(Z,mn),p(e,_a,l),p(e,Ie,l),a(Ie,bn),p(e,va,l),w(_e,e,l),p(e,ya,l),p(e,L,l),a(L,wn),a(L,_s),a(_s,jn),a(L,_n),a(L,vs),a(vs,vn),a(L,yn),a(L,ys),a(ys,gn),a(L,$n),p(e,ga,l),p(e,Me,l),a(Me,Dn),p(e,$a,l),w(ve,e,l),p(e,Da,l),p(e,k,l),a(k,En),a(k,gs),a(gs,kn),a(k,On),a(k,$s),a($s,Pn),a(k,Cn),a(k,Ds),a(Ds,xn),a(k,Tn),a(k,Es),a(Es,Ln),a(k,An),a(k,ks),a(ks,Nn),a(k,Gn),p(e,Ea,l),p(e,A,l),a(A,Un),a(A,Os),a(Os,Fn),a(A,Sn),a(A,Ps),a(Ps,Rn),a(A,In),a(A,Cs),a(Cs,Mn),a(A,qn),p(e,ka,l),p(e,qe,l),a(qe,Hn),p(e,Oa,l),p(e,ee,l),a(ee,Yn),a(ee,xs),a(xs,Bn),a(ee,Kn),p(e,Pa,l),w(ye,e,l),p(e,Ca,l),p(e,se,l),a(se,zn),a(se,Ts),a(Ts,Wn),a(se,Jn),p(e,xa,l),p(e,ae,l),a(ae,Qn),a(ae,Ls),a(Ls,Vn),a(ae,Xn),p(e,Ta,l),p(e,N,l),a(N,Zn),a(N,As),a(As,el),a(N,sl),a(N,Ns),a(Ns,al),a(N,tl),a(N,Gs),a(Gs,nl),a(N,ll),p(e,La,l),w(ge,e,l),p(e,Aa,l),p(e,R,l),a(R,rl),a(R,Us),a(Us,ol),a(R,pl),a(R,Fs),a(Fs,il),a(R,ul),p(e,Na,l),w($e,e,l),p(e,Ga,l),p(e,G,l),a(G,cl),a(G,Ss),a(Ss,hl),a(G,dl),a(G,Rs),a(Rs,fl),a(G,ml),a(G,Is),a(Is,bl),a(G,wl),p(e,Ua,l),p(e,te,l),a(te,jl),a(te,Ms),a(Ms,_l),a(te,vl),p(e,Fa,l),p(e,He,l),a(He,yl),p(e,Sa,l),w(De,e,l),p(e,Ra,l),p(e,H,l),a(H,ne),a(ne,qs),w(Ee,qs,null),a(H,gl),a(H,Hs),a(Hs,$l),p(e,Ia,l),p(e,Ye,l),a(Ye,Dl),p(e,Ma,l),p(e,le,l),a(le,El),a(le,Ys),a(Ys,kl),a(le,Ol),p(e,qa,l),w(ke,e,l),p(e,Ha,l),p(e,Be,l),a(Be,Pl),p(e,Ya,l),p(e,Ke,l),a(Ke,Cl),p(e,Ba,l),p(e,ze,l),a(ze,xl),p(e,Ka,l),w(Oe,e,l),p(e,za,l),p(e,We,l),a(We,Tl),p(e,Wa,l),p(e,Je,l),a(Je,Ll),p(e,Ja,l),w(Pe,e,l),Qa=!0},p(e,[l]){const Ce={};l&2&&(Ce.$$scope={dirty:l,ctx:e}),J.$set(Ce);const Bs={};l&2&&(Bs.$$scope={dirty:l,ctx:e}),Q.$set(Bs);const 
Ks={};l&2&&(Ks.$$scope={dirty:l,ctx:e}),V.$set(Ks)},i(e){Qa||(j($.$$.fragment,e),j(ce.$$.fragment,e),j(he.$$.fragment,e),j(de.$$.fragment,e),j(fe.$$.fragment,e),j(me.$$.fragment,e),j(J.$$.fragment,e),j(Q.$$.fragment,e),j(V.$$.fragment,e),j(be.$$.fragment,e),j(we.$$.fragment,e),j(je.$$.fragment,e),j(_e.$$.fragment,e),j(ve.$$.fragment,e),j(ye.$$.fragment,e),j(ge.$$.fragment,e),j($e.$$.fragment,e),j(De.$$.fragment,e),j(Ee.$$.fragment,e),j(ke.$$.fragment,e),j(Oe.$$.fragment,e),j(Pe.$$.fragment,e),Qa=!0)},o(e){_($.$$.fragment,e),_(ce.$$.fragment,e),_(he.$$.fragment,e),_(de.$$.fragment,e),_(fe.$$.fragment,e),_(me.$$.fragment,e),_(J.$$.fragment,e),_(Q.$$.fragment,e),_(V.$$.fragment,e),_(be.$$.fragment,e),_(we.$$.fragment,e),_(je.$$.fragment,e),_(_e.$$.fragment,e),_(ve.$$.fragment,e),_(ye.$$.fragment,e),_(ge.$$.fragment,e),_($e.$$.fragment,e),_(De.$$.fragment,e),_(Ee.$$.fragment,e),_(ke.$$.fragment,e),_(Oe.$$.fragment,e),_(Pe.$$.fragment,e),Qa=!1},d(e){s(h),e&&s(D),e&&s(d),v($),e&&s(zs),e&&s(M),v(ce),e&&s(Ws),e&&s(K),e&&s(Js),v(he,e),e&&s(Qs),e&&s(xe),e&&s(Vs),v(de,e),e&&s(Xs),e&&s(Te),e&&s(Zs),e&&s(Le),e&&s(ea),e&&s(Ae),e&&s(sa),e&&s(z),e&&s(aa),v(fe,e),e&&s(ta),e&&s(Ne),e&&s(na),e&&s(q),v(me),e&&s(la),v(J,e),e&&s(ra),v(Q,e),e&&s(oa),v(V,e),e&&s(pa),e&&s(T),e&&s(ia),e&&s(X),e&&s(ua),v(be,e),e&&s(ca),e&&s(S),e&&s(ha),e&&s(Fe),e&&s(da),v(we,e),e&&s(fa),e&&s(x),e&&s(ma),v(je,e),e&&s(ba),e&&s(Re),e&&s(wa),e&&s(g),e&&s(ja),e&&s(Z),e&&s(_a),e&&s(Ie),e&&s(va),v(_e,e),e&&s(ya),e&&s(L),e&&s(ga),e&&s(Me),e&&s($a),v(ve,e),e&&s(Da),e&&s(k),e&&s(Ea),e&&s(A),e&&s(ka),e&&s(qe),e&&s(Oa),e&&s(ee),e&&s(Pa),v(ye,e),e&&s(Ca),e&&s(se),e&&s(xa),e&&s(ae),e&&s(Ta),e&&s(N),e&&s(La),v(ge,e),e&&s(Aa),e&&s(R),e&&s(Na),v($e,e),e&&s(Ga),e&&s(G),e&&s(Ua),e&&s(te),e&&s(Fa),e&&s(He),e&&s(Sa),v(De,e),e&&s(Ra),e&&s(H),v(Ee),e&&s(Ia),e&&s(Ye),e&&s(Ma),e&&s(le),e&&s(qa),v(ke,e),e&&s(Ha),e&&s(Be),e&&s(Ya),e&&s(Ke),e&&s(Ba),e&&s(ze),e&&s(Ka),v(Oe,e),e&&s(za),e&&s(We),e&&s(Wa),e&&s(Je),e&&s(Ja),v(Pe,e)}}}const lo={local:"debugging",sections:[{local:"multigpu-network-issues-debug",title:"Multi-GPU Network Issues Debug"},{local:"underflow-and-overflow-detection",sections:[{local:"specific-batch-absolute-mix-and-max-value-tracing",title:"Specific batch absolute mix and max value tracing"}],title:"Underflow and Overflow Detection"}],title:"Debugging"};function ro(I,h,D){let{fw:d}=h;return I.$$set=f=>{"fw"in f&&D(0,d=f.fw)},[d]}class ho extends Vr{constructor(h){super();Xr(this,h,ro,no,Zr,{fw:0})}}export{ho as default,lo as metadata};
252
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages/migration.mdx-040dfdaa.js
import{S as hw,i as fw,s as pw,e as l,k as c,w as p,t as r,M as mw,c as s,d as o,m as h,a as i,x as m,h as a,b as f,F as e,g as d,y as u,L as uw,q as _,o as v,B as E}from"../chunks/vendor-4833417e.js";import{I as $}from"../chunks/IconCopyLink-4b81c553.js";import{C as x}from"../chunks/CodeBlock-6a3d1b46.js";import"../chunks/CopyButton-dacfbfaf.js";function _w(Ec){let Z,Zo,A,F,Lr,no,bc,Ir,wc,Bi,ve,Ge,Sr,co,yc,Ue,kc,Mr,Tc,gc,jr,$c,qi,er,Cc,Ni,Ee,Xe,Fr,ho,Oc,Br,Dc,Hi,tr,xc,Ri,or,Ac,Wi,Ke,qr,zc,Pc,Nr,Lc,Gi,be,Je,Hr,fo,Ic,Rr,Sc,Ui,Qe,po,Mc,Ve,jc,Wr,Fc,Bc,qc,Nc,we,Hc,Gr,Rc,Wc,Ur,Gc,Uc,Xi,Ye,Xc,Xr,Kc,Jc,Ki,mo,Ji,Ze,Qc,Kr,Vc,Yc,Qi,uo,Vi,ye,et,Jr,_o,Zc,Qr,eh,Yi,z,th,Vr,oh,rh,Yr,ah,lh,Zr,sh,ih,Zi,tt,nh,ea,dh,ch,en,T,ta,oa,hh,fh,ra,aa,ph,mh,la,sa,uh,_h,ia,na,vh,Eh,da,ca,bh,wh,ha,fa,yh,kh,pa,ma,Th,gh,ua,_a,$h,tn,ke,ot,va,vo,Ch,Ea,Oh,on,ee,Dh,ba,xh,Ah,wa,zh,Ph,rn,rt,Lh,ya,Ih,Sh,an,Eo,ln,at,Mh,ka,jh,Fh,sn,bo,nn,rr,Bh,dn,wo,cn,Te,lt,Ta,yo,qh,ga,Nh,hn,st,Hh,$a,Rh,Wh,fn,ar,Gh,pn,ge,it,Ca,ko,Uh,Oa,Xh,mn,nt,Kh,Da,Jh,Qh,un,dt,Vh,xa,Yh,Zh,_n,To,vn,ct,ef,Aa,tf,of,En,go,bn,$e,ht,za,$o,rf,Ce,af,Pa,lf,sf,La,nf,df,wn,ft,cf,Co,Ia,hf,ff,pf,yn,pt,mf,Sa,uf,_f,kn,Oe,mt,Ma,Oo,vf,ja,Ef,Tn,P,bf,Fa,wf,yf,Ba,kf,Tf,qa,gf,$f,gn,ut,Cf,Na,Of,Df,$n,Do,Cn,_t,xf,Ha,Af,zf,On,xo,Dn,lr,Pf,xn,Ao,An,De,vt,Ra,zo,Lf,Wa,If,zn,Et,Sf,Po,Mf,jf,Pn,sr,Ff,Ln,ir,Bf,In,b,L,Ga,qf,Nf,Ua,Hf,Rf,Xa,Wf,Gf,Ka,Uf,Xf,Kf,I,Ja,Jf,Qf,Qa,Vf,Yf,Va,Zf,ep,Ya,tp,op,rp,te,Za,ap,lp,el,sp,ip,tl,np,dp,cp,oe,ol,hp,fp,rl,pp,mp,al,up,_p,vp,re,ll,Ep,bp,sl,wp,yp,il,kp,Tp,gp,ae,nl,$p,Cp,dl,Op,Dp,cl,xp,Ap,zp,le,hl,Pp,Lp,fl,Ip,Sp,pl,Mp,jp,Fp,se,ml,Bp,qp,ul,Np,Hp,_l,Rp,Wp,Gp,ie,vl,Up,Xp,El,Kp,Jp,bl,Qp,Vp,Yp,ne,wl,Zp,em,yl,tm,om,kl,rm,am,lm,de,Tl,sm,im,gl,nm,dm,$l,cm,hm,Sn,nr,fm,Mn,S,bt,Cl,pm,mm,Ol,um,_m,vm,wt,Dl,Em,bm,xl,wm,ym,km,yt,Al,Tm,gm,zl,$m,Cm,Om,kt,Pl,Dm,xm,Ll,Am,zm,jn,dr,Pm,Fn,ce,xe,Lm,Il,Im,Sm,Sl,Mm,jm,Fm,Ae,Bm,Ml,qm,Nm,jl,Hm,Rm,Wm,ze,Gm,Fl,Um,Xm,Bl,Km,Jm,Bn,Tt,Qm,ql,Vm,Ym,qn,g,B,Zm,Nl,eu,tu,Hl,ou,ru,Rl,au,lu,su,q,iu,Wl,nu,du,Gl,cu,hu,Ul,fu,pu,mu,Pe,uu,Xl,_u,vu,Kl,Eu,bu,wu,N,yu,Jl,ku,Tu,Ql,gu,$u,Vl,Cu,Ou,Du,H,xu,Yl,Au,zu,Zl,Pu,Lu,es,Iu,Su,Mu,R,ju,ts,Fu,Bu,os,qu,Nu,rs,Hu,Ru,Wu,W,Gu,as,Uu,Xu,ls,Ku,Ju,ss,Qu,Vu,Yu,G,Zu,is,e_,t_,ns,o_,r_,ds,a_,l_,Nn,gt,s_,cs,i_,n_,Hn,D,U,d_,hs,c_,h_,fs,f_,p_,ps,m_,u_,__,X,v_,ms,E_,b_,us,w_,y_,_s,k_,T_,g_,K,$_,vs,C_,O_,Es,D_,x_,bs,A_,z_,P_,J,L_,ws,I_,S_,ys,M_,j_,ks,F_,B_,q_,Q,N_,Ts,H_,R_,gs,W_,G_,$s,U_,X_,Rn,$t,K_,Cs,J_,Q_,Wn,cr,V,V_,Os,Y_,Z_,Ds,ev,tv,xs,ov,rv,Gn,hr,av,Un,Ct,Le,lv,As,sv,iv,zs,nv,dv,cv,Ie,hv,Ps,fv,pv,Ls,mv,uv,Xn,fr,_v,Kn,pr,Y,vv,Is,Ev,bv,Ss,wv,yv,Ms,kv,Tv,Jn,Se,Ot,js,Lo,gv,Fs,$v,Qn,Dt,Cv,Bs,Ov,Dv,Vn,Me,xt,qs,Io,xv,je,Av,Ns,zv,Pv,Hs,Lv,Iv,Yn,M,Sv,Rs,Mv,jv,Ws,Fv,Bv,Gs,qv,Nv,Zn,At,Hv,Us,Rv,Wv,ed,zt,Gv,Xs,Uv,Xv,td,Fe,Pt,Ks,So,Kv,Js,Jv,od,Lt,Qv,Qs,Vv,Yv,rd,Be,It,Vs,Mo,Zv,mr,e1,Ys,t1,ad,he,o1,Zs,r1,a1,ei,l1,s1,ld,St,i1,jo,n1,d1,sd,Mt,c1,ti,h1,f1,id,fe,p1,oi,m1,u1,ri,_1,v1,nd,Fo,dd,qe,jt,ai,Bo,E1,li,b1,cd,Ft,w1,si,y1,k1,hd,Bt,ii,Ne,T1,ni,g1,$1,di,C1,O1,D1,ci,w,x1,hi,A1,z1,fi,P1,L1,pi,I1,S1,mi,M1,j1,ui,F1,B1,_i,q1,N1,vi,H1,R1,Ei,W1,G1,bi,U1,X1,wi,K1,J1,fd,qt,Q1,yi,V1,Y1,pd,ur,Z1,md,qo,ud,He,Nt,ki,No,eE,Ti,tE,_d,j,oE,gi,rE,aE,$i,lE,sE,Ci,iE,nE,vd,pe,Oi,dE,cE,Di,hE,fE,xi,pE,Ed,me,mE,Ai,uE,_E,zi,vE,EE,bd,Ht,bE,Ho,wE,yE,wd,ue,kE,Pi,TE,gE,Li,$E,CE,yd,Ro,kd;return no=new $({}),co=new $({}),ho=new $({}),fo=new $({}),mo=new x({props:{code:`from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")`,highlighted:`<span class="hljs-keyword">from</span> transformers <span 
class="hljs-keyword">import</span> AutoTokenizer tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;bert-base-cased&quot;</span>)`}}),uo=new x({props:{code:`from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("bert-base-cased", use_fast=False)`,highlighted:`<span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;bert-base-cased&quot;</span>, use_fast=<span class="hljs-literal">False</span>)`}}),_o=new $({}),vo=new $({}),Eo=new x({props:{code:"pip install transformers",highlighted:"pip install transformers"}}),bo=new x({props:{code:"pip install transformers[sentencepiece]",highlighted:"pip install transformers[sentencepiece]"}}),wo=new x({props:{code:"pip install transformers sentencepiece",highlighted:"pip install transformers sentencepiece"}}),yo=new $({}),ko=new $({}),To=new x({props:{code:"from transformers.modeling_bert import BertLayer",highlighted:"from transformers.modeling_bert import BertLayer"}}),go=new x({props:{code:"from transformers.models.bert.modeling_bert import BertLayer",highlighted:"from transformers.models.bert.modeling_bert import BertLayer"}}),$o=new $({}),Oo=new $({}),Do=new x({props:{code:`model = BertModel.from_pretrained("bert-base-cased") outputs = model(**inputs)`,highlighted:`model = BertModel.from_pretrained(<span class="hljs-string">&quot;bert-base-cased&quot;</span>) outputs = model(**inputs)`}}),xo=new x({props:{code:`model = BertModel.from_pretrained("bert-base-cased") outputs = model(**inputs, return_dict=False)`,highlighted:`model = BertModel.from_pretrained(<span class="hljs-string">&quot;bert-base-cased&quot;</span>) outputs = model(**inputs, return_dict=False)`}}),Ao=new x({props:{code:`model = BertModel.from_pretrained("bert-base-cased", return_dict=False) outputs = model(**inputs)`,highlighted:`model = BertModel.from_pretrained(<span class="hljs-string">&quot;bert-base-cased&quot;</span>, return_dict=False) outputs = model(**inputs)`}}),zo=new $({}),Lo=new $({}),Io=new $({}),So=new $({}),Mo=new $({}),Fo=new x({props:{code:`# Let's load our model model = BertForSequenceClassification.from_pretrained("bert-base-uncased") # If you used to have this line in pytorch-pretrained-bert: loss = model(input_ids, labels=labels) # Now just use this line in \u{1F917} Transformers to extract the loss from the output tuple: outputs = model(input_ids, labels=labels) loss = outputs[0] # In \u{1F917} Transformers you can also have access to the logits: loss, logits = outputs[:2] # And even the attention weights if you configure the model to output them (and other outputs too, see the docstrings and documentation) model = BertForSequenceClassification.from_pretrained("bert-base-uncased", output_attentions=True) outputs = model(input_ids, labels=labels) loss, logits, attentions = outputs`,highlighted:`<span class="hljs-comment"># Let&#x27;s load our model</span> model = BertForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;bert-base-uncased&quot;</span>) <span class="hljs-comment"># If you used to have this line in pytorch-pretrained-bert:</span> loss = model(input_ids, labels=labels) <span class="hljs-comment"># Now just use this line in \u{1F917} Transformers to extract the loss from the output tuple:</span> outputs = model(input_ids, labels=labels) loss = outputs[<span class="hljs-number">0</span>] <span class="hljs-comment"># In \u{1F917} Transformers you can also 
have access to the logits:</span> loss, logits = outputs[:<span class="hljs-number">2</span>] <span class="hljs-comment"># And even the attention weights if you configure the model to output them (and other outputs too, see the docstrings and documentation)</span> model = BertForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;bert-base-uncased&quot;</span>, output_attentions=<span class="hljs-literal">True</span>) outputs = model(input_ids, labels=labels) loss, logits, attentions = outputs`}}),Bo=new $({}),qo=new x({props:{code:`### Let's load a model and tokenizer model = BertForSequenceClassification.from_pretrained("bert-base-uncased") tokenizer = BertTokenizer.from_pretrained("bert-base-uncased") ### Do some stuff to our model and tokenizer # Ex: add new tokens to the vocabulary and embeddings of our model tokenizer.add_tokens(["[SPECIAL_TOKEN_1]", "[SPECIAL_TOKEN_2]"]) model.resize_token_embeddings(len(tokenizer)) # Train our model train(model) ### Now let's save our model and tokenizer to a directory model.save_pretrained("./my_saved_model_directory/") tokenizer.save_pretrained("./my_saved_model_directory/") ### Reload the model and the tokenizer model = BertForSequenceClassification.from_pretrained("./my_saved_model_directory/") tokenizer = BertTokenizer.from_pretrained("./my_saved_model_directory/")`,highlighted:`<span class="hljs-comment">### Let&#x27;s load a model and tokenizer</span> model = BertForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;bert-base-uncased&quot;</span>) tokenizer = BertTokenizer.from_pretrained(<span class="hljs-string">&quot;bert-base-uncased&quot;</span>) <span class="hljs-comment">### Do some stuff to our model and tokenizer</span> <span class="hljs-comment"># Ex: add new tokens to the vocabulary and embeddings of our model</span> tokenizer.add_tokens([<span class="hljs-string">&quot;[SPECIAL_TOKEN_1]&quot;</span>, <span class="hljs-string">&quot;[SPECIAL_TOKEN_2]&quot;</span>]) model.resize_token_embeddings(<span class="hljs-built_in">len</span>(tokenizer)) <span class="hljs-comment"># Train our model</span> train(model) <span class="hljs-comment">### Now let&#x27;s save our model and tokenizer to a directory</span> model.save_pretrained(<span class="hljs-string">&quot;./my_saved_model_directory/&quot;</span>) tokenizer.save_pretrained(<span class="hljs-string">&quot;./my_saved_model_directory/&quot;</span>) <span class="hljs-comment">### Reload the model and the tokenizer</span> model = BertForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;./my_saved_model_directory/&quot;</span>) tokenizer = BertTokenizer.from_pretrained(<span class="hljs-string">&quot;./my_saved_model_directory/&quot;</span>)`}}),No=new $({}),Ro=new x({props:{code:`# Parameters: lr = 1e-3 max_grad_norm = 1.0 num_training_steps = 1000 num_warmup_steps = 100 warmup_proportion = float(num_warmup_steps) / float(num_training_steps) # 0.1 ### Previously BertAdam optimizer was instantiated like this: optimizer = BertAdam( model.parameters(), lr=lr, schedule="warmup_linear", warmup=warmup_proportion, num_training_steps=num_training_steps, ) ### and used like this: for batch in train_data: loss = model(batch) loss.backward() optimizer.step() ### In \u{1F917} Transformers, optimizer and schedules are split and instantiated like this: optimizer = AdamW( model.parameters(), lr=lr, correct_bias=False ) # To reproduce BertAdam specific behavior set correct_bias=False scheduler = get_linear_schedule_with_warmup( optimizer, 
num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps ) # PyTorch scheduler ### and used like this: for batch in train_data: loss = model(batch) loss.backward() torch.nn.utils.clip_grad_norm_( model.parameters(), max_grad_norm ) # Gradient clipping is not in AdamW anymore (so you can use amp without issue) optimizer.step() scheduler.step()`,highlighted:`<span class="hljs-comment"># Parameters:</span> lr = <span class="hljs-number">1e-3</span> max_grad_norm = <span class="hljs-number">1.0</span> num_training_steps = <span class="hljs-number">1000</span> num_warmup_steps = <span class="hljs-number">100</span> warmup_proportion = <span class="hljs-built_in">float</span>(num_warmup_steps) / <span class="hljs-built_in">float</span>(num_training_steps) <span class="hljs-comment"># 0.1</span> <span class="hljs-comment">### Previously BertAdam optimizer was instantiated like this:</span> optimizer = BertAdam( model.parameters(), lr=lr, schedule=<span class="hljs-string">&quot;warmup_linear&quot;</span>, warmup=warmup_proportion, num_training_steps=num_training_steps, ) <span class="hljs-comment">### and used like this:</span> <span class="hljs-keyword">for</span> batch <span class="hljs-keyword">in</span> train_data: loss = model(batch) loss.backward() optimizer.step() <span class="hljs-comment">### In \u{1F917} Transformers, optimizer and schedules are split and instantiated like this:</span> optimizer = AdamW( model.parameters(), lr=lr, correct_bias=<span class="hljs-literal">False</span> ) <span class="hljs-comment"># To reproduce BertAdam specific behavior set correct_bias=False</span> scheduler = get_linear_schedule_with_warmup( optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps ) <span class="hljs-comment"># PyTorch scheduler</span> <span class="hljs-comment">### and used like this:</span> <span class="hljs-keyword">for</span> batch <span class="hljs-keyword">in</span> train_data: loss = model(batch) loss.backward() torch.nn.utils.clip_grad_norm_( model.parameters(), max_grad_norm ) <span class="hljs-comment"># Gradient clipping is not in AdamW anymore (so you can use amp without issue)</span> optimizer.step() scheduler.step()`}}),{c(){Z=l("meta"),Zo=c(),A=l("h1"),F=l("a"),Lr=l("span"),p(no.$$.fragment),bc=c(),Ir=l("span"),wc=r("Migrating from previous packages"),Bi=c(),ve=l("h2"),Ge=l("a"),Sr=l("span"),p(co.$$.fragment),yc=c(),Ue=l("span"),kc=r("Migrating from transformers "),Mr=l("code"),Tc=r("v3.x"),gc=r(" to "),jr=l("code"),$c=r("v4.x"),qi=c(),er=l("p"),Cc=r(`A couple of changes were introduced when the switch from version 3 to version 4 was done. Below is a summary of the expected changes:`),Ni=c(),Ee=l("h4"),Xe=l("a"),Fr=l("span"),p(ho.$$.fragment),Oc=c(),Br=l("span"),Dc=r("1. AutoTokenizers and pipelines now use fast (rust) tokenizers by default."),Hi=c(),tr=l("p"),xc=r("The python and rust tokenizers have roughly the same API, but the rust tokenizers have a more complete feature set."),Ri=c(),or=l("p"),Ac=r("This introduces two breaking changes:"),Wi=c(),Ke=l("ul"),qr=l("li"),zc=r("The handling of overflowing tokens between the python and rust tokenizers is different."),Pc=c(),Nr=l("li"),Lc=r("The rust tokenizers do not accept integers in the encoding methods."),Gi=c(),be=l("h5"),Je=l("a"),Hr=l("span"),p(fo.$$.fragment),Ic=c(),Rr=l("span"),Sc=r("How to obtain the same behavior as v3.x in v4.x"),Ui=c(),Qe=l("ul"),po=l("li"),Mc=r("The pipelines now contain additional features out of the box. 
See the "),Ve=l("a"),jc=r("token-classification pipeline with the "),Wr=l("code"),Fc=r("grouped_entities"),Bc=r(" flag"),qc=r("."),Nc=c(),we=l("li"),Hc=r("The auto-tokenizers now return rust tokenizers. In order to obtain the python tokenizers instead, the user may use the "),Gr=l("code"),Rc=r("use_fast"),Wc=r(" flag by setting it to "),Ur=l("code"),Gc=r("False"),Uc=r(":"),Xi=c(),Ye=l("p"),Xc=r("In version "),Xr=l("code"),Kc=r("v3.x"),Jc=r(":"),Ki=c(),p(mo.$$.fragment),Ji=c(),Ze=l("p"),Qc=r("to obtain the same in version "),Kr=l("code"),Vc=r("v4.x"),Yc=r(":"),Qi=c(),p(uo.$$.fragment),Vi=c(),ye=l("h4"),et=l("a"),Jr=l("span"),p(_o.$$.fragment),Zc=c(),Qr=l("span"),eh=r("2. SentencePiece is removed from the required dependencies"),Yi=c(),z=l("p"),th=r("The requirement on the SentencePiece dependency has been lifted from the "),Vr=l("code"),oh=r("setup.py"),rh=r(". This is done so that we may have a channel on anaconda cloud without relying on "),Yr=l("code"),ah=r("conda-forge"),lh=r(". This means that the tokenizers that depend on the SentencePiece library will not be available with a standard "),Zr=l("code"),sh=r("transformers"),ih=r(" installation."),Zi=c(),tt=l("p"),nh=r("This includes the "),ea=l("strong"),dh=r("slow"),ch=r(" versions of:"),en=c(),T=l("ul"),ta=l("li"),oa=l("code"),hh=r("XLNetTokenizer"),fh=c(),ra=l("li"),aa=l("code"),ph=r("AlbertTokenizer"),mh=c(),la=l("li"),sa=l("code"),uh=r("CamembertTokenizer"),_h=c(),ia=l("li"),na=l("code"),vh=r("MBartTokenizer"),Eh=c(),da=l("li"),ca=l("code"),bh=r("PegasusTokenizer"),wh=c(),ha=l("li"),fa=l("code"),yh=r("T5Tokenizer"),kh=c(),pa=l("li"),ma=l("code"),Th=r("ReformerTokenizer"),gh=c(),ua=l("li"),_a=l("code"),$h=r("XLMRobertaTokenizer"),tn=c(),ke=l("h5"),ot=l("a"),va=l("span"),p(vo.$$.fragment),Ch=c(),Ea=l("span"),Oh=r("How to obtain the same behavior as v3.x in v4.x"),on=c(),ee=l("p"),Dh=r("In order to obtain the same behavior as version "),ba=l("code"),xh=r("v3.x"),Ah=r(", you should install "),wa=l("code"),zh=r("sentencepiece"),Ph=r(" additionally:"),rn=c(),rt=l("p"),Lh=r("In version "),ya=l("code"),Ih=r("v3.x"),Sh=r(":"),an=c(),p(Eo.$$.fragment),ln=c(),at=l("p"),Mh=r("to obtain the same in version "),ka=l("code"),jh=r("v4.x"),Fh=r(":"),sn=c(),p(bo.$$.fragment),nn=c(),rr=l("p"),Bh=r("or"),dn=c(),p(wo.$$.fragment),cn=c(),Te=l("h4"),lt=l("a"),Ta=l("span"),p(yo.$$.fragment),qh=c(),ga=l("span"),Nh=r("3. The architecture of the repo has been updated so that each model resides in its folder"),hn=c(),st=l("p"),Hh=r("The past and foreseeable addition of new models means that the number of files in the directory "),$a=l("code"),Rh=r("src/transformers"),Wh=r(" keeps growing and becomes harder to navigate and understand. 
We made the choice to put each model and the files accompanying it in their own sub-directories."),fn=c(),ar=l("p"),Gh=r("This is a breaking change as importing intermediary layers using a model\u2019s module directly needs to be done via a different path."),pn=c(),ge=l("h5"),it=l("a"),Ca=l("span"),p(ko.$$.fragment),Uh=c(),Oa=l("span"),Xh=r("How to obtain the same behavior as v3.x in v4.x"),mn=c(),nt=l("p"),Kh=r("In order to obtain the same behavior as version "),Da=l("code"),Jh=r("v3.x"),Qh=r(", you should update the path used to access the layers."),un=c(),dt=l("p"),Vh=r("In version "),xa=l("code"),Yh=r("v3.x"),Zh=r(":"),_n=c(),p(To.$$.fragment),vn=c(),ct=l("p"),ef=r("to obtain the same in version "),Aa=l("code"),tf=r("v4.x"),of=r(":"),En=c(),p(go.$$.fragment),bn=c(),$e=l("h4"),ht=l("a"),za=l("span"),p($o.$$.fragment),rf=c(),Ce=l("span"),af=r("4. Switching the "),Pa=l("code"),lf=r("return_dict"),sf=r(" argument to "),La=l("code"),nf=r("True"),df=r(" by default"),wn=c(),ft=l("p"),cf=r("The "),Co=l("a"),Ia=l("code"),hf=r("return_dict"),ff=r(" argument"),pf=r(" enables the return of dict-like python objects containing the model outputs, instead of the standard tuples. This object is self-documented as keys can be used to retrieve values, while also behaving as a tuple as users may retrieve objects by index or by slice."),yn=c(),pt=l("p"),mf=r("This is a breaking change as the limitation of that tuple is that it cannot be unpacked: "),Sa=l("code"),uf=r("value0, value1 = outputs"),_f=r(" will not work."),kn=c(),Oe=l("h5"),mt=l("a"),Ma=l("span"),p(Oo.$$.fragment),vf=c(),ja=l("span"),Ef=r("How to obtain the same behavior as v3.x in v4.x"),Tn=c(),P=l("p"),bf=r("In order to obtain the same behavior as version "),Fa=l("code"),wf=r("v3.x"),yf=r(", you should specify the "),Ba=l("code"),kf=r("return_dict"),Tf=r(" argument to "),qa=l("code"),gf=r("False"),$f=r(", either in the model configuration or during the forward pass."),gn=c(),ut=l("p"),Cf=r("In version "),Na=l("code"),Of=r("v3.x"),Df=r(":"),$n=c(),p(Do.$$.fragment),Cn=c(),_t=l("p"),xf=r("to obtain the same in version "),Ha=l("code"),Af=r("v4.x"),zf=r(":"),On=c(),p(xo.$$.fragment),Dn=c(),lr=l("p"),Pf=r("or"),xn=c(),p(Ao.$$.fragment),An=c(),De=l("h4"),vt=l("a"),Ra=l("span"),p(zo.$$.fragment),Lf=c(),Wa=l("span"),If=r("5. Removed some deprecated attributes"),zn=c(),Et=l("p"),Sf=r("Attributes that were deprecated have been removed if they had been deprecated for at least a month. 
The full list of deprecated attributes can be found in "),Po=l("a"),Mf=r("#8604"),jf=r("."),Pn=c(),sr=l("p"),Ff=r("Here is a list of these attributes/methods/arguments and what their replacements should be:"),Ln=c(),ir=l("p"),Bf=r("In several models, the labels become consistent with the other models:"),In=c(),b=l("ul"),L=l("li"),Ga=l("code"),qf=r("masked_lm_labels"),Nf=r(" becomes "),Ua=l("code"),Hf=r("labels"),Rf=r(" in "),Xa=l("code"),Wf=r("AlbertForMaskedLM"),Gf=r(" and "),Ka=l("code"),Uf=r("AlbertForPreTraining"),Xf=r("."),Kf=c(),I=l("li"),Ja=l("code"),Jf=r("masked_lm_labels"),Qf=r(" becomes "),Qa=l("code"),Vf=r("labels"),Yf=r(" in "),Va=l("code"),Zf=r("BertForMaskedLM"),ep=r(" and "),Ya=l("code"),tp=r("BertForPreTraining"),op=r("."),rp=c(),te=l("li"),Za=l("code"),ap=r("masked_lm_labels"),lp=r(" becomes "),el=l("code"),sp=r("labels"),ip=r(" in "),tl=l("code"),np=r("DistilBertForMaskedLM"),dp=r("."),cp=c(),oe=l("li"),ol=l("code"),hp=r("masked_lm_labels"),fp=r(" becomes "),rl=l("code"),pp=r("labels"),mp=r(" in "),al=l("code"),up=r("ElectraForMaskedLM"),_p=r("."),vp=c(),re=l("li"),ll=l("code"),Ep=r("masked_lm_labels"),bp=r(" becomes "),sl=l("code"),wp=r("labels"),yp=r(" in "),il=l("code"),kp=r("LongformerForMaskedLM"),Tp=r("."),gp=c(),ae=l("li"),nl=l("code"),$p=r("masked_lm_labels"),Cp=r(" becomes "),dl=l("code"),Op=r("labels"),Dp=r(" in "),cl=l("code"),xp=r("MobileBertForMaskedLM"),Ap=r("."),zp=c(),le=l("li"),hl=l("code"),Pp=r("masked_lm_labels"),Lp=r(" becomes "),fl=l("code"),Ip=r("labels"),Sp=r(" in "),pl=l("code"),Mp=r("RobertaForMaskedLM"),jp=r("."),Fp=c(),se=l("li"),ml=l("code"),Bp=r("lm_labels"),qp=r(" becomes "),ul=l("code"),Np=r("labels"),Hp=r(" in "),_l=l("code"),Rp=r("BartForConditionalGeneration"),Wp=r("."),Gp=c(),ie=l("li"),vl=l("code"),Up=r("lm_labels"),Xp=r(" becomes "),El=l("code"),Kp=r("labels"),Jp=r(" in "),bl=l("code"),Qp=r("GPT2DoubleHeadsModel"),Vp=r("."),Yp=c(),ne=l("li"),wl=l("code"),Zp=r("lm_labels"),em=r(" becomes "),yl=l("code"),tm=r("labels"),om=r(" in "),kl=l("code"),rm=r("OpenAIGPTDoubleHeadsModel"),am=r("."),lm=c(),de=l("li"),Tl=l("code"),sm=r("lm_labels"),im=r(" becomes "),gl=l("code"),nm=r("labels"),dm=r(" in "),$l=l("code"),cm=r("T5ForConditionalGeneration"),hm=r("."),Sn=c(),nr=l("p"),fm=r("In several models, the caching mechanism becomes consistent with the other models:"),Mn=c(),S=l("ul"),bt=l("li"),Cl=l("code"),pm=r("decoder_cached_states"),mm=r(" becomes "),Ol=l("code"),um=r("past_key_values"),_m=r(" in all BART-like, FSMT and T5 models."),vm=c(),wt=l("li"),Dl=l("code"),Em=r("decoder_past_key_values"),bm=r(" becomes "),xl=l("code"),wm=r("past_key_values"),ym=r(" in all BART-like, FSMT and T5 models."),km=c(),yt=l("li"),Al=l("code"),Tm=r("past"),gm=r(" becomes "),zl=l("code"),$m=r("past_key_values"),Cm=r(" in all CTRL models."),Om=c(),kt=l("li"),Pl=l("code"),Dm=r("past"),xm=r(" becomes "),Ll=l("code"),Am=r("past_key_values"),zm=r(" in all GPT-2 models."),jn=c(),dr=l("p"),Pm=r("Regarding the tokenizer classes:"),Fn=c(),ce=l("ul"),xe=l("li"),Lm=r("The tokenizer attribute "),Il=l("code"),Im=r("max_len"),Sm=r(" becomes "),Sl=l("code"),Mm=r("model_max_length"),jm=r("."),Fm=c(),Ae=l("li"),Bm=r("The tokenizer attribute "),Ml=l("code"),qm=r("return_lengths"),Nm=r(" becomes "),jl=l("code"),Hm=r("return_length"),Rm=r("."),Wm=c(),ze=l("li"),Gm=r("The tokenizer encoding argument "),Fl=l("code"),Um=r("is_pretokenized"),Xm=r(" becomes "),Bl=l("code"),Km=r("is_split_into_words"),Jm=r("."),Bn=c(),Tt=l("p"),Qm=r("Regarding the "),ql=l("code"),Vm=r("Trainer"),Ym=r(" 
class:"),qn=c(),g=l("ul"),B=l("li"),Zm=r("The "),Nl=l("code"),eu=r("Trainer"),tu=r(" argument "),Hl=l("code"),ou=r("tb_writer"),ru=r(" is removed in favor of the callback "),Rl=l("code"),au=r("TensorBoardCallback(tb_writer=...)"),lu=r("."),su=c(),q=l("li"),iu=r("The "),Wl=l("code"),nu=r("Trainer"),du=r(" argument "),Gl=l("code"),cu=r("prediction_loss_only"),hu=r(" is removed in favor of the class argument "),Ul=l("code"),fu=r("args.prediction_loss_only"),pu=r("."),mu=c(),Pe=l("li"),uu=r("The "),Xl=l("code"),_u=r("Trainer"),vu=r(" attribute "),Kl=l("code"),Eu=r("data_collator"),bu=r(" should be a callable."),wu=c(),N=l("li"),yu=r("The "),Jl=l("code"),ku=r("Trainer"),Tu=r(" method "),Ql=l("code"),gu=r("_log"),$u=r(" is deprecated in favor of "),Vl=l("code"),Cu=r("log"),Ou=r("."),Du=c(),H=l("li"),xu=r("The "),Yl=l("code"),Au=r("Trainer"),zu=r(" method "),Zl=l("code"),Pu=r("_training_step"),Lu=r(" is deprecated in favor of "),es=l("code"),Iu=r("training_step"),Su=r("."),Mu=c(),R=l("li"),ju=r("The "),ts=l("code"),Fu=r("Trainer"),Bu=r(" method "),os=l("code"),qu=r("_prediction_loop"),Nu=r(" is deprecated in favor of "),rs=l("code"),Hu=r("prediction_loop"),Ru=r("."),Wu=c(),W=l("li"),Gu=r("The "),as=l("code"),Uu=r("Trainer"),Xu=r(" method "),ls=l("code"),Ku=r("is_local_master"),Ju=r(" is deprecated in favor of "),ss=l("code"),Qu=r("is_local_process_zero"),Vu=r("."),Yu=c(),G=l("li"),Zu=r("The "),is=l("code"),e_=r("Trainer"),t_=r(" method "),ns=l("code"),o_=r("is_world_master"),r_=r(" is deprecated in favor of "),ds=l("code"),a_=r("is_world_process_zero"),l_=r("."),Nn=c(),gt=l("p"),s_=r("Regarding the "),cs=l("code"),i_=r("TFTrainer"),n_=r(" class:"),Hn=c(),D=l("ul"),U=l("li"),d_=r("The "),hs=l("code"),c_=r("TFTrainer"),h_=r(" argument "),fs=l("code"),f_=r("prediction_loss_only"),p_=r(" is removed in favor of the class argument "),ps=l("code"),m_=r("args.prediction_loss_only"),u_=r("."),__=c(),X=l("li"),v_=r("The "),ms=l("code"),E_=r("Trainer"),b_=r(" method "),us=l("code"),w_=r("_log"),y_=r(" is deprecated in favor of "),_s=l("code"),k_=r("log"),T_=r("."),g_=c(),K=l("li"),$_=r("The "),vs=l("code"),C_=r("TFTrainer"),O_=r(" method "),Es=l("code"),D_=r("_prediction_loop"),x_=r(" is deprecated in favor of "),bs=l("code"),A_=r("prediction_loop"),z_=r("."),P_=c(),J=l("li"),L_=r("The "),ws=l("code"),I_=r("TFTrainer"),S_=r(" method "),ys=l("code"),M_=r("_setup_wandb"),j_=r(" is deprecated in favor of "),ks=l("code"),F_=r("setup_wandb"),B_=r("."),q_=c(),Q=l("li"),N_=r("The "),Ts=l("code"),H_=r("TFTrainer"),R_=r(" method "),gs=l("code"),W_=r("_run_model"),G_=r(" is deprecated in favor of "),$s=l("code"),U_=r("run_model"),X_=r("."),Rn=c(),$t=l("p"),K_=r("Regarding the "),Cs=l("code"),J_=r("TrainingArguments"),Q_=r(" class:"),Wn=c(),cr=l("ul"),V=l("li"),V_=r("The "),Os=l("code"),Y_=r("TrainingArguments"),Z_=r(" argument "),Ds=l("code"),ev=r("evaluate_during_training"),tv=r(" is deprecated in favor of "),xs=l("code"),ov=r("evaluation_strategy"),rv=r("."),Gn=c(),hr=l("p"),av=r("Regarding the Transfo-XL model:"),Un=c(),Ct=l("ul"),Le=l("li"),lv=r("The Transfo-XL configuration attribute "),As=l("code"),sv=r("tie_weight"),iv=r(" becomes "),zs=l("code"),nv=r("tie_words_embeddings"),dv=r("."),cv=c(),Ie=l("li"),hv=r("The Transfo-XL modeling method "),Ps=l("code"),fv=r("reset_length"),pv=r(" becomes "),Ls=l("code"),mv=r("reset_memory_length"),uv=r("."),Xn=c(),fr=l("p"),_v=r("Regarding pipelines:"),Kn=c(),pr=l("ul"),Y=l("li"),vv=r("The "),Is=l("code"),Ev=r("FillMaskPipeline"),bv=r(" argument 
"),Ss=l("code"),wv=r("topk"),yv=r(" becomes "),Ms=l("code"),kv=r("top_k"),Tv=r("."),Jn=c(),Se=l("h2"),Ot=l("a"),js=l("span"),p(Lo.$$.fragment),gv=c(),Fs=l("span"),$v=r("Migrating from pytorch-transformers to \u{1F917} Transformers"),Qn=c(),Dt=l("p"),Cv=r("Here is a quick summary of what you should take care of when migrating from "),Bs=l("code"),Ov=r("pytorch-transformers"),Dv=r(" to \u{1F917} Transformers."),Vn=c(),Me=l("h3"),xt=l("a"),qs=l("span"),p(Io.$$.fragment),xv=c(),je=l("span"),Av=r("Positional order of some models' keywords inputs ("),Ns=l("code"),zv=r("attention_mask"),Pv=r(", "),Hs=l("code"),Lv=r("token_type_ids"),Iv=r("...) changed"),Yn=c(),M=l("p"),Sv=r("To be able to use Torchscript (see #1010, #1204 and #1195) the specific order of some models "),Rs=l("strong"),Mv=r("keywords inputs"),jv=r(" ("),Ws=l("code"),Fv=r("attention_mask"),Bv=r(", "),Gs=l("code"),qv=r("token_type_ids"),Nv=r("\u2026) has been changed."),Zn=c(),At=l("p"),Hv=r("If you used to call the models with keyword names for keyword arguments, e.g. "),Us=l("code"),Rv=r("model(inputs_ids, attention_mask=attention_mask, token_type_ids=token_type_ids)"),Wv=r(", this should not cause any change."),ed=c(),zt=l("p"),Gv=r("If you used to call the models with positional inputs for keyword arguments, e.g. "),Xs=l("code"),Uv=r("model(inputs_ids, attention_mask, token_type_ids)"),Xv=r(", you may have to double check the exact order of input arguments."),td=c(),Fe=l("h2"),Pt=l("a"),Ks=l("span"),p(So.$$.fragment),Kv=c(),Js=l("span"),Jv=r("Migrating from pytorch-pretrained-bert"),od=c(),Lt=l("p"),Qv=r("Here is a quick summary of what you should take care of when migrating from "),Qs=l("code"),Vv=r("pytorch-pretrained-bert"),Yv=r(" to \u{1F917} Transformers"),rd=c(),Be=l("h3"),It=l("a"),Vs=l("span"),p(Mo.$$.fragment),Zv=c(),mr=l("span"),e1=r("Models always output "),Ys=l("code"),t1=r("tuples"),ad=c(),he=l("p"),o1=r("The main breaking change when migrating from "),Zs=l("code"),r1=r("pytorch-pretrained-bert"),a1=r(" to \u{1F917} Transformers is that the models forward method always outputs a "),ei=l("code"),l1=r("tuple"),s1=r(" with various elements depending on the model and the configuration parameters."),ld=c(),St=l("p"),i1=r("The exact content of the tuples for each model are detailed in the models\u2019 docstrings and the "),jo=l("a"),n1=r("documentation"),d1=r("."),sd=c(),Mt=l("p"),c1=r("In pretty much every case, you will be fine by taking the first element of the output as the output you previously used in "),ti=l("code"),h1=r("pytorch-pretrained-bert"),f1=r("."),id=c(),fe=l("p"),p1=r("Here is a "),oi=l("code"),m1=r("pytorch-pretrained-bert"),u1=r(" to \u{1F917} Transformers conversion example for a "),ri=l("code"),_1=r("BertForSequenceClassification"),v1=r(" classification model:"),nd=c(),p(Fo.$$.fragment),dd=c(),qe=l("h3"),jt=l("a"),ai=l("span"),p(Bo.$$.fragment),E1=c(),li=l("span"),b1=r("Serialization"),cd=c(),Ft=l("p"),w1=r("Breaking change in the "),si=l("code"),y1=r("from_pretrained()"),k1=r("method:"),hd=c(),Bt=l("ol"),ii=l("li"),Ne=l("p"),T1=r("Models are now set in evaluation mode by default when instantiated with the "),ni=l("code"),g1=r("from_pretrained()"),$1=r(" method. 
To train them don\u2019t forget to set them back in training mode ("),di=l("code"),C1=r("model.train()"),O1=r(") to activate the dropout modules."),D1=c(),ci=l("li"),w=l("p"),x1=r("The additional "),hi=l("code"),A1=r("*inputs"),z1=r(" and "),fi=l("code"),P1=r("**kwargs"),L1=r(" arguments supplied to the "),pi=l("code"),I1=r("from_pretrained()"),S1=r(" method used to be directly passed to the underlying model\u2019s class "),mi=l("code"),M1=r("__init__()"),j1=r(" method. They are now used to update the model configuration attribute first which can break derived model classes build based on the previous "),ui=l("code"),F1=r("BertForSequenceClassification"),B1=r(" examples. More precisely, the positional arguments "),_i=l("code"),q1=r("*inputs"),N1=r(" provided to "),vi=l("code"),H1=r("from_pretrained()"),R1=r(" are directly forwarded the model "),Ei=l("code"),W1=r("__init__()"),G1=r(" method while the keyword arguments "),bi=l("code"),U1=r("**kwargs"),X1=r(" (i) which match configuration class attributes are used to update said attributes (ii) which don\u2019t match any configuration class attributes are forwarded to the model "),wi=l("code"),K1=r("__init__()"),J1=r(" method."),fd=c(),qt=l("p"),Q1=r("Also, while not a breaking change, the serialization methods have been standardized and you probably should switch to the new method "),yi=l("code"),V1=r("save_pretrained(save_directory)"),Y1=r(" if you were using any other serialization method before."),pd=c(),ur=l("p"),Z1=r("Here is an example:"),md=c(),p(qo.$$.fragment),ud=c(),He=l("h3"),Nt=l("a"),ki=l("span"),p(No.$$.fragment),eE=c(),Ti=l("span"),tE=r("Optimizers: BertAdam & OpenAIAdam are now AdamW, schedules are standard PyTorch schedules"),_d=c(),j=l("p"),oE=r("The two optimizers previously included, "),gi=l("code"),rE=r("BertAdam"),aE=r(" and "),$i=l("code"),lE=r("OpenAIAdam"),sE=r(", have been replaced by a single "),Ci=l("code"),iE=r("AdamW"),nE=r(" optimizer which has a few differences:"),vd=c(),pe=l("ul"),Oi=l("li"),dE=r("it only implements weights decay correction,"),cE=c(),Di=l("li"),hE=r("schedules are now externals (see below),"),fE=c(),xi=l("li"),pE=r("gradient clipping is now also external (see below)."),Ed=c(),me=l("p"),mE=r("The new optimizer "),Ai=l("code"),uE=r("AdamW"),_E=r(" matches PyTorch "),zi=l("code"),vE=r("Adam"),EE=r(" optimizer API and let you use standard PyTorch or apex methods for the schedule and clipping."),bd=c(),Ht=l("p"),bE=r("The schedules are now standard "),Ho=l("a"),wE=r("PyTorch learning rate schedulers"),yE=r(" and not part of the optimizer anymore."),wd=c(),ue=l("p"),kE=r("Here is a conversion examples from "),Pi=l("code"),TE=r("BertAdam"),gE=r(" with a linear warmup and decay schedule to "),Li=l("code"),$E=r("AdamW"),CE=r(" and the same schedule:"),yd=c(),p(Ro.$$.fragment),this.h()},l(t){const n=mw('[data-svelte="svelte-1phssyn"]',document.head);Z=s(n,"META",{name:!0,content:!0}),n.forEach(o),Zo=h(t),A=s(t,"H1",{class:!0});var Td=i(A);F=s(Td,"A",{id:!0,class:!0,href:!0});var xE=i(F);Lr=s(xE,"SPAN",{});var AE=i(Lr);m(no.$$.fragment,AE),AE.forEach(o),xE.forEach(o),bc=h(Td),Ir=s(Td,"SPAN",{});var zE=i(Ir);wc=a(zE,"Migrating from previous packages"),zE.forEach(o),Td.forEach(o),Bi=h(t),ve=s(t,"H2",{class:!0});var gd=i(ve);Ge=s(gd,"A",{id:!0,class:!0,href:!0});var PE=i(Ge);Sr=s(PE,"SPAN",{});var LE=i(Sr);m(co.$$.fragment,LE),LE.forEach(o),PE.forEach(o),yc=h(gd),Ue=s(gd,"SPAN",{});var Ii=i(Ue);kc=a(Ii,"Migrating from transformers "),Mr=s(Ii,"CODE",{});var IE=i(Mr);Tc=a(IE,"v3.x"),IE.forEach(o),gc=a(Ii," 
to "),jr=s(Ii,"CODE",{});var SE=i(jr);$c=a(SE,"v4.x"),SE.forEach(o),Ii.forEach(o),gd.forEach(o),qi=h(t),er=s(t,"P",{});var ME=i(er);Cc=a(ME,`A couple of changes were introduced when the switch from version 3 to version 4 was done. Below is a summary of the expected changes:`),ME.forEach(o),Ni=h(t),Ee=s(t,"H4",{class:!0});var $d=i(Ee);Xe=s($d,"A",{id:!0,class:!0,href:!0});var jE=i(Xe);Fr=s(jE,"SPAN",{});var FE=i(Fr);m(ho.$$.fragment,FE),FE.forEach(o),jE.forEach(o),Oc=h($d),Br=s($d,"SPAN",{});var BE=i(Br);Dc=a(BE,"1. AutoTokenizers and pipelines now use fast (rust) tokenizers by default."),BE.forEach(o),$d.forEach(o),Hi=h(t),tr=s(t,"P",{});var qE=i(tr);xc=a(qE,"The python and rust tokenizers have roughly the same API, but the rust tokenizers have a more complete feature set."),qE.forEach(o),Ri=h(t),or=s(t,"P",{});var NE=i(or);Ac=a(NE,"This introduces two breaking changes:"),NE.forEach(o),Wi=h(t),Ke=s(t,"UL",{});var Cd=i(Ke);qr=s(Cd,"LI",{});var HE=i(qr);zc=a(HE,"The handling of overflowing tokens between the python and rust tokenizers is different."),HE.forEach(o),Pc=h(Cd),Nr=s(Cd,"LI",{});var RE=i(Nr);Lc=a(RE,"The rust tokenizers do not accept integers in the encoding methods."),RE.forEach(o),Cd.forEach(o),Gi=h(t),be=s(t,"H5",{class:!0});var Od=i(be);Je=s(Od,"A",{id:!0,class:!0,href:!0});var WE=i(Je);Hr=s(WE,"SPAN",{});var GE=i(Hr);m(fo.$$.fragment,GE),GE.forEach(o),WE.forEach(o),Ic=h(Od),Rr=s(Od,"SPAN",{});var UE=i(Rr);Sc=a(UE,"How to obtain the same behavior as v3.x in v4.x"),UE.forEach(o),Od.forEach(o),Ui=h(t),Qe=s(t,"UL",{});var Dd=i(Qe);po=s(Dd,"LI",{});var xd=i(po);Mc=a(xd,"The pipelines now contain additional features out of the box. See the "),Ve=s(xd,"A",{href:!0});var Ad=i(Ve);jc=a(Ad,"token-classification pipeline with the "),Wr=s(Ad,"CODE",{});var XE=i(Wr);Fc=a(XE,"grouped_entities"),XE.forEach(o),Bc=a(Ad," flag"),Ad.forEach(o),qc=a(xd,"."),xd.forEach(o),Nc=h(Dd),we=s(Dd,"LI",{});var _r=i(we);Hc=a(_r,"The auto-tokenizers now return rust tokenizers. In order to obtain the python tokenizers instead, the user may use the "),Gr=s(_r,"CODE",{});var KE=i(Gr);Rc=a(KE,"use_fast"),KE.forEach(o),Wc=a(_r," flag by setting it to "),Ur=s(_r,"CODE",{});var JE=i(Ur);Gc=a(JE,"False"),JE.forEach(o),Uc=a(_r,":"),_r.forEach(o),Dd.forEach(o),Xi=h(t),Ye=s(t,"P",{});var zd=i(Ye);Xc=a(zd,"In version "),Xr=s(zd,"CODE",{});var QE=i(Xr);Kc=a(QE,"v3.x"),QE.forEach(o),Jc=a(zd,":"),zd.forEach(o),Ki=h(t),m(mo.$$.fragment,t),Ji=h(t),Ze=s(t,"P",{});var Pd=i(Ze);Qc=a(Pd,"to obtain the same in version "),Kr=s(Pd,"CODE",{});var VE=i(Kr);Vc=a(VE,"v4.x"),VE.forEach(o),Yc=a(Pd,":"),Pd.forEach(o),Qi=h(t),m(uo.$$.fragment,t),Vi=h(t),ye=s(t,"H4",{class:!0});var Ld=i(ye);et=s(Ld,"A",{id:!0,class:!0,href:!0});var YE=i(et);Jr=s(YE,"SPAN",{});var ZE=i(Jr);m(_o.$$.fragment,ZE),ZE.forEach(o),YE.forEach(o),Zc=h(Ld),Qr=s(Ld,"SPAN",{});var eb=i(Qr);eh=a(eb,"2. SentencePiece is removed from the required dependencies"),eb.forEach(o),Ld.forEach(o),Yi=h(t),z=s(t,"P",{});var Rt=i(z);th=a(Rt,"The requirement on the SentencePiece dependency has been lifted from the "),Vr=s(Rt,"CODE",{});var tb=i(Vr);oh=a(tb,"setup.py"),tb.forEach(o),rh=a(Rt,". This is done so that we may have a channel on anaconda cloud without relying on "),Yr=s(Rt,"CODE",{});var ob=i(Yr);ah=a(ob,"conda-forge"),ob.forEach(o),lh=a(Rt,". 
This means that the tokenizers that depend on the SentencePiece library will not be available with a standard "),Zr=s(Rt,"CODE",{});var rb=i(Zr);sh=a(rb,"transformers"),rb.forEach(o),ih=a(Rt," installation."),Rt.forEach(o),Zi=h(t),tt=s(t,"P",{});var Id=i(tt);nh=a(Id,"This includes the "),ea=s(Id,"STRONG",{});var ab=i(ea);dh=a(ab,"slow"),ab.forEach(o),ch=a(Id," versions of:"),Id.forEach(o),en=h(t),T=s(t,"UL",{});var C=i(T);ta=s(C,"LI",{});var lb=i(ta);oa=s(lb,"CODE",{});var sb=i(oa);hh=a(sb,"XLNetTokenizer"),sb.forEach(o),lb.forEach(o),fh=h(C),ra=s(C,"LI",{});var ib=i(ra);aa=s(ib,"CODE",{});var nb=i(aa);ph=a(nb,"AlbertTokenizer"),nb.forEach(o),ib.forEach(o),mh=h(C),la=s(C,"LI",{});var db=i(la);sa=s(db,"CODE",{});var cb=i(sa);uh=a(cb,"CamembertTokenizer"),cb.forEach(o),db.forEach(o),_h=h(C),ia=s(C,"LI",{});var hb=i(ia);na=s(hb,"CODE",{});var fb=i(na);vh=a(fb,"MBartTokenizer"),fb.forEach(o),hb.forEach(o),Eh=h(C),da=s(C,"LI",{});var pb=i(da);ca=s(pb,"CODE",{});var mb=i(ca);bh=a(mb,"PegasusTokenizer"),mb.forEach(o),pb.forEach(o),wh=h(C),ha=s(C,"LI",{});var ub=i(ha);fa=s(ub,"CODE",{});var _b=i(fa);yh=a(_b,"T5Tokenizer"),_b.forEach(o),ub.forEach(o),kh=h(C),pa=s(C,"LI",{});var vb=i(pa);ma=s(vb,"CODE",{});var Eb=i(ma);Th=a(Eb,"ReformerTokenizer"),Eb.forEach(o),vb.forEach(o),gh=h(C),ua=s(C,"LI",{});var bb=i(ua);_a=s(bb,"CODE",{});var wb=i(_a);$h=a(wb,"XLMRobertaTokenizer"),wb.forEach(o),bb.forEach(o),C.forEach(o),tn=h(t),ke=s(t,"H5",{class:!0});var Sd=i(ke);ot=s(Sd,"A",{id:!0,class:!0,href:!0});var yb=i(ot);va=s(yb,"SPAN",{});var kb=i(va);m(vo.$$.fragment,kb),kb.forEach(o),yb.forEach(o),Ch=h(Sd),Ea=s(Sd,"SPAN",{});var Tb=i(Ea);Oh=a(Tb,"How to obtain the same behavior as v3.x in v4.x"),Tb.forEach(o),Sd.forEach(o),on=h(t),ee=s(t,"P",{});var vr=i(ee);Dh=a(vr,"In order to obtain the same behavior as version "),ba=s(vr,"CODE",{});var gb=i(ba);xh=a(gb,"v3.x"),gb.forEach(o),Ah=a(vr,", you should install "),wa=s(vr,"CODE",{});var $b=i(wa);zh=a($b,"sentencepiece"),$b.forEach(o),Ph=a(vr," additionally:"),vr.forEach(o),rn=h(t),rt=s(t,"P",{});var Md=i(rt);Lh=a(Md,"In version "),ya=s(Md,"CODE",{});var Cb=i(ya);Ih=a(Cb,"v3.x"),Cb.forEach(o),Sh=a(Md,":"),Md.forEach(o),an=h(t),m(Eo.$$.fragment,t),ln=h(t),at=s(t,"P",{});var jd=i(at);Mh=a(jd,"to obtain the same in version "),ka=s(jd,"CODE",{});var Ob=i(ka);jh=a(Ob,"v4.x"),Ob.forEach(o),Fh=a(jd,":"),jd.forEach(o),sn=h(t),m(bo.$$.fragment,t),nn=h(t),rr=s(t,"P",{});var Db=i(rr);Bh=a(Db,"or"),Db.forEach(o),dn=h(t),m(wo.$$.fragment,t),cn=h(t),Te=s(t,"H4",{class:!0});var Fd=i(Te);lt=s(Fd,"A",{id:!0,class:!0,href:!0});var xb=i(lt);Ta=s(xb,"SPAN",{});var Ab=i(Ta);m(yo.$$.fragment,Ab),Ab.forEach(o),xb.forEach(o),qh=h(Fd),ga=s(Fd,"SPAN",{});var zb=i(ga);Nh=a(zb,"3. The architecture of the repo has been updated so that each model resides in its folder"),zb.forEach(o),Fd.forEach(o),hn=h(t),st=s(t,"P",{});var Bd=i(st);Hh=a(Bd,"The past and foreseeable addition of new models means that the number of files in the directory "),$a=s(Bd,"CODE",{});var Pb=i($a);Rh=a(Pb,"src/transformers"),Pb.forEach(o),Wh=a(Bd," keeps growing and becomes harder to navigate and understand. 
We made the choice to put each model and the files accompanying it in their own sub-directories."),Bd.forEach(o),fn=h(t),ar=s(t,"P",{});var Lb=i(ar);Gh=a(Lb,"This is a breaking change as importing intermediary layers using a model\u2019s module directly needs to be done via a different path."),Lb.forEach(o),pn=h(t),ge=s(t,"H5",{class:!0});var qd=i(ge);it=s(qd,"A",{id:!0,class:!0,href:!0});var Ib=i(it);Ca=s(Ib,"SPAN",{});var Sb=i(Ca);m(ko.$$.fragment,Sb),Sb.forEach(o),Ib.forEach(o),Uh=h(qd),Oa=s(qd,"SPAN",{});var Mb=i(Oa);Xh=a(Mb,"How to obtain the same behavior as v3.x in v4.x"),Mb.forEach(o),qd.forEach(o),mn=h(t),nt=s(t,"P",{});var Nd=i(nt);Kh=a(Nd,"In order to obtain the same behavior as version "),Da=s(Nd,"CODE",{});var jb=i(Da);Jh=a(jb,"v3.x"),jb.forEach(o),Qh=a(Nd,", you should update the path used to access the layers."),Nd.forEach(o),un=h(t),dt=s(t,"P",{});var Hd=i(dt);Vh=a(Hd,"In version "),xa=s(Hd,"CODE",{});var Fb=i(xa);Yh=a(Fb,"v3.x"),Fb.forEach(o),Zh=a(Hd,":"),Hd.forEach(o),_n=h(t),m(To.$$.fragment,t),vn=h(t),ct=s(t,"P",{});var Rd=i(ct);ef=a(Rd,"to obtain the same in version "),Aa=s(Rd,"CODE",{});var Bb=i(Aa);tf=a(Bb,"v4.x"),Bb.forEach(o),of=a(Rd,":"),Rd.forEach(o),En=h(t),m(go.$$.fragment,t),bn=h(t),$e=s(t,"H4",{class:!0});var Wd=i($e);ht=s(Wd,"A",{id:!0,class:!0,href:!0});var qb=i(ht);za=s(qb,"SPAN",{});var Nb=i(za);m($o.$$.fragment,Nb),Nb.forEach(o),qb.forEach(o),rf=h(Wd),Ce=s(Wd,"SPAN",{});var Er=i(Ce);af=a(Er,"4. Switching the "),Pa=s(Er,"CODE",{});var Hb=i(Pa);lf=a(Hb,"return_dict"),Hb.forEach(o),sf=a(Er," argument to "),La=s(Er,"CODE",{});var Rb=i(La);nf=a(Rb,"True"),Rb.forEach(o),df=a(Er," by default"),Er.forEach(o),Wd.forEach(o),wn=h(t),ft=s(t,"P",{});var Gd=i(ft);cf=a(Gd,"The "),Co=s(Gd,"A",{href:!0});var OE=i(Co);Ia=s(OE,"CODE",{});var Wb=i(Ia);hf=a(Wb,"return_dict"),Wb.forEach(o),ff=a(OE," argument"),OE.forEach(o),pf=a(Gd," enables the return of dict-like python objects containing the model outputs, instead of the standard tuples. 
This object is self-documented as keys can be used to retrieve values, while also behaving as a tuple as users may retrieve objects by index or by slice."),Gd.forEach(o),yn=h(t),pt=s(t,"P",{});var Ud=i(pt);mf=a(Ud,"This is a breaking change as the limitation of that tuple is that it cannot be unpacked: "),Sa=s(Ud,"CODE",{});var Gb=i(Sa);uf=a(Gb,"value0, value1 = outputs"),Gb.forEach(o),_f=a(Ud," will not work."),Ud.forEach(o),kn=h(t),Oe=s(t,"H5",{class:!0});var Xd=i(Oe);mt=s(Xd,"A",{id:!0,class:!0,href:!0});var Ub=i(mt);Ma=s(Ub,"SPAN",{});var Xb=i(Ma);m(Oo.$$.fragment,Xb),Xb.forEach(o),Ub.forEach(o),vf=h(Xd),ja=s(Xd,"SPAN",{});var Kb=i(ja);Ef=a(Kb,"How to obtain the same behavior as v3.x in v4.x"),Kb.forEach(o),Xd.forEach(o),Tn=h(t),P=s(t,"P",{});var Wt=i(P);bf=a(Wt,"In order to obtain the same behavior as version "),Fa=s(Wt,"CODE",{});var Jb=i(Fa);wf=a(Jb,"v3.x"),Jb.forEach(o),yf=a(Wt,", you should specify the "),Ba=s(Wt,"CODE",{});var Qb=i(Ba);kf=a(Qb,"return_dict"),Qb.forEach(o),Tf=a(Wt," argument to "),qa=s(Wt,"CODE",{});var Vb=i(qa);gf=a(Vb,"False"),Vb.forEach(o),$f=a(Wt,", either in the model configuration or during the forward pass."),Wt.forEach(o),gn=h(t),ut=s(t,"P",{});var Kd=i(ut);Cf=a(Kd,"In version "),Na=s(Kd,"CODE",{});var Yb=i(Na);Of=a(Yb,"v3.x"),Yb.forEach(o),Df=a(Kd,":"),Kd.forEach(o),$n=h(t),m(Do.$$.fragment,t),Cn=h(t),_t=s(t,"P",{});var Jd=i(_t);xf=a(Jd,"to obtain the same in version "),Ha=s(Jd,"CODE",{});var Zb=i(Ha);Af=a(Zb,"v4.x"),Zb.forEach(o),zf=a(Jd,":"),Jd.forEach(o),On=h(t),m(xo.$$.fragment,t),Dn=h(t),lr=s(t,"P",{});var e3=i(lr);Pf=a(e3,"or"),e3.forEach(o),xn=h(t),m(Ao.$$.fragment,t),An=h(t),De=s(t,"H4",{class:!0});var Qd=i(De);vt=s(Qd,"A",{id:!0,class:!0,href:!0});var t3=i(vt);Ra=s(t3,"SPAN",{});var o3=i(Ra);m(zo.$$.fragment,o3),o3.forEach(o),t3.forEach(o),Lf=h(Qd),Wa=s(Qd,"SPAN",{});var r3=i(Wa);If=a(r3,"5. Removed some deprecated attributes"),r3.forEach(o),Qd.forEach(o),zn=h(t),Et=s(t,"P",{});var Vd=i(Et);Sf=a(Vd,"Attributes that were deprecated have been removed if they had been deprecated for at least a month. 
The full list of deprecated attributes can be found in "),Po=s(Vd,"A",{href:!0,rel:!0});var a3=i(Po);Mf=a(a3,"#8604"),a3.forEach(o),jf=a(Vd,"."),Vd.forEach(o),Pn=h(t),sr=s(t,"P",{});var l3=i(sr);Ff=a(l3,"Here is a list of these attributes/methods/arguments and what their replacements should be:"),l3.forEach(o),Ln=h(t),ir=s(t,"P",{});var s3=i(ir);Bf=a(s3,"In several models, the labels become consistent with the other models:"),s3.forEach(o),In=h(t),b=s(t,"UL",{});var y=i(b);L=s(y,"LI",{});var Re=i(L);Ga=s(Re,"CODE",{});var i3=i(Ga);qf=a(i3,"masked_lm_labels"),i3.forEach(o),Nf=a(Re," becomes "),Ua=s(Re,"CODE",{});var n3=i(Ua);Hf=a(n3,"labels"),n3.forEach(o),Rf=a(Re," in "),Xa=s(Re,"CODE",{});var d3=i(Xa);Wf=a(d3,"AlbertForMaskedLM"),d3.forEach(o),Gf=a(Re," and "),Ka=s(Re,"CODE",{});var c3=i(Ka);Uf=a(c3,"AlbertForPreTraining"),c3.forEach(o),Xf=a(Re,"."),Re.forEach(o),Kf=h(y),I=s(y,"LI",{});var We=i(I);Ja=s(We,"CODE",{});var h3=i(Ja);Jf=a(h3,"masked_lm_labels"),h3.forEach(o),Qf=a(We," becomes "),Qa=s(We,"CODE",{});var f3=i(Qa);Vf=a(f3,"labels"),f3.forEach(o),Yf=a(We," in "),Va=s(We,"CODE",{});var p3=i(Va);Zf=a(p3,"BertForMaskedLM"),p3.forEach(o),ep=a(We," and "),Ya=s(We,"CODE",{});var m3=i(Ya);tp=a(m3,"BertForPreTraining"),m3.forEach(o),op=a(We,"."),We.forEach(o),rp=h(y),te=s(y,"LI",{});var Wo=i(te);Za=s(Wo,"CODE",{});var u3=i(Za);ap=a(u3,"masked_lm_labels"),u3.forEach(o),lp=a(Wo," becomes "),el=s(Wo,"CODE",{});var _3=i(el);sp=a(_3,"labels"),_3.forEach(o),ip=a(Wo," in "),tl=s(Wo,"CODE",{});var v3=i(tl);np=a(v3,"DistilBertForMaskedLM"),v3.forEach(o),dp=a(Wo,"."),Wo.forEach(o),cp=h(y),oe=s(y,"LI",{});var Go=i(oe);ol=s(Go,"CODE",{});var E3=i(ol);hp=a(E3,"masked_lm_labels"),E3.forEach(o),fp=a(Go," becomes "),rl=s(Go,"CODE",{});var b3=i(rl);pp=a(b3,"labels"),b3.forEach(o),mp=a(Go," in "),al=s(Go,"CODE",{});var w3=i(al);up=a(w3,"ElectraForMaskedLM"),w3.forEach(o),_p=a(Go,"."),Go.forEach(o),vp=h(y),re=s(y,"LI",{});var Uo=i(re);ll=s(Uo,"CODE",{});var y3=i(ll);Ep=a(y3,"masked_lm_labels"),y3.forEach(o),bp=a(Uo," becomes "),sl=s(Uo,"CODE",{});var k3=i(sl);wp=a(k3,"labels"),k3.forEach(o),yp=a(Uo," in "),il=s(Uo,"CODE",{});var T3=i(il);kp=a(T3,"LongformerForMaskedLM"),T3.forEach(o),Tp=a(Uo,"."),Uo.forEach(o),gp=h(y),ae=s(y,"LI",{});var Xo=i(ae);nl=s(Xo,"CODE",{});var g3=i(nl);$p=a(g3,"masked_lm_labels"),g3.forEach(o),Cp=a(Xo," becomes "),dl=s(Xo,"CODE",{});var $3=i(dl);Op=a($3,"labels"),$3.forEach(o),Dp=a(Xo," in "),cl=s(Xo,"CODE",{});var C3=i(cl);xp=a(C3,"MobileBertForMaskedLM"),C3.forEach(o),Ap=a(Xo,"."),Xo.forEach(o),zp=h(y),le=s(y,"LI",{});var Ko=i(le);hl=s(Ko,"CODE",{});var O3=i(hl);Pp=a(O3,"masked_lm_labels"),O3.forEach(o),Lp=a(Ko," becomes "),fl=s(Ko,"CODE",{});var D3=i(fl);Ip=a(D3,"labels"),D3.forEach(o),Sp=a(Ko," in "),pl=s(Ko,"CODE",{});var x3=i(pl);Mp=a(x3,"RobertaForMaskedLM"),x3.forEach(o),jp=a(Ko,"."),Ko.forEach(o),Fp=h(y),se=s(y,"LI",{});var Jo=i(se);ml=s(Jo,"CODE",{});var A3=i(ml);Bp=a(A3,"lm_labels"),A3.forEach(o),qp=a(Jo," becomes "),ul=s(Jo,"CODE",{});var z3=i(ul);Np=a(z3,"labels"),z3.forEach(o),Hp=a(Jo," in "),_l=s(Jo,"CODE",{});var P3=i(_l);Rp=a(P3,"BartForConditionalGeneration"),P3.forEach(o),Wp=a(Jo,"."),Jo.forEach(o),Gp=h(y),ie=s(y,"LI",{});var Qo=i(ie);vl=s(Qo,"CODE",{});var L3=i(vl);Up=a(L3,"lm_labels"),L3.forEach(o),Xp=a(Qo," becomes "),El=s(Qo,"CODE",{});var I3=i(El);Kp=a(I3,"labels"),I3.forEach(o),Jp=a(Qo," in "),bl=s(Qo,"CODE",{});var S3=i(bl);Qp=a(S3,"GPT2DoubleHeadsModel"),S3.forEach(o),Vp=a(Qo,"."),Qo.forEach(o),Yp=h(y),ne=s(y,"LI",{});var Vo=i(ne);wl=s(Vo,"CODE",{});var 
M3=i(wl);Zp=a(M3,"lm_labels"),M3.forEach(o),em=a(Vo," becomes "),yl=s(Vo,"CODE",{});var j3=i(yl);tm=a(j3,"labels"),j3.forEach(o),om=a(Vo," in "),kl=s(Vo,"CODE",{});var F3=i(kl);rm=a(F3,"OpenAIGPTDoubleHeadsModel"),F3.forEach(o),am=a(Vo,"."),Vo.forEach(o),lm=h(y),de=s(y,"LI",{});var Yo=i(de);Tl=s(Yo,"CODE",{});var B3=i(Tl);sm=a(B3,"lm_labels"),B3.forEach(o),im=a(Yo," becomes "),gl=s(Yo,"CODE",{});var q3=i(gl);nm=a(q3,"labels"),q3.forEach(o),dm=a(Yo," in "),$l=s(Yo,"CODE",{});var N3=i($l);cm=a(N3,"T5ForConditionalGeneration"),N3.forEach(o),hm=a(Yo,"."),Yo.forEach(o),y.forEach(o),Sn=h(t),nr=s(t,"P",{});var H3=i(nr);fm=a(H3,"In several models, the caching mechanism becomes consistent with the other models:"),H3.forEach(o),Mn=h(t),S=s(t,"UL",{});var Gt=i(S);bt=s(Gt,"LI",{});var Si=i(bt);Cl=s(Si,"CODE",{});var R3=i(Cl);pm=a(R3,"decoder_cached_states"),R3.forEach(o),mm=a(Si," becomes "),Ol=s(Si,"CODE",{});var W3=i(Ol);um=a(W3,"past_key_values"),W3.forEach(o),_m=a(Si," in all BART-like, FSMT and T5 models."),Si.forEach(o),vm=h(Gt),wt=s(Gt,"LI",{});var Mi=i(wt);Dl=s(Mi,"CODE",{});var G3=i(Dl);Em=a(G3,"decoder_past_key_values"),G3.forEach(o),bm=a(Mi," becomes "),xl=s(Mi,"CODE",{});var U3=i(xl);wm=a(U3,"past_key_values"),U3.forEach(o),ym=a(Mi," in all BART-like, FSMT and T5 models."),Mi.forEach(o),km=h(Gt),yt=s(Gt,"LI",{});var ji=i(yt);Al=s(ji,"CODE",{});var X3=i(Al);Tm=a(X3,"past"),X3.forEach(o),gm=a(ji," becomes "),zl=s(ji,"CODE",{});var K3=i(zl);$m=a(K3,"past_key_values"),K3.forEach(o),Cm=a(ji," in all CTRL models."),ji.forEach(o),Om=h(Gt),kt=s(Gt,"LI",{});var Fi=i(kt);Pl=s(Fi,"CODE",{});var J3=i(Pl);Dm=a(J3,"past"),J3.forEach(o),xm=a(Fi," becomes "),Ll=s(Fi,"CODE",{});var Q3=i(Ll);Am=a(Q3,"past_key_values"),Q3.forEach(o),zm=a(Fi," in all GPT-2 models."),Fi.forEach(o),Gt.forEach(o),jn=h(t),dr=s(t,"P",{});var V3=i(dr);Pm=a(V3,"Regarding the tokenizer classes:"),V3.forEach(o),Fn=h(t),ce=s(t,"UL",{});var br=i(ce);xe=s(br,"LI",{});var wr=i(xe);Lm=a(wr,"The tokenizer attribute "),Il=s(wr,"CODE",{});var Y3=i(Il);Im=a(Y3,"max_len"),Y3.forEach(o),Sm=a(wr," becomes "),Sl=s(wr,"CODE",{});var Z3=i(Sl);Mm=a(Z3,"model_max_length"),Z3.forEach(o),jm=a(wr,"."),wr.forEach(o),Fm=h(br),Ae=s(br,"LI",{});var yr=i(Ae);Bm=a(yr,"The tokenizer attribute "),Ml=s(yr,"CODE",{});var e4=i(Ml);qm=a(e4,"return_lengths"),e4.forEach(o),Nm=a(yr," becomes "),jl=s(yr,"CODE",{});var t4=i(jl);Hm=a(t4,"return_length"),t4.forEach(o),Rm=a(yr,"."),yr.forEach(o),Wm=h(br),ze=s(br,"LI",{});var kr=i(ze);Gm=a(kr,"The tokenizer encoding argument "),Fl=s(kr,"CODE",{});var o4=i(Fl);Um=a(o4,"is_pretokenized"),o4.forEach(o),Xm=a(kr," becomes "),Bl=s(kr,"CODE",{});var r4=i(Bl);Km=a(r4,"is_split_into_words"),r4.forEach(o),Jm=a(kr,"."),kr.forEach(o),br.forEach(o),Bn=h(t),Tt=s(t,"P",{});var Yd=i(Tt);Qm=a(Yd,"Regarding the "),ql=s(Yd,"CODE",{});var a4=i(ql);Vm=a(a4,"Trainer"),a4.forEach(o),Ym=a(Yd," class:"),Yd.forEach(o),qn=h(t),g=s(t,"UL",{});var O=i(g);B=s(O,"LI",{});var Ut=i(B);Zm=a(Ut,"The "),Nl=s(Ut,"CODE",{});var l4=i(Nl);eu=a(l4,"Trainer"),l4.forEach(o),tu=a(Ut," argument "),Hl=s(Ut,"CODE",{});var s4=i(Hl);ou=a(s4,"tb_writer"),s4.forEach(o),ru=a(Ut," is removed in favor of the callback "),Rl=s(Ut,"CODE",{});var i4=i(Rl);au=a(i4,"TensorBoardCallback(tb_writer=...)"),i4.forEach(o),lu=a(Ut,"."),Ut.forEach(o),su=h(O),q=s(O,"LI",{});var Xt=i(q);iu=a(Xt,"The "),Wl=s(Xt,"CODE",{});var n4=i(Wl);nu=a(n4,"Trainer"),n4.forEach(o),du=a(Xt," argument "),Gl=s(Xt,"CODE",{});var d4=i(Gl);cu=a(d4,"prediction_loss_only"),d4.forEach(o),hu=a(Xt," is removed in favor 
of the class argument "),Ul=s(Xt,"CODE",{});var c4=i(Ul);fu=a(c4,"args.prediction_loss_only"),c4.forEach(o),pu=a(Xt,"."),Xt.forEach(o),mu=h(O),Pe=s(O,"LI",{});var Tr=i(Pe);uu=a(Tr,"The "),Xl=s(Tr,"CODE",{});var h4=i(Xl);_u=a(h4,"Trainer"),h4.forEach(o),vu=a(Tr," attribute "),Kl=s(Tr,"CODE",{});var f4=i(Kl);Eu=a(f4,"data_collator"),f4.forEach(o),bu=a(Tr," should be a callable."),Tr.forEach(o),wu=h(O),N=s(O,"LI",{});var Kt=i(N);yu=a(Kt,"The "),Jl=s(Kt,"CODE",{});var p4=i(Jl);ku=a(p4,"Trainer"),p4.forEach(o),Tu=a(Kt," method "),Ql=s(Kt,"CODE",{});var m4=i(Ql);gu=a(m4,"_log"),m4.forEach(o),$u=a(Kt," is deprecated in favor of "),Vl=s(Kt,"CODE",{});var u4=i(Vl);Cu=a(u4,"log"),u4.forEach(o),Ou=a(Kt,"."),Kt.forEach(o),Du=h(O),H=s(O,"LI",{});var Jt=i(H);xu=a(Jt,"The "),Yl=s(Jt,"CODE",{});var _4=i(Yl);Au=a(_4,"Trainer"),_4.forEach(o),zu=a(Jt," method "),Zl=s(Jt,"CODE",{});var v4=i(Zl);Pu=a(v4,"_training_step"),v4.forEach(o),Lu=a(Jt," is deprecated in favor of "),es=s(Jt,"CODE",{});var E4=i(es);Iu=a(E4,"training_step"),E4.forEach(o),Su=a(Jt,"."),Jt.forEach(o),Mu=h(O),R=s(O,"LI",{});var Qt=i(R);ju=a(Qt,"The "),ts=s(Qt,"CODE",{});var b4=i(ts);Fu=a(b4,"Trainer"),b4.forEach(o),Bu=a(Qt," method "),os=s(Qt,"CODE",{});var w4=i(os);qu=a(w4,"_prediction_loop"),w4.forEach(o),Nu=a(Qt," is deprecated in favor of "),rs=s(Qt,"CODE",{});var y4=i(rs);Hu=a(y4,"prediction_loop"),y4.forEach(o),Ru=a(Qt,"."),Qt.forEach(o),Wu=h(O),W=s(O,"LI",{});var Vt=i(W);Gu=a(Vt,"The "),as=s(Vt,"CODE",{});var k4=i(as);Uu=a(k4,"Trainer"),k4.forEach(o),Xu=a(Vt," method "),ls=s(Vt,"CODE",{});var T4=i(ls);Ku=a(T4,"is_local_master"),T4.forEach(o),Ju=a(Vt," is deprecated in favor of "),ss=s(Vt,"CODE",{});var g4=i(ss);Qu=a(g4,"is_local_process_zero"),g4.forEach(o),Vu=a(Vt,"."),Vt.forEach(o),Yu=h(O),G=s(O,"LI",{});var Yt=i(G);Zu=a(Yt,"The "),is=s(Yt,"CODE",{});var $4=i(is);e_=a($4,"Trainer"),$4.forEach(o),t_=a(Yt," method "),ns=s(Yt,"CODE",{});var C4=i(ns);o_=a(C4,"is_world_master"),C4.forEach(o),r_=a(Yt," is deprecated in favor of "),ds=s(Yt,"CODE",{});var O4=i(ds);a_=a(O4,"is_world_process_zero"),O4.forEach(o),l_=a(Yt,"."),Yt.forEach(o),O.forEach(o),Nn=h(t),gt=s(t,"P",{});var Zd=i(gt);s_=a(Zd,"Regarding the "),cs=s(Zd,"CODE",{});var D4=i(cs);i_=a(D4,"TFTrainer"),D4.forEach(o),n_=a(Zd," class:"),Zd.forEach(o),Hn=h(t),D=s(t,"UL",{});var _e=i(D);U=s(_e,"LI",{});var Zt=i(U);d_=a(Zt,"The "),hs=s(Zt,"CODE",{});var x4=i(hs);c_=a(x4,"TFTrainer"),x4.forEach(o),h_=a(Zt," argument "),fs=s(Zt,"CODE",{});var A4=i(fs);f_=a(A4,"prediction_loss_only"),A4.forEach(o),p_=a(Zt," is removed in favor of the class argument "),ps=s(Zt,"CODE",{});var z4=i(ps);m_=a(z4,"args.prediction_loss_only"),z4.forEach(o),u_=a(Zt,"."),Zt.forEach(o),__=h(_e),X=s(_e,"LI",{});var eo=i(X);v_=a(eo,"The "),ms=s(eo,"CODE",{});var P4=i(ms);E_=a(P4,"Trainer"),P4.forEach(o),b_=a(eo," method "),us=s(eo,"CODE",{});var L4=i(us);w_=a(L4,"_log"),L4.forEach(o),y_=a(eo," is deprecated in favor of "),_s=s(eo,"CODE",{});var I4=i(_s);k_=a(I4,"log"),I4.forEach(o),T_=a(eo,"."),eo.forEach(o),g_=h(_e),K=s(_e,"LI",{});var to=i(K);$_=a(to,"The "),vs=s(to,"CODE",{});var S4=i(vs);C_=a(S4,"TFTrainer"),S4.forEach(o),O_=a(to," method "),Es=s(to,"CODE",{});var M4=i(Es);D_=a(M4,"_prediction_loop"),M4.forEach(o),x_=a(to," is deprecated in favor of "),bs=s(to,"CODE",{});var j4=i(bs);A_=a(j4,"prediction_loop"),j4.forEach(o),z_=a(to,"."),to.forEach(o),P_=h(_e),J=s(_e,"LI",{});var oo=i(J);L_=a(oo,"The "),ws=s(oo,"CODE",{});var F4=i(ws);I_=a(F4,"TFTrainer"),F4.forEach(o),S_=a(oo," method "),ys=s(oo,"CODE",{});var 
B4=i(ys);M_=a(B4,"_setup_wandb"),B4.forEach(o),j_=a(oo," is deprecated in favor of "),ks=s(oo,"CODE",{});var q4=i(ks);F_=a(q4,"setup_wandb"),q4.forEach(o),B_=a(oo,"."),oo.forEach(o),q_=h(_e),Q=s(_e,"LI",{});var ro=i(Q);N_=a(ro,"The "),Ts=s(ro,"CODE",{});var N4=i(Ts);H_=a(N4,"TFTrainer"),N4.forEach(o),R_=a(ro," method "),gs=s(ro,"CODE",{});var H4=i(gs);W_=a(H4,"_run_model"),H4.forEach(o),G_=a(ro," is deprecated in favor of "),$s=s(ro,"CODE",{});var R4=i($s);U_=a(R4,"run_model"),R4.forEach(o),X_=a(ro,"."),ro.forEach(o),_e.forEach(o),Rn=h(t),$t=s(t,"P",{});var ec=i($t);K_=a(ec,"Regarding the "),Cs=s(ec,"CODE",{});var W4=i(Cs);J_=a(W4,"TrainingArguments"),W4.forEach(o),Q_=a(ec," class:"),ec.forEach(o),Wn=h(t),cr=s(t,"UL",{});var G4=i(cr);V=s(G4,"LI",{});var ao=i(V);V_=a(ao,"The "),Os=s(ao,"CODE",{});var U4=i(Os);Y_=a(U4,"TrainingArguments"),U4.forEach(o),Z_=a(ao," argument "),Ds=s(ao,"CODE",{});var X4=i(Ds);ev=a(X4,"evaluate_during_training"),X4.forEach(o),tv=a(ao," is deprecated in favor of "),xs=s(ao,"CODE",{});var K4=i(xs);ov=a(K4,"evaluation_strategy"),K4.forEach(o),rv=a(ao,"."),ao.forEach(o),G4.forEach(o),Gn=h(t),hr=s(t,"P",{});var J4=i(hr);av=a(J4,"Regarding the Transfo-XL model:"),J4.forEach(o),Un=h(t),Ct=s(t,"UL",{});var tc=i(Ct);Le=s(tc,"LI",{});var gr=i(Le);lv=a(gr,"The Transfo-XL configuration attribute "),As=s(gr,"CODE",{});var Q4=i(As);sv=a(Q4,"tie_weight"),Q4.forEach(o),iv=a(gr," becomes "),zs=s(gr,"CODE",{});var V4=i(zs);nv=a(V4,"tie_words_embeddings"),V4.forEach(o),dv=a(gr,"."),gr.forEach(o),cv=h(tc),Ie=s(tc,"LI",{});var $r=i(Ie);hv=a($r,"The Transfo-XL modeling method "),Ps=s($r,"CODE",{});var Y4=i(Ps);fv=a(Y4,"reset_length"),Y4.forEach(o),pv=a($r," becomes "),Ls=s($r,"CODE",{});var Z4=i(Ls);mv=a(Z4,"reset_memory_length"),Z4.forEach(o),uv=a($r,"."),$r.forEach(o),tc.forEach(o),Xn=h(t),fr=s(t,"P",{});var e2=i(fr);_v=a(e2,"Regarding pipelines:"),e2.forEach(o),Kn=h(t),pr=s(t,"UL",{});var t2=i(pr);Y=s(t2,"LI",{});var lo=i(Y);vv=a(lo,"The "),Is=s(lo,"CODE",{});var o2=i(Is);Ev=a(o2,"FillMaskPipeline"),o2.forEach(o),bv=a(lo," argument "),Ss=s(lo,"CODE",{});var r2=i(Ss);wv=a(r2,"topk"),r2.forEach(o),yv=a(lo," becomes "),Ms=s(lo,"CODE",{});var a2=i(Ms);kv=a(a2,"top_k"),a2.forEach(o),Tv=a(lo,"."),lo.forEach(o),t2.forEach(o),Jn=h(t),Se=s(t,"H2",{class:!0});var oc=i(Se);Ot=s(oc,"A",{id:!0,class:!0,href:!0});var l2=i(Ot);js=s(l2,"SPAN",{});var s2=i(js);m(Lo.$$.fragment,s2),s2.forEach(o),l2.forEach(o),gv=h(oc),Fs=s(oc,"SPAN",{});var i2=i(Fs);$v=a(i2,"Migrating from pytorch-transformers to \u{1F917} Transformers"),i2.forEach(o),oc.forEach(o),Qn=h(t),Dt=s(t,"P",{});var rc=i(Dt);Cv=a(rc,"Here is a quick summary of what you should take care of when migrating from "),Bs=s(rc,"CODE",{});var n2=i(Bs);Ov=a(n2,"pytorch-transformers"),n2.forEach(o),Dv=a(rc," to \u{1F917} Transformers."),rc.forEach(o),Vn=h(t),Me=s(t,"H3",{class:!0});var ac=i(Me);xt=s(ac,"A",{id:!0,class:!0,href:!0});var d2=i(xt);qs=s(d2,"SPAN",{});var c2=i(qs);m(Io.$$.fragment,c2),c2.forEach(o),d2.forEach(o),xv=h(ac),je=s(ac,"SPAN",{});var Cr=i(je);Av=a(Cr,"Positional order of some models' keywords inputs ("),Ns=s(Cr,"CODE",{});var h2=i(Ns);zv=a(h2,"attention_mask"),h2.forEach(o),Pv=a(Cr,", "),Hs=s(Cr,"CODE",{});var f2=i(Hs);Lv=a(f2,"token_type_ids"),f2.forEach(o),Iv=a(Cr,"...) 
changed"),Cr.forEach(o),ac.forEach(o),Yn=h(t),M=s(t,"P",{});var so=i(M);Sv=a(so,"To be able to use Torchscript (see #1010, #1204 and #1195) the specific order of some models "),Rs=s(so,"STRONG",{});var p2=i(Rs);Mv=a(p2,"keywords inputs"),p2.forEach(o),jv=a(so," ("),Ws=s(so,"CODE",{});var m2=i(Ws);Fv=a(m2,"attention_mask"),m2.forEach(o),Bv=a(so,", "),Gs=s(so,"CODE",{});var u2=i(Gs);qv=a(u2,"token_type_ids"),u2.forEach(o),Nv=a(so,"\u2026) has been changed."),so.forEach(o),Zn=h(t),At=s(t,"P",{});var lc=i(At);Hv=a(lc,"If you used to call the models with keyword names for keyword arguments, e.g. "),Us=s(lc,"CODE",{});var _2=i(Us);Rv=a(_2,"model(inputs_ids, attention_mask=attention_mask, token_type_ids=token_type_ids)"),_2.forEach(o),Wv=a(lc,", this should not cause any change."),lc.forEach(o),ed=h(t),zt=s(t,"P",{});var sc=i(zt);Gv=a(sc,"If you used to call the models with positional inputs for keyword arguments, e.g. "),Xs=s(sc,"CODE",{});var v2=i(Xs);Uv=a(v2,"model(inputs_ids, attention_mask, token_type_ids)"),v2.forEach(o),Xv=a(sc,", you may have to double check the exact order of input arguments."),sc.forEach(o),td=h(t),Fe=s(t,"H2",{class:!0});var ic=i(Fe);Pt=s(ic,"A",{id:!0,class:!0,href:!0});var E2=i(Pt);Ks=s(E2,"SPAN",{});var b2=i(Ks);m(So.$$.fragment,b2),b2.forEach(o),E2.forEach(o),Kv=h(ic),Js=s(ic,"SPAN",{});var w2=i(Js);Jv=a(w2,"Migrating from pytorch-pretrained-bert"),w2.forEach(o),ic.forEach(o),od=h(t),Lt=s(t,"P",{});var nc=i(Lt);Qv=a(nc,"Here is a quick summary of what you should take care of when migrating from "),Qs=s(nc,"CODE",{});var y2=i(Qs);Vv=a(y2,"pytorch-pretrained-bert"),y2.forEach(o),Yv=a(nc," to \u{1F917} Transformers"),nc.forEach(o),rd=h(t),Be=s(t,"H3",{class:!0});var dc=i(Be);It=s(dc,"A",{id:!0,class:!0,href:!0});var k2=i(It);Vs=s(k2,"SPAN",{});var T2=i(Vs);m(Mo.$$.fragment,T2),T2.forEach(o),k2.forEach(o),Zv=h(dc),mr=s(dc,"SPAN",{});var DE=i(mr);e1=a(DE,"Models always output "),Ys=s(DE,"CODE",{});var g2=i(Ys);t1=a(g2,"tuples"),g2.forEach(o),DE.forEach(o),dc.forEach(o),ad=h(t),he=s(t,"P",{});var Or=i(he);o1=a(Or,"The main breaking change when migrating from "),Zs=s(Or,"CODE",{});var $2=i(Zs);r1=a($2,"pytorch-pretrained-bert"),$2.forEach(o),a1=a(Or," to \u{1F917} Transformers is that the models forward method always outputs a "),ei=s(Or,"CODE",{});var C2=i(ei);l1=a(C2,"tuple"),C2.forEach(o),s1=a(Or," with various elements depending on the model and the configuration parameters."),Or.forEach(o),ld=h(t),St=s(t,"P",{});var cc=i(St);i1=a(cc,"The exact content of the tuples for each model are detailed in the models\u2019 docstrings and the "),jo=s(cc,"A",{href:!0,rel:!0});var O2=i(jo);n1=a(O2,"documentation"),O2.forEach(o),d1=a(cc,"."),cc.forEach(o),sd=h(t),Mt=s(t,"P",{});var hc=i(Mt);c1=a(hc,"In pretty much every case, you will be fine by taking the first element of the output as the output you previously used in "),ti=s(hc,"CODE",{});var D2=i(ti);h1=a(D2,"pytorch-pretrained-bert"),D2.forEach(o),f1=a(hc,"."),hc.forEach(o),id=h(t),fe=s(t,"P",{});var Dr=i(fe);p1=a(Dr,"Here is a "),oi=s(Dr,"CODE",{});var x2=i(oi);m1=a(x2,"pytorch-pretrained-bert"),x2.forEach(o),u1=a(Dr," to \u{1F917} Transformers conversion example for a "),ri=s(Dr,"CODE",{});var A2=i(ri);_1=a(A2,"BertForSequenceClassification"),A2.forEach(o),v1=a(Dr," classification model:"),Dr.forEach(o),nd=h(t),m(Fo.$$.fragment,t),dd=h(t),qe=s(t,"H3",{class:!0});var fc=i(qe);jt=s(fc,"A",{id:!0,class:!0,href:!0});var z2=i(jt);ai=s(z2,"SPAN",{});var 
P2=i(ai);m(Bo.$$.fragment,P2),P2.forEach(o),z2.forEach(o),E1=h(fc),li=s(fc,"SPAN",{});var L2=i(li);b1=a(L2,"Serialization"),L2.forEach(o),fc.forEach(o),cd=h(t),Ft=s(t,"P",{});var pc=i(Ft);w1=a(pc,"Breaking change in the "),si=s(pc,"CODE",{});var I2=i(si);y1=a(I2,"from_pretrained()"),I2.forEach(o),k1=a(pc,"method:"),pc.forEach(o),hd=h(t),Bt=s(t,"OL",{});var mc=i(Bt);ii=s(mc,"LI",{});var S2=i(ii);Ne=s(S2,"P",{});var xr=i(Ne);T1=a(xr,"Models are now set in evaluation mode by default when instantiated with the "),ni=s(xr,"CODE",{});var M2=i(ni);g1=a(M2,"from_pretrained()"),M2.forEach(o),$1=a(xr," method. To train them don\u2019t forget to set them back in training mode ("),di=s(xr,"CODE",{});var j2=i(di);C1=a(j2,"model.train()"),j2.forEach(o),O1=a(xr,") to activate the dropout modules."),xr.forEach(o),S2.forEach(o),D1=h(mc),ci=s(mc,"LI",{});var F2=i(ci);w=s(F2,"P",{});var k=i(w);x1=a(k,"The additional "),hi=s(k,"CODE",{});var B2=i(hi);A1=a(B2,"*inputs"),B2.forEach(o),z1=a(k," and "),fi=s(k,"CODE",{});var q2=i(fi);P1=a(q2,"**kwargs"),q2.forEach(o),L1=a(k," arguments supplied to the "),pi=s(k,"CODE",{});var N2=i(pi);I1=a(N2,"from_pretrained()"),N2.forEach(o),S1=a(k," method used to be directly passed to the underlying model\u2019s class "),mi=s(k,"CODE",{});var H2=i(mi);M1=a(H2,"__init__()"),H2.forEach(o),j1=a(k," method. They are now used to update the model configuration attribute first which can break derived model classes build based on the previous "),ui=s(k,"CODE",{});var R2=i(ui);F1=a(R2,"BertForSequenceClassification"),R2.forEach(o),B1=a(k," examples. More precisely, the positional arguments "),_i=s(k,"CODE",{});var W2=i(_i);q1=a(W2,"*inputs"),W2.forEach(o),N1=a(k," provided to "),vi=s(k,"CODE",{});var G2=i(vi);H1=a(G2,"from_pretrained()"),G2.forEach(o),R1=a(k," are directly forwarded the model "),Ei=s(k,"CODE",{});var U2=i(Ei);W1=a(U2,"__init__()"),U2.forEach(o),G1=a(k," method while the keyword arguments "),bi=s(k,"CODE",{});var X2=i(bi);U1=a(X2,"**kwargs"),X2.forEach(o),X1=a(k," (i) which match configuration class attributes are used to update said attributes (ii) which don\u2019t match any configuration class attributes are forwarded to the model "),wi=s(k,"CODE",{});var K2=i(wi);K1=a(K2,"__init__()"),K2.forEach(o),J1=a(k," method."),k.forEach(o),F2.forEach(o),mc.forEach(o),fd=h(t),qt=s(t,"P",{});var uc=i(qt);Q1=a(uc,"Also, while not a breaking change, the serialization methods have been standardized and you probably should switch to the new method "),yi=s(uc,"CODE",{});var J2=i(yi);V1=a(J2,"save_pretrained(save_directory)"),J2.forEach(o),Y1=a(uc," if you were using any other serialization method before."),uc.forEach(o),pd=h(t),ur=s(t,"P",{});var Q2=i(ur);Z1=a(Q2,"Here is an example:"),Q2.forEach(o),md=h(t),m(qo.$$.fragment,t),ud=h(t),He=s(t,"H3",{class:!0});var _c=i(He);Nt=s(_c,"A",{id:!0,class:!0,href:!0});var V2=i(Nt);ki=s(V2,"SPAN",{});var Y2=i(ki);m(No.$$.fragment,Y2),Y2.forEach(o),V2.forEach(o),eE=h(_c),Ti=s(_c,"SPAN",{});var Z2=i(Ti);tE=a(Z2,"Optimizers: BertAdam & OpenAIAdam are now AdamW, schedules are standard PyTorch schedules"),Z2.forEach(o),_c.forEach(o),_d=h(t),j=s(t,"P",{});var io=i(j);oE=a(io,"The two optimizers previously included, "),gi=s(io,"CODE",{});var ew=i(gi);rE=a(ew,"BertAdam"),ew.forEach(o),aE=a(io," and "),$i=s(io,"CODE",{});var tw=i($i);lE=a(tw,"OpenAIAdam"),tw.forEach(o),sE=a(io,", have been replaced by a single "),Ci=s(io,"CODE",{});var ow=i(Ci);iE=a(ow,"AdamW"),ow.forEach(o),nE=a(io," optimizer which has a few 
differences:"),io.forEach(o),vd=h(t),pe=s(t,"UL",{});var Ar=i(pe);Oi=s(Ar,"LI",{});var rw=i(Oi);dE=a(rw,"it only implements weights decay correction,"),rw.forEach(o),cE=h(Ar),Di=s(Ar,"LI",{});var aw=i(Di);hE=a(aw,"schedules are now externals (see below),"),aw.forEach(o),fE=h(Ar),xi=s(Ar,"LI",{});var lw=i(xi);pE=a(lw,"gradient clipping is now also external (see below)."),lw.forEach(o),Ar.forEach(o),Ed=h(t),me=s(t,"P",{});var zr=i(me);mE=a(zr,"The new optimizer "),Ai=s(zr,"CODE",{});var sw=i(Ai);uE=a(sw,"AdamW"),sw.forEach(o),_E=a(zr," matches PyTorch "),zi=s(zr,"CODE",{});var iw=i(zi);vE=a(iw,"Adam"),iw.forEach(o),EE=a(zr," optimizer API and let you use standard PyTorch or apex methods for the schedule and clipping."),zr.forEach(o),bd=h(t),Ht=s(t,"P",{});var vc=i(Ht);bE=a(vc,"The schedules are now standard "),Ho=s(vc,"A",{href:!0,rel:!0});var nw=i(Ho);wE=a(nw,"PyTorch learning rate schedulers"),nw.forEach(o),yE=a(vc," and not part of the optimizer anymore."),vc.forEach(o),wd=h(t),ue=s(t,"P",{});var Pr=i(ue);kE=a(Pr,"Here is a conversion examples from "),Pi=s(Pr,"CODE",{});var dw=i(Pi);TE=a(dw,"BertAdam"),dw.forEach(o),gE=a(Pr," with a linear warmup and decay schedule to "),Li=s(Pr,"CODE",{});var cw=i(Li);$E=a(cw,"AdamW"),cw.forEach(o),CE=a(Pr," and the same schedule:"),Pr.forEach(o),yd=h(t),m(Ro.$$.fragment,t),this.h()},h(){f(Z,"name","hf:doc:metadata"),f(Z,"content",JSON.stringify(vw)),f(F,"id","migrating-from-previous-packages"),f(F,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(F,"href","#migrating-from-previous-packages"),f(A,"class","relative group"),f(Ge,"id","migrating-from-transformers-v3x-to-v4x"),f(Ge,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(Ge,"href","#migrating-from-transformers-v3x-to-v4x"),f(ve,"class","relative group"),f(Xe,"id","1-autotokenizers-and-pipelines-now-use-fast-rust-tokenizers-by-default"),f(Xe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(Xe,"href","#1-autotokenizers-and-pipelines-now-use-fast-rust-tokenizers-by-default"),f(Ee,"class","relative group"),f(Je,"id","how-to-obtain-the-same-behavior-as-v3x-in-v4x"),f(Je,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(Je,"href","#how-to-obtain-the-same-behavior-as-v3x-in-v4x"),f(be,"class","relative group"),f(Ve,"href","main_classes/pipelines#transformers.TokenClassificationPipeline"),f(et,"id","2-sentencepiece-is-removed-from-the-required-dependencies"),f(et,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(et,"href","#2-sentencepiece-is-removed-from-the-required-dependencies"),f(ye,"class","relative group"),f(ot,"id","how-to-obtain-the-same-behavior-as-v3x-in-v4x"),f(ot,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(ot,"href","#how-to-obtain-the-same-behavior-as-v3x-in-v4x"),f(ke,"class","relative 
group"),f(lt,"id","3-the-architecture-of-the-repo-has-been-updated-so-that-each-model-resides-in-its-folder"),f(lt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(lt,"href","#3-the-architecture-of-the-repo-has-been-updated-so-that-each-model-resides-in-its-folder"),f(Te,"class","relative group"),f(it,"id","how-to-obtain-the-same-behavior-as-v3x-in-v4x"),f(it,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(it,"href","#how-to-obtain-the-same-behavior-as-v3x-in-v4x"),f(ge,"class","relative group"),f(ht,"id","4-switching-the-returndict-argument-to-true-by-default"),f(ht,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(ht,"href","#4-switching-the-returndict-argument-to-true-by-default"),f($e,"class","relative group"),f(Co,"href","main_classes/output"),f(mt,"id","how-to-obtain-the-same-behavior-as-v3x-in-v4x"),f(mt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(mt,"href","#how-to-obtain-the-same-behavior-as-v3x-in-v4x"),f(Oe,"class","relative group"),f(vt,"id","5-removed-some-deprecated-attributes"),f(vt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(vt,"href","#5-removed-some-deprecated-attributes"),f(De,"class","relative group"),f(Po,"href","https://github.com/huggingface/transformers/pull/8604"),f(Po,"rel","nofollow"),f(Ot,"id","migrating-from-pytorchtransformers-to-transformers"),f(Ot,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(Ot,"href","#migrating-from-pytorchtransformers-to-transformers"),f(Se,"class","relative group"),f(xt,"id","positional-order-of-some-models-keywords-inputs-attentionmask-tokentypeids-changed"),f(xt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(xt,"href","#positional-order-of-some-models-keywords-inputs-attentionmask-tokentypeids-changed"),f(Me,"class","relative group"),f(Pt,"id","migrating-from-pytorchpretrainedbert"),f(Pt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(Pt,"href","#migrating-from-pytorchpretrainedbert"),f(Fe,"class","relative group"),f(It,"id","models-always-output-tuples"),f(It,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(It,"href","#models-always-output-tuples"),f(Be,"class","relative group"),f(jo,"href","https://huggingface.co/transformers/"),f(jo,"rel","nofollow"),f(jt,"id","serialization"),f(jt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full"),f(jt,"href","#serialization"),f(qe,"class","relative group"),f(Nt,"id","optimizers-bertadam-openaiadam-are-now-adamw-schedules-are-standard-pytorch-schedules"),f(Nt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(Nt,"href","#optimizers-bertadam-openaiadam-are-now-adamw-schedules-are-standard-pytorch-schedules"),f(He,"class","relative group"),f(Ho,"href","https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate"),f(Ho,"rel","nofollow")},m(t,n){e(document.head,Z),d(t,Zo,n),d(t,A,n),e(A,F),e(F,Lr),u(no,Lr,null),e(A,bc),e(A,Ir),e(Ir,wc),d(t,Bi,n),d(t,ve,n),e(ve,Ge),e(Ge,Sr),u(co,Sr,null),e(ve,yc),e(ve,Ue),e(Ue,kc),e(Ue,Mr),e(Mr,Tc),e(Ue,gc),e(Ue,jr),e(jr,$c),d(t,qi,n),d(t,er,n),e(er,Cc),d(t,Ni,n),d(t,Ee,n),e(Ee,Xe),e(Xe,Fr),u(ho,Fr,null),e(Ee,Oc),e(Ee,Br),e(Br,Dc),d(t,Hi,n),d(t,tr,n),e(tr,xc),d(t,Ri,n),d(t,or,n),e(or,Ac),d(t,Wi,n),d(t,Ke,n),e(Ke,qr),e(qr,zc),e(Ke,Pc),e(Ke,Nr),e(Nr,Lc),d(t,Gi,n),d(t,be,n),e(be,Je),e(Je,Hr),u(fo,Hr,null),e(be,Ic),e(be,Rr),e(Rr,Sc),d(t,Ui,n),d(t,Qe,n),e(Qe,po),e(po,Mc),e(po,Ve),e(Ve,jc),e(Ve,Wr),e(Wr,Fc),e(Ve,Bc),e(po,qc),e(Qe,Nc),e(Qe,we),e(we,Hc),e(we,Gr),e(Gr,Rc),e(we,Wc),e(we,Ur),e(Ur,Gc),e(we,Uc),d(t,Xi,n),d(t,Ye,n),e(Ye,Xc),e(Ye,Xr),e(Xr,Kc),e(Ye,Jc),d(t,Ki,n),u(mo,t,n),d(t,Ji,n),d(t,Ze,n),e(Ze,Qc),e(Ze,Kr),e(Kr,Vc),e(Ze,Yc),d(t,Qi,n),u(uo,t,n),d(t,Vi,n),d(t,ye,n),e(ye,et),e(et,Jr),u(_o,Jr,null),e(ye,Zc),e(ye,Qr),e(Qr,eh),d(t,Yi,n),d(t,z,n),e(z,th),e(z,Vr),e(Vr,oh),e(z,rh),e(z,Yr),e(Yr,ah),e(z,lh),e(z,Zr),e(Zr,sh),e(z,ih),d(t,Zi,n),d(t,tt,n),e(tt,nh),e(tt,ea),e(ea,dh),e(tt,ch),d(t,en,n),d(t,T,n),e(T,ta),e(ta,oa),e(oa,hh),e(T,fh),e(T,ra),e(ra,aa),e(aa,ph),e(T,mh),e(T,la),e(la,sa),e(sa,uh),e(T,_h),e(T,ia),e(ia,na),e(na,vh),e(T,Eh),e(T,da),e(da,ca),e(ca,bh),e(T,wh),e(T,ha),e(ha,fa),e(fa,yh),e(T,kh),e(T,pa),e(pa,ma),e(ma,Th),e(T,gh),e(T,ua),e(ua,_a),e(_a,$h),d(t,tn,n),d(t,ke,n),e(ke,ot),e(ot,va),u(vo,va,null),e(ke,Ch),e(ke,Ea),e(Ea,Oh),d(t,on,n),d(t,ee,n),e(ee,Dh),e(ee,ba),e(ba,xh),e(ee,Ah),e(ee,wa),e(wa,zh),e(ee,Ph),d(t,rn,n),d(t,rt,n),e(rt,Lh),e(rt,ya),e(ya,Ih),e(rt,Sh),d(t,an,n),u(Eo,t,n),d(t,ln,n),d(t,at,n),e(at,Mh),e(at,ka),e(ka,jh),e(at,Fh),d(t,sn,n),u(bo,t,n),d(t,nn,n),d(t,rr,n),e(rr,Bh),d(t,dn,n),u(wo,t,n),d(t,cn,n),d(t,Te,n),e(Te,lt),e(lt,Ta),u(yo,Ta,null),e(Te,qh),e(Te,ga),e(ga,Nh),d(t,hn,n),d(t,st,n),e(st,Hh),e(st,$a),e($a,Rh),e(st,Wh),d(t,fn,n),d(t,ar,n),e(ar,Gh),d(t,pn,n),d(t,ge,n),e(ge,it),e(it,Ca),u(ko,Ca,null),e(ge,Uh),e(ge,Oa),e(Oa,Xh),d(t,mn,n),d(t,nt,n),e(nt,Kh),e(nt,Da),e(Da,Jh),e(nt,Qh),d(t,un,n),d(t,dt,n),e(dt,Vh),e(dt,xa),e(xa,Yh),e(dt,Zh),d(t,_n,n),u(To,t,n),d(t,vn,n),d(t,ct,n),e(ct,ef),e(ct,Aa),e(Aa,tf),e(ct,of),d(t,En,n),u(go,t,n),d(t,bn,n),d(t,$e,n),e($e,ht),e(ht,za),u($o,za,null),e($e,rf),e($e,Ce),e(Ce,af),e(Ce,Pa),e(Pa,lf),e(Ce,sf),e(Ce,La),e(La,nf),e(Ce,df),d(t,wn,n),d(t,ft,n),e(ft,cf),e(ft,Co),e(Co,Ia),e(Ia,hf),e(Co,ff),e(ft,pf),d(t,yn,n),d(t,pt,n),e(pt,mf),e(pt,Sa),e(Sa,uf),e(pt,_f),d(t,kn,n),d(t,Oe,n),e(Oe,mt),e(mt,Ma),u(Oo,Ma,null),e(Oe,vf),e(Oe,ja),e(ja,Ef),d(t,Tn,n),d(t,P,n),e(P,bf),e(P,Fa),e(Fa,wf),e(P,yf),e(P,Ba),e(Ba,kf),e(P,Tf),e(P,qa),e(qa,gf),e(P,$f),d(t,gn,n),d(t,ut,n),e(ut,Cf),e(ut,Na),e(Na,Of),e(ut,Df),d(t,$n,n),u(Do,t,n),d(t,Cn,n),d(t,_t,n),e(_t,xf),e(_t,Ha),e(Ha,Af),e(_t,zf),d(t,On,n),u(xo,t,n),d(t,Dn,n),d(t,lr,n),e(lr,Pf),d(t,xn,n),u(Ao,t,n),d(t,An,n),d(t,De,n),e(De,vt),e(vt,Ra),u(zo,Ra,null),e(De,Lf),e(De,Wa),e(Wa,If),d(t,zn,n),d(t,Et,n),e(Et,Sf),e(E
t,Po),e(Po,Mf),e(Et,jf),d(t,Pn,n),d(t,sr,n),e(sr,Ff),d(t,Ln,n),d(t,ir,n),e(ir,Bf),d(t,In,n),d(t,b,n),e(b,L),e(L,Ga),e(Ga,qf),e(L,Nf),e(L,Ua),e(Ua,Hf),e(L,Rf),e(L,Xa),e(Xa,Wf),e(L,Gf),e(L,Ka),e(Ka,Uf),e(L,Xf),e(b,Kf),e(b,I),e(I,Ja),e(Ja,Jf),e(I,Qf),e(I,Qa),e(Qa,Vf),e(I,Yf),e(I,Va),e(Va,Zf),e(I,ep),e(I,Ya),e(Ya,tp),e(I,op),e(b,rp),e(b,te),e(te,Za),e(Za,ap),e(te,lp),e(te,el),e(el,sp),e(te,ip),e(te,tl),e(tl,np),e(te,dp),e(b,cp),e(b,oe),e(oe,ol),e(ol,hp),e(oe,fp),e(oe,rl),e(rl,pp),e(oe,mp),e(oe,al),e(al,up),e(oe,_p),e(b,vp),e(b,re),e(re,ll),e(ll,Ep),e(re,bp),e(re,sl),e(sl,wp),e(re,yp),e(re,il),e(il,kp),e(re,Tp),e(b,gp),e(b,ae),e(ae,nl),e(nl,$p),e(ae,Cp),e(ae,dl),e(dl,Op),e(ae,Dp),e(ae,cl),e(cl,xp),e(ae,Ap),e(b,zp),e(b,le),e(le,hl),e(hl,Pp),e(le,Lp),e(le,fl),e(fl,Ip),e(le,Sp),e(le,pl),e(pl,Mp),e(le,jp),e(b,Fp),e(b,se),e(se,ml),e(ml,Bp),e(se,qp),e(se,ul),e(ul,Np),e(se,Hp),e(se,_l),e(_l,Rp),e(se,Wp),e(b,Gp),e(b,ie),e(ie,vl),e(vl,Up),e(ie,Xp),e(ie,El),e(El,Kp),e(ie,Jp),e(ie,bl),e(bl,Qp),e(ie,Vp),e(b,Yp),e(b,ne),e(ne,wl),e(wl,Zp),e(ne,em),e(ne,yl),e(yl,tm),e(ne,om),e(ne,kl),e(kl,rm),e(ne,am),e(b,lm),e(b,de),e(de,Tl),e(Tl,sm),e(de,im),e(de,gl),e(gl,nm),e(de,dm),e(de,$l),e($l,cm),e(de,hm),d(t,Sn,n),d(t,nr,n),e(nr,fm),d(t,Mn,n),d(t,S,n),e(S,bt),e(bt,Cl),e(Cl,pm),e(bt,mm),e(bt,Ol),e(Ol,um),e(bt,_m),e(S,vm),e(S,wt),e(wt,Dl),e(Dl,Em),e(wt,bm),e(wt,xl),e(xl,wm),e(wt,ym),e(S,km),e(S,yt),e(yt,Al),e(Al,Tm),e(yt,gm),e(yt,zl),e(zl,$m),e(yt,Cm),e(S,Om),e(S,kt),e(kt,Pl),e(Pl,Dm),e(kt,xm),e(kt,Ll),e(Ll,Am),e(kt,zm),d(t,jn,n),d(t,dr,n),e(dr,Pm),d(t,Fn,n),d(t,ce,n),e(ce,xe),e(xe,Lm),e(xe,Il),e(Il,Im),e(xe,Sm),e(xe,Sl),e(Sl,Mm),e(xe,jm),e(ce,Fm),e(ce,Ae),e(Ae,Bm),e(Ae,Ml),e(Ml,qm),e(Ae,Nm),e(Ae,jl),e(jl,Hm),e(Ae,Rm),e(ce,Wm),e(ce,ze),e(ze,Gm),e(ze,Fl),e(Fl,Um),e(ze,Xm),e(ze,Bl),e(Bl,Km),e(ze,Jm),d(t,Bn,n),d(t,Tt,n),e(Tt,Qm),e(Tt,ql),e(ql,Vm),e(Tt,Ym),d(t,qn,n),d(t,g,n),e(g,B),e(B,Zm),e(B,Nl),e(Nl,eu),e(B,tu),e(B,Hl),e(Hl,ou),e(B,ru),e(B,Rl),e(Rl,au),e(B,lu),e(g,su),e(g,q),e(q,iu),e(q,Wl),e(Wl,nu),e(q,du),e(q,Gl),e(Gl,cu),e(q,hu),e(q,Ul),e(Ul,fu),e(q,pu),e(g,mu),e(g,Pe),e(Pe,uu),e(Pe,Xl),e(Xl,_u),e(Pe,vu),e(Pe,Kl),e(Kl,Eu),e(Pe,bu),e(g,wu),e(g,N),e(N,yu),e(N,Jl),e(Jl,ku),e(N,Tu),e(N,Ql),e(Ql,gu),e(N,$u),e(N,Vl),e(Vl,Cu),e(N,Ou),e(g,Du),e(g,H),e(H,xu),e(H,Yl),e(Yl,Au),e(H,zu),e(H,Zl),e(Zl,Pu),e(H,Lu),e(H,es),e(es,Iu),e(H,Su),e(g,Mu),e(g,R),e(R,ju),e(R,ts),e(ts,Fu),e(R,Bu),e(R,os),e(os,qu),e(R,Nu),e(R,rs),e(rs,Hu),e(R,Ru),e(g,Wu),e(g,W),e(W,Gu),e(W,as),e(as,Uu),e(W,Xu),e(W,ls),e(ls,Ku),e(W,Ju),e(W,ss),e(ss,Qu),e(W,Vu),e(g,Yu),e(g,G),e(G,Zu),e(G,is),e(is,e_),e(G,t_),e(G,ns),e(ns,o_),e(G,r_),e(G,ds),e(ds,a_),e(G,l_),d(t,Nn,n),d(t,gt,n),e(gt,s_),e(gt,cs),e(cs,i_),e(gt,n_),d(t,Hn,n),d(t,D,n),e(D,U),e(U,d_),e(U,hs),e(hs,c_),e(U,h_),e(U,fs),e(fs,f_),e(U,p_),e(U,ps),e(ps,m_),e(U,u_),e(D,__),e(D,X),e(X,v_),e(X,ms),e(ms,E_),e(X,b_),e(X,us),e(us,w_),e(X,y_),e(X,_s),e(_s,k_),e(X,T_),e(D,g_),e(D,K),e(K,$_),e(K,vs),e(vs,C_),e(K,O_),e(K,Es),e(Es,D_),e(K,x_),e(K,bs),e(bs,A_),e(K,z_),e(D,P_),e(D,J),e(J,L_),e(J,ws),e(ws,I_),e(J,S_),e(J,ys),e(ys,M_),e(J,j_),e(J,ks),e(ks,F_),e(J,B_),e(D,q_),e(D,Q),e(Q,N_),e(Q,Ts),e(Ts,H_),e(Q,R_),e(Q,gs),e(gs,W_),e(Q,G_),e(Q,$s),e($s,U_),e(Q,X_),d(t,Rn,n),d(t,$t,n),e($t,K_),e($t,Cs),e(Cs,J_),e($t,Q_),d(t,Wn,n),d(t,cr,n),e(cr,V),e(V,V_),e(V,Os),e(Os,Y_),e(V,Z_),e(V,Ds),e(Ds,ev),e(V,tv),e(V,xs),e(xs,ov),e(V,rv),d(t,Gn,n),d(t,hr,n),e(hr,av),d(t,Un,n),d(t,Ct,n),e(Ct,Le),e(Le,lv),e(Le,As),e(As,sv),e(Le,iv),e(Le,zs),e(zs,nv),e(Le,dv),e(Ct,cv),e(Ct,Ie),e(Ie,hv),e(Ie,Ps),e(Ps,fv),e(Ie,pv),e(Ie,Ls),e(Ls,mv),e(Ie,uv
),d(t,Xn,n),d(t,fr,n),e(fr,_v),d(t,Kn,n),d(t,pr,n),e(pr,Y),e(Y,vv),e(Y,Is),e(Is,Ev),e(Y,bv),e(Y,Ss),e(Ss,wv),e(Y,yv),e(Y,Ms),e(Ms,kv),e(Y,Tv),d(t,Jn,n),d(t,Se,n),e(Se,Ot),e(Ot,js),u(Lo,js,null),e(Se,gv),e(Se,Fs),e(Fs,$v),d(t,Qn,n),d(t,Dt,n),e(Dt,Cv),e(Dt,Bs),e(Bs,Ov),e(Dt,Dv),d(t,Vn,n),d(t,Me,n),e(Me,xt),e(xt,qs),u(Io,qs,null),e(Me,xv),e(Me,je),e(je,Av),e(je,Ns),e(Ns,zv),e(je,Pv),e(je,Hs),e(Hs,Lv),e(je,Iv),d(t,Yn,n),d(t,M,n),e(M,Sv),e(M,Rs),e(Rs,Mv),e(M,jv),e(M,Ws),e(Ws,Fv),e(M,Bv),e(M,Gs),e(Gs,qv),e(M,Nv),d(t,Zn,n),d(t,At,n),e(At,Hv),e(At,Us),e(Us,Rv),e(At,Wv),d(t,ed,n),d(t,zt,n),e(zt,Gv),e(zt,Xs),e(Xs,Uv),e(zt,Xv),d(t,td,n),d(t,Fe,n),e(Fe,Pt),e(Pt,Ks),u(So,Ks,null),e(Fe,Kv),e(Fe,Js),e(Js,Jv),d(t,od,n),d(t,Lt,n),e(Lt,Qv),e(Lt,Qs),e(Qs,Vv),e(Lt,Yv),d(t,rd,n),d(t,Be,n),e(Be,It),e(It,Vs),u(Mo,Vs,null),e(Be,Zv),e(Be,mr),e(mr,e1),e(mr,Ys),e(Ys,t1),d(t,ad,n),d(t,he,n),e(he,o1),e(he,Zs),e(Zs,r1),e(he,a1),e(he,ei),e(ei,l1),e(he,s1),d(t,ld,n),d(t,St,n),e(St,i1),e(St,jo),e(jo,n1),e(St,d1),d(t,sd,n),d(t,Mt,n),e(Mt,c1),e(Mt,ti),e(ti,h1),e(Mt,f1),d(t,id,n),d(t,fe,n),e(fe,p1),e(fe,oi),e(oi,m1),e(fe,u1),e(fe,ri),e(ri,_1),e(fe,v1),d(t,nd,n),u(Fo,t,n),d(t,dd,n),d(t,qe,n),e(qe,jt),e(jt,ai),u(Bo,ai,null),e(qe,E1),e(qe,li),e(li,b1),d(t,cd,n),d(t,Ft,n),e(Ft,w1),e(Ft,si),e(si,y1),e(Ft,k1),d(t,hd,n),d(t,Bt,n),e(Bt,ii),e(ii,Ne),e(Ne,T1),e(Ne,ni),e(ni,g1),e(Ne,$1),e(Ne,di),e(di,C1),e(Ne,O1),e(Bt,D1),e(Bt,ci),e(ci,w),e(w,x1),e(w,hi),e(hi,A1),e(w,z1),e(w,fi),e(fi,P1),e(w,L1),e(w,pi),e(pi,I1),e(w,S1),e(w,mi),e(mi,M1),e(w,j1),e(w,ui),e(ui,F1),e(w,B1),e(w,_i),e(_i,q1),e(w,N1),e(w,vi),e(vi,H1),e(w,R1),e(w,Ei),e(Ei,W1),e(w,G1),e(w,bi),e(bi,U1),e(w,X1),e(w,wi),e(wi,K1),e(w,J1),d(t,fd,n),d(t,qt,n),e(qt,Q1),e(qt,yi),e(yi,V1),e(qt,Y1),d(t,pd,n),d(t,ur,n),e(ur,Z1),d(t,md,n),u(qo,t,n),d(t,ud,n),d(t,He,n),e(He,Nt),e(Nt,ki),u(No,ki,null),e(He,eE),e(He,Ti),e(Ti,tE),d(t,_d,n),d(t,j,n),e(j,oE),e(j,gi),e(gi,rE),e(j,aE),e(j,$i),e($i,lE),e(j,sE),e(j,Ci),e(Ci,iE),e(j,nE),d(t,vd,n),d(t,pe,n),e(pe,Oi),e(Oi,dE),e(pe,cE),e(pe,Di),e(Di,hE),e(pe,fE),e(pe,xi),e(xi,pE),d(t,Ed,n),d(t,me,n),e(me,mE),e(me,Ai),e(Ai,uE),e(me,_E),e(me,zi),e(zi,vE),e(me,EE),d(t,bd,n),d(t,Ht,n),e(Ht,bE),e(Ht,Ho),e(Ho,wE),e(Ht,yE),d(t,wd,n),d(t,ue,n),e(ue,kE),e(ue,Pi),e(Pi,TE),e(ue,gE),e(ue,Li),e(Li,$E),e(ue,CE),d(t,yd,n),u(Ro,t,n),kd=!0},p:uw,i(t){kd||(_(no.$$.fragment,t),_(co.$$.fragment,t),_(ho.$$.fragment,t),_(fo.$$.fragment,t),_(mo.$$.fragment,t),_(uo.$$.fragment,t),_(_o.$$.fragment,t),_(vo.$$.fragment,t),_(Eo.$$.fragment,t),_(bo.$$.fragment,t),_(wo.$$.fragment,t),_(yo.$$.fragment,t),_(ko.$$.fragment,t),_(To.$$.fragment,t),_(go.$$.fragment,t),_($o.$$.fragment,t),_(Oo.$$.fragment,t),_(Do.$$.fragment,t),_(xo.$$.fragment,t),_(Ao.$$.fragment,t),_(zo.$$.fragment,t),_(Lo.$$.fragment,t),_(Io.$$.fragment,t),_(So.$$.fragment,t),_(Mo.$$.fragment,t),_(Fo.$$.fragment,t),_(Bo.$$.fragment,t),_(qo.$$.fragment,t),_(No.$$.fragment,t),_(Ro.$$.fragment,t),kd=!0)},o(t){v(no.$$.fragment,t),v(co.$$.fragment,t),v(ho.$$.fragment,t),v(fo.$$.fragment,t),v(mo.$$.fragment,t),v(uo.$$.fragment,t),v(_o.$$.fragment,t),v(vo.$$.fragment,t),v(Eo.$$.fragment,t),v(bo.$$.fragment,t),v(wo.$$.fragment,t),v(yo.$$.fragment,t),v(ko.$$.fragment,t),v(To.$$.fragment,t),v(go.$$.fragment,t),v($o.$$.fragment,t),v(Oo.$$.fragment,t),v(Do.$$.fragment,t),v(xo.$$.fragment,t),v(Ao.$$.fragment,t),v(zo.$$.fragment,t),v(Lo.$$.fragment,t),v(Io.$$.fragment,t),v(So.$$.fragment,t),v(Mo.$$.fragment,t),v(Fo.$$.fragment,t),v(Bo.$$.fragment,t),v(qo.$$.fragment,t),v(No.$$.fragment,t),v(Ro.$$.fragment,t),kd=!1},d(t){o(Z),t&&o(Zo),
t&&o(A),E(no),t&&o(Bi),t&&o(ve),E(co),t&&o(qi),t&&o(er),t&&o(Ni),t&&o(Ee),E(ho),t&&o(Hi),t&&o(tr),t&&o(Ri),t&&o(or),t&&o(Wi),t&&o(Ke),t&&o(Gi),t&&o(be),E(fo),t&&o(Ui),t&&o(Qe),t&&o(Xi),t&&o(Ye),t&&o(Ki),E(mo,t),t&&o(Ji),t&&o(Ze),t&&o(Qi),E(uo,t),t&&o(Vi),t&&o(ye),E(_o),t&&o(Yi),t&&o(z),t&&o(Zi),t&&o(tt),t&&o(en),t&&o(T),t&&o(tn),t&&o(ke),E(vo),t&&o(on),t&&o(ee),t&&o(rn),t&&o(rt),t&&o(an),E(Eo,t),t&&o(ln),t&&o(at),t&&o(sn),E(bo,t),t&&o(nn),t&&o(rr),t&&o(dn),E(wo,t),t&&o(cn),t&&o(Te),E(yo),t&&o(hn),t&&o(st),t&&o(fn),t&&o(ar),t&&o(pn),t&&o(ge),E(ko),t&&o(mn),t&&o(nt),t&&o(un),t&&o(dt),t&&o(_n),E(To,t),t&&o(vn),t&&o(ct),t&&o(En),E(go,t),t&&o(bn),t&&o($e),E($o),t&&o(wn),t&&o(ft),t&&o(yn),t&&o(pt),t&&o(kn),t&&o(Oe),E(Oo),t&&o(Tn),t&&o(P),t&&o(gn),t&&o(ut),t&&o($n),E(Do,t),t&&o(Cn),t&&o(_t),t&&o(On),E(xo,t),t&&o(Dn),t&&o(lr),t&&o(xn),E(Ao,t),t&&o(An),t&&o(De),E(zo),t&&o(zn),t&&o(Et),t&&o(Pn),t&&o(sr),t&&o(Ln),t&&o(ir),t&&o(In),t&&o(b),t&&o(Sn),t&&o(nr),t&&o(Mn),t&&o(S),t&&o(jn),t&&o(dr),t&&o(Fn),t&&o(ce),t&&o(Bn),t&&o(Tt),t&&o(qn),t&&o(g),t&&o(Nn),t&&o(gt),t&&o(Hn),t&&o(D),t&&o(Rn),t&&o($t),t&&o(Wn),t&&o(cr),t&&o(Gn),t&&o(hr),t&&o(Un),t&&o(Ct),t&&o(Xn),t&&o(fr),t&&o(Kn),t&&o(pr),t&&o(Jn),t&&o(Se),E(Lo),t&&o(Qn),t&&o(Dt),t&&o(Vn),t&&o(Me),E(Io),t&&o(Yn),t&&o(M),t&&o(Zn),t&&o(At),t&&o(ed),t&&o(zt),t&&o(td),t&&o(Fe),E(So),t&&o(od),t&&o(Lt),t&&o(rd),t&&o(Be),E(Mo),t&&o(ad),t&&o(he),t&&o(ld),t&&o(St),t&&o(sd),t&&o(Mt),t&&o(id),t&&o(fe),t&&o(nd),E(Fo,t),t&&o(dd),t&&o(qe),E(Bo),t&&o(cd),t&&o(Ft),t&&o(hd),t&&o(Bt),t&&o(fd),t&&o(qt),t&&o(pd),t&&o(ur),t&&o(md),E(qo,t),t&&o(ud),t&&o(He),E(No),t&&o(_d),t&&o(j),t&&o(vd),t&&o(pe),t&&o(Ed),t&&o(me),t&&o(bd),t&&o(Ht),t&&o(wd),t&&o(ue),t&&o(yd),E(Ro,t)}}}const vw={local:"migrating-from-previous-packages",sections:[{local:"migrating-from-transformers-v3x-to-v4x",sections:[{local:"1-autotokenizers-and-pipelines-now-use-fast-rust-tokenizers-by-default",sections:[{local:"how-to-obtain-the-same-behavior-as-v3x-in-v4x",title:"How to obtain the same behavior as v3.x in v4.x"}],title:"1. AutoTokenizers and pipelines now use fast (rust) tokenizers by default."},{local:"2-sentencepiece-is-removed-from-the-required-dependencies",sections:[{local:"how-to-obtain-the-same-behavior-as-v3x-in-v4x",title:"How to obtain the same behavior as v3.x in v4.x"}],title:"2. SentencePiece is removed from the required dependencies"},{local:"3-the-architecture-of-the-repo-has-been-updated-so-that-each-model-resides-in-its-folder",sections:[{local:"how-to-obtain-the-same-behavior-as-v3x-in-v4x",title:"How to obtain the same behavior as v3.x in v4.x"}],title:"3. The architecture of the repo has been updated so that each model resides in its folder"},{local:"4-switching-the-returndict-argument-to-true-by-default",sections:[{local:"how-to-obtain-the-same-behavior-as-v3x-in-v4x",title:"How to obtain the same behavior as v3.x in v4.x"}],title:"4. Switching the `return_dict` argument to `True` by default"},{local:"5-removed-some-deprecated-attributes",title:"5. Removed some deprecated attributes"}],title:"Migrating from transformers `v3.x` to `v4.x`"},{local:"migrating-from-pytorchtransformers-to-transformers",sections:[{local:"positional-order-of-some-models-keywords-inputs-attentionmask-tokentypeids-changed",title:"Positional order of some models' keywords inputs (`attention_mask`, `token_type_ids`...) 
changed"}],title:"Migrating from pytorch-transformers to \u{1F917} Transformers"},{local:"migrating-from-pytorchpretrainedbert",sections:[{local:"models-always-output-tuples",title:"Models always output `tuples`"},{local:"serialization",title:"Serialization"},{local:"optimizers-bertadam-openaiadam-are-now-adamw-schedules-are-standard-pytorch-schedules",title:"Optimizers: BertAdam & OpenAIAdam are now AdamW, schedules are standard PyTorch schedules"}],title:"Migrating from pytorch-pretrained-bert"}],title:"Migrating from previous packages"};function Ew(Ec,Z,Zo){let{fw:A}=Z;return Ec.$$set=F=>{"fw"in F&&Zo(0,A=F.fw)},[A]}class Tw extends hw{constructor(Z){super();fw(this,Z,Ew,_w,pw,{fw:0})}}export{Tw as default,vw as metadata};
253
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages/preprocessing.mdx-daaa6c3c.js
import{S as jv,i as _v,s as gv,e as t,k as c,w as m,t as r,M as vv,c as n,d as a,m as h,a as l,x as d,h as o,b as u,N as bv,F as e,g as i,y as f,q as b,o as j,B as _}from"../chunks/vendor-4833417e.js";import{T as Ev}from"../chunks/Tip-fffd6df1.js";import{Y as wv}from"../chunks/Youtube-27813aed.js";import{I as k}from"../chunks/IconCopyLink-4b81c553.js";import{C as E}from"../chunks/CodeBlock-6a3d1b46.js";import{C as $v}from"../chunks/CodeBlockFw-27a176a0.js";import{D as yv}from"../chunks/DocNotebookDropdown-ecff2a90.js";import"../chunks/CopyButton-dacfbfaf.js";function kv(In){let w,R,$,x,ns;return{c(){w=t("p"),R=r("If you plan on using a pretrained model, it\u2019s important to use the associated pretrained tokenizer. This ensures the text is split the same way as the pretraining corpus, and uses the same corresponding tokens-to-index (usually referrred to as the "),$=t("em"),x=r("vocab"),ns=r(") during pretraining.")},l(q){w=n(q,"P",{});var F=l(w);R=o(F,"If you plan on using a pretrained model, it\u2019s important to use the associated pretrained tokenizer. This ensures the text is split the same way as the pretraining corpus, and uses the same corresponding tokens-to-index (usually referrred to as the "),$=n(F,"EM",{});var Gs=l($);x=o(Gs,"vocab"),Gs.forEach(a),ns=o(F,") during pretraining."),F.forEach(a)},m(q,F){i(q,w,F),e(w,R),e(w,$),e($,x),e(w,ns)},d(q){q&&a(w)}}}function xv(In){let w,R,$,x,ns,q,F,Gs,oi,Uo,Ce,Mo,at,pi,Vo,H,Rn,ci,hi,Fn,ii,ui,Hn,mi,Ko,ls,Us,Bn,Oe,di,Jn,fi,Qo,Le,Xo,B,bi,tt,ji,_i,Wn,gi,vi,Zo,Ms,sp,J,Ei,nt,wi,$i,Yn,yi,ki,ep,rs,Vs,Gn,Se,xi,Un,Ti,ap,Ks,qi,lt,Di,Ai,tp,Ne,np,rt,zi,lp,Ie,rp,ot,Pi,op,W,pt,ct,Ci,Oi,Li,ht,it,Si,Ni,Ii,ut,mt,Ri,Fi,pp,Qs,Hi,Mn,Bi,Ji,cp,Re,hp,Y,Wi,Vn,Yi,Gi,Kn,Ui,Mi,ip,dt,Vi,up,Fe,mp,os,Xs,Qn,He,Ki,Xn,Qi,dp,Zs,Xi,Zn,Zi,su,fp,G,eu,sl,au,tu,el,nu,lu,bp,Be,jp,se,ru,al,ou,pu,_p,ps,ee,tl,Je,cu,nl,hu,gp,ft,iu,vp,U,uu,ll,mu,du,rl,fu,bu,Ep,We,wp,cs,ae,ol,Ye,ju,pl,_u,$p,bt,gu,yp,D,vu,cl,Eu,wu,hl,$u,yu,il,ku,xu,kp,Ge,xp,hs,te,ul,Ue,Tu,ml,qu,Tp,ne,Du,jt,Au,zu,qp,Me,Dp,M,Pu,Ve,Cu,Ou,Ke,Lu,Su,Ap,Qe,zp,V,Nu,dl,Iu,Ru,fl,Fu,Hu,Pp,Xe,Cp,_t,Bu,Op,K,gt,bl,Ju,Wu,Yu,vt,jl,Gu,Uu,Mu,Et,_l,Vu,Ku,Lp,is,le,gl,Ze,Qu,vl,Xu,Sp,re,Zu,sa,sm,em,Np,oe,am,ea,tm,nm,Ip,aa,Rp,wt,ta,lm,na,El,rm,om,Fp,la,Hp,ra,wl,pm,Bp,oa,Jp,pe,cm,$l,hm,im,Wp,us,ce,yl,pa,um,kl,mm,Yp,A,dm,xl,fm,bm,Tl,jm,_m,ql,gm,vm,Gp,he,Em,$t,wm,$m,Up,ca,Mp,Q,ym,Dl,km,xm,Al,Tm,qm,Vp,ha,Kp,ms,ie,zl,ia,Dm,Pl,Am,Qp,yt,zm,Xp,ua,Zp,kt,Pm,sc,ma,ec,xt,Cm,ac,da,tc,Tt,Om,nc,fa,lc,qt,Lm,rc,ds,ue,Cl,ba,Sm,Ol,Nm,oc,Dt,Im,pc,X,Rm,ja,Fm,Hm,Ll,Bm,Jm,cc,_a,hc,me,Wm,ga,Sl,Ym,Gm,ic,va,uc,At,zt,V1,mc,fs,de,Nl,Ea,Um,Il,Mm,dc,fe,Vm,Pt,Km,Qm,fc,wa,bc,bs,be,Rl,$a,Xm,Fl,Zm,jc,je,sd,ya,Hl,ed,ad,_c,Ct,S,td,ka,Bl,nd,ld,xa,Jl,rd,od,Ta,Wl,pd,cd,gc,qa,vc,Da,js,hd,Ot,Yl,id,ud,Gl,md,dd,Ec,Aa,wc,za,Pa,fd,Ca,Ul,bd,jd,$c,Oa,yc,La,Sa,_d,Ml,gd,vd,kc,Na,xc,Lt,Ed,Tc,Ia,qc,St,Nt,K1,Dc,_s,_e,Vl,Ra,wd,Kl,$d,Ac,It,yd,zc,ge,Ql,kd,xd,Xl,Td,Pc,ve,qd,Fa,Dd,Ad,Cc,Ha,Oc,Z,zd,Zl,Pd,Cd,sr,Od,Ld,Lc,Ba,Sc,ss,Sd,er,Nd,Id,ar,Rd,Fd,Nc,Ja,Ic,Ee,Hd,Rt,Bd,Jd,Rc,Wa,Fc,gs,we,tr,Ya,Wd,nr,Yd,Hc,Ft,Gd,Bc,Ga,Jc,Ht,vs,Ud,lr,Md,Vd,rr,Kd,Qd,Wc,Ua,Yc,Ma,Va,Xd,or,Zd,sf,Gc,Ka,Uc,es,ef,pr,af,tf,cr,nf,lf,Mc,Bt,rf,Vc,Es,$e,hr,Qa,of,ir,pf,Kc,z,cf,ur,hf,uf,mr,mf,df,dr,ff,bf,Qc,as,Xa,Jt,fr,jf,_f,gf,ws,ye,br,vf,Ef,jr,wf,$f,yf,P,_r,kf,xf,gr,Tf,qf,vr,Df,Af,Er,zf,Pf,Cf,ke,wr,Of,Lf,$r,Sf,Nf,If,Za,Wt,yr,Rf,Ff,Hf,N,T,kr,Bf,Jf,xr,Wf,Yf,Tr,Gf,Uf,qr,Mf,Vf,Dr,Kf,Qf,Xf,C,Ar,Zf,sb,zr,eb,ab,Pr,tb,nb,Cr,lb,rb,ob,O,Or,pb,cb,Lr,hb,ib,Sr,ub,mb,Nr,db,fb,bb,xe,Ir,jb,_b,Rr,gb,vb,Eb,Fr,ts,Hr,wb,$b,Br,yb,kb,Jr,xb,Tb,Xc,y,qb,W
r,Db,Ab,Yr,zb,Pb,Gr,Cb,Ob,Ur,Lb,Sb,Mr,Nb,Ib,Zc,Te,Vr,$s,Kr,Rb,Fb,Qr,Hb,Bb,Xr,Jb,Wb,g,ys,Zr,Yb,Gb,so,Ub,Mb,eo,ao,Vb,Kb,ks,sh,Qb,to,Xb,Zb,Yt,no,sj,ej,aj,xs,eh,tj,ah,nj,lo,ro,lj,rj,Ts,th,oj,oo,pj,cj,po,co,hj,ij,qs,nh,uj,ho,mj,dj,io,uo,fj,bj,Ds,mo,jj,_j,fo,gj,vj,Gt,bo,Ej,wj,$j,As,lh,yj,rh,kj,jo,_o,xj,Tj,zs,oh,qj,go,Dj,Aj,Ut,vo,zj,Pj,Cj,Ps,ph,Oj,ch,Lj,Eo,wo,Sj,Nj,Cs,hh,Ij,$o,Rj,Fj,Mt,yo,Hj,Bj,Jj,Os,ih,Wj,uh,Yj,ko,xo,Gj,Uj,Ls,mh,Mj,To,Vj,Kj,qo,Qj,Xj,Ss,Do,Zj,s1,Ao,e1,a1,Vt,zo,t1,n1,l1,Ns,dh,r1,fh,o1,Po,Co,p1,c1,Is,bh,h1,Oo,i1,u1,Kt,Lo,m1,d1,f1,Rs,jh,b1,_h,j1,So,No,_1,g1,Fs,gh,v1,Io,E1,w1,Ro,$1,y1,Hs,vh,k1,Fo,x1,T1,Qt,Ho,q1,D1,A1,Bs,Eh,z1,wh,P1,Bo,Jo,C1,$h;return q=new k({}),Ce=new yv({props:{classNames:"absolute z-10 right-0 top-0",options:[{label:"Mixed",value:"https://colab.research.google.com/github/huggingface/notebooks/blob/master/transformers_doc/preprocessing.ipynb"},{label:"PyTorch",value:"https://colab.research.google.com/github/huggingface/notebooks/blob/master/transformers_doc/pytorch/preprocessing.ipynb"},{label:"TensorFlow",value:"https://colab.research.google.com/github/huggingface/notebooks/blob/master/transformers_doc/tensorflow/preprocessing.ipynb"},{label:"Mixed",value:"https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/transformers_doc/preprocessing.ipynb"},{label:"PyTorch",value:"https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/transformers_doc/pytorch/preprocessing.ipynb"},{label:"TensorFlow",value:"https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/transformers_doc/tensorflow/preprocessing.ipynb"}]}}),Oe=new k({}),Le=new wv({props:{id:"Yffk5aydLzg"}}),Ms=new Ev({props:{$$slots:{default:[kv]},$$scope:{ctx:In}}}),Se=new k({}),Ne=new E({props:{code:`from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;bert-base-cased&quot;</span>)`}}),Ie=new E({props:{code:`encoded_input = tokenizer("Do not meddle in the affairs of wizards, for they are subtle and quick to anger.") print(encoded_input)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>encoded_input = tokenizer(<span class="hljs-string">&quot;Do not meddle in the affairs of wizards, for they are subtle and quick to anger.&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(encoded_input) {<span class="hljs-string">&#x27;input_ids&#x27;</span>: [<span class="hljs-number">101</span>, <span class="hljs-number">2079</span>, <span class="hljs-number">2025</span>, <span class="hljs-number">19960</span>, <span class="hljs-number">10362</span>, <span class="hljs-number">1999</span>, <span class="hljs-number">1996</span>, <span class="hljs-number">3821</span>, <span class="hljs-number">1997</span>, <span class="hljs-number">16657</span>, <span class="hljs-number">1010</span>, <span class="hljs-number">2005</span>, <span class="hljs-number">2027</span>, <span class="hljs-number">2024</span>, <span class="hljs-number">11259</span>, <span class="hljs-number">1998</span>, <span class="hljs-number">4248</span>, <span class="hljs-number">2000</span>, <span class="hljs-number">4963</span>, <span class="hljs-number">1012</span>, <span class="hljs-number">102</span>], <span 
class="hljs-string">&#x27;token_type_ids&#x27;</span>: [<span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>], <span class="hljs-string">&#x27;attention_mask&#x27;</span>: [<span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>]}`}}),Re=new E({props:{code:'tokenizer.decode(encoded_input["input_ids"])',highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.decode(encoded_input[<span class="hljs-string">&quot;input_ids&quot;</span>]) <span class="hljs-string">&#x27;[CLS] Do not meddle in the affairs of wizards, for they are subtle and quick to anger. [SEP]&#x27;</span>`}}),Fe=new E({props:{code:`batch_sentences = [ "But what about second breakfast?", "Don't think he knows about second breakfast, Pip.", "What about elevensies?", ] encoded_inputs = tokenizer(batch_sentences) print(encoded_inputs)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>batch_sentences = [ <span class="hljs-meta">... </span> <span class="hljs-string">&quot;But what about second breakfast?&quot;</span>, <span class="hljs-meta">... </span> <span class="hljs-string">&quot;Don&#x27;t think he knows about second breakfast, Pip.&quot;</span>, <span class="hljs-meta">... </span> <span class="hljs-string">&quot;What about elevensies?&quot;</span>, <span class="hljs-meta">... 
</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>encoded_inputs = tokenizer(batch_sentences) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(encoded_inputs) {<span class="hljs-string">&#x27;input_ids&#x27;</span>: [[<span class="hljs-number">101</span>, <span class="hljs-number">1252</span>, <span class="hljs-number">1184</span>, <span class="hljs-number">1164</span>, <span class="hljs-number">1248</span>, <span class="hljs-number">6462</span>, <span class="hljs-number">136</span>, <span class="hljs-number">102</span>], [<span class="hljs-number">101</span>, <span class="hljs-number">1790</span>, <span class="hljs-number">112</span>, <span class="hljs-number">189</span>, <span class="hljs-number">1341</span>, <span class="hljs-number">1119</span>, <span class="hljs-number">3520</span>, <span class="hljs-number">1164</span>, <span class="hljs-number">1248</span>, <span class="hljs-number">6462</span>, <span class="hljs-number">117</span>, <span class="hljs-number">21902</span>, <span class="hljs-number">1643</span>, <span class="hljs-number">119</span>, <span class="hljs-number">102</span>], [<span class="hljs-number">101</span>, <span class="hljs-number">1327</span>, <span class="hljs-number">1164</span>, <span class="hljs-number">5450</span>, <span class="hljs-number">23434</span>, <span class="hljs-number">136</span>, <span class="hljs-number">102</span>]], <span class="hljs-string">&#x27;token_type_ids&#x27;</span>: [[<span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>], [<span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>], [<span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>]], <span class="hljs-string">&#x27;attention_mask&#x27;</span>: [[<span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>], [<span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>], [<span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span 
class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>]]}`}}),He=new k({}),Be=new E({props:{code:`batch_sentences = [ "But what about second breakfast?", "Don't think he knows about second breakfast, Pip.", "What about elevensies?", ] encoded_input = tokenizer(batch_sentences, padding=True) print(encoded_input)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>batch_sentences = [ <span class="hljs-meta">... </span> <span class="hljs-string">&quot;But what about second breakfast?&quot;</span>, <span class="hljs-meta">... </span> <span class="hljs-string">&quot;Don&#x27;t think he knows about second breakfast, Pip.&quot;</span>, <span class="hljs-meta">... </span> <span class="hljs-string">&quot;What about elevensies?&quot;</span>, <span class="hljs-meta">... </span>] <span class="hljs-meta">&gt;&gt;&gt; </span>encoded_input = tokenizer(batch_sentences, padding=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(encoded_input) {<span class="hljs-string">&#x27;input_ids&#x27;</span>: [[<span class="hljs-number">101</span>, <span class="hljs-number">1252</span>, <span class="hljs-number">1184</span>, <span class="hljs-number">1164</span>, <span class="hljs-number">1248</span>, <span class="hljs-number">6462</span>, <span class="hljs-number">136</span>, <span class="hljs-number">102</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>], [<span class="hljs-number">101</span>, <span class="hljs-number">1790</span>, <span class="hljs-number">112</span>, <span class="hljs-number">189</span>, <span class="hljs-number">1341</span>, <span class="hljs-number">1119</span>, <span class="hljs-number">3520</span>, <span class="hljs-number">1164</span>, <span class="hljs-number">1248</span>, <span class="hljs-number">6462</span>, <span class="hljs-number">117</span>, <span class="hljs-number">21902</span>, <span class="hljs-number">1643</span>, <span class="hljs-number">119</span>, <span class="hljs-number">102</span>], [<span class="hljs-number">101</span>, <span class="hljs-number">1327</span>, <span class="hljs-number">1164</span>, <span class="hljs-number">5450</span>, <span class="hljs-number">23434</span>, <span class="hljs-number">136</span>, <span class="hljs-number">102</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>]], <span class="hljs-string">&#x27;token_type_ids&#x27;</span>: [[<span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>], [<span class="hljs-number">0</span>, <span 
class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>], [<span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>]], <span class="hljs-string">&#x27;attention_mask&#x27;</span>: [[<span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>], [<span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>], [<span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>]]}`}}),Je=new k({}),We=new E({props:{code:`batch_sentences = [ "But what about second breakfast?", "Don't think he knows about second breakfast, Pip.", "What about elevensies?", ] encoded_input = tokenizer(batch_sentences, padding=True, truncation=True) print(encoded_input)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>batch_sentences = [ <span class="hljs-meta">... </span> <span class="hljs-string">&quot;But what about second breakfast?&quot;</span>, <span class="hljs-meta">... </span> <span class="hljs-string">&quot;Don&#x27;t think he knows about second breakfast, Pip.&quot;</span>, <span class="hljs-meta">... </span> <span class="hljs-string">&quot;What about elevensies?&quot;</span>, <span class="hljs-meta">... 
</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>encoded_input = tokenizer(batch_sentences, padding=<span class="hljs-literal">True</span>, truncation=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(encoded_input) {<span class="hljs-string">&#x27;input_ids&#x27;</span>: [[<span class="hljs-number">101</span>, <span class="hljs-number">1252</span>, <span class="hljs-number">1184</span>, <span class="hljs-number">1164</span>, <span class="hljs-number">1248</span>, <span class="hljs-number">6462</span>, <span class="hljs-number">136</span>, <span class="hljs-number">102</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>], [<span class="hljs-number">101</span>, <span class="hljs-number">1790</span>, <span class="hljs-number">112</span>, <span class="hljs-number">189</span>, <span class="hljs-number">1341</span>, <span class="hljs-number">1119</span>, <span class="hljs-number">3520</span>, <span class="hljs-number">1164</span>, <span class="hljs-number">1248</span>, <span class="hljs-number">6462</span>, <span class="hljs-number">117</span>, <span class="hljs-number">21902</span>, <span class="hljs-number">1643</span>, <span class="hljs-number">119</span>, <span class="hljs-number">102</span>], [<span class="hljs-number">101</span>, <span class="hljs-number">1327</span>, <span class="hljs-number">1164</span>, <span class="hljs-number">5450</span>, <span class="hljs-number">23434</span>, <span class="hljs-number">136</span>, <span class="hljs-number">102</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>]], <span class="hljs-string">&#x27;token_type_ids&#x27;</span>: [[<span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>], [<span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>], [<span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span 
class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>]], <span class="hljs-string">&#x27;attention_mask&#x27;</span>: [[<span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>], [<span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>], [<span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>]]}`}}),Ye=new k({}),Ge=new $v({props:{group1:{id:"pt",code:`batch_sentences = [ "But what about second breakfast?", "Don't think he knows about second breakfast, Pip.", "What about elevensies?", ] encoded_input = tokenizer(batch, padding=True, truncation=True, return_tensors="pt") print(encoded_input)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>batch_sentences = [ <span class="hljs-meta">... </span> <span class="hljs-string">&quot;But what about second breakfast?&quot;</span>, <span class="hljs-meta">... </span> <span class="hljs-string">&quot;Don&#x27;t think he knows about second breakfast, Pip.&quot;</span>, <span class="hljs-meta">... </span> <span class="hljs-string">&quot;What about elevensies?&quot;</span>, <span class="hljs-meta">... 
</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>encoded_input = tokenizer(batch, padding=<span class="hljs-literal">True</span>, truncation=<span class="hljs-literal">True</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(encoded_input) {<span class="hljs-string">&#x27;input_ids&#x27;</span>: tensor([[ <span class="hljs-number">101</span>, <span class="hljs-number">153</span>, <span class="hljs-number">7719</span>, <span class="hljs-number">21490</span>, <span class="hljs-number">1122</span>, <span class="hljs-number">1114</span>, <span class="hljs-number">9582</span>, <span class="hljs-number">1623</span>, <span class="hljs-number">102</span>], [ <span class="hljs-number">101</span>, <span class="hljs-number">5226</span>, <span class="hljs-number">1122</span>, <span class="hljs-number">9649</span>, <span class="hljs-number">1199</span>, <span class="hljs-number">2610</span>, <span class="hljs-number">1236</span>, <span class="hljs-number">102</span>, <span class="hljs-number">0</span>]]), <span class="hljs-string">&#x27;token_type_ids&#x27;</span>: tensor([[<span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>], [<span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>]]), <span class="hljs-string">&#x27;attention_mask&#x27;</span>: tensor([[<span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>], [<span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">0</span>]])}`},group2:{id:"tf",code:`batch_sentences = [ "But what about second breakfast?", "Don't think he knows about second breakfast, Pip.", "What about elevensies?", ] encoded_input = tokenizer(batch, padding=True, truncation=True, return_tensors="tf") print(encoded_input)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>batch_sentences = [ <span class="hljs-meta">... </span> <span class="hljs-string">&quot;But what about second breakfast?&quot;</span>, <span class="hljs-meta">... </span> <span class="hljs-string">&quot;Don&#x27;t think he knows about second breakfast, Pip.&quot;</span>, <span class="hljs-meta">... </span> <span class="hljs-string">&quot;What about elevensies?&quot;</span>, <span class="hljs-meta">... 
</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>encoded_input = tokenizer(batch, padding=<span class="hljs-literal">True</span>, truncation=<span class="hljs-literal">True</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(encoded_input) {<span class="hljs-string">&#x27;input_ids&#x27;</span>: &lt;tf.Tensor: shape=(<span class="hljs-number">2</span>, <span class="hljs-number">9</span>), dtype=int32, numpy= array([[ <span class="hljs-number">101</span>, <span class="hljs-number">153</span>, <span class="hljs-number">7719</span>, <span class="hljs-number">21490</span>, <span class="hljs-number">1122</span>, <span class="hljs-number">1114</span>, <span class="hljs-number">9582</span>, <span class="hljs-number">1623</span>, <span class="hljs-number">102</span>], [ <span class="hljs-number">101</span>, <span class="hljs-number">5226</span>, <span class="hljs-number">1122</span>, <span class="hljs-number">9649</span>, <span class="hljs-number">1199</span>, <span class="hljs-number">2610</span>, <span class="hljs-number">1236</span>, <span class="hljs-number">102</span>, <span class="hljs-number">0</span>]], dtype=int32)&gt;, <span class="hljs-string">&#x27;token_type_ids&#x27;</span>: &lt;tf.Tensor: shape=(<span class="hljs-number">2</span>, <span class="hljs-number">9</span>), dtype=int32, numpy= array([[<span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>], [<span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>]], dtype=int32)&gt;, <span class="hljs-string">&#x27;attention_mask&#x27;</span>: &lt;tf.Tensor: shape=(<span class="hljs-number">2</span>, <span class="hljs-number">9</span>), dtype=int32, numpy= array([[<span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>], [<span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">0</span>]], dtype=int32)&gt;}`}}}),Ue=new k({}),Me=new E({props:{code:"pip install datasets",highlighted:"pip install datasets"}}),Qe=new E({props:{code:`from datasets import load_dataset, Audio dataset = load_dataset("superb", "ks")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset, Audio <span class="hljs-meta">&gt;&gt;&gt; </span>dataset = load_dataset(<span class="hljs-string">&quot;superb&quot;</span>, <span class="hljs-string">&quot;ks&quot;</span>)`}}),Xe=new 
E({props:{code:'dataset["train"][0]["audio"]',highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>dataset[<span class="hljs-string">&quot;train&quot;</span>][<span class="hljs-number">0</span>][<span class="hljs-string">&quot;audio&quot;</span>] {<span class="hljs-string">&#x27;array&#x27;</span>: array([ <span class="hljs-number">0.</span> , <span class="hljs-number">0.</span> , <span class="hljs-number">0.</span> , ..., -<span class="hljs-number">0.00592041</span>, -<span class="hljs-number">0.00405884</span>, -<span class="hljs-number">0.00253296</span>], dtype=float32), <span class="hljs-string">&#x27;path&#x27;</span>: <span class="hljs-string">&#x27;/root/.cache/huggingface/datasets/downloads/extracted/05734a36d88019a09725c20cc024e1c4e7982e37d7d55c0c1ca1742ea1cdd47f/_background_noise_/doing_the_dishes.wav&#x27;</span>, <span class="hljs-string">&#x27;sampling_rate&#x27;</span>: <span class="hljs-number">16000</span>}`}}),Ze=new k({}),aa=new E({props:{code:`lj_speech = load_dataset("lj_speech", split="train") lj_speech[0]["audio"]`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>lj_speech = load_dataset(<span class="hljs-string">&quot;lj_speech&quot;</span>, split=<span class="hljs-string">&quot;train&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>lj_speech[<span class="hljs-number">0</span>][<span class="hljs-string">&quot;audio&quot;</span>] {<span class="hljs-string">&#x27;array&#x27;</span>: array([-<span class="hljs-number">7.3242188e-04</span>, -<span class="hljs-number">7.6293945e-04</span>, -<span class="hljs-number">6.4086914e-04</span>, ..., <span class="hljs-number">7.3242188e-04</span>, <span class="hljs-number">2.1362305e-04</span>, <span class="hljs-number">6.1035156e-05</span>], dtype=float32), <span class="hljs-string">&#x27;path&#x27;</span>: <span class="hljs-string">&#x27;/root/.cache/huggingface/datasets/downloads/extracted/917ece08c95cf0c4115e45294e3cd0dee724a1165b7fc11798369308a465bd26/LJSpeech-1.1/wavs/LJ001-0001.wav&#x27;</span>, <span class="hljs-string">&#x27;sampling_rate&#x27;</span>: <span class="hljs-number">22050</span>}`}}),la=new E({props:{code:'lj_speech = lj_speech.cast_column("audio", Audio(sampling_rate=16_000))',highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>lj_speech = lj_speech.cast_column(<span class="hljs-string">&quot;audio&quot;</span>, Audio(sampling_rate=<span class="hljs-number">16_000</span>))'}}),oa=new E({props:{code:'lj_speech[0]["audio"]',highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>lj_speech[<span class="hljs-number">0</span>][<span class="hljs-string">&quot;audio&quot;</span>] {<span class="hljs-string">&#x27;array&#x27;</span>: array([-<span class="hljs-number">0.00064146</span>, -<span class="hljs-number">0.00074657</span>, -<span class="hljs-number">0.00068768</span>, ..., <span class="hljs-number">0.00068341</span>, <span class="hljs-number">0.00014045</span>, <span class="hljs-number">0.</span> ], dtype=float32), <span class="hljs-string">&#x27;path&#x27;</span>: <span class="hljs-string">&#x27;/root/.cache/huggingface/datasets/downloads/extracted/917ece08c95cf0c4115e45294e3cd0dee724a1165b7fc11798369308a465bd26/LJSpeech-1.1/wavs/LJ001-0001.wav&#x27;</span>, <span class="hljs-string">&#x27;sampling_rate&#x27;</span>: <span class="hljs-number">16000</span>}`}}),pa=new k({}),ca=new E({props:{code:`from transformers import AutoFeatureExtractor feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; 
</span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoFeatureExtractor <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = AutoFeatureExtractor.from_pretrained(<span class="hljs-string">&quot;facebook/wav2vec2-base&quot;</span>)`}}),ha=new E({props:{code:`audio_input = [dataset["train"][0]["audio"]["array"]] feature_extractor(audio_input, sampling_rate=16000)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>audio_input = [dataset[<span class="hljs-string">&quot;train&quot;</span>][<span class="hljs-number">0</span>][<span class="hljs-string">&quot;audio&quot;</span>][<span class="hljs-string">&quot;array&quot;</span>]] <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor(audio_input, sampling_rate=<span class="hljs-number">16000</span>) {<span class="hljs-string">&#x27;input_values&#x27;</span>: [array([ <span class="hljs-number">0.00045439</span>, <span class="hljs-number">0.00045439</span>, <span class="hljs-number">0.00045439</span>, ..., -<span class="hljs-number">0.1578519</span> , -<span class="hljs-number">0.10807519</span>, -<span class="hljs-number">0.06727459</span>], dtype=float32)]}`}}),ia=new k({}),ua=new E({props:{code:`dataset["train"][0]["audio"]["array"].shape dataset["train"][1]["audio"]["array"].shape`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>dataset[<span class="hljs-string">&quot;train&quot;</span>][<span class="hljs-number">0</span>][<span class="hljs-string">&quot;audio&quot;</span>][<span class="hljs-string">&quot;array&quot;</span>].shape (<span class="hljs-number">1522930</span>,) <span class="hljs-meta">&gt;&gt;&gt; </span>dataset[<span class="hljs-string">&quot;train&quot;</span>][<span class="hljs-number">1</span>][<span class="hljs-string">&quot;audio&quot;</span>][<span class="hljs-string">&quot;array&quot;</span>].shape (<span class="hljs-number">988891</span>,)`}}),ma=new E({props:{code:`def preprocess_function(examples): audio_arrays = [x["array"] for x in examples["audio"]] inputs = feature_extractor( audio_arrays, sampling_rate=16000, padding=True, max_length=1000000, truncation=True, ) return inputs`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">def</span> <span class="hljs-title function_">preprocess_function</span>(<span class="hljs-params">examples</span>): <span class="hljs-meta">... </span> audio_arrays = [x[<span class="hljs-string">&quot;array&quot;</span>] <span class="hljs-keyword">for</span> x <span class="hljs-keyword">in</span> examples[<span class="hljs-string">&quot;audio&quot;</span>]] <span class="hljs-meta">... </span> inputs = feature_extractor( <span class="hljs-meta">... </span> audio_arrays, <span class="hljs-meta">... </span> sampling_rate=<span class="hljs-number">16000</span>, <span class="hljs-meta">... </span> padding=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> max_length=<span class="hljs-number">1000000</span>, <span class="hljs-meta">... </span> truncation=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> ) <span class="hljs-meta">... 
</span> <span class="hljs-keyword">return</span> inputs`}}),da=new E({props:{code:'processed_dataset = preprocess_function(dataset["train"][:5])',highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>processed_dataset = preprocess_function(dataset[<span class="hljs-string">&quot;train&quot;</span>][:<span class="hljs-number">5</span>])'}}),fa=new E({props:{code:`processed_dataset["input_values"][0].shape processed_dataset["input_values"][1].shape`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>processed_dataset[<span class="hljs-string">&quot;input_values&quot;</span>][<span class="hljs-number">0</span>].shape (<span class="hljs-number">1000000</span>,) <span class="hljs-meta">&gt;&gt;&gt; </span>processed_dataset[<span class="hljs-string">&quot;input_values&quot;</span>][<span class="hljs-number">1</span>].shape (<span class="hljs-number">1000000</span>,)`}}),ba=new k({}),_a=new E({props:{code:`from datasets import load_dataset dataset = load_dataset("food101", split="train[:100]")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span>dataset = load_dataset(<span class="hljs-string">&quot;food101&quot;</span>, split=<span class="hljs-string">&quot;train[:100]&quot;</span>)`}}),va=new E({props:{code:'dataset[0]["image"]',highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>dataset[<span class="hljs-number">0</span>][<span class="hljs-string">&quot;image&quot;</span>]'}}),Ea=new k({}),wa=new E({props:{code:`from transformers import AutoFeatureExtractor feature_extractor = AutoFeatureExtractor.from_pretrained("google/vit-base-patch16-224")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoFeatureExtractor <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = AutoFeatureExtractor.from_pretrained(<span class="hljs-string">&quot;google/vit-base-patch16-224&quot;</span>)`}}),$a=new k({}),qa=new E({props:{code:`from torchvision.transforms import Compose, Normalize, RandomResizedCrop, ColorJitter, ToTensor normalize = Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std) _transforms = Compose( [RandomResizedCrop(feature_extractor.size), ColorJitter(brightness=0.5, hue=0.5), ToTensor(), normalize] )`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> torchvision.transforms <span class="hljs-keyword">import</span> Compose, Normalize, RandomResizedCrop, ColorJitter, ToTensor <span class="hljs-meta">&gt;&gt;&gt; </span>normalize = Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std) <span class="hljs-meta">&gt;&gt;&gt; </span>_transforms = Compose( <span class="hljs-meta">... </span> [RandomResizedCrop(feature_extractor.size), ColorJitter(brightness=<span class="hljs-number">0.5</span>, hue=<span class="hljs-number">0.5</span>), ToTensor(), normalize] <span class="hljs-meta">... </span>)`}}),Aa=new E({props:{code:`def transforms(examples): examples["pixel_values"] = [_transforms(image.convert("RGB")) for image in examples["image"]] return examples`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">def</span> <span class="hljs-title function_">transforms</span>(<span class="hljs-params">examples</span>): <span class="hljs-meta">... 
</span> examples[<span class="hljs-string">&quot;pixel_values&quot;</span>] = [_transforms(image.convert(<span class="hljs-string">&quot;RGB&quot;</span>)) <span class="hljs-keyword">for</span> image <span class="hljs-keyword">in</span> examples[<span class="hljs-string">&quot;image&quot;</span>]] <span class="hljs-meta">... </span> <span class="hljs-keyword">return</span> examples`}}),Oa=new E({props:{code:"dataset.set_transform(transforms)",highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>dataset.set_transform(transforms)'}}),Na=new E({props:{code:'dataset[0]["image"]',highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>dataset[<span class="hljs-number">0</span>][<span class="hljs-string">&quot;image&quot;</span>] {<span class="hljs-string">&#x27;image&#x27;</span>: &lt;PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=384x512 at <span class="hljs-number">0x7F1A7B0630D0</span>&gt;, <span class="hljs-string">&#x27;label&#x27;</span>: <span class="hljs-number">6</span>, <span class="hljs-string">&#x27;pixel_values&#x27;</span>: tensor([[[ <span class="hljs-number">0.0353</span>, <span class="hljs-number">0.0745</span>, <span class="hljs-number">0.1216</span>, ..., -<span class="hljs-number">0.9922</span>, -<span class="hljs-number">0.9922</span>, -<span class="hljs-number">0.9922</span>], [-<span class="hljs-number">0.0196</span>, <span class="hljs-number">0.0667</span>, <span class="hljs-number">0.1294</span>, ..., -<span class="hljs-number">0.9765</span>, -<span class="hljs-number">0.9843</span>, -<span class="hljs-number">0.9922</span>], [ <span class="hljs-number">0.0196</span>, <span class="hljs-number">0.0824</span>, <span class="hljs-number">0.1137</span>, ..., -<span class="hljs-number">0.9765</span>, -<span class="hljs-number">0.9686</span>, -<span class="hljs-number">0.8667</span>], ..., [ <span class="hljs-number">0.0275</span>, <span class="hljs-number">0.0745</span>, <span class="hljs-number">0.0510</span>, ..., -<span class="hljs-number">0.1137</span>, -<span class="hljs-number">0.1216</span>, -<span class="hljs-number">0.0824</span>], [ <span class="hljs-number">0.0667</span>, <span class="hljs-number">0.0824</span>, <span class="hljs-number">0.0667</span>, ..., -<span class="hljs-number">0.0588</span>, -<span class="hljs-number">0.0745</span>, -<span class="hljs-number">0.0980</span>], [ <span class="hljs-number">0.0353</span>, <span class="hljs-number">0.0353</span>, <span class="hljs-number">0.0431</span>, ..., -<span class="hljs-number">0.0039</span>, -<span class="hljs-number">0.0039</span>, -<span class="hljs-number">0.0588</span>]], [[ <span class="hljs-number">0.2078</span>, <span class="hljs-number">0.2471</span>, <span class="hljs-number">0.2863</span>, ..., -<span class="hljs-number">0.9451</span>, -<span class="hljs-number">0.9373</span>, -<span class="hljs-number">0.9451</span>], [ <span class="hljs-number">0.1608</span>, <span class="hljs-number">0.2471</span>, <span class="hljs-number">0.3098</span>, ..., -<span class="hljs-number">0.9373</span>, -<span class="hljs-number">0.9451</span>, -<span class="hljs-number">0.9373</span>], [ <span class="hljs-number">0.2078</span>, <span class="hljs-number">0.2706</span>, <span class="hljs-number">0.3020</span>, ..., -<span class="hljs-number">0.9608</span>, -<span class="hljs-number">0.9373</span>, -<span class="hljs-number">0.8275</span>], ..., [-<span class="hljs-number">0.0353</span>, <span class="hljs-number">0.0118</span>, -<span class="hljs-number">0.0039</span>, ..., -<span 
class="hljs-number">0.2392</span>, -<span class="hljs-number">0.2471</span>, -<span class="hljs-number">0.2078</span>], [ <span class="hljs-number">0.0196</span>, <span class="hljs-number">0.0353</span>, <span class="hljs-number">0.0196</span>, ..., -<span class="hljs-number">0.1843</span>, -<span class="hljs-number">0.2000</span>, -<span class="hljs-number">0.2235</span>], [-<span class="hljs-number">0.0118</span>, -<span class="hljs-number">0.0039</span>, -<span class="hljs-number">0.0039</span>, ..., -<span class="hljs-number">0.0980</span>, -<span class="hljs-number">0.0980</span>, -<span class="hljs-number">0.1529</span>]], [[ <span class="hljs-number">0.3961</span>, <span class="hljs-number">0.4431</span>, <span class="hljs-number">0.4980</span>, ..., -<span class="hljs-number">0.9216</span>, -<span class="hljs-number">0.9137</span>, -<span class="hljs-number">0.9216</span>], [ <span class="hljs-number">0.3569</span>, <span class="hljs-number">0.4510</span>, <span class="hljs-number">0.5216</span>, ..., -<span class="hljs-number">0.9059</span>, -<span class="hljs-number">0.9137</span>, -<span class="hljs-number">0.9137</span>], [ <span class="hljs-number">0.4118</span>, <span class="hljs-number">0.4745</span>, <span class="hljs-number">0.5216</span>, ..., -<span class="hljs-number">0.9137</span>, -<span class="hljs-number">0.8902</span>, -<span class="hljs-number">0.7804</span>], ..., [-<span class="hljs-number">0.2314</span>, -<span class="hljs-number">0.1922</span>, -<span class="hljs-number">0.2078</span>, ..., -<span class="hljs-number">0.4196</span>, -<span class="hljs-number">0.4275</span>, -<span class="hljs-number">0.3882</span>], [-<span class="hljs-number">0.1843</span>, -<span class="hljs-number">0.1686</span>, -<span class="hljs-number">0.2000</span>, ..., -<span class="hljs-number">0.3647</span>, -<span class="hljs-number">0.3804</span>, -<span class="hljs-number">0.4039</span>], [-<span class="hljs-number">0.1922</span>, -<span class="hljs-number">0.1922</span>, -<span class="hljs-number">0.1922</span>, ..., -<span class="hljs-number">0.2941</span>, -<span class="hljs-number">0.2863</span>, -<span class="hljs-number">0.3412</span>]]])}`}}),Ia=new E({props:{code:`import numpy as np import matplotlib.pyplot as plt img = dataset[0]["pixel_values"] plt.imshow(img.permute(1, 2, 0))`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> numpy <span class="hljs-keyword">as</span> np <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> matplotlib.pyplot <span class="hljs-keyword">as</span> plt <span class="hljs-meta">&gt;&gt;&gt; </span>img = dataset[<span class="hljs-number">0</span>][<span class="hljs-string">&quot;pixel_values&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>plt.imshow(img.permute(<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">0</span>))`}}),Ra=new k({}),Ha=new E({props:{code:`from datasets import load_dataset lj_speech = load_dataset("lj_speech", split="train")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span>lj_speech = load_dataset(<span class="hljs-string">&quot;lj_speech&quot;</span>, split=<span class="hljs-string">&quot;train&quot;</span>)`}}),Ba=new E({props:{code:'lj_speech = lj_speech.map(remove_columns=["file", "id", "normalized_text"])',highlighted:'<span 
class="hljs-meta">&gt;&gt;&gt; </span>lj_speech = lj_speech.<span class="hljs-built_in">map</span>(remove_columns=[<span class="hljs-string">&quot;file&quot;</span>, <span class="hljs-string">&quot;id&quot;</span>, <span class="hljs-string">&quot;normalized_text&quot;</span>])'}}),Ja=new E({props:{code:`lj_speech[0]["audio"] lj_speech[0]["text"]`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>lj_speech[<span class="hljs-number">0</span>][<span class="hljs-string">&quot;audio&quot;</span>] {<span class="hljs-string">&#x27;array&#x27;</span>: array([-<span class="hljs-number">7.3242188e-04</span>, -<span class="hljs-number">7.6293945e-04</span>, -<span class="hljs-number">6.4086914e-04</span>, ..., <span class="hljs-number">7.3242188e-04</span>, <span class="hljs-number">2.1362305e-04</span>, <span class="hljs-number">6.1035156e-05</span>], dtype=float32), <span class="hljs-string">&#x27;path&#x27;</span>: <span class="hljs-string">&#x27;/root/.cache/huggingface/datasets/downloads/extracted/917ece08c95cf0c4115e45294e3cd0dee724a1165b7fc11798369308a465bd26/LJSpeech-1.1/wavs/LJ001-0001.wav&#x27;</span>, <span class="hljs-string">&#x27;sampling_rate&#x27;</span>: <span class="hljs-number">22050</span>} <span class="hljs-meta">&gt;&gt;&gt; </span>lj_speech[<span class="hljs-number">0</span>][<span class="hljs-string">&quot;text&quot;</span>] <span class="hljs-string">&#x27;Printing, in the only sense with which we are at present concerned, differs from most if not from all the arts and crafts represented in the Exhibition&#x27;</span>`}}),Wa=new E({props:{code:'lj_speech = lj_speech.cast_column("audio", Audio(sampling_rate=16_000))',highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>lj_speech = lj_speech.cast_column(<span class="hljs-string">&quot;audio&quot;</span>, Audio(sampling_rate=<span class="hljs-number">16_000</span>))'}}),Ya=new k({}),Ga=new E({props:{code:`from transformers import AutoProcessor processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoProcessor <span class="hljs-meta">&gt;&gt;&gt; </span>processor = AutoProcessor.from_pretrained(<span class="hljs-string">&quot;facebook/wav2vec2-base-960h&quot;</span>)`}}),Ua=new E({props:{code:`def prepare_dataset(example): audio = example["audio"] example["input_values"] = processor(audio["array"], sampling_rate=16000) with processor.as_target_processor(): example["labels"] = processor(example["text"]).input_ids return example`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">def</span> <span class="hljs-title function_">prepare_dataset</span>(<span class="hljs-params">example</span>): <span class="hljs-meta">... </span> audio = example[<span class="hljs-string">&quot;audio&quot;</span>] <span class="hljs-meta">... </span> example[<span class="hljs-string">&quot;input_values&quot;</span>] = processor(audio[<span class="hljs-string">&quot;array&quot;</span>], sampling_rate=<span class="hljs-number">16000</span>) <span class="hljs-meta">... </span> <span class="hljs-keyword">with</span> processor.as_target_processor(): <span class="hljs-meta">... </span> example[<span class="hljs-string">&quot;labels&quot;</span>] = processor(example[<span class="hljs-string">&quot;text&quot;</span>]).input_ids <span class="hljs-meta">... 
</span> <span class="hljs-keyword">return</span> example`}}),Ka=new E({props:{code:"prepare_dataset(lj_speech[0])",highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>prepare_dataset(lj_speech[<span class="hljs-number">0</span>])'}}),Qa=new k({}),{c(){w=t("meta"),R=c(),$=t("h1"),x=t("a"),ns=t("span"),m(q.$$.fragment),F=c(),Gs=t("span"),oi=r("Preprocess"),Uo=c(),m(Ce.$$.fragment),Mo=c(),at=t("p"),pi=r("Before you can use your data in a model, the data needs to be processed into an acceptable format for the model. A model does not understand raw text, images or audio. These inputs need to be converted into numbers and assembled into tensors. In this tutorial, you will:"),Vo=c(),H=t("ul"),Rn=t("li"),ci=r("Preprocess textual data with a tokenizer."),hi=c(),Fn=t("li"),ii=r("Preprocess image or audio data with a feature extractor."),ui=c(),Hn=t("li"),mi=r("Preprocess data for a multimodal task with a processor."),Ko=c(),ls=t("h2"),Us=t("a"),Bn=t("span"),m(Oe.$$.fragment),di=c(),Jn=t("span"),fi=r("NLP"),Qo=c(),m(Le.$$.fragment),Xo=c(),B=t("p"),bi=r("The main tool for processing textual data is a "),tt=t("a"),ji=r("tokenizer"),_i=r(". A tokenizer starts by splitting text into "),Wn=t("em"),gi=r("tokens"),vi=r(" according to a set of rules. The tokens are converted into numbers, which are used to build tensors as input to a model. Any additional inputs required by a model are also added by the tokenizer."),Zo=c(),m(Ms.$$.fragment),sp=c(),J=t("p"),Ei=r("Get started quickly by loading a pretrained tokenizer with the "),nt=t("a"),wi=r("AutoTokenizer"),$i=r(" class. This downloads the "),Yn=t("em"),yi=r("vocab"),ki=r(" used when a model is pretrained."),ep=c(),rs=t("h3"),Vs=t("a"),Gn=t("span"),m(Se.$$.fragment),xi=c(),Un=t("span"),Ti=r("Tokenize"),ap=c(),Ks=t("p"),qi=r("Load a pretrained tokenizer with "),lt=t("a"),Di=r("AutoTokenizer.from_pretrained()"),Ai=r(":"),tp=c(),m(Ne.$$.fragment),np=c(),rt=t("p"),zi=r("Then pass your sentence to the tokenizer:"),lp=c(),m(Ie.$$.fragment),rp=c(),ot=t("p"),Pi=r("The tokenizer returns a dictionary with three important itmes:"),op=c(),W=t("ul"),pt=t("li"),ct=t("a"),Ci=r("input_ids"),Oi=r(" are the indices corresponding to each token in the sentence."),Li=c(),ht=t("li"),it=t("a"),Si=r("attention_mask"),Ni=r(" indicates whether a token should be attended to or not."),Ii=c(),ut=t("li"),mt=t("a"),Ri=r("token_type_ids"),Fi=r(" identifies which sequence a token belongs to when there is more than one sequence."),pp=c(),Qs=t("p"),Hi=r("You can decode the "),Mn=t("code"),Bi=r("input_ids"),Ji=r(" to return the original input:"),cp=c(),m(Re.$$.fragment),hp=c(),Y=t("p"),Wi=r("As you can see, the tokenizer added two special tokens - "),Vn=t("code"),Yi=r("CLS"),Gi=r(" and "),Kn=t("code"),Ui=r("SEP"),Mi=r(` (classifier and separator) - to the sentence. Not all models need special tokens, but if they do, the tokenizer will automatically add them for you.`),ip=c(),dt=t("p"),Vi=r("If there are several sentences you want to process, pass the sentences as a list to the tokenizer:"),up=c(),m(Fe.$$.fragment),mp=c(),os=t("h3"),Xs=t("a"),Qn=t("span"),m(He.$$.fragment),Ki=c(),Xn=t("span"),Qi=r("Pad"),dp=c(),Zs=t("p"),Xi=r("This brings us to an important topic. When you process a batch of sentences, they aren\u2019t always the same length. This is a problem because tensors, the input to the model, need to have a uniform shape. 
Padding is a strategy for ensuring tensors are rectangular by adding a special "),Zn=t("em"),Zi=r("padding token"),su=r(" to sentences with fewer tokens."),fp=c(),G=t("p"),eu=r("Set the "),sl=t("code"),au=r("padding"),tu=r(" parameter to "),el=t("code"),nu=r("True"),lu=r(" to pad the shorter sequences in the batch to match the longest sequence:"),bp=c(),m(Be.$$.fragment),jp=c(),se=t("p"),ru=r("Notice the tokenizer padded the first and third sentences with a "),al=t("code"),ou=r("0"),pu=r(" because they are shorter!"),_p=c(),ps=t("h3"),ee=t("a"),tl=t("span"),m(Je.$$.fragment),cu=c(),nl=t("span"),hu=r("Truncation"),gp=c(),ft=t("p"),iu=r("On the other end of the spectrum, sometimes a sequence may be too long for a model to handle. In this case, you will need to truncate the sequence to a shorter length."),vp=c(),U=t("p"),uu=r("Set the "),ll=t("code"),mu=r("truncation"),du=r(" parameter to "),rl=t("code"),fu=r("True"),bu=r(" to truncate a sequence to the maximum length accepted by the model:"),Ep=c(),m(We.$$.fragment),wp=c(),cs=t("h3"),ae=t("a"),ol=t("span"),m(Ye.$$.fragment),ju=c(),pl=t("span"),_u=r("Build tensors"),$p=c(),bt=t("p"),gu=r("Finally, you want the tokenizer to return the actual tensors that are fed to the model."),yp=c(),D=t("p"),vu=r("Set the "),cl=t("code"),Eu=r("return_tensors"),wu=r(" parameter to either "),hl=t("code"),$u=r("pt"),yu=r(" for PyTorch, or "),il=t("code"),ku=r("tf"),xu=r(" for TensorFlow:"),kp=c(),m(Ge.$$.fragment),xp=c(),hs=t("h2"),te=t("a"),ul=t("span"),m(Ue.$$.fragment),Tu=c(),ml=t("span"),qu=r("Audio"),Tp=c(),ne=t("p"),Du=r("Audio inputs are preprocessed differently than textual inputs, but the end goal remains the same: create numerical sequences the model can understand. A "),jt=t("a"),Au=r("feature extractor"),zu=r(" is designed for the express purpose of extracting features from raw image or audio data and converting them into tensors. Before you begin, install \u{1F917} Datasets to load an audio dataset to experiment with:"),qp=c(),m(Me.$$.fragment),Dp=c(),M=t("p"),Pu=r("Load the keyword spotting task from the "),Ve=t("a"),Cu=r("SUPERB"),Ou=r(" benchmark (see the \u{1F917} "),Ke=t("a"),Lu=r("Datasets tutorial"),Su=r(" for more details on how to load a dataset):"),Ap=c(),m(Qe.$$.fragment),zp=c(),V=t("p"),Nu=r("Access the first element of the "),dl=t("code"),Iu=r("audio"),Ru=r(" column to take a look at the input. Calling the "),fl=t("code"),Fu=r("audio"),Hu=r(" column will automatically load and resample the audio file:"),Pp=c(),m(Xe.$$.fragment),Cp=c(),_t=t("p"),Bu=r("This returns three items:"),Op=c(),K=t("ul"),gt=t("li"),bl=t("code"),Ju=r("array"),Wu=r(" is the speech signal loaded - and potentially resampled - as a 1D array."),Yu=c(),vt=t("li"),jl=t("code"),Gu=r("path"),Uu=r(" points to the location of the audio file."),Mu=c(),Et=t("li"),_l=t("code"),Vu=r("sampling_rate"),Ku=r(" refers to how many data points in the speech signal are measured per second."),Lp=c(),is=t("h3"),le=t("a"),gl=t("span"),m(Ze.$$.fragment),Qu=c(),vl=t("span"),Xu=r("Resample"),Sp=c(),re=t("p"),Zu=r("For this tutorial, you will use the "),sa=t("a"),sm=r("Wav2Vec2"),em=r(" model. As you can see from the model card, the Wav2Vec2 model is pretrained on 16kHz sampled speech audio. It is important your audio data\u2019s sampling rate matches the sampling rate of the dataset used to pretrain the model. 
If your data\u2019s sampling rate isn\u2019t the same, then you need to resample your audio data."),Np=c(),oe=t("p"),am=r("For example, load the "),ea=t("a"),tm=r("LJ Speech"),nm=r(" dataset which has a sampling rate of 22050kHz. In order to use the Wav2Vec2 model with this dataset, downsample the sampling rate to 16kHz:"),Ip=c(),m(aa.$$.fragment),Rp=c(),wt=t("ol"),ta=t("li"),lm=r("Use \u{1F917} Datasets\u2019 "),na=t("a"),El=t("code"),rm=r("cast_column"),om=r(" method to downsample the sampling rate to 16kHz:"),Fp=c(),m(la.$$.fragment),Hp=c(),ra=t("ol"),wl=t("li"),pm=r("Load the audio file:"),Bp=c(),m(oa.$$.fragment),Jp=c(),pe=t("p"),cm=r("As you can see, the "),$l=t("code"),hm=r("sampling_rate"),im=r(" was downsampled to 16kHz. Now that you know how resampling works, let\u2019s return to our previous example with the SUPERB dataset!"),Wp=c(),us=t("h3"),ce=t("a"),yl=t("span"),m(pa.$$.fragment),um=c(),kl=t("span"),mm=r("Feature extractor"),Yp=c(),A=t("p"),dm=r("The next step is to load a feature extractor to normalize and pad the input. When padding textual data, a "),xl=t("code"),fm=r("0"),bm=r(" is added for shorter sequences. The same idea applies to audio data, and the audio feature extractor will add a "),Tl=t("code"),jm=r("0"),_m=r(" - interpreted as silence - to "),ql=t("code"),gm=r("array"),vm=r("."),Gp=c(),he=t("p"),Em=r("Load the feature extractor with "),$t=t("a"),wm=r("AutoFeatureExtractor.from_pretrained()"),$m=r(":"),Up=c(),m(ca.$$.fragment),Mp=c(),Q=t("p"),ym=r("Pass the audio "),Dl=t("code"),km=r("array"),xm=r(" to the feature extractor. We also recommend adding the "),Al=t("code"),Tm=r("sampling_rate"),qm=r(" argument in the feature extractor in order to better debug any silent errors that may occur."),Vp=c(),m(ha.$$.fragment),Kp=c(),ms=t("h3"),ie=t("a"),zl=t("span"),m(ia.$$.fragment),Dm=c(),Pl=t("span"),Am=r("Pad and truncate"),Qp=c(),yt=t("p"),zm=r("Just like the tokenizer, you can apply padding or truncation to handle variable sequences in a batch. Take a look at the sequence length of these two audio samples:"),Xp=c(),m(ua.$$.fragment),Zp=c(),kt=t("p"),Pm=r("As you can see, the first sample has a longer sequence than the second sample. Let\u2019s create a function that will preprocess the dataset. Specify a maximum sample length, and the feature extractor will either pad or truncate the sequences to match it:"),sc=c(),m(ma.$$.fragment),ec=c(),xt=t("p"),Cm=r("Apply the function to the the first few examples in the dataset:"),ac=c(),m(da.$$.fragment),tc=c(),Tt=t("p"),Om=r("Now take another look at the processed sample lengths:"),nc=c(),m(fa.$$.fragment),lc=c(),qt=t("p"),Lm=r("The lengths of the first two samples now match the maximum length you specified."),rc=c(),ds=t("h2"),ue=t("a"),Cl=t("span"),m(ba.$$.fragment),Sm=c(),Ol=t("span"),Nm=r("Vision"),oc=c(),Dt=t("p"),Im=r("A feature extractor is also used to process images for vision tasks. Once again, the goal is to convert the raw image into a batch of tensors as input."),pc=c(),X=t("p"),Rm=r("Let\u2019s load the "),ja=t("a"),Fm=r("food101"),Hm=r(" dataset for this tutorial. 
Use \u{1F917} Datasets "),Ll=t("code"),Bm=r("split"),Jm=r(" parameter to only load a small sample from the training split since the dataset is quite large:"),cc=c(),m(_a.$$.fragment),hc=c(),me=t("p"),Wm=r("Next, take a look at the image with \u{1F917} Datasets "),ga=t("a"),Sl=t("code"),Ym=r("Image"),Gm=r(" feature:"),ic=c(),m(va.$$.fragment),uc=c(),At=t("p"),zt=t("img"),mc=c(),fs=t("h3"),de=t("a"),Nl=t("span"),m(Ea.$$.fragment),Um=c(),Il=t("span"),Mm=r("Feature extractor"),dc=c(),fe=t("p"),Vm=r("Load the feature extractor with "),Pt=t("a"),Km=r("AutoFeatureExtractor.from_pretrained()"),Qm=r(":"),fc=c(),m(wa.$$.fragment),bc=c(),bs=t("h3"),be=t("a"),Rl=t("span"),m($a.$$.fragment),Xm=c(),Fl=t("span"),Zm=r("Data augmentation"),jc=c(),je=t("p"),sd=r("For vision tasks, it is common to add some type of data augmentation to the images as a part of preprocessing. You can add augmentations with any library you\u2019d like, but in this tutorial, you will use torchvision\u2019s "),ya=t("a"),Hl=t("code"),ed=r("transforms"),ad=r(" module."),_c=c(),Ct=t("ol"),S=t("li"),td=r("Normalize the image and use "),ka=t("a"),Bl=t("code"),nd=r("Compose"),ld=r(" to chain some transforms - "),xa=t("a"),Jl=t("code"),rd=r("RandomResizedCrop"),od=r(" and "),Ta=t("a"),Wl=t("code"),pd=r("ColorJitter"),cd=r(" - together:"),gc=c(),m(qa.$$.fragment),vc=c(),Da=t("ol"),js=t("li"),hd=r("The model accepts "),Ot=t("a"),Yl=t("code"),id=r("pixel_values"),ud=r(" as it\u2019s input. This value is generated by the feature extractor. Create a function that generates "),Gl=t("code"),md=r("pixel_values"),dd=r(" from the transforms:"),Ec=c(),m(Aa.$$.fragment),wc=c(),za=t("ol"),Pa=t("li"),fd=r("Then use \u{1F917} Datasets "),Ca=t("a"),Ul=t("code"),bd=r("set_transform"),jd=r(" to apply the transforms on-the-fly:"),$c=c(),m(Oa.$$.fragment),yc=c(),La=t("ol"),Sa=t("li"),_d=r("Now when you access the image, you will notice the feature extractor has added the model input "),Ml=t("code"),gd=r("pixel_values"),vd=r(":"),kc=c(),m(Na.$$.fragment),xc=c(),Lt=t("p"),Ed=r("Here is what the image looks like after you preprocess it. Just as you\u2019d expect from the applied transforms, the image has been randomly cropped and it\u2019s color properties are different."),Tc=c(),m(Ia.$$.fragment),qc=c(),St=t("p"),Nt=t("img"),Dc=c(),_s=t("h2"),_e=t("a"),Vl=t("span"),m(Ra.$$.fragment),wd=c(),Kl=t("span"),$d=r("Multimodal"),Ac=c(),It=t("p"),yd=r("For multimodal tasks. you will use a combination of everything you\u2019ve learned so far and apply your skills to a automatic speech recognition (ASR) task. 
This means you will need a:"),zc=c(),ge=t("ul"),Ql=t("li"),kd=r("Feature extractor to preprocess the audio data."),xd=c(),Xl=t("li"),Td=r("Tokenizer to process the text."),Pc=c(),ve=t("p"),qd=r("Let\u2019s return to the "),Fa=t("a"),Dd=r("LJ Speech"),Ad=r(" dataset:"),Cc=c(),m(Ha.$$.fragment),Oc=c(),Z=t("p"),zd=r("Since you are mainly interested in the "),Zl=t("code"),Pd=r("audio"),Cd=r(" and "),sr=t("code"),Od=r("text"),Ld=r(" column, remove the other columns:"),Lc=c(),m(Ba.$$.fragment),Sc=c(),ss=t("p"),Sd=r("Now take a look at the "),er=t("code"),Nd=r("audio"),Id=r(" and "),ar=t("code"),Rd=r("text"),Fd=r(" columns:"),Nc=c(),m(Ja.$$.fragment),Ic=c(),Ee=t("p"),Hd=r("Remember from the earlier section on processing audio data, you should always "),Rt=t("a"),Bd=r("resample"),Jd=r(" your audio data\u2019s sampling rate to match the sampling rate of the dataset used to pretrain a model:"),Rc=c(),m(Wa.$$.fragment),Fc=c(),gs=t("h3"),we=t("a"),tr=t("span"),m(Ya.$$.fragment),Wd=c(),nr=t("span"),Yd=r("Processor"),Hc=c(),Ft=t("p"),Gd=r("A processor combines a feature extractor and tokenizer. Load a processor with [`AutoProcessor.from_pretrained]:"),Bc=c(),m(Ga.$$.fragment),Jc=c(),Ht=t("ol"),vs=t("li"),Ud=r("Create a function to process the audio data to "),lr=t("code"),Md=r("input_values"),Vd=r(", and tokenizes the text to "),rr=t("code"),Kd=r("labels"),Qd=r(". These are your inputs to the model:"),Wc=c(),m(Ua.$$.fragment),Yc=c(),Ma=t("ol"),Va=t("li"),Xd=r("Apply the "),or=t("code"),Zd=r("prepare_dataset"),sf=r(" function to a sample:"),Gc=c(),m(Ka.$$.fragment),Uc=c(),es=t("p"),ef=r("Notice the processor has added "),pr=t("code"),af=r("input_values"),tf=r(" and "),cr=t("code"),nf=r("labels"),lf=r(". The sampling rate has also been correctly downsampled to 16kHz."),Mc=c(),Bt=t("p"),rf=r("Awesome, you should now be able to preprocess data for any modality and even combine different modalities! In the next tutorial, learn how to fine-tune a model on your newly preprocessed data."),Vc=c(),Es=t("h2"),$e=t("a"),hr=t("span"),m(Qa.$$.fragment),of=c(),ir=t("span"),pf=r("Everything you always wanted to know about padding and truncation"),Kc=c(),z=t("p"),cf=r(`We have seen the commands that will work for most cases (pad your batch to the length of the maximum sentence and truncate to the maximum length the model can accept). However, the API supports more strategies if you need them. The three arguments you need to know for this are `),ur=t("code"),hf=r("padding"),uf=r(", "),mr=t("code"),mf=r("truncation"),df=r(" and "),dr=t("code"),ff=r("max_length"),bf=r("."),Qc=c(),as=t("ul"),Xa=t("li"),Jt=t("p"),fr=t("code"),jf=r("padding"),_f=r(" controls the padding. It can be a boolean or a string which should be:"),gf=c(),ws=t("ul"),ye=t("li"),br=t("code"),vf=r("True"),Ef=r(" or "),jr=t("code"),wf=r("'longest'"),$f=r(` to pad to the longest sequence in the batch (doing no padding if you only provide a single sequence).`),yf=c(),P=t("li"),_r=t("code"),kf=r("'max_length'"),xf=r(" to pad to a length specified by the "),gr=t("code"),Tf=r("max_length"),qf=r(` argument or the maximum length accepted by the model if no `),vr=t("code"),Df=r("max_length"),Af=r(" is provided ("),Er=t("code"),zf=r("max_length=None"),Pf=r(`). If you only provide a single sequence, padding will still be applied to it.`),Cf=c(),ke=t("li"),wr=t("code"),Of=r("False"),Lf=r(" or "),$r=t("code"),Sf=r("'do_not_pad'"),Nf=r(` to not pad the sequences. 
As we have seen before, this is the default behavior.`),If=c(),Za=t("li"),Wt=t("p"),yr=t("code"),Rf=r("truncation"),Ff=r(" controls the truncation. It can be a boolean or a string which should be:"),Hf=c(),N=t("ul"),T=t("li"),kr=t("code"),Bf=r("True"),Jf=r(" or "),xr=t("code"),Wf=r("'longest_first'"),Yf=r(" truncate to a maximum length specified by the "),Tr=t("code"),Gf=r("max_length"),Uf=r(` argument or the maximum length accepted by the model if no `),qr=t("code"),Mf=r("max_length"),Vf=r(" is provided ("),Dr=t("code"),Kf=r("max_length=None"),Qf=r(`). This will truncate token by token, removing a token from the longest sequence in the pair until the proper length is reached.`),Xf=c(),C=t("li"),Ar=t("code"),Zf=r("'only_second'"),sb=r(" truncate to a maximum length specified by the "),zr=t("code"),eb=r("max_length"),ab=r(` argument or the maximum length accepted by the model if no `),Pr=t("code"),tb=r("max_length"),nb=r(" is provided ("),Cr=t("code"),lb=r("max_length=None"),rb=r(`). This will only truncate the second sentence of a pair if a pair of sequence (or a batch of pairs of sequences) is provided.`),ob=c(),O=t("li"),Or=t("code"),pb=r("'only_first'"),cb=r(" truncate to a maximum length specified by the "),Lr=t("code"),hb=r("max_length"),ib=r(` argument or the maximum length accepted by the model if no `),Sr=t("code"),ub=r("max_length"),mb=r(" is provided ("),Nr=t("code"),db=r("max_length=None"),fb=r(`). This will only truncate the first sentence of a pair if a pair of sequence (or a batch of pairs of sequences) is provided.`),bb=c(),xe=t("li"),Ir=t("code"),jb=r("False"),_b=r(" or "),Rr=t("code"),gb=r("'do_not_truncate'"),vb=r(` to not truncate the sequences. As we have seen before, this is the default behavior.`),Eb=c(),Fr=t("li"),ts=t("p"),Hr=t("code"),wb=r("max_length"),$b=r(" to control the length of the padding/truncation. It can be an integer or "),Br=t("code"),yb=r("None"),kb=r(`, in which case it will default to the maximum length the model can accept. If the model has no specific maximum input length, truncation/padding to `),Jr=t("code"),xb=r("max_length"),Tb=r(" is deactivated."),Xc=c(),y=t("p"),qb=r(`Here is a table summarizing the recommend way to setup padding and truncation. If you use pair of inputs sequence in any of the following examples, you can replace `),Wr=t("code"),Db=r("truncation=True"),Ab=r(" by a "),Yr=t("code"),zb=r("STRATEGY"),Pb=r(` selected in `),Gr=t("code"),Cb=r("['only_first', 'only_second', 'longest_first']"),Ob=r(", i.e. 
"),Ur=t("code"),Lb=r("truncation='only_second'"),Sb=r(" or "),Mr=t("code"),Nb=r("truncation= 'longest_first'"),Ib=r(" to control how both sequence in the pair are truncated as detailed before."),Zc=c(),Te=t("table"),Vr=t("thead"),$s=t("tr"),Kr=t("th"),Rb=r("Truncation"),Fb=c(),Qr=t("th"),Hb=r("Padding"),Bb=c(),Xr=t("th"),Jb=r("Instruction"),Wb=c(),g=t("tbody"),ys=t("tr"),Zr=t("td"),Yb=r("no truncation"),Gb=c(),so=t("td"),Ub=r("no padding"),Mb=c(),eo=t("td"),ao=t("code"),Vb=r("tokenizer(batch_sentences)"),Kb=c(),ks=t("tr"),sh=t("td"),Qb=c(),to=t("td"),Xb=r("padding to max sequence in batch"),Zb=c(),Yt=t("td"),no=t("code"),sj=r("tokenizer(batch_sentences, padding=True)"),ej=r(" or"),aj=c(),xs=t("tr"),eh=t("td"),tj=c(),ah=t("td"),nj=c(),lo=t("td"),ro=t("code"),lj=r("tokenizer(batch_sentences, padding='longest')"),rj=c(),Ts=t("tr"),th=t("td"),oj=c(),oo=t("td"),pj=r("padding to max model input length"),cj=c(),po=t("td"),co=t("code"),hj=r("tokenizer(batch_sentences, padding='max_length')"),ij=c(),qs=t("tr"),nh=t("td"),uj=c(),ho=t("td"),mj=r("padding to specific length"),dj=c(),io=t("td"),uo=t("code"),fj=r("tokenizer(batch_sentences, padding='max_length', max_length=42)"),bj=c(),Ds=t("tr"),mo=t("td"),jj=r("truncation to max model input length"),_j=c(),fo=t("td"),gj=r("no padding"),vj=c(),Gt=t("td"),bo=t("code"),Ej=r("tokenizer(batch_sentences, truncation=True)"),wj=r(" or"),$j=c(),As=t("tr"),lh=t("td"),yj=c(),rh=t("td"),kj=c(),jo=t("td"),_o=t("code"),xj=r("tokenizer(batch_sentences, truncation=STRATEGY)"),Tj=c(),zs=t("tr"),oh=t("td"),qj=c(),go=t("td"),Dj=r("padding to max sequence in batch"),Aj=c(),Ut=t("td"),vo=t("code"),zj=r("tokenizer(batch_sentences, padding=True, truncation=True)"),Pj=r(" or"),Cj=c(),Ps=t("tr"),ph=t("td"),Oj=c(),ch=t("td"),Lj=c(),Eo=t("td"),wo=t("code"),Sj=r("tokenizer(batch_sentences, padding=True, truncation=STRATEGY)"),Nj=c(),Cs=t("tr"),hh=t("td"),Ij=c(),$o=t("td"),Rj=r("padding to max model input length"),Fj=c(),Mt=t("td"),yo=t("code"),Hj=r("tokenizer(batch_sentences, padding='max_length', truncation=True)"),Bj=r(" or"),Jj=c(),Os=t("tr"),ih=t("td"),Wj=c(),uh=t("td"),Yj=c(),ko=t("td"),xo=t("code"),Gj=r("tokenizer(batch_sentences, padding='max_length', truncation=STRATEGY)"),Uj=c(),Ls=t("tr"),mh=t("td"),Mj=c(),To=t("td"),Vj=r("padding to specific length"),Kj=c(),qo=t("td"),Qj=r("Not possible"),Xj=c(),Ss=t("tr"),Do=t("td"),Zj=r("truncation to specific length"),s1=c(),Ao=t("td"),e1=r("no padding"),a1=c(),Vt=t("td"),zo=t("code"),t1=r("tokenizer(batch_sentences, truncation=True, max_length=42)"),n1=r(" or"),l1=c(),Ns=t("tr"),dh=t("td"),r1=c(),fh=t("td"),o1=c(),Po=t("td"),Co=t("code"),p1=r("tokenizer(batch_sentences, truncation=STRATEGY, max_length=42)"),c1=c(),Is=t("tr"),bh=t("td"),h1=c(),Oo=t("td"),i1=r("padding to max sequence in batch"),u1=c(),Kt=t("td"),Lo=t("code"),m1=r("tokenizer(batch_sentences, padding=True, truncation=True, max_length=42)"),d1=r(" or"),f1=c(),Rs=t("tr"),jh=t("td"),b1=c(),_h=t("td"),j1=c(),So=t("td"),No=t("code"),_1=r("tokenizer(batch_sentences, padding=True, truncation=STRATEGY, max_length=42)"),g1=c(),Fs=t("tr"),gh=t("td"),v1=c(),Io=t("td"),E1=r("padding to max model input length"),w1=c(),Ro=t("td"),$1=r("Not possible"),y1=c(),Hs=t("tr"),vh=t("td"),k1=c(),Fo=t("td"),x1=r("padding to specific length"),T1=c(),Qt=t("td"),Ho=t("code"),q1=r("tokenizer(batch_sentences, padding='max_length', truncation=True, max_length=42)"),D1=r(" or"),A1=c(),Bs=t("tr"),Eh=t("td"),z1=c(),wh=t("td"),P1=c(),Bo=t("td"),Jo=t("code"),C1=r("tokenizer(batch_sentences, 
padding='max_length', truncation=STRATEGY, max_length=42)"),this.h()},l(s){const p=vv('[data-svelte="svelte-1phssyn"]',document.head);w=n(p,"META",{name:!0,content:!0}),p.forEach(a),R=h(s),$=n(s,"H1",{class:!0});var st=l($);x=n(st,"A",{id:!0,class:!0,href:!0});var Q1=l(x);ns=n(Q1,"SPAN",{});var X1=l(ns);d(q.$$.fragment,X1),X1.forEach(a),Q1.forEach(a),F=h(st),Gs=n(st,"SPAN",{});var Z1=l(Gs);oi=o(Z1,"Preprocess"),Z1.forEach(a),st.forEach(a),Uo=h(s),d(Ce.$$.fragment,s),Mo=h(s),at=n(s,"P",{});var s0=l(at);pi=o(s0,"Before you can use your data in a model, the data needs to be processed into an acceptable format for the model. A model does not understand raw text, images or audio. These inputs need to be converted into numbers and assembled into tensors. In this tutorial, you will:"),s0.forEach(a),Vo=h(s),H=n(s,"UL",{});var Xt=l(H);Rn=n(Xt,"LI",{});var e0=l(Rn);ci=o(e0,"Preprocess textual data with a tokenizer."),e0.forEach(a),hi=h(Xt),Fn=n(Xt,"LI",{});var a0=l(Fn);ii=o(a0,"Preprocess image or audio data with a feature extractor."),a0.forEach(a),ui=h(Xt),Hn=n(Xt,"LI",{});var t0=l(Hn);mi=o(t0,"Preprocess data for a multimodal task with a processor."),t0.forEach(a),Xt.forEach(a),Ko=h(s),ls=n(s,"H2",{class:!0});var yh=l(ls);Us=n(yh,"A",{id:!0,class:!0,href:!0});var n0=l(Us);Bn=n(n0,"SPAN",{});var l0=l(Bn);d(Oe.$$.fragment,l0),l0.forEach(a),n0.forEach(a),di=h(yh),Jn=n(yh,"SPAN",{});var r0=l(Jn);fi=o(r0,"NLP"),r0.forEach(a),yh.forEach(a),Qo=h(s),d(Le.$$.fragment,s),Xo=h(s),B=n(s,"P",{});var Zt=l(B);bi=o(Zt,"The main tool for processing textual data is a "),tt=n(Zt,"A",{href:!0});var o0=l(tt);ji=o(o0,"tokenizer"),o0.forEach(a),_i=o(Zt,". A tokenizer starts by splitting text into "),Wn=n(Zt,"EM",{});var p0=l(Wn);gi=o(p0,"tokens"),p0.forEach(a),vi=o(Zt," according to a set of rules. The tokens are converted into numbers, which are used to build tensors as input to a model. Any additional inputs required by a model are also added by the tokenizer."),Zt.forEach(a),Zo=h(s),d(Ms.$$.fragment,s),sp=h(s),J=n(s,"P",{});var sn=l(J);Ei=o(sn,"Get started quickly by loading a pretrained tokenizer with the "),nt=n(sn,"A",{href:!0});var c0=l(nt);wi=o(c0,"AutoTokenizer"),c0.forEach(a),$i=o(sn," class. 
This downloads the "),Yn=n(sn,"EM",{});var h0=l(Yn);yi=o(h0,"vocab"),h0.forEach(a),ki=o(sn," used when a model is pretrained."),sn.forEach(a),ep=h(s),rs=n(s,"H3",{class:!0});var kh=l(rs);Vs=n(kh,"A",{id:!0,class:!0,href:!0});var i0=l(Vs);Gn=n(i0,"SPAN",{});var u0=l(Gn);d(Se.$$.fragment,u0),u0.forEach(a),i0.forEach(a),xi=h(kh),Un=n(kh,"SPAN",{});var m0=l(Un);Ti=o(m0,"Tokenize"),m0.forEach(a),kh.forEach(a),ap=h(s),Ks=n(s,"P",{});var xh=l(Ks);qi=o(xh,"Load a pretrained tokenizer with "),lt=n(xh,"A",{href:!0});var d0=l(lt);Di=o(d0,"AutoTokenizer.from_pretrained()"),d0.forEach(a),Ai=o(xh,":"),xh.forEach(a),tp=h(s),d(Ne.$$.fragment,s),np=h(s),rt=n(s,"P",{});var f0=l(rt);zi=o(f0,"Then pass your sentence to the tokenizer:"),f0.forEach(a),lp=h(s),d(Ie.$$.fragment,s),rp=h(s),ot=n(s,"P",{});var b0=l(ot);Pi=o(b0,"The tokenizer returns a dictionary with three important itmes:"),b0.forEach(a),op=h(s),W=n(s,"UL",{});var en=l(W);pt=n(en,"LI",{});var O1=l(pt);ct=n(O1,"A",{href:!0});var j0=l(ct);Ci=o(j0,"input_ids"),j0.forEach(a),Oi=o(O1," are the indices corresponding to each token in the sentence."),O1.forEach(a),Li=h(en),ht=n(en,"LI",{});var L1=l(ht);it=n(L1,"A",{href:!0});var _0=l(it);Si=o(_0,"attention_mask"),_0.forEach(a),Ni=o(L1," indicates whether a token should be attended to or not."),L1.forEach(a),Ii=h(en),ut=n(en,"LI",{});var S1=l(ut);mt=n(S1,"A",{href:!0});var g0=l(mt);Ri=o(g0,"token_type_ids"),g0.forEach(a),Fi=o(S1," identifies which sequence a token belongs to when there is more than one sequence."),S1.forEach(a),en.forEach(a),pp=h(s),Qs=n(s,"P",{});var Th=l(Qs);Hi=o(Th,"You can decode the "),Mn=n(Th,"CODE",{});var v0=l(Mn);Bi=o(v0,"input_ids"),v0.forEach(a),Ji=o(Th," to return the original input:"),Th.forEach(a),cp=h(s),d(Re.$$.fragment,s),hp=h(s),Y=n(s,"P",{});var an=l(Y);Wi=o(an,"As you can see, the tokenizer added two special tokens - "),Vn=n(an,"CODE",{});var E0=l(Vn);Yi=o(E0,"CLS"),E0.forEach(a),Gi=o(an," and "),Kn=n(an,"CODE",{});var w0=l(Kn);Ui=o(w0,"SEP"),w0.forEach(a),Mi=o(an,` (classifier and separator) - to the sentence. Not all models need special tokens, but if they do, the tokenizer will automatically add them for you.`),an.forEach(a),ip=h(s),dt=n(s,"P",{});var $0=l(dt);Vi=o($0,"If there are several sentences you want to process, pass the sentences as a list to the tokenizer:"),$0.forEach(a),up=h(s),d(Fe.$$.fragment,s),mp=h(s),os=n(s,"H3",{class:!0});var qh=l(os);Xs=n(qh,"A",{id:!0,class:!0,href:!0});var y0=l(Xs);Qn=n(y0,"SPAN",{});var k0=l(Qn);d(He.$$.fragment,k0),k0.forEach(a),y0.forEach(a),Ki=h(qh),Xn=n(qh,"SPAN",{});var x0=l(Xn);Qi=o(x0,"Pad"),x0.forEach(a),qh.forEach(a),dp=h(s),Zs=n(s,"P",{});var Dh=l(Zs);Xi=o(Dh,"This brings us to an important topic. When you process a batch of sentences, they aren\u2019t always the same length. This is a problem because tensors, the input to the model, need to have a uniform shape. 
Padding is a strategy for ensuring tensors are rectangular by adding a special "),Zn=n(Dh,"EM",{});var T0=l(Zn);Zi=o(T0,"padding token"),T0.forEach(a),su=o(Dh," to sentences with fewer tokens."),Dh.forEach(a),fp=h(s),G=n(s,"P",{});var tn=l(G);eu=o(tn,"Set the "),sl=n(tn,"CODE",{});var q0=l(sl);au=o(q0,"padding"),q0.forEach(a),tu=o(tn," parameter to "),el=n(tn,"CODE",{});var D0=l(el);nu=o(D0,"True"),D0.forEach(a),lu=o(tn," to pad the shorter sequences in the batch to match the longest sequence:"),tn.forEach(a),bp=h(s),d(Be.$$.fragment,s),jp=h(s),se=n(s,"P",{});var Ah=l(se);ru=o(Ah,"Notice the tokenizer padded the first and third sentences with a "),al=n(Ah,"CODE",{});var A0=l(al);ou=o(A0,"0"),A0.forEach(a),pu=o(Ah," because they are shorter!"),Ah.forEach(a),_p=h(s),ps=n(s,"H3",{class:!0});var zh=l(ps);ee=n(zh,"A",{id:!0,class:!0,href:!0});var z0=l(ee);tl=n(z0,"SPAN",{});var P0=l(tl);d(Je.$$.fragment,P0),P0.forEach(a),z0.forEach(a),cu=h(zh),nl=n(zh,"SPAN",{});var C0=l(nl);hu=o(C0,"Truncation"),C0.forEach(a),zh.forEach(a),gp=h(s),ft=n(s,"P",{});var O0=l(ft);iu=o(O0,"On the other end of the spectrum, sometimes a sequence may be too long for a model to handle. In this case, you will need to truncate the sequence to a shorter length."),O0.forEach(a),vp=h(s),U=n(s,"P",{});var nn=l(U);uu=o(nn,"Set the "),ll=n(nn,"CODE",{});var L0=l(ll);mu=o(L0,"truncation"),L0.forEach(a),du=o(nn," parameter to "),rl=n(nn,"CODE",{});var S0=l(rl);fu=o(S0,"True"),S0.forEach(a),bu=o(nn," to truncate a sequence to the maximum length accepted by the model:"),nn.forEach(a),Ep=h(s),d(We.$$.fragment,s),wp=h(s),cs=n(s,"H3",{class:!0});var Ph=l(cs);ae=n(Ph,"A",{id:!0,class:!0,href:!0});var N0=l(ae);ol=n(N0,"SPAN",{});var I0=l(ol);d(Ye.$$.fragment,I0),I0.forEach(a),N0.forEach(a),ju=h(Ph),pl=n(Ph,"SPAN",{});var R0=l(pl);_u=o(R0,"Build tensors"),R0.forEach(a),Ph.forEach(a),$p=h(s),bt=n(s,"P",{});var F0=l(bt);gu=o(F0,"Finally, you want the tokenizer to return the actual tensors that are fed to the model."),F0.forEach(a),yp=h(s),D=n(s,"P",{});var qe=l(D);vu=o(qe,"Set the "),cl=n(qe,"CODE",{});var H0=l(cl);Eu=o(H0,"return_tensors"),H0.forEach(a),wu=o(qe," parameter to either "),hl=n(qe,"CODE",{});var B0=l(hl);$u=o(B0,"pt"),B0.forEach(a),yu=o(qe," for PyTorch, or "),il=n(qe,"CODE",{});var J0=l(il);ku=o(J0,"tf"),J0.forEach(a),xu=o(qe," for TensorFlow:"),qe.forEach(a),kp=h(s),d(Ge.$$.fragment,s),xp=h(s),hs=n(s,"H2",{class:!0});var Ch=l(hs);te=n(Ch,"A",{id:!0,class:!0,href:!0});var W0=l(te);ul=n(W0,"SPAN",{});var Y0=l(ul);d(Ue.$$.fragment,Y0),Y0.forEach(a),W0.forEach(a),Tu=h(Ch),ml=n(Ch,"SPAN",{});var G0=l(ml);qu=o(G0,"Audio"),G0.forEach(a),Ch.forEach(a),Tp=h(s),ne=n(s,"P",{});var Oh=l(ne);Du=o(Oh,"Audio inputs are preprocessed differently than textual inputs, but the end goal remains the same: create numerical sequences the model can understand. A "),jt=n(Oh,"A",{href:!0});var U0=l(jt);Au=o(U0,"feature extractor"),U0.forEach(a),zu=o(Oh," is designed for the express purpose of extracting features from raw image or audio data and converting them into tensors. 
Before you begin, install \u{1F917} Datasets to load an audio dataset to experiment with:"),Oh.forEach(a),qp=h(s),d(Me.$$.fragment,s),Dp=h(s),M=n(s,"P",{});var ln=l(M);Pu=o(ln,"Load the keyword spotting task from the "),Ve=n(ln,"A",{href:!0,rel:!0});var M0=l(Ve);Cu=o(M0,"SUPERB"),M0.forEach(a),Ou=o(ln," benchmark (see the \u{1F917} "),Ke=n(ln,"A",{href:!0,rel:!0});var V0=l(Ke);Lu=o(V0,"Datasets tutorial"),V0.forEach(a),Su=o(ln," for more details on how to load a dataset):"),ln.forEach(a),Ap=h(s),d(Qe.$$.fragment,s),zp=h(s),V=n(s,"P",{});var rn=l(V);Nu=o(rn,"Access the first element of the "),dl=n(rn,"CODE",{});var K0=l(dl);Iu=o(K0,"audio"),K0.forEach(a),Ru=o(rn," column to take a look at the input. Calling the "),fl=n(rn,"CODE",{});var Q0=l(fl);Fu=o(Q0,"audio"),Q0.forEach(a),Hu=o(rn," column will automatically load and resample the audio file:"),rn.forEach(a),Pp=h(s),d(Xe.$$.fragment,s),Cp=h(s),_t=n(s,"P",{});var X0=l(_t);Bu=o(X0,"This returns three items:"),X0.forEach(a),Op=h(s),K=n(s,"UL",{});var on=l(K);gt=n(on,"LI",{});var N1=l(gt);bl=n(N1,"CODE",{});var Z0=l(bl);Ju=o(Z0,"array"),Z0.forEach(a),Wu=o(N1," is the speech signal loaded - and potentially resampled - as a 1D array."),N1.forEach(a),Yu=h(on),vt=n(on,"LI",{});var I1=l(vt);jl=n(I1,"CODE",{});var s_=l(jl);Gu=o(s_,"path"),s_.forEach(a),Uu=o(I1," points to the location of the audio file."),I1.forEach(a),Mu=h(on),Et=n(on,"LI",{});var R1=l(Et);_l=n(R1,"CODE",{});var e_=l(_l);Vu=o(e_,"sampling_rate"),e_.forEach(a),Ku=o(R1," refers to how many data points in the speech signal are measured per second."),R1.forEach(a),on.forEach(a),Lp=h(s),is=n(s,"H3",{class:!0});var Lh=l(is);le=n(Lh,"A",{id:!0,class:!0,href:!0});var a_=l(le);gl=n(a_,"SPAN",{});var t_=l(gl);d(Ze.$$.fragment,t_),t_.forEach(a),a_.forEach(a),Qu=h(Lh),vl=n(Lh,"SPAN",{});var n_=l(vl);Xu=o(n_,"Resample"),n_.forEach(a),Lh.forEach(a),Sp=h(s),re=n(s,"P",{});var Sh=l(re);Zu=o(Sh,"For this tutorial, you will use the "),sa=n(Sh,"A",{href:!0,rel:!0});var l_=l(sa);sm=o(l_,"Wav2Vec2"),l_.forEach(a),em=o(Sh," model. As you can see from the model card, the Wav2Vec2 model is pretrained on 16kHz sampled speech audio. It is important your audio data\u2019s sampling rate matches the sampling rate of the dataset used to pretrain the model. If your data\u2019s sampling rate isn\u2019t the same, then you need to resample your audio data."),Sh.forEach(a),Np=h(s),oe=n(s,"P",{});var Nh=l(oe);am=o(Nh,"For example, load the "),ea=n(Nh,"A",{href:!0,rel:!0});var r_=l(ea);tm=o(r_,"LJ Speech"),r_.forEach(a),nm=o(Nh," dataset which has a sampling rate of 22050kHz. In order to use the Wav2Vec2 model with this dataset, downsample the sampling rate to 16kHz:"),Nh.forEach(a),Ip=h(s),d(aa.$$.fragment,s),Rp=h(s),wt=n(s,"OL",{});var o_=l(wt);ta=n(o_,"LI",{});var Ih=l(ta);lm=o(Ih,"Use \u{1F917} Datasets\u2019 "),na=n(Ih,"A",{href:!0,rel:!0});var p_=l(na);El=n(p_,"CODE",{});var c_=l(El);rm=o(c_,"cast_column"),c_.forEach(a),p_.forEach(a),om=o(Ih," method to downsample the sampling rate to 16kHz:"),Ih.forEach(a),o_.forEach(a),Fp=h(s),d(la.$$.fragment,s),Hp=h(s),ra=n(s,"OL",{start:!0});var h_=l(ra);wl=n(h_,"LI",{});var i_=l(wl);pm=o(i_,"Load the audio file:"),i_.forEach(a),h_.forEach(a),Bp=h(s),d(oa.$$.fragment,s),Jp=h(s),pe=n(s,"P",{});var Rh=l(pe);cm=o(Rh,"As you can see, the "),$l=n(Rh,"CODE",{});var u_=l($l);hm=o(u_,"sampling_rate"),u_.forEach(a),im=o(Rh," was downsampled to 16kHz. 
Now that you know how resampling works, let\u2019s return to our previous example with the SUPERB dataset!"),Rh.forEach(a),Wp=h(s),us=n(s,"H3",{class:!0});var Fh=l(us);ce=n(Fh,"A",{id:!0,class:!0,href:!0});var m_=l(ce);yl=n(m_,"SPAN",{});var d_=l(yl);d(pa.$$.fragment,d_),d_.forEach(a),m_.forEach(a),um=h(Fh),kl=n(Fh,"SPAN",{});var f_=l(kl);mm=o(f_,"Feature extractor"),f_.forEach(a),Fh.forEach(a),Yp=h(s),A=n(s,"P",{});var De=l(A);dm=o(De,"The next step is to load a feature extractor to normalize and pad the input. When padding textual data, a "),xl=n(De,"CODE",{});var b_=l(xl);fm=o(b_,"0"),b_.forEach(a),bm=o(De," is added for shorter sequences. The same idea applies to audio data, and the audio feature extractor will add a "),Tl=n(De,"CODE",{});var j_=l(Tl);jm=o(j_,"0"),j_.forEach(a),_m=o(De," - interpreted as silence - to "),ql=n(De,"CODE",{});var __=l(ql);gm=o(__,"array"),__.forEach(a),vm=o(De,"."),De.forEach(a),Gp=h(s),he=n(s,"P",{});var Hh=l(he);Em=o(Hh,"Load the feature extractor with "),$t=n(Hh,"A",{href:!0});var g_=l($t);wm=o(g_,"AutoFeatureExtractor.from_pretrained()"),g_.forEach(a),$m=o(Hh,":"),Hh.forEach(a),Up=h(s),d(ca.$$.fragment,s),Mp=h(s),Q=n(s,"P",{});var pn=l(Q);ym=o(pn,"Pass the audio "),Dl=n(pn,"CODE",{});var v_=l(Dl);km=o(v_,"array"),v_.forEach(a),xm=o(pn," to the feature extractor. We also recommend adding the "),Al=n(pn,"CODE",{});var E_=l(Al);Tm=o(E_,"sampling_rate"),E_.forEach(a),qm=o(pn," argument in the feature extractor in order to better debug any silent errors that may occur."),pn.forEach(a),Vp=h(s),d(ha.$$.fragment,s),Kp=h(s),ms=n(s,"H3",{class:!0});var Bh=l(ms);ie=n(Bh,"A",{id:!0,class:!0,href:!0});var w_=l(ie);zl=n(w_,"SPAN",{});var $_=l(zl);d(ia.$$.fragment,$_),$_.forEach(a),w_.forEach(a),Dm=h(Bh),Pl=n(Bh,"SPAN",{});var y_=l(Pl);Am=o(y_,"Pad and truncate"),y_.forEach(a),Bh.forEach(a),Qp=h(s),yt=n(s,"P",{});var k_=l(yt);zm=o(k_,"Just like the tokenizer, you can apply padding or truncation to handle variable sequences in a batch. Take a look at the sequence length of these two audio samples:"),k_.forEach(a),Xp=h(s),d(ua.$$.fragment,s),Zp=h(s),kt=n(s,"P",{});var x_=l(kt);Pm=o(x_,"As you can see, the first sample has a longer sequence than the second sample. Let\u2019s create a function that will preprocess the dataset. Specify a maximum sample length, and the feature extractor will either pad or truncate the sequences to match it:"),x_.forEach(a),sc=h(s),d(ma.$$.fragment,s),ec=h(s),xt=n(s,"P",{});var T_=l(xt);Cm=o(T_,"Apply the function to the the first few examples in the dataset:"),T_.forEach(a),ac=h(s),d(da.$$.fragment,s),tc=h(s),Tt=n(s,"P",{});var q_=l(Tt);Om=o(q_,"Now take another look at the processed sample lengths:"),q_.forEach(a),nc=h(s),d(fa.$$.fragment,s),lc=h(s),qt=n(s,"P",{});var D_=l(qt);Lm=o(D_,"The lengths of the first two samples now match the maximum length you specified."),D_.forEach(a),rc=h(s),ds=n(s,"H2",{class:!0});var Jh=l(ds);ue=n(Jh,"A",{id:!0,class:!0,href:!0});var A_=l(ue);Cl=n(A_,"SPAN",{});var z_=l(Cl);d(ba.$$.fragment,z_),z_.forEach(a),A_.forEach(a),Sm=h(Jh),Ol=n(Jh,"SPAN",{});var P_=l(Ol);Nm=o(P_,"Vision"),P_.forEach(a),Jh.forEach(a),oc=h(s),Dt=n(s,"P",{});var C_=l(Dt);Im=o(C_,"A feature extractor is also used to process images for vision tasks. Once again, the goal is to convert the raw image into a batch of tensors as input."),C_.forEach(a),pc=h(s),X=n(s,"P",{});var cn=l(X);Rm=o(cn,"Let\u2019s load the "),ja=n(cn,"A",{href:!0,rel:!0});var O_=l(ja);Fm=o(O_,"food101"),O_.forEach(a),Hm=o(cn," dataset for this tutorial. 
Use \u{1F917} Datasets "),Ll=n(cn,"CODE",{});var L_=l(Ll);Bm=o(L_,"split"),L_.forEach(a),Jm=o(cn," parameter to only load a small sample from the training split since the dataset is quite large:"),cn.forEach(a),cc=h(s),d(_a.$$.fragment,s),hc=h(s),me=n(s,"P",{});var Wh=l(me);Wm=o(Wh,"Next, take a look at the image with \u{1F917} Datasets "),ga=n(Wh,"A",{href:!0,rel:!0});var S_=l(ga);Sl=n(S_,"CODE",{});var N_=l(Sl);Ym=o(N_,"Image"),N_.forEach(a),S_.forEach(a),Gm=o(Wh," feature:"),Wh.forEach(a),ic=h(s),d(va.$$.fragment,s),uc=h(s),At=n(s,"P",{});var I_=l(At);zt=n(I_,"IMG",{src:!0,alt:!0}),I_.forEach(a),mc=h(s),fs=n(s,"H3",{class:!0});var Yh=l(fs);de=n(Yh,"A",{id:!0,class:!0,href:!0});var R_=l(de);Nl=n(R_,"SPAN",{});var F_=l(Nl);d(Ea.$$.fragment,F_),F_.forEach(a),R_.forEach(a),Um=h(Yh),Il=n(Yh,"SPAN",{});var H_=l(Il);Mm=o(H_,"Feature extractor"),H_.forEach(a),Yh.forEach(a),dc=h(s),fe=n(s,"P",{});var Gh=l(fe);Vm=o(Gh,"Load the feature extractor with "),Pt=n(Gh,"A",{href:!0});var B_=l(Pt);Km=o(B_,"AutoFeatureExtractor.from_pretrained()"),B_.forEach(a),Qm=o(Gh,":"),Gh.forEach(a),fc=h(s),d(wa.$$.fragment,s),bc=h(s),bs=n(s,"H3",{class:!0});var Uh=l(bs);be=n(Uh,"A",{id:!0,class:!0,href:!0});var J_=l(be);Rl=n(J_,"SPAN",{});var W_=l(Rl);d($a.$$.fragment,W_),W_.forEach(a),J_.forEach(a),Xm=h(Uh),Fl=n(Uh,"SPAN",{});var Y_=l(Fl);Zm=o(Y_,"Data augmentation"),Y_.forEach(a),Uh.forEach(a),jc=h(s),je=n(s,"P",{});var Mh=l(je);sd=o(Mh,"For vision tasks, it is common to add some type of data augmentation to the images as a part of preprocessing. You can add augmentations with any library you\u2019d like, but in this tutorial, you will use torchvision\u2019s "),ya=n(Mh,"A",{href:!0,rel:!0});var G_=l(ya);Hl=n(G_,"CODE",{});var U_=l(Hl);ed=o(U_,"transforms"),U_.forEach(a),G_.forEach(a),ad=o(Mh," module."),Mh.forEach(a),_c=h(s),Ct=n(s,"OL",{});var M_=l(Ct);S=n(M_,"LI",{});var Ae=l(S);td=o(Ae,"Normalize the image and use "),ka=n(Ae,"A",{href:!0,rel:!0});var V_=l(ka);Bl=n(V_,"CODE",{});var K_=l(Bl);nd=o(K_,"Compose"),K_.forEach(a),V_.forEach(a),ld=o(Ae," to chain some transforms - "),xa=n(Ae,"A",{href:!0,rel:!0});var Q_=l(xa);Jl=n(Q_,"CODE",{});var X_=l(Jl);rd=o(X_,"RandomResizedCrop"),X_.forEach(a),Q_.forEach(a),od=o(Ae," and "),Ta=n(Ae,"A",{href:!0,rel:!0});var Z_=l(Ta);Wl=n(Z_,"CODE",{});var sg=l(Wl);pd=o(sg,"ColorJitter"),sg.forEach(a),Z_.forEach(a),cd=o(Ae," - together:"),Ae.forEach(a),M_.forEach(a),gc=h(s),d(qa.$$.fragment,s),vc=h(s),Da=n(s,"OL",{start:!0});var eg=l(Da);js=n(eg,"LI",{});var hn=l(js);hd=o(hn,"The model accepts "),Ot=n(hn,"A",{href:!0});var ag=l(Ot);Yl=n(ag,"CODE",{});var tg=l(Yl);id=o(tg,"pixel_values"),tg.forEach(a),ag.forEach(a),ud=o(hn," as it\u2019s input. This value is generated by the feature extractor. 
Create a function that generates "),Gl=n(hn,"CODE",{});var ng=l(Gl);md=o(ng,"pixel_values"),ng.forEach(a),dd=o(hn," from the transforms:"),hn.forEach(a),eg.forEach(a),Ec=h(s),d(Aa.$$.fragment,s),wc=h(s),za=n(s,"OL",{start:!0});var lg=l(za);Pa=n(lg,"LI",{});var Vh=l(Pa);fd=o(Vh,"Then use \u{1F917} Datasets "),Ca=n(Vh,"A",{href:!0,rel:!0});var rg=l(Ca);Ul=n(rg,"CODE",{});var og=l(Ul);bd=o(og,"set_transform"),og.forEach(a),rg.forEach(a),jd=o(Vh," to apply the transforms on-the-fly:"),Vh.forEach(a),lg.forEach(a),$c=h(s),d(Oa.$$.fragment,s),yc=h(s),La=n(s,"OL",{start:!0});var pg=l(La);Sa=n(pg,"LI",{});var Kh=l(Sa);_d=o(Kh,"Now when you access the image, you will notice the feature extractor has added the model input "),Ml=n(Kh,"CODE",{});var cg=l(Ml);gd=o(cg,"pixel_values"),cg.forEach(a),vd=o(Kh,":"),Kh.forEach(a),pg.forEach(a),kc=h(s),d(Na.$$.fragment,s),xc=h(s),Lt=n(s,"P",{});var hg=l(Lt);Ed=o(hg,"Here is what the image looks like after you preprocess it. Just as you\u2019d expect from the applied transforms, the image has been randomly cropped and it\u2019s color properties are different."),hg.forEach(a),Tc=h(s),d(Ia.$$.fragment,s),qc=h(s),St=n(s,"P",{});var ig=l(St);Nt=n(ig,"IMG",{src:!0,alt:!0}),ig.forEach(a),Dc=h(s),_s=n(s,"H2",{class:!0});var Qh=l(_s);_e=n(Qh,"A",{id:!0,class:!0,href:!0});var ug=l(_e);Vl=n(ug,"SPAN",{});var mg=l(Vl);d(Ra.$$.fragment,mg),mg.forEach(a),ug.forEach(a),wd=h(Qh),Kl=n(Qh,"SPAN",{});var dg=l(Kl);$d=o(dg,"Multimodal"),dg.forEach(a),Qh.forEach(a),Ac=h(s),It=n(s,"P",{});var fg=l(It);yd=o(fg,"For multimodal tasks. you will use a combination of everything you\u2019ve learned so far and apply your skills to a automatic speech recognition (ASR) task. This means you will need a:"),fg.forEach(a),zc=h(s),ge=n(s,"UL",{});var Xh=l(ge);Ql=n(Xh,"LI",{});var bg=l(Ql);kd=o(bg,"Feature extractor to preprocess the audio data."),bg.forEach(a),xd=h(Xh),Xl=n(Xh,"LI",{});var jg=l(Xl);Td=o(jg,"Tokenizer to process the text."),jg.forEach(a),Xh.forEach(a),Pc=h(s),ve=n(s,"P",{});var Zh=l(ve);qd=o(Zh,"Let\u2019s return to the "),Fa=n(Zh,"A",{href:!0,rel:!0});var _g=l(Fa);Dd=o(_g,"LJ Speech"),_g.forEach(a),Ad=o(Zh," dataset:"),Zh.forEach(a),Cc=h(s),d(Ha.$$.fragment,s),Oc=h(s),Z=n(s,"P",{});var un=l(Z);zd=o(un,"Since you are mainly interested in the "),Zl=n(un,"CODE",{});var gg=l(Zl);Pd=o(gg,"audio"),gg.forEach(a),Cd=o(un," and "),sr=n(un,"CODE",{});var vg=l(sr);Od=o(vg,"text"),vg.forEach(a),Ld=o(un," column, remove the other columns:"),un.forEach(a),Lc=h(s),d(Ba.$$.fragment,s),Sc=h(s),ss=n(s,"P",{});var mn=l(ss);Sd=o(mn,"Now take a look at the "),er=n(mn,"CODE",{});var Eg=l(er);Nd=o(Eg,"audio"),Eg.forEach(a),Id=o(mn," and "),ar=n(mn,"CODE",{});var wg=l(ar);Rd=o(wg,"text"),wg.forEach(a),Fd=o(mn," columns:"),mn.forEach(a),Nc=h(s),d(Ja.$$.fragment,s),Ic=h(s),Ee=n(s,"P",{});var si=l(Ee);Hd=o(si,"Remember from the earlier section on processing audio data, you should always "),Rt=n(si,"A",{href:!0});var $g=l(Rt);Bd=o($g,"resample"),$g.forEach(a),Jd=o(si," your audio data\u2019s sampling rate to match the sampling rate of the dataset used to pretrain a model:"),si.forEach(a),Rc=h(s),d(Wa.$$.fragment,s),Fc=h(s),gs=n(s,"H3",{class:!0});var ei=l(gs);we=n(ei,"A",{id:!0,class:!0,href:!0});var yg=l(we);tr=n(yg,"SPAN",{});var kg=l(tr);d(Ya.$$.fragment,kg),kg.forEach(a),yg.forEach(a),Wd=h(ei),nr=n(ei,"SPAN",{});var xg=l(nr);Yd=o(xg,"Processor"),xg.forEach(a),ei.forEach(a),Hc=h(s),Ft=n(s,"P",{});var Tg=l(Ft);Gd=o(Tg,"A processor combines a feature extractor and tokenizer. 
Load a processor with [`AutoProcessor.from_pretrained]:"),Tg.forEach(a),Bc=h(s),d(Ga.$$.fragment,s),Jc=h(s),Ht=n(s,"OL",{});var qg=l(Ht);vs=n(qg,"LI",{});var dn=l(vs);Ud=o(dn,"Create a function to process the audio data to "),lr=n(dn,"CODE",{});var Dg=l(lr);Md=o(Dg,"input_values"),Dg.forEach(a),Vd=o(dn,", and tokenizes the text to "),rr=n(dn,"CODE",{});var Ag=l(rr);Kd=o(Ag,"labels"),Ag.forEach(a),Qd=o(dn,". These are your inputs to the model:"),dn.forEach(a),qg.forEach(a),Wc=h(s),d(Ua.$$.fragment,s),Yc=h(s),Ma=n(s,"OL",{start:!0});var zg=l(Ma);Va=n(zg,"LI",{});var ai=l(Va);Xd=o(ai,"Apply the "),or=n(ai,"CODE",{});var Pg=l(or);Zd=o(Pg,"prepare_dataset"),Pg.forEach(a),sf=o(ai," function to a sample:"),ai.forEach(a),zg.forEach(a),Gc=h(s),d(Ka.$$.fragment,s),Uc=h(s),es=n(s,"P",{});var fn=l(es);ef=o(fn,"Notice the processor has added "),pr=n(fn,"CODE",{});var Cg=l(pr);af=o(Cg,"input_values"),Cg.forEach(a),tf=o(fn," and "),cr=n(fn,"CODE",{});var Og=l(cr);nf=o(Og,"labels"),Og.forEach(a),lf=o(fn,". The sampling rate has also been correctly downsampled to 16kHz."),fn.forEach(a),Mc=h(s),Bt=n(s,"P",{});var Lg=l(Bt);rf=o(Lg,"Awesome, you should now be able to preprocess data for any modality and even combine different modalities! In the next tutorial, learn how to fine-tune a model on your newly preprocessed data."),Lg.forEach(a),Vc=h(s),Es=n(s,"H2",{class:!0});var ti=l(Es);$e=n(ti,"A",{id:!0,class:!0,href:!0});var Sg=l($e);hr=n(Sg,"SPAN",{});var Ng=l(hr);d(Qa.$$.fragment,Ng),Ng.forEach(a),Sg.forEach(a),of=h(ti),ir=n(ti,"SPAN",{});var Ig=l(ir);pf=o(Ig,"Everything you always wanted to know about padding and truncation"),Ig.forEach(a),ti.forEach(a),Kc=h(s),z=n(s,"P",{});var ze=l(z);cf=o(ze,`We have seen the commands that will work for most cases (pad your batch to the length of the maximum sentence and truncate to the maximum length the model can accept). However, the API supports more strategies if you need them. The three arguments you need to know for this are `),ur=n(ze,"CODE",{});var Rg=l(ur);hf=o(Rg,"padding"),Rg.forEach(a),uf=o(ze,", "),mr=n(ze,"CODE",{});var Fg=l(mr);mf=o(Fg,"truncation"),Fg.forEach(a),df=o(ze," and "),dr=n(ze,"CODE",{});var Hg=l(dr);ff=o(Hg,"max_length"),Hg.forEach(a),bf=o(ze,"."),ze.forEach(a),Qc=h(s),as=n(s,"UL",{});var bn=l(as);Xa=n(bn,"LI",{});var ni=l(Xa);Jt=n(ni,"P",{});var F1=l(Jt);fr=n(F1,"CODE",{});var Bg=l(fr);jf=o(Bg,"padding"),Bg.forEach(a),_f=o(F1," controls the padding. It can be a boolean or a string which should be:"),F1.forEach(a),gf=h(ni),ws=n(ni,"UL",{});var jn=l(ws);ye=n(jn,"LI",{});var Wo=l(ye);br=n(Wo,"CODE",{});var Jg=l(br);vf=o(Jg,"True"),Jg.forEach(a),Ef=o(Wo," or "),jr=n(Wo,"CODE",{});var Wg=l(jr);wf=o(Wg,"'longest'"),Wg.forEach(a),$f=o(Wo,` to pad to the longest sequence in the batch (doing no padding if you only provide a single sequence).`),Wo.forEach(a),yf=h(jn),P=n(jn,"LI",{});var Js=l(P);_r=n(Js,"CODE",{});var Yg=l(_r);kf=o(Yg,"'max_length'"),Yg.forEach(a),xf=o(Js," to pad to a length specified by the "),gr=n(Js,"CODE",{});var Gg=l(gr);Tf=o(Gg,"max_length"),Gg.forEach(a),qf=o(Js,` argument or the maximum length accepted by the model if no `),vr=n(Js,"CODE",{});var Ug=l(vr);Df=o(Ug,"max_length"),Ug.forEach(a),Af=o(Js," is provided ("),Er=n(Js,"CODE",{});var Mg=l(Er);zf=o(Mg,"max_length=None"),Mg.forEach(a),Pf=o(Js,`). 
If you only provide a single sequence, padding will still be applied to it.`),Js.forEach(a),Cf=h(jn),ke=n(jn,"LI",{});var Yo=l(ke);wr=n(Yo,"CODE",{});var Vg=l(wr);Of=o(Vg,"False"),Vg.forEach(a),Lf=o(Yo," or "),$r=n(Yo,"CODE",{});var Kg=l($r);Sf=o(Kg,"'do_not_pad'"),Kg.forEach(a),Nf=o(Yo,` to not pad the sequences. As we have seen before, this is the default behavior.`),Yo.forEach(a),jn.forEach(a),ni.forEach(a),If=h(bn),Za=n(bn,"LI",{});var li=l(Za);Wt=n(li,"P",{});var H1=l(Wt);yr=n(H1,"CODE",{});var Qg=l(yr);Rf=o(Qg,"truncation"),Qg.forEach(a),Ff=o(H1," controls the truncation. It can be a boolean or a string which should be:"),H1.forEach(a),Hf=h(li),N=n(li,"UL",{});var Pe=l(N);T=n(Pe,"LI",{});var I=l(T);kr=n(I,"CODE",{});var Xg=l(kr);Bf=o(Xg,"True"),Xg.forEach(a),Jf=o(I," or "),xr=n(I,"CODE",{});var Zg=l(xr);Wf=o(Zg,"'longest_first'"),Zg.forEach(a),Yf=o(I," truncate to a maximum length specified by the "),Tr=n(I,"CODE",{});var s2=l(Tr);Gf=o(s2,"max_length"),s2.forEach(a),Uf=o(I,` argument or the maximum length accepted by the model if no `),qr=n(I,"CODE",{});var e2=l(qr);Mf=o(e2,"max_length"),e2.forEach(a),Vf=o(I," is provided ("),Dr=n(I,"CODE",{});var a2=l(Dr);Kf=o(a2,"max_length=None"),a2.forEach(a),Qf=o(I,`). This will truncate token by token, removing a token from the longest sequence in the pair until the proper length is reached.`),I.forEach(a),Xf=h(Pe),C=n(Pe,"LI",{});var Ws=l(C);Ar=n(Ws,"CODE",{});var t2=l(Ar);Zf=o(t2,"'only_second'"),t2.forEach(a),sb=o(Ws," truncate to a maximum length specified by the "),zr=n(Ws,"CODE",{});var n2=l(zr);eb=o(n2,"max_length"),n2.forEach(a),ab=o(Ws,` argument or the maximum length accepted by the model if no `),Pr=n(Ws,"CODE",{});var l2=l(Pr);tb=o(l2,"max_length"),l2.forEach(a),nb=o(Ws," is provided ("),Cr=n(Ws,"CODE",{});var r2=l(Cr);lb=o(r2,"max_length=None"),r2.forEach(a),rb=o(Ws,`). This will only truncate the second sentence of a pair if a pair of sequence (or a batch of pairs of sequences) is provided.`),Ws.forEach(a),ob=h(Pe),O=n(Pe,"LI",{});var Ys=l(O);Or=n(Ys,"CODE",{});var o2=l(Or);pb=o(o2,"'only_first'"),o2.forEach(a),cb=o(Ys," truncate to a maximum length specified by the "),Lr=n(Ys,"CODE",{});var p2=l(Lr);hb=o(p2,"max_length"),p2.forEach(a),ib=o(Ys,` argument or the maximum length accepted by the model if no `),Sr=n(Ys,"CODE",{});var c2=l(Sr);ub=o(c2,"max_length"),c2.forEach(a),mb=o(Ys," is provided ("),Nr=n(Ys,"CODE",{});var h2=l(Nr);db=o(h2,"max_length=None"),h2.forEach(a),fb=o(Ys,`). This will only truncate the first sentence of a pair if a pair of sequence (or a batch of pairs of sequences) is provided.`),Ys.forEach(a),bb=h(Pe),xe=n(Pe,"LI",{});var Go=l(xe);Ir=n(Go,"CODE",{});var i2=l(Ir);jb=o(i2,"False"),i2.forEach(a),_b=o(Go," or "),Rr=n(Go,"CODE",{});var u2=l(Rr);gb=o(u2,"'do_not_truncate'"),u2.forEach(a),vb=o(Go,` to not truncate the sequences. As we have seen before, this is the default behavior.`),Go.forEach(a),Pe.forEach(a),li.forEach(a),Eb=h(bn),Fr=n(bn,"LI",{});var m2=l(Fr);ts=n(m2,"P",{});var et=l(ts);Hr=n(et,"CODE",{});var d2=l(Hr);wb=o(d2,"max_length"),d2.forEach(a),$b=o(et," to control the length of the padding/truncation. It can be an integer or "),Br=n(et,"CODE",{});var f2=l(Br);yb=o(f2,"None"),f2.forEach(a),kb=o(et,`, in which case it will default to the maximum length the model can accept. 
If the model has no specific maximum input length, truncation/padding to `),Jr=n(et,"CODE",{});var b2=l(Jr);xb=o(b2,"max_length"),b2.forEach(a),Tb=o(et," is deactivated."),et.forEach(a),m2.forEach(a),bn.forEach(a),Xc=h(s),y=n(s,"P",{});var L=l(y);qb=o(L,`Here is a table summarizing the recommend way to setup padding and truncation. If you use pair of inputs sequence in any of the following examples, you can replace `),Wr=n(L,"CODE",{});var j2=l(Wr);Db=o(j2,"truncation=True"),j2.forEach(a),Ab=o(L," by a "),Yr=n(L,"CODE",{});var _2=l(Yr);zb=o(_2,"STRATEGY"),_2.forEach(a),Pb=o(L,` selected in `),Gr=n(L,"CODE",{});var g2=l(Gr);Cb=o(g2,"['only_first', 'only_second', 'longest_first']"),g2.forEach(a),Ob=o(L,", i.e. "),Ur=n(L,"CODE",{});var v2=l(Ur);Lb=o(v2,"truncation='only_second'"),v2.forEach(a),Sb=o(L," or "),Mr=n(L,"CODE",{});var E2=l(Mr);Nb=o(E2,"truncation= 'longest_first'"),E2.forEach(a),Ib=o(L," to control how both sequence in the pair are truncated as detailed before."),L.forEach(a),Zc=h(s),Te=n(s,"TABLE",{});var ri=l(Te);Vr=n(ri,"THEAD",{});var w2=l(Vr);$s=n(w2,"TR",{});var _n=l($s);Kr=n(_n,"TH",{});var $2=l(Kr);Rb=o($2,"Truncation"),$2.forEach(a),Fb=h(_n),Qr=n(_n,"TH",{});var y2=l(Qr);Hb=o(y2,"Padding"),y2.forEach(a),Bb=h(_n),Xr=n(_n,"TH",{});var k2=l(Xr);Jb=o(k2,"Instruction"),k2.forEach(a),_n.forEach(a),w2.forEach(a),Wb=h(ri),g=n(ri,"TBODY",{});var v=l(g);ys=n(v,"TR",{});var gn=l(ys);Zr=n(gn,"TD",{});var x2=l(Zr);Yb=o(x2,"no truncation"),x2.forEach(a),Gb=h(gn),so=n(gn,"TD",{});var T2=l(so);Ub=o(T2,"no padding"),T2.forEach(a),Mb=h(gn),eo=n(gn,"TD",{});var q2=l(eo);ao=n(q2,"CODE",{});var D2=l(ao);Vb=o(D2,"tokenizer(batch_sentences)"),D2.forEach(a),q2.forEach(a),gn.forEach(a),Kb=h(v),ks=n(v,"TR",{});var vn=l(ks);sh=n(vn,"TD",{}),l(sh).forEach(a),Qb=h(vn),to=n(vn,"TD",{});var A2=l(to);Xb=o(A2,"padding to max sequence in batch"),A2.forEach(a),Zb=h(vn),Yt=n(vn,"TD",{});var B1=l(Yt);no=n(B1,"CODE",{});var z2=l(no);sj=o(z2,"tokenizer(batch_sentences, padding=True)"),z2.forEach(a),ej=o(B1," or"),B1.forEach(a),vn.forEach(a),aj=h(v),xs=n(v,"TR",{});var En=l(xs);eh=n(En,"TD",{}),l(eh).forEach(a),tj=h(En),ah=n(En,"TD",{}),l(ah).forEach(a),nj=h(En),lo=n(En,"TD",{});var P2=l(lo);ro=n(P2,"CODE",{});var C2=l(ro);lj=o(C2,"tokenizer(batch_sentences, padding='longest')"),C2.forEach(a),P2.forEach(a),En.forEach(a),rj=h(v),Ts=n(v,"TR",{});var wn=l(Ts);th=n(wn,"TD",{}),l(th).forEach(a),oj=h(wn),oo=n(wn,"TD",{});var O2=l(oo);pj=o(O2,"padding to max model input length"),O2.forEach(a),cj=h(wn),po=n(wn,"TD",{});var L2=l(po);co=n(L2,"CODE",{});var S2=l(co);hj=o(S2,"tokenizer(batch_sentences, padding='max_length')"),S2.forEach(a),L2.forEach(a),wn.forEach(a),ij=h(v),qs=n(v,"TR",{});var $n=l(qs);nh=n($n,"TD",{}),l(nh).forEach(a),uj=h($n),ho=n($n,"TD",{});var N2=l(ho);mj=o(N2,"padding to specific length"),N2.forEach(a),dj=h($n),io=n($n,"TD",{});var I2=l(io);uo=n(I2,"CODE",{});var R2=l(uo);fj=o(R2,"tokenizer(batch_sentences, padding='max_length', max_length=42)"),R2.forEach(a),I2.forEach(a),$n.forEach(a),bj=h(v),Ds=n(v,"TR",{});var yn=l(Ds);mo=n(yn,"TD",{});var F2=l(mo);jj=o(F2,"truncation to max model input length"),F2.forEach(a),_j=h(yn),fo=n(yn,"TD",{});var H2=l(fo);gj=o(H2,"no padding"),H2.forEach(a),vj=h(yn),Gt=n(yn,"TD",{});var J1=l(Gt);bo=n(J1,"CODE",{});var B2=l(bo);Ej=o(B2,"tokenizer(batch_sentences, truncation=True)"),B2.forEach(a),wj=o(J1," or"),J1.forEach(a),yn.forEach(a),$j=h(v),As=n(v,"TR",{});var 
kn=l(As);lh=n(kn,"TD",{}),l(lh).forEach(a),yj=h(kn),rh=n(kn,"TD",{}),l(rh).forEach(a),kj=h(kn),jo=n(kn,"TD",{});var J2=l(jo);_o=n(J2,"CODE",{});var W2=l(_o);xj=o(W2,"tokenizer(batch_sentences, truncation=STRATEGY)"),W2.forEach(a),J2.forEach(a),kn.forEach(a),Tj=h(v),zs=n(v,"TR",{});var xn=l(zs);oh=n(xn,"TD",{}),l(oh).forEach(a),qj=h(xn),go=n(xn,"TD",{});var Y2=l(go);Dj=o(Y2,"padding to max sequence in batch"),Y2.forEach(a),Aj=h(xn),Ut=n(xn,"TD",{});var W1=l(Ut);vo=n(W1,"CODE",{});var G2=l(vo);zj=o(G2,"tokenizer(batch_sentences, padding=True, truncation=True)"),G2.forEach(a),Pj=o(W1," or"),W1.forEach(a),xn.forEach(a),Cj=h(v),Ps=n(v,"TR",{});var Tn=l(Ps);ph=n(Tn,"TD",{}),l(ph).forEach(a),Oj=h(Tn),ch=n(Tn,"TD",{}),l(ch).forEach(a),Lj=h(Tn),Eo=n(Tn,"TD",{});var U2=l(Eo);wo=n(U2,"CODE",{});var M2=l(wo);Sj=o(M2,"tokenizer(batch_sentences, padding=True, truncation=STRATEGY)"),M2.forEach(a),U2.forEach(a),Tn.forEach(a),Nj=h(v),Cs=n(v,"TR",{});var qn=l(Cs);hh=n(qn,"TD",{}),l(hh).forEach(a),Ij=h(qn),$o=n(qn,"TD",{});var V2=l($o);Rj=o(V2,"padding to max model input length"),V2.forEach(a),Fj=h(qn),Mt=n(qn,"TD",{});var Y1=l(Mt);yo=n(Y1,"CODE",{});var K2=l(yo);Hj=o(K2,"tokenizer(batch_sentences, padding='max_length', truncation=True)"),K2.forEach(a),Bj=o(Y1," or"),Y1.forEach(a),qn.forEach(a),Jj=h(v),Os=n(v,"TR",{});var Dn=l(Os);ih=n(Dn,"TD",{}),l(ih).forEach(a),Wj=h(Dn),uh=n(Dn,"TD",{}),l(uh).forEach(a),Yj=h(Dn),ko=n(Dn,"TD",{});var Q2=l(ko);xo=n(Q2,"CODE",{});var X2=l(xo);Gj=o(X2,"tokenizer(batch_sentences, padding='max_length', truncation=STRATEGY)"),X2.forEach(a),Q2.forEach(a),Dn.forEach(a),Uj=h(v),Ls=n(v,"TR",{});var An=l(Ls);mh=n(An,"TD",{}),l(mh).forEach(a),Mj=h(An),To=n(An,"TD",{});var Z2=l(To);Vj=o(Z2,"padding to specific length"),Z2.forEach(a),Kj=h(An),qo=n(An,"TD",{});var sv=l(qo);Qj=o(sv,"Not possible"),sv.forEach(a),An.forEach(a),Xj=h(v),Ss=n(v,"TR",{});var zn=l(Ss);Do=n(zn,"TD",{});var ev=l(Do);Zj=o(ev,"truncation to specific length"),ev.forEach(a),s1=h(zn),Ao=n(zn,"TD",{});var av=l(Ao);e1=o(av,"no padding"),av.forEach(a),a1=h(zn),Vt=n(zn,"TD",{});var G1=l(Vt);zo=n(G1,"CODE",{});var tv=l(zo);t1=o(tv,"tokenizer(batch_sentences, truncation=True, max_length=42)"),tv.forEach(a),n1=o(G1," or"),G1.forEach(a),zn.forEach(a),l1=h(v),Ns=n(v,"TR",{});var Pn=l(Ns);dh=n(Pn,"TD",{}),l(dh).forEach(a),r1=h(Pn),fh=n(Pn,"TD",{}),l(fh).forEach(a),o1=h(Pn),Po=n(Pn,"TD",{});var nv=l(Po);Co=n(nv,"CODE",{});var lv=l(Co);p1=o(lv,"tokenizer(batch_sentences, truncation=STRATEGY, max_length=42)"),lv.forEach(a),nv.forEach(a),Pn.forEach(a),c1=h(v),Is=n(v,"TR",{});var Cn=l(Is);bh=n(Cn,"TD",{}),l(bh).forEach(a),h1=h(Cn),Oo=n(Cn,"TD",{});var rv=l(Oo);i1=o(rv,"padding to max sequence in batch"),rv.forEach(a),u1=h(Cn),Kt=n(Cn,"TD",{});var U1=l(Kt);Lo=n(U1,"CODE",{});var ov=l(Lo);m1=o(ov,"tokenizer(batch_sentences, padding=True, truncation=True, max_length=42)"),ov.forEach(a),d1=o(U1," or"),U1.forEach(a),Cn.forEach(a),f1=h(v),Rs=n(v,"TR",{});var On=l(Rs);jh=n(On,"TD",{}),l(jh).forEach(a),b1=h(On),_h=n(On,"TD",{}),l(_h).forEach(a),j1=h(On),So=n(On,"TD",{});var pv=l(So);No=n(pv,"CODE",{});var cv=l(No);_1=o(cv,"tokenizer(batch_sentences, padding=True, truncation=STRATEGY, max_length=42)"),cv.forEach(a),pv.forEach(a),On.forEach(a),g1=h(v),Fs=n(v,"TR",{});var Ln=l(Fs);gh=n(Ln,"TD",{}),l(gh).forEach(a),v1=h(Ln),Io=n(Ln,"TD",{});var hv=l(Io);E1=o(hv,"padding to max model input length"),hv.forEach(a),w1=h(Ln),Ro=n(Ln,"TD",{});var iv=l(Ro);$1=o(iv,"Not possible"),iv.forEach(a),Ln.forEach(a),y1=h(v),Hs=n(v,"TR",{});var 
Sn=l(Hs);vh=n(Sn,"TD",{}),l(vh).forEach(a),k1=h(Sn),Fo=n(Sn,"TD",{});var uv=l(Fo);x1=o(uv,"padding to specific length"),uv.forEach(a),T1=h(Sn),Qt=n(Sn,"TD",{});var M1=l(Qt);Ho=n(M1,"CODE",{});var mv=l(Ho);q1=o(mv,"tokenizer(batch_sentences, padding='max_length', truncation=True, max_length=42)"),mv.forEach(a),D1=o(M1," or"),M1.forEach(a),Sn.forEach(a),A1=h(v),Bs=n(v,"TR",{});var Nn=l(Bs);Eh=n(Nn,"TD",{}),l(Eh).forEach(a),z1=h(Nn),wh=n(Nn,"TD",{}),l(wh).forEach(a),P1=h(Nn),Bo=n(Nn,"TD",{});var dv=l(Bo);Jo=n(dv,"CODE",{});var fv=l(Jo);C1=o(fv,"tokenizer(batch_sentences, padding='max_length', truncation=STRATEGY, max_length=42)"),fv.forEach(a),dv.forEach(a),Nn.forEach(a),v.forEach(a),ri.forEach(a),this.h()},h(){u(w,"name","hf:doc:metadata"),u(w,"content",JSON.stringify(Tv)),u(x,"id","preprocess"),u(x,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(x,"href","#preprocess"),u($,"class","relative group"),u(Us,"id","nlp"),u(Us,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(Us,"href","#nlp"),u(ls,"class","relative group"),u(tt,"href","main_classes/tokenizer"),u(nt,"href","/docs/transformers/pr_16143/en/model_doc/auto#transformers.AutoTokenizer"),u(Vs,"id","tokenize"),u(Vs,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(Vs,"href","#tokenize"),u(rs,"class","relative group"),u(lt,"href","/docs/transformers/pr_16143/en/model_doc/auto#transformers.AutoTokenizer.from_pretrained"),u(ct,"href","glossary#input-ids"),u(it,"href","glossary#attention-mask"),u(mt,"href","glossary#token-type-ids"),u(Xs,"id","pad"),u(Xs,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(Xs,"href","#pad"),u(os,"class","relative group"),u(ee,"id","truncation"),u(ee,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(ee,"href","#truncation"),u(ps,"class","relative group"),u(ae,"id","build-tensors"),u(ae,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(ae,"href","#build-tensors"),u(cs,"class","relative group"),u(te,"id","audio"),u(te,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(te,"href","#audio"),u(hs,"class","relative group"),u(jt,"href","main_classes/feature_extractor"),u(Ve,"href","https://huggingface.co/datasets/superb"),u(Ve,"rel","nofollow"),u(Ke,"href","https://huggingface.co/docs/datasets/load_hub.html"),u(Ke,"rel","nofollow"),u(le,"id","resample"),u(le,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(le,"href","#resample"),u(is,"class","relative 
group"),u(sa,"href","https://huggingface.co/facebook/wav2vec2-base"),u(sa,"rel","nofollow"),u(ea,"href","https://huggingface.co/datasets/lj_speech"),u(ea,"rel","nofollow"),u(na,"href","https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.cast_column"),u(na,"rel","nofollow"),u(ra,"start","2"),u(ce,"id","feature-extractor"),u(ce,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(ce,"href","#feature-extractor"),u(us,"class","relative group"),u($t,"href","/docs/transformers/pr_16143/en/model_doc/auto#transformers.AutoFeatureExtractor.from_pretrained"),u(ie,"id","pad-and-truncate"),u(ie,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(ie,"href","#pad-and-truncate"),u(ms,"class","relative group"),u(ue,"id","vision"),u(ue,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(ue,"href","#vision"),u(ds,"class","relative group"),u(ja,"href","https://huggingface.co/datasets/food101"),u(ja,"rel","nofollow"),u(ga,"href","https://huggingface.co/docs/datasets/package_reference/main_classes.html?highlight=image#datasets.Image"),u(ga,"rel","nofollow"),bv(zt.src,V1="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/vision-preprocess-tutorial.png")||u(zt,"src",V1),u(zt,"alt","vision-preprocess-tutorial.png"),u(de,"id","feature-extractor"),u(de,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(de,"href","#feature-extractor"),u(fs,"class","relative group"),u(Pt,"href","/docs/transformers/pr_16143/en/model_doc/auto#transformers.AutoFeatureExtractor.from_pretrained"),u(be,"id","data-augmentation"),u(be,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(be,"href","#data-augmentation"),u(bs,"class","relative group"),u(ya,"href","https://pytorch.org/vision/stable/transforms.html"),u(ya,"rel","nofollow"),u(ka,"href","https://pytorch.org/vision/master/generated/torchvision.transforms.Compose.html"),u(ka,"rel","nofollow"),u(xa,"href","https://pytorch.org/vision/main/generated/torchvision.transforms.RandomResizedCrop.html"),u(xa,"rel","nofollow"),u(Ta,"href","https://pytorch.org/vision/main/generated/torchvision.transforms.ColorJitter.html"),u(Ta,"rel","nofollow"),u(Ot,"href","model_doc/visionencoderdecoder#transformers.VisionEncoderDecoderModel.forward.pixel_values"),u(Da,"start","2"),u(Ca,"href","https://huggingface.co/docs/datasets/process.html#format-transform"),u(Ca,"rel","nofollow"),u(za,"start","3"),u(La,"start","4"),bv(Nt.src,K1="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/preprocessed_image.png")||u(Nt,"src",K1),u(Nt,"alt","preprocessed_image"),u(_e,"id","multimodal"),u(_e,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(_e,"href","#multimodal"),u(_s,"class","relative 
group"),u(Fa,"href","https://huggingface.co/datasets/lj_speech"),u(Fa,"rel","nofollow"),u(Rt,"href","preprocessing#audio"),u(we,"id","processor"),u(we,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(we,"href","#processor"),u(gs,"class","relative group"),u(Ma,"start","2"),u($e,"id","everything-you-always-wanted-to-know-about-padding-and-truncation"),u($e,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u($e,"href","#everything-you-always-wanted-to-know-about-padding-and-truncation"),u(Es,"class","relative group")},m(s,p){e(document.head,w),i(s,R,p),i(s,$,p),e($,x),e(x,ns),f(q,ns,null),e($,F),e($,Gs),e(Gs,oi),i(s,Uo,p),f(Ce,s,p),i(s,Mo,p),i(s,at,p),e(at,pi),i(s,Vo,p),i(s,H,p),e(H,Rn),e(Rn,ci),e(H,hi),e(H,Fn),e(Fn,ii),e(H,ui),e(H,Hn),e(Hn,mi),i(s,Ko,p),i(s,ls,p),e(ls,Us),e(Us,Bn),f(Oe,Bn,null),e(ls,di),e(ls,Jn),e(Jn,fi),i(s,Qo,p),f(Le,s,p),i(s,Xo,p),i(s,B,p),e(B,bi),e(B,tt),e(tt,ji),e(B,_i),e(B,Wn),e(Wn,gi),e(B,vi),i(s,Zo,p),f(Ms,s,p),i(s,sp,p),i(s,J,p),e(J,Ei),e(J,nt),e(nt,wi),e(J,$i),e(J,Yn),e(Yn,yi),e(J,ki),i(s,ep,p),i(s,rs,p),e(rs,Vs),e(Vs,Gn),f(Se,Gn,null),e(rs,xi),e(rs,Un),e(Un,Ti),i(s,ap,p),i(s,Ks,p),e(Ks,qi),e(Ks,lt),e(lt,Di),e(Ks,Ai),i(s,tp,p),f(Ne,s,p),i(s,np,p),i(s,rt,p),e(rt,zi),i(s,lp,p),f(Ie,s,p),i(s,rp,p),i(s,ot,p),e(ot,Pi),i(s,op,p),i(s,W,p),e(W,pt),e(pt,ct),e(ct,Ci),e(pt,Oi),e(W,Li),e(W,ht),e(ht,it),e(it,Si),e(ht,Ni),e(W,Ii),e(W,ut),e(ut,mt),e(mt,Ri),e(ut,Fi),i(s,pp,p),i(s,Qs,p),e(Qs,Hi),e(Qs,Mn),e(Mn,Bi),e(Qs,Ji),i(s,cp,p),f(Re,s,p),i(s,hp,p),i(s,Y,p),e(Y,Wi),e(Y,Vn),e(Vn,Yi),e(Y,Gi),e(Y,Kn),e(Kn,Ui),e(Y,Mi),i(s,ip,p),i(s,dt,p),e(dt,Vi),i(s,up,p),f(Fe,s,p),i(s,mp,p),i(s,os,p),e(os,Xs),e(Xs,Qn),f(He,Qn,null),e(os,Ki),e(os,Xn),e(Xn,Qi),i(s,dp,p),i(s,Zs,p),e(Zs,Xi),e(Zs,Zn),e(Zn,Zi),e(Zs,su),i(s,fp,p),i(s,G,p),e(G,eu),e(G,sl),e(sl,au),e(G,tu),e(G,el),e(el,nu),e(G,lu),i(s,bp,p),f(Be,s,p),i(s,jp,p),i(s,se,p),e(se,ru),e(se,al),e(al,ou),e(se,pu),i(s,_p,p),i(s,ps,p),e(ps,ee),e(ee,tl),f(Je,tl,null),e(ps,cu),e(ps,nl),e(nl,hu),i(s,gp,p),i(s,ft,p),e(ft,iu),i(s,vp,p),i(s,U,p),e(U,uu),e(U,ll),e(ll,mu),e(U,du),e(U,rl),e(rl,fu),e(U,bu),i(s,Ep,p),f(We,s,p),i(s,wp,p),i(s,cs,p),e(cs,ae),e(ae,ol),f(Ye,ol,null),e(cs,ju),e(cs,pl),e(pl,_u),i(s,$p,p),i(s,bt,p),e(bt,gu),i(s,yp,p),i(s,D,p),e(D,vu),e(D,cl),e(cl,Eu),e(D,wu),e(D,hl),e(hl,$u),e(D,yu),e(D,il),e(il,ku),e(D,xu),i(s,kp,p),f(Ge,s,p),i(s,xp,p),i(s,hs,p),e(hs,te),e(te,ul),f(Ue,ul,null),e(hs,Tu),e(hs,ml),e(ml,qu),i(s,Tp,p),i(s,ne,p),e(ne,Du),e(ne,jt),e(jt,Au),e(ne,zu),i(s,qp,p),f(Me,s,p),i(s,Dp,p),i(s,M,p),e(M,Pu),e(M,Ve),e(Ve,Cu),e(M,Ou),e(M,Ke),e(Ke,Lu),e(M,Su),i(s,Ap,p),f(Qe,s,p),i(s,zp,p),i(s,V,p),e(V,Nu),e(V,dl),e(dl,Iu),e(V,Ru),e(V,fl),e(fl,Fu),e(V,Hu),i(s,Pp,p),f(Xe,s,p),i(s,Cp,p),i(s,_t,p),e(_t,Bu),i(s,Op,p),i(s,K,p),e(K,gt),e(gt,bl),e(bl,Ju),e(gt,Wu),e(K,Yu),e(K,vt),e(vt,jl),e(jl,Gu),e(vt,Uu),e(K,Mu),e(K,Et),e(Et,_l),e(_l,Vu),e(Et,Ku),i(s,Lp,p),i(s,is,p),e(is,le),e(le,gl),f(Ze,gl,null),e(is,Qu),e(is,vl),e(vl,Xu),i(s,Sp,p),i(s,re,p),e(re,Zu),e(re,sa),e(sa,sm),e(re,em),i(s,Np,p),i(s,oe,p),e(oe,am),e(oe,ea),e(ea,tm),e(oe,nm),i(s,Ip,p),f(aa,s,p),i(s,Rp,p),i(s,wt,p),e(wt,ta),e(ta,lm),e(ta,na),e(na,El),e(El,rm),e(ta,om),i(s,Fp,p),f(la,s,p),i(s,Hp,p),i(s,ra,p),e(ra,wl),e(wl,pm),i(s,Bp,p),f(oa,s,p),i(s,Jp,p),i(s,pe,p),e(pe,cm),e(pe,$l),e($l,hm),e(pe,im),i(s,Wp,p),i(s,us,p),e(us,ce),e(ce,yl),f(pa
,yl,null),e(us,um),e(us,kl),e(kl,mm),i(s,Yp,p),i(s,A,p),e(A,dm),e(A,xl),e(xl,fm),e(A,bm),e(A,Tl),e(Tl,jm),e(A,_m),e(A,ql),e(ql,gm),e(A,vm),i(s,Gp,p),i(s,he,p),e(he,Em),e(he,$t),e($t,wm),e(he,$m),i(s,Up,p),f(ca,s,p),i(s,Mp,p),i(s,Q,p),e(Q,ym),e(Q,Dl),e(Dl,km),e(Q,xm),e(Q,Al),e(Al,Tm),e(Q,qm),i(s,Vp,p),f(ha,s,p),i(s,Kp,p),i(s,ms,p),e(ms,ie),e(ie,zl),f(ia,zl,null),e(ms,Dm),e(ms,Pl),e(Pl,Am),i(s,Qp,p),i(s,yt,p),e(yt,zm),i(s,Xp,p),f(ua,s,p),i(s,Zp,p),i(s,kt,p),e(kt,Pm),i(s,sc,p),f(ma,s,p),i(s,ec,p),i(s,xt,p),e(xt,Cm),i(s,ac,p),f(da,s,p),i(s,tc,p),i(s,Tt,p),e(Tt,Om),i(s,nc,p),f(fa,s,p),i(s,lc,p),i(s,qt,p),e(qt,Lm),i(s,rc,p),i(s,ds,p),e(ds,ue),e(ue,Cl),f(ba,Cl,null),e(ds,Sm),e(ds,Ol),e(Ol,Nm),i(s,oc,p),i(s,Dt,p),e(Dt,Im),i(s,pc,p),i(s,X,p),e(X,Rm),e(X,ja),e(ja,Fm),e(X,Hm),e(X,Ll),e(Ll,Bm),e(X,Jm),i(s,cc,p),f(_a,s,p),i(s,hc,p),i(s,me,p),e(me,Wm),e(me,ga),e(ga,Sl),e(Sl,Ym),e(me,Gm),i(s,ic,p),f(va,s,p),i(s,uc,p),i(s,At,p),e(At,zt),i(s,mc,p),i(s,fs,p),e(fs,de),e(de,Nl),f(Ea,Nl,null),e(fs,Um),e(fs,Il),e(Il,Mm),i(s,dc,p),i(s,fe,p),e(fe,Vm),e(fe,Pt),e(Pt,Km),e(fe,Qm),i(s,fc,p),f(wa,s,p),i(s,bc,p),i(s,bs,p),e(bs,be),e(be,Rl),f($a,Rl,null),e(bs,Xm),e(bs,Fl),e(Fl,Zm),i(s,jc,p),i(s,je,p),e(je,sd),e(je,ya),e(ya,Hl),e(Hl,ed),e(je,ad),i(s,_c,p),i(s,Ct,p),e(Ct,S),e(S,td),e(S,ka),e(ka,Bl),e(Bl,nd),e(S,ld),e(S,xa),e(xa,Jl),e(Jl,rd),e(S,od),e(S,Ta),e(Ta,Wl),e(Wl,pd),e(S,cd),i(s,gc,p),f(qa,s,p),i(s,vc,p),i(s,Da,p),e(Da,js),e(js,hd),e(js,Ot),e(Ot,Yl),e(Yl,id),e(js,ud),e(js,Gl),e(Gl,md),e(js,dd),i(s,Ec,p),f(Aa,s,p),i(s,wc,p),i(s,za,p),e(za,Pa),e(Pa,fd),e(Pa,Ca),e(Ca,Ul),e(Ul,bd),e(Pa,jd),i(s,$c,p),f(Oa,s,p),i(s,yc,p),i(s,La,p),e(La,Sa),e(Sa,_d),e(Sa,Ml),e(Ml,gd),e(Sa,vd),i(s,kc,p),f(Na,s,p),i(s,xc,p),i(s,Lt,p),e(Lt,Ed),i(s,Tc,p),f(Ia,s,p),i(s,qc,p),i(s,St,p),e(St,Nt),i(s,Dc,p),i(s,_s,p),e(_s,_e),e(_e,Vl),f(Ra,Vl,null),e(_s,wd),e(_s,Kl),e(Kl,$d),i(s,Ac,p),i(s,It,p),e(It,yd),i(s,zc,p),i(s,ge,p),e(ge,Ql),e(Ql,kd),e(ge,xd),e(ge,Xl),e(Xl,Td),i(s,Pc,p),i(s,ve,p),e(ve,qd),e(ve,Fa),e(Fa,Dd),e(ve,Ad),i(s,Cc,p),f(Ha,s,p),i(s,Oc,p),i(s,Z,p),e(Z,zd),e(Z,Zl),e(Zl,Pd),e(Z,Cd),e(Z,sr),e(sr,Od),e(Z,Ld),i(s,Lc,p),f(Ba,s,p),i(s,Sc,p),i(s,ss,p),e(ss,Sd),e(ss,er),e(er,Nd),e(ss,Id),e(ss,ar),e(ar,Rd),e(ss,Fd),i(s,Nc,p),f(Ja,s,p),i(s,Ic,p),i(s,Ee,p),e(Ee,Hd),e(Ee,Rt),e(Rt,Bd),e(Ee,Jd),i(s,Rc,p),f(Wa,s,p),i(s,Fc,p),i(s,gs,p),e(gs,we),e(we,tr),f(Ya,tr,null),e(gs,Wd),e(gs,nr),e(nr,Yd),i(s,Hc,p),i(s,Ft,p),e(Ft,Gd),i(s,Bc,p),f(Ga,s,p),i(s,Jc,p),i(s,Ht,p),e(Ht,vs),e(vs,Ud),e(vs,lr),e(lr,Md),e(vs,Vd),e(vs,rr),e(rr,Kd),e(vs,Qd),i(s,Wc,p),f(Ua,s,p),i(s,Yc,p),i(s,Ma,p),e(Ma,Va),e(Va,Xd),e(Va,or),e(or,Zd),e(Va,sf),i(s,Gc,p),f(Ka,s,p),i(s,Uc,p),i(s,es,p),e(es,ef),e(es,pr),e(pr,af),e(es,tf),e(es,cr),e(cr,nf),e(es,lf),i(s,Mc,p),i(s,Bt,p),e(Bt,rf),i(s,Vc,p),i(s,Es,p),e(Es,$e),e($e,hr),f(Qa,hr,null),e(Es,of),e(Es,ir),e(ir,pf),i(s,Kc,p),i(s,z,p),e(z,cf),e(z,ur),e(ur,hf),e(z,uf),e(z,mr),e(mr,mf),e(z,df),e(z,dr),e(dr,ff),e(z,bf),i(s,Qc,p),i(s,as,p),e(as,Xa),e(Xa,Jt),e(Jt,fr),e(fr,jf),e(Jt,_f),e(Xa,gf),e(Xa,ws),e(ws,ye),e(ye,br),e(br,vf),e(ye,Ef),e(ye,jr),e(jr,wf),e(ye,$f),e(ws,yf),e(ws,P),e(P,_r),e(_r,kf),e(P,xf),e(P,gr),e(gr,Tf),e(P,qf),e(P,vr),e(vr,Df),e(P,Af),e(P,Er),e(Er,zf),e(P,Pf),e(ws,Cf),e(ws,ke),e(ke,wr),e(wr,Of),e(ke,Lf),e(ke,$r),e($r,Sf),e(ke,Nf),e(as,If),e(as,Za),e(Za,Wt),e(Wt,yr),e(yr,Rf),e(Wt,Ff),e(Za,Hf),e(Za,N),e(N,T),e(T,kr),e(kr,Bf),e(T,Jf),e(T,xr),e(xr,Wf),e(T,Yf),e(T,Tr),e(Tr,Gf),e(T,Uf),e(T,qr),e(qr,Mf),e(T,Vf),e(T,Dr),e(Dr,Kf),e(T,Qf),e(N,Xf),e(N,C),e(C,Ar),e(Ar,Zf),e(C,sb),e(C,zr),e(zr,eb),e(C,ab),e(C,Pr),e(Pr,tb),e(C,nb),e(C,Cr),e(Cr,l
b),e(C,rb),e(N,ob),e(N,O),e(O,Or),e(Or,pb),e(O,cb),e(O,Lr),e(Lr,hb),e(O,ib),e(O,Sr),e(Sr,ub),e(O,mb),e(O,Nr),e(Nr,db),e(O,fb),e(N,bb),e(N,xe),e(xe,Ir),e(Ir,jb),e(xe,_b),e(xe,Rr),e(Rr,gb),e(xe,vb),e(as,Eb),e(as,Fr),e(Fr,ts),e(ts,Hr),e(Hr,wb),e(ts,$b),e(ts,Br),e(Br,yb),e(ts,kb),e(ts,Jr),e(Jr,xb),e(ts,Tb),i(s,Xc,p),i(s,y,p),e(y,qb),e(y,Wr),e(Wr,Db),e(y,Ab),e(y,Yr),e(Yr,zb),e(y,Pb),e(y,Gr),e(Gr,Cb),e(y,Ob),e(y,Ur),e(Ur,Lb),e(y,Sb),e(y,Mr),e(Mr,Nb),e(y,Ib),i(s,Zc,p),i(s,Te,p),e(Te,Vr),e(Vr,$s),e($s,Kr),e(Kr,Rb),e($s,Fb),e($s,Qr),e(Qr,Hb),e($s,Bb),e($s,Xr),e(Xr,Jb),e(Te,Wb),e(Te,g),e(g,ys),e(ys,Zr),e(Zr,Yb),e(ys,Gb),e(ys,so),e(so,Ub),e(ys,Mb),e(ys,eo),e(eo,ao),e(ao,Vb),e(g,Kb),e(g,ks),e(ks,sh),e(ks,Qb),e(ks,to),e(to,Xb),e(ks,Zb),e(ks,Yt),e(Yt,no),e(no,sj),e(Yt,ej),e(g,aj),e(g,xs),e(xs,eh),e(xs,tj),e(xs,ah),e(xs,nj),e(xs,lo),e(lo,ro),e(ro,lj),e(g,rj),e(g,Ts),e(Ts,th),e(Ts,oj),e(Ts,oo),e(oo,pj),e(Ts,cj),e(Ts,po),e(po,co),e(co,hj),e(g,ij),e(g,qs),e(qs,nh),e(qs,uj),e(qs,ho),e(ho,mj),e(qs,dj),e(qs,io),e(io,uo),e(uo,fj),e(g,bj),e(g,Ds),e(Ds,mo),e(mo,jj),e(Ds,_j),e(Ds,fo),e(fo,gj),e(Ds,vj),e(Ds,Gt),e(Gt,bo),e(bo,Ej),e(Gt,wj),e(g,$j),e(g,As),e(As,lh),e(As,yj),e(As,rh),e(As,kj),e(As,jo),e(jo,_o),e(_o,xj),e(g,Tj),e(g,zs),e(zs,oh),e(zs,qj),e(zs,go),e(go,Dj),e(zs,Aj),e(zs,Ut),e(Ut,vo),e(vo,zj),e(Ut,Pj),e(g,Cj),e(g,Ps),e(Ps,ph),e(Ps,Oj),e(Ps,ch),e(Ps,Lj),e(Ps,Eo),e(Eo,wo),e(wo,Sj),e(g,Nj),e(g,Cs),e(Cs,hh),e(Cs,Ij),e(Cs,$o),e($o,Rj),e(Cs,Fj),e(Cs,Mt),e(Mt,yo),e(yo,Hj),e(Mt,Bj),e(g,Jj),e(g,Os),e(Os,ih),e(Os,Wj),e(Os,uh),e(Os,Yj),e(Os,ko),e(ko,xo),e(xo,Gj),e(g,Uj),e(g,Ls),e(Ls,mh),e(Ls,Mj),e(Ls,To),e(To,Vj),e(Ls,Kj),e(Ls,qo),e(qo,Qj),e(g,Xj),e(g,Ss),e(Ss,Do),e(Do,Zj),e(Ss,s1),e(Ss,Ao),e(Ao,e1),e(Ss,a1),e(Ss,Vt),e(Vt,zo),e(zo,t1),e(Vt,n1),e(g,l1),e(g,Ns),e(Ns,dh),e(Ns,r1),e(Ns,fh),e(Ns,o1),e(Ns,Po),e(Po,Co),e(Co,p1),e(g,c1),e(g,Is),e(Is,bh),e(Is,h1),e(Is,Oo),e(Oo,i1),e(Is,u1),e(Is,Kt),e(Kt,Lo),e(Lo,m1),e(Kt,d1),e(g,f1),e(g,Rs),e(Rs,jh),e(Rs,b1),e(Rs,_h),e(Rs,j1),e(Rs,So),e(So,No),e(No,_1),e(g,g1),e(g,Fs),e(Fs,gh),e(Fs,v1),e(Fs,Io),e(Io,E1),e(Fs,w1),e(Fs,Ro),e(Ro,$1),e(g,y1),e(g,Hs),e(Hs,vh),e(Hs,k1),e(Hs,Fo),e(Fo,x1),e(Hs,T1),e(Hs,Qt),e(Qt,Ho),e(Ho,q1),e(Qt,D1),e(g,A1),e(g,Bs),e(Bs,Eh),e(Bs,z1),e(Bs,wh),e(Bs,P1),e(Bs,Bo),e(Bo,Jo),e(Jo,C1),$h=!0},p(s,[p]){const 
st={};p&2&&(st.$$scope={dirty:p,ctx:s}),Ms.$set(st)},i(s){$h||(b(q.$$.fragment,s),b(Ce.$$.fragment,s),b(Oe.$$.fragment,s),b(Le.$$.fragment,s),b(Ms.$$.fragment,s),b(Se.$$.fragment,s),b(Ne.$$.fragment,s),b(Ie.$$.fragment,s),b(Re.$$.fragment,s),b(Fe.$$.fragment,s),b(He.$$.fragment,s),b(Be.$$.fragment,s),b(Je.$$.fragment,s),b(We.$$.fragment,s),b(Ye.$$.fragment,s),b(Ge.$$.fragment,s),b(Ue.$$.fragment,s),b(Me.$$.fragment,s),b(Qe.$$.fragment,s),b(Xe.$$.fragment,s),b(Ze.$$.fragment,s),b(aa.$$.fragment,s),b(la.$$.fragment,s),b(oa.$$.fragment,s),b(pa.$$.fragment,s),b(ca.$$.fragment,s),b(ha.$$.fragment,s),b(ia.$$.fragment,s),b(ua.$$.fragment,s),b(ma.$$.fragment,s),b(da.$$.fragment,s),b(fa.$$.fragment,s),b(ba.$$.fragment,s),b(_a.$$.fragment,s),b(va.$$.fragment,s),b(Ea.$$.fragment,s),b(wa.$$.fragment,s),b($a.$$.fragment,s),b(qa.$$.fragment,s),b(Aa.$$.fragment,s),b(Oa.$$.fragment,s),b(Na.$$.fragment,s),b(Ia.$$.fragment,s),b(Ra.$$.fragment,s),b(Ha.$$.fragment,s),b(Ba.$$.fragment,s),b(Ja.$$.fragment,s),b(Wa.$$.fragment,s),b(Ya.$$.fragment,s),b(Ga.$$.fragment,s),b(Ua.$$.fragment,s),b(Ka.$$.fragment,s),b(Qa.$$.fragment,s),$h=!0)},o(s){j(q.$$.fragment,s),j(Ce.$$.fragment,s),j(Oe.$$.fragment,s),j(Le.$$.fragment,s),j(Ms.$$.fragment,s),j(Se.$$.fragment,s),j(Ne.$$.fragment,s),j(Ie.$$.fragment,s),j(Re.$$.fragment,s),j(Fe.$$.fragment,s),j(He.$$.fragment,s),j(Be.$$.fragment,s),j(Je.$$.fragment,s),j(We.$$.fragment,s),j(Ye.$$.fragment,s),j(Ge.$$.fragment,s),j(Ue.$$.fragment,s),j(Me.$$.fragment,s),j(Qe.$$.fragment,s),j(Xe.$$.fragment,s),j(Ze.$$.fragment,s),j(aa.$$.fragment,s),j(la.$$.fragment,s),j(oa.$$.fragment,s),j(pa.$$.fragment,s),j(ca.$$.fragment,s),j(ha.$$.fragment,s),j(ia.$$.fragment,s),j(ua.$$.fragment,s),j(ma.$$.fragment,s),j(da.$$.fragment,s),j(fa.$$.fragment,s),j(ba.$$.fragment,s),j(_a.$$.fragment,s),j(va.$$.fragment,s),j(Ea.$$.fragment,s),j(wa.$$.fragment,s),j($a.$$.fragment,s),j(qa.$$.fragment,s),j(Aa.$$.fragment,s),j(Oa.$$.fragment,s),j(Na.$$.fragment,s),j(Ia.$$.fragment,s),j(Ra.$$.fragment,s),j(Ha.$$.fragment,s),j(Ba.$$.fragment,s),j(Ja.$$.fragment,s),j(Wa.$$.fragment,s),j(Ya.$$.fragment,s),j(Ga.$$.fragment,s),j(Ua.$$.fragment,s),j(Ka.$$.fragment,s),j(Qa.$$.fragment,s),$h=!1},d(s){a(w),s&&a(R),s&&a($),_(q),s&&a(Uo),_(Ce,s),s&&a(Mo),s&&a(at),s&&a(Vo),s&&a(H),s&&a(Ko),s&&a(ls),_(Oe),s&&a(Qo),_(Le,s),s&&a(Xo),s&&a(B),s&&a(Zo),_(Ms,s),s&&a(sp),s&&a(J),s&&a(ep),s&&a(rs),_(Se),s&&a(ap),s&&a(Ks),s&&a(tp),_(Ne,s),s&&a(np),s&&a(rt),s&&a(lp),_(Ie,s),s&&a(rp),s&&a(ot),s&&a(op),s&&a(W),s&&a(pp),s&&a(Qs),s&&a(cp),_(Re,s),s&&a(hp),s&&a(Y),s&&a(ip),s&&a(dt),s&&a(up),_(Fe,s),s&&a(mp),s&&a(os),_(He),s&&a(dp),s&&a(Zs),s&&a(fp),s&&a(G),s&&a(bp),_(Be,s),s&&a(jp),s&&a(se),s&&a(_p),s&&a(ps),_(Je),s&&a(gp),s&&a(ft),s&&a(vp),s&&a(U),s&&a(Ep),_(We,s),s&&a(wp),s&&a(cs),_(Ye),s&&a($p),s&&a(bt),s&&a(yp),s&&a(D),s&&a(kp),_(Ge,s),s&&a(xp),s&&a(hs),_(Ue),s&&a(Tp),s&&a(ne),s&&a(qp),_(Me,s),s&&a(Dp),s&&a(M),s&&a(Ap),_(Qe,s),s&&a(zp),s&&a(V),s&&a(Pp),_(Xe,s),s&&a(Cp),s&&a(_t),s&&a(Op),s&&a(K),s&&a(Lp),s&&a(is),_(Ze),s&&a(Sp),s&&a(re),s&&a(Np),s&&a(oe),s&&a(Ip),_(aa,s),s&&a(Rp),s&&a(wt),s&&a(Fp),_(la,s),s&&a(Hp),s&&a(ra),s&&a(Bp),_(oa,s),s&&a(Jp),s&&a(pe),s&&a(Wp),s&&a(us),_(pa),s&&a(Yp),s&&a(A),s&&a(Gp),s&&a(he),s&&a(Up),_(ca,s),s&&a(Mp),s&&a(Q),s&&a(Vp),_(ha,s),s&&a(Kp),s&&a(ms),_(ia),s&&a(Qp),s&&a(yt),s&&a(Xp),_(ua,s),s&&a(Zp),s&&a(kt),s&&a(sc),_(ma,s),s&&a(ec),s&&a(xt),s&&a(ac),_(da,s),s&&a(tc),s&&a(Tt),s&&a(nc),_(fa,s),s&&a(lc),s&&a(qt),s&&a(rc),s&&a(ds),_(ba),s&&a(oc),s&&a(Dt),s&&a(pc),s&&a(X),s&&a(cc),_(_a,s),s&&a(hc),s&&a(me),s&&a
(ic),_(va,s),s&&a(uc),s&&a(At),s&&a(mc),s&&a(fs),_(Ea),s&&a(dc),s&&a(fe),s&&a(fc),_(wa,s),s&&a(bc),s&&a(bs),_($a),s&&a(jc),s&&a(je),s&&a(_c),s&&a(Ct),s&&a(gc),_(qa,s),s&&a(vc),s&&a(Da),s&&a(Ec),_(Aa,s),s&&a(wc),s&&a(za),s&&a($c),_(Oa,s),s&&a(yc),s&&a(La),s&&a(kc),_(Na,s),s&&a(xc),s&&a(Lt),s&&a(Tc),_(Ia,s),s&&a(qc),s&&a(St),s&&a(Dc),s&&a(_s),_(Ra),s&&a(Ac),s&&a(It),s&&a(zc),s&&a(ge),s&&a(Pc),s&&a(ve),s&&a(Cc),_(Ha,s),s&&a(Oc),s&&a(Z),s&&a(Lc),_(Ba,s),s&&a(Sc),s&&a(ss),s&&a(Nc),_(Ja,s),s&&a(Ic),s&&a(Ee),s&&a(Rc),_(Wa,s),s&&a(Fc),s&&a(gs),_(Ya),s&&a(Hc),s&&a(Ft),s&&a(Bc),_(Ga,s),s&&a(Jc),s&&a(Ht),s&&a(Wc),_(Ua,s),s&&a(Yc),s&&a(Ma),s&&a(Gc),_(Ka,s),s&&a(Uc),s&&a(es),s&&a(Mc),s&&a(Bt),s&&a(Vc),s&&a(Es),_(Qa),s&&a(Kc),s&&a(z),s&&a(Qc),s&&a(as),s&&a(Xc),s&&a(y),s&&a(Zc),s&&a(Te)}}}const Tv={local:"preprocess",sections:[{local:"nlp",sections:[{local:"tokenize",title:"Tokenize"},{local:"pad",title:"Pad"},{local:"truncation",title:"Truncation"},{local:"build-tensors",title:"Build tensors"}],title:"NLP"},{local:"audio",sections:[{local:"resample",title:"Resample"},{local:"feature-extractor",title:"Feature extractor"},{local:"pad-and-truncate",title:"Pad and truncate"}],title:"Audio"},{local:"vision",sections:[{local:"feature-extractor",title:"Feature extractor"},{local:"data-augmentation",title:"Data augmentation"}],title:"Vision"},{local:"multimodal",sections:[{local:"processor",title:"Processor"}],title:"Multimodal"},{local:"everything-you-always-wanted-to-know-about-padding-and-truncation",title:"Everything you always wanted to know about padding and truncation"}],title:"Preprocess"};function qv(In,w,R){let{fw:$}=w;return In.$$set=x=>{"fw"in x&&R(0,$=x.fw)},[$]}class Nv extends jv{constructor(w){super();_v(this,w,qv,xv,gv,{fw:0})}}export{Nv as default,Tv as metadata};
254
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages/glossary.mdx-04c6e6d1.js
import{S as xh,i as Ph,s as Ah,e as l,k as h,w as m,t as n,M as Ih,c as r,d as s,m as d,a as i,x as b,h as a,b as c,F as t,g as p,y as _,L as Ch,q as k,o as g,B as v}from"../chunks/vendor-4833417e.js";import{Y as $i}from"../chunks/Youtube-27813aed.js";import{I as B}from"../chunks/IconCopyLink-4b81c553.js";import{C as E}from"../chunks/CodeBlock-6a3d1b46.js";import"../chunks/CopyButton-dacfbfaf.js";function Dh(Xa){let I,Je,q,A,Vs,ge,Ya,Us,Qa,an,L,Q,Xs,ve,Ja,Ys,Ka,on,u,Qs,Za,eo,Js,so,to,Ks,no,ao,Zs,oo,lo,et,ro,io,st,po,ho,tt,co,uo,nt,fo,mo,at,bo,_o,ot,ko,go,lt,vo,wo,rt,yo,$o,S,Eo,Ke,jo,qo,Ze,To,zo,xo,it,Po,Ao,pt,Io,ln,F,J,ht,we,Co,dt,Do,rn,es,Mo,pn,ss,hn,N,K,ct,ye,Bo,ut,Lo,dn,Z,So,ft,Fo,No,cn,$e,un,ee,Ro,Ee,Oo,Ho,fn,je,mn,ts,Go,bn,qe,_n,ns,Wo,kn,Te,gn,se,Vo,ze,Uo,Xo,vn,xe,wn,as,Yo,yn,Pe,$n,os,Qo,En,ls,Jo,jn,Ae,qn,rs,Ko,Tn,Ie,zn,te,Zo,is,el,sl,xn,ps,Pn,R,ne,mt,Ce,tl,bt,nl,An,hs,al,In,De,Cn,ds,ol,Dn,cs,ll,Mn,Me,Bn,us,rl,Ln,Be,Sn,fs,il,Fn,ms,pl,Nn,Le,Rn,bs,hl,On,Se,Hn,T,dl,_s,cl,ul,_t,fl,ml,kt,bl,_l,Gn,Fe,Wn,ks,Vn,O,ae,gt,Ne,kl,vt,gl,Un,gs,vl,Xn,Re,Yn,C,wl,wt,yl,$l,yt,El,jl,Qn,Oe,Jn,oe,ql,$t,Tl,zl,Kn,He,Zn,vs,xl,ea,Ge,sa,ws,Pl,ta,ys,Al,na,We,aa,D,Il,Et,Cl,Dl,jt,Ml,Bl,oa,M,Ll,$s,Sl,Fl,qt,Nl,Rl,la,Es,ra,H,le,Tt,Ve,Ol,zt,Hl,ia,re,Gl,xt,Wl,Vl,pa,ie,Ul,Pt,Xl,Yl,ha,pe,Ql,At,Jl,Kl,da,js,ca,G,he,It,Ue,Zl,Ct,er,ua,qs,sr,fa,Ts,tr,ma,z,W,nr,zs,ar,or,Dt,lr,rr,ir,V,pr,xs,hr,dr,Mt,cr,ur,fr,U,mr,Ps,br,_r,Bt,kr,gr,vr,$,wr,As,yr,$r,Is,Er,jr,Lt,qr,Tr,St,zr,xr,Ft,Pr,Ar,Nt,Ir,Cr,ba,de,Dr,Cs,Mr,Br,_a,Ds,ka,X,ce,Rt,Xe,Lr,Ot,Sr,ga,Ms,Fr,va,x,Nr,Ht,Rr,Or,Gt,Hr,Gr,Wt,Wr,Vr,wa,Bs,Ur,ya,Ls,$a,Y,ue,Vt,Ye,Xr,Ut,Yr,Ea,fe,Qr,Xt,Jr,Kr,ja,w,Zr,Yt,ei,si,Qt,ti,ni,Qe,ai,oi,Jt,li,ri,Kt,ii,pi,Zt,hi,di,en,ci,ui,sn,fi,mi,qa,P,bi,Ss,_i,ki,tn,gi,vi,nn,wi,yi,Ta;return ge=new B({}),ve=new B({}),we=new B({}),ye=new B({}),$e=new $i({props:{id:"VFp38yj8h3A"}}),je=new E({props:{code:`from transformers import BertTokenizer tokenizer = BertTokenizer.from_pretrained("bert-base-cased") sequence = "A Titan RTX has 24GB of VRAM"`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BertTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BertTokenizer.from_pretrained(<span class="hljs-string">&quot;bert-base-cased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>sequence = <span class="hljs-string">&quot;A Titan RTX has 24GB of VRAM&quot;</span>`}}),qe=new E({props:{code:"tokenized_sequence = tokenizer.tokenize(sequence)",highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>tokenized_sequence = tokenizer.tokenize(sequence)'}}),Te=new E({props:{code:"print(tokenized_sequence)",highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(tokenized_sequence) [<span class="hljs-string">&#x27;A&#x27;</span>, <span class="hljs-string">&#x27;Titan&#x27;</span>, <span class="hljs-string">&#x27;R&#x27;</span>, <span class="hljs-string">&#x27;##T&#x27;</span>, <span class="hljs-string">&#x27;##X&#x27;</span>, <span class="hljs-string">&#x27;has&#x27;</span>, <span class="hljs-string">&#x27;24&#x27;</span>, <span class="hljs-string">&#x27;##GB&#x27;</span>, <span class="hljs-string">&#x27;of&#x27;</span>, <span class="hljs-string">&#x27;V&#x27;</span>, <span class="hljs-string">&#x27;##RA&#x27;</span>, <span class="hljs-string">&#x27;##M&#x27;</span>]`}}),xe=new E({props:{code:"inputs = tokenizer(sequence)",highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(sequence)'}}),Pe=new 
E({props:{code:`encoded_sequence = inputs["input_ids"] print(encoded_sequence)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>encoded_sequence = inputs[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(encoded_sequence) [<span class="hljs-number">101</span>, <span class="hljs-number">138</span>, <span class="hljs-number">18696</span>, <span class="hljs-number">155</span>, <span class="hljs-number">1942</span>, <span class="hljs-number">3190</span>, <span class="hljs-number">1144</span>, <span class="hljs-number">1572</span>, <span class="hljs-number">13745</span>, <span class="hljs-number">1104</span>, <span class="hljs-number">159</span>, <span class="hljs-number">9664</span>, <span class="hljs-number">2107</span>, <span class="hljs-number">102</span>]`}}),Ae=new E({props:{code:"decoded_sequence = tokenizer.decode(encoded_sequence)",highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>decoded_sequence = tokenizer.decode(encoded_sequence)'}}),Ie=new E({props:{code:"print(decoded_sequence)",highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(decoded_sequence) [CLS] A Titan RTX has 24GB of VRAM [SEP]`}}),Ce=new B({}),De=new $i({props:{id:"M6adb1j2jPI"}}),Me=new E({props:{code:`from transformers import BertTokenizer tokenizer = BertTokenizer.from_pretrained("bert-base-cased") sequence_a = "This is a short sequence." sequence_b = "This is a rather long sequence. It is at least longer than the sequence A." encoded_sequence_a = tokenizer(sequence_a)["input_ids"] encoded_sequence_b = tokenizer(sequence_b)["input_ids"]`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BertTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BertTokenizer.from_pretrained(<span class="hljs-string">&quot;bert-base-cased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>sequence_a = <span class="hljs-string">&quot;This is a short sequence.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>sequence_b = <span class="hljs-string">&quot;This is a rather long sequence. 
It is at least longer than the sequence A.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoded_sequence_a = tokenizer(sequence_a)[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>encoded_sequence_b = tokenizer(sequence_b)[<span class="hljs-string">&quot;input_ids&quot;</span>]`}}),Be=new E({props:{code:"len(encoded_sequence_a), len(encoded_sequence_b)",highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">len</span>(encoded_sequence_a), <span class="hljs-built_in">len</span>(encoded_sequence_b) (<span class="hljs-number">8</span>, <span class="hljs-number">19</span>)`}}),Le=new E({props:{code:"padded_sequences = tokenizer([sequence_a, sequence_b], padding=True)",highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>padded_sequences = tokenizer([sequence_a, sequence_b], padding=<span class="hljs-literal">True</span>)'}}),Se=new E({props:{code:'padded_sequences["input_ids"]',highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>padded_sequences[<span class="hljs-string">&quot;input_ids&quot;</span>] [[<span class="hljs-number">101</span>, <span class="hljs-number">1188</span>, <span class="hljs-number">1110</span>, <span class="hljs-number">170</span>, <span class="hljs-number">1603</span>, <span class="hljs-number">4954</span>, <span class="hljs-number">119</span>, <span class="hljs-number">102</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>], [<span class="hljs-number">101</span>, <span class="hljs-number">1188</span>, <span class="hljs-number">1110</span>, <span class="hljs-number">170</span>, <span class="hljs-number">1897</span>, <span class="hljs-number">1263</span>, <span class="hljs-number">4954</span>, <span class="hljs-number">119</span>, <span class="hljs-number">1135</span>, <span class="hljs-number">1110</span>, <span class="hljs-number">1120</span>, <span class="hljs-number">1655</span>, <span class="hljs-number">2039</span>, <span class="hljs-number">1190</span>, <span class="hljs-number">1103</span>, <span class="hljs-number">4954</span>, <span class="hljs-number">138</span>, <span class="hljs-number">119</span>, <span class="hljs-number">102</span>]]`}}),Fe=new E({props:{code:'padded_sequences["attention_mask"]',highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>padded_sequences[<span class="hljs-string">&quot;attention_mask&quot;</span>] [[<span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>], [<span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, 
<span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>]]`}}),Ne=new B({}),Re=new $i({props:{id:"0u3ioSwev3s"}}),Oe=new E({props:{code:"# [CLS] SEQUENCE_A [SEP] SEQUENCE_B [SEP]",highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># [CLS] SEQUENCE_A [SEP] SEQUENCE_B [SEP]</span>'}}),He=new E({props:{code:`from transformers import BertTokenizer tokenizer = BertTokenizer.from_pretrained("bert-base-cased") sequence_a = "HuggingFace is based in NYC" sequence_b = "Where is HuggingFace based?" encoded_dict = tokenizer(sequence_a, sequence_b) decoded = tokenizer.decode(encoded_dict["input_ids"])`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BertTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BertTokenizer.from_pretrained(<span class="hljs-string">&quot;bert-base-cased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>sequence_a = <span class="hljs-string">&quot;HuggingFace is based in NYC&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>sequence_b = <span class="hljs-string">&quot;Where is HuggingFace based?&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoded_dict = tokenizer(sequence_a, sequence_b) <span class="hljs-meta">&gt;&gt;&gt; </span>decoded = tokenizer.decode(encoded_dict[<span class="hljs-string">&quot;input_ids&quot;</span>])`}}),Ge=new E({props:{code:"print(decoded)",highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(decoded) [CLS] HuggingFace <span class="hljs-keyword">is</span> based <span class="hljs-keyword">in</span> NYC [SEP] Where <span class="hljs-keyword">is</span> HuggingFace based? 
[SEP]`}}),We=new E({props:{code:'encoded_dict["token_type_ids"]',highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>encoded_dict[<span class="hljs-string">&quot;token_type_ids&quot;</span>] [<span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>]`}}),Ve=new B({}),Ue=new B({}),Xe=new B({}),Ye=new B({}),{c(){I=l("meta"),Je=h(),q=l("h1"),A=l("a"),Vs=l("span"),m(ge.$$.fragment),Ya=h(),Us=l("span"),Qa=n("Glossary"),an=h(),L=l("h2"),Q=l("a"),Xs=l("span"),m(ve.$$.fragment),Ja=h(),Ys=l("span"),Ka=n("General terms"),on=h(),u=l("ul"),Qs=l("li"),Za=n("autoencoding models: see MLM"),eo=h(),Js=l("li"),so=n("autoregressive models: see CLM"),to=h(),Ks=l("li"),no=n(`CLM: causal language modeling, a pretraining task where the model reads the texts in order and has to predict the next word. It\u2019s usually done by reading the whole sentence but using a mask inside the model to hide the future tokens at a certain timestep.`),ao=h(),Zs=l("li"),oo=n("deep learning: machine learning algorithms which uses neural networks with several layers."),lo=h(),et=l("li"),ro=n(`MLM: masked language modeling, a pretraining task where the model sees a corrupted version of the texts, usually done by masking some tokens randomly, and has to predict the original text.`),io=h(),st=l("li"),po=n("multimodal: a task that combines texts with another kind of inputs (for instance images)."),ho=h(),tt=l("li"),co=n(`NLG: natural language generation, all tasks related to generating text (for instance talk with transformers, translation).`),uo=h(),nt=l("li"),fo=n("NLP: natural language processing, a generic way to say \u201Cdeal with texts\u201D."),mo=h(),at=l("li"),bo=n(`NLU: natural language understanding, all tasks related to understanding what is in a text (for instance classifying the whole text, individual words).`),_o=h(),ot=l("li"),ko=n(`pretrained model: a model that has been pretrained on some data (for instance all of Wikipedia). 
Pretraining methods involve a self-supervised objective, which can be reading the text and trying to predict the next word (see CLM) or masking some words and trying to predict them (see MLM).`),go=h(),lt=l("li"),vo=n("RNN: recurrent neural network, a type of model that uses a loop over a layer to process texts."),wo=h(),rt=l("li"),yo=n("self-attention: each element of the input finds out which other elements of the input they should attend to."),$o=h(),S=l("li"),Eo=n(`seq2seq or sequence-to-sequence: models that generate a new sequence from an input, like translation models, or summarization models (such as `),Ke=l("a"),jo=n("Bart"),qo=n(" or "),Ze=l("a"),To=n("T5"),zo=n(")."),xo=h(),it=l("li"),Po=n(`token: a part of a sentence, usually a word, but can also be a subword (non-common words are often split in subwords) or a punctuation symbol.`),Ao=h(),pt=l("li"),Io=n("transformer: self-attention based deep learning model architecture."),ln=h(),F=l("h2"),J=l("a"),ht=l("span"),m(we.$$.fragment),Co=h(),dt=l("span"),Do=n("Model inputs"),rn=h(),es=l("p"),Mo=n(`Every model is different yet bears similarities with the others. Therefore most models use the same inputs, which are detailed here alongside usage examples.`),pn=h(),ss=l("a"),hn=h(),N=l("h3"),K=l("a"),ct=l("span"),m(ye.$$.fragment),Bo=h(),ut=l("span"),Lo=n("Input IDs"),dn=h(),Z=l("p"),So=n("The input ids are often the only required parameters to be passed to the model as input. "),ft=l("em"),Fo=n(`They are token indices, numerical representations of tokens building the sequences that will be used as input by the model`),No=n("."),cn=h(),m($e.$$.fragment),un=h(),ee=l("p"),Ro=n(`Each tokenizer works differently but the underlying mechanism remains the same. Here\u2019s an example using the BERT tokenizer, which is a `),Ee=l("a"),Oo=n("WordPiece"),Ho=n(" tokenizer:"),fn=h(),m(je.$$.fragment),mn=h(),ts=l("p"),Go=n("The tokenizer takes care of splitting the sequence into tokens available in the tokenizer vocabulary."),bn=h(),m(qe.$$.fragment),_n=h(),ns=l("p"),Wo=n(`The tokens are either words or subwords. Here for instance, \u201CVRAM\u201D wasn\u2019t in the model vocabulary, so it\u2019s been split in \u201CV\u201D, \u201CRA\u201D and \u201CM\u201D. To indicate those tokens are not separate words but parts of the same word, a double-hash prefix is added for \u201CRA\u201D and \u201CM\u201D:`),kn=h(),m(Te.$$.fragment),gn=h(),se=l("p"),Vo=n(`These tokens can then be converted into IDs which are understandable by the model. This can be done by directly feeding the sentence to the tokenizer, which leverages the Rust implementation of `),ze=l("a"),Uo=n("\u{1F917} Tokenizers"),Xo=n(" for peak performance."),vn=h(),m(xe.$$.fragment),wn=h(),as=l("p"),Yo=n(`The tokenizer returns a dictionary with all the arguments necessary for its corresponding model to work properly. 
The token indices are under the key \u201Cinput_ids\u201D:`),yn=h(),m(Pe.$$.fragment),$n=h(),os=l("p"),Qo=n(`Note that the tokenizer automatically adds \u201Cspecial tokens\u201D (if the associated model relies on them) which are special IDs the model sometimes uses.`),En=h(),ls=l("p"),Jo=n("If we decode the previous sequence of ids,"),jn=h(),m(Ae.$$.fragment),qn=h(),rs=l("p"),Ko=n("we will see"),Tn=h(),m(Ie.$$.fragment),zn=h(),te=l("p"),Zo=n("because this is the way a "),is=l("a"),el=n("BertModel"),sl=n(" is going to expect its inputs."),xn=h(),ps=l("a"),Pn=h(),R=l("h3"),ne=l("a"),mt=l("span"),m(Ce.$$.fragment),tl=h(),bt=l("span"),nl=n("Attention mask"),An=h(),hs=l("p"),al=n("The attention mask is an optional argument used when batching sequences together."),In=h(),m(De.$$.fragment),Cn=h(),ds=l("p"),ol=n("This argument indicates to the model which tokens should be attended to, and which should not."),Dn=h(),cs=l("p"),ll=n("For example, consider these two sequences:"),Mn=h(),m(Me.$$.fragment),Bn=h(),us=l("p"),rl=n("The encoded versions have different lengths:"),Ln=h(),m(Be.$$.fragment),Sn=h(),fs=l("p"),il=n(`Therefore, we can\u2019t put them together in the same tensor as-is. The first sequence needs to be padded up to the length of the second one, or the second one needs to be truncated down to the length of the first one.`),Fn=h(),ms=l("p"),pl=n(`In the first case, the list of IDs will be extended by the padding indices. We can pass a list to the tokenizer and ask it to pad like this:`),Nn=h(),m(Le.$$.fragment),Rn=h(),bs=l("p"),hl=n("We can see that 0s have been added on the right of the first sentence to make it the same length as the second one:"),On=h(),m(Se.$$.fragment),Hn=h(),T=l("p"),dl=n(`This can then be converted into a tensor in PyTorch or TensorFlow. The attention mask is a binary tensor indicating the position of the padded indices so that the model does not attend to them. For the `),_s=l("a"),cl=n("BertTokenizer"),ul=n(`, `),_t=l("code"),fl=n("1"),ml=n(" indicates a value that should be attended to, while "),kt=l("code"),bl=n("0"),_l=n(` indicates a padded value. This attention mask is in the dictionary returned by the tokenizer under the key \u201Cattention_mask\u201D:`),Gn=h(),m(Fe.$$.fragment),Wn=h(),ks=l("a"),Vn=h(),O=l("h3"),ae=l("a"),gt=l("span"),m(Ne.$$.fragment),kl=h(),vt=l("span"),gl=n("Token Type IDs"),Un=h(),gs=l("p"),vl=n("Some models\u2019 purpose is to do classification on pairs of sentences or question answering."),Xn=h(),m(Re.$$.fragment),Yn=h(),C=l("p"),wl=n(`These require two different sequences to be joined in a single \u201Cinput_ids\u201D entry, which usually is performed with the help of special tokens, such as the classifier (`),wt=l("code"),yl=n("[CLS]"),$l=n(") and separator ("),yt=l("code"),El=n("[SEP]"),jl=n(`) tokens. For example, the BERT model builds its two sequence input as such:`),Qn=h(),m(Oe.$$.fragment),Jn=h(),oe=l("p"),ql=n("We can use our tokenizer to automatically generate such a sentence by passing the two sequences to "),$t=l("code"),Tl=n("tokenizer"),zl=n(` as two arguments (and not a list, like before) like this:`),Kn=h(),m(He.$$.fragment),Zn=h(),vs=l("p"),xl=n("which will return:"),ea=h(),m(Ge.$$.fragment),sa=h(),ws=l("p"),Pl=n(`This is enough for some models to understand where one sequence ends and where another begins. However, other models, such as BERT, also deploy token type IDs (also called segment IDs). 
They are represented as a binary mask identifying the two types of sequence in the model.`),ta=h(),ys=l("p"),Al=n("The tokenizer returns this mask as the \u201Ctoken_type_ids\u201D entry:"),na=h(),m(We.$$.fragment),aa=h(),D=l("p"),Il=n("The first sequence, the \u201Ccontext\u201D used for the question, has all its tokens represented by a "),Et=l("code"),Cl=n("0"),Dl=n(`, whereas the second sequence, corresponding to the \u201Cquestion\u201D, has all its tokens represented by a `),jt=l("code"),Ml=n("1"),Bl=n("."),oa=h(),M=l("p"),Ll=n("Some models, like "),$s=l("a"),Sl=n("XLNetModel"),Fl=n(" use an additional token represented by a "),qt=l("code"),Nl=n("2"),Rl=n("."),la=h(),Es=l("a"),ra=h(),H=l("h3"),le=l("a"),Tt=l("span"),m(Ve.$$.fragment),Ol=h(),zt=l("span"),Hl=n("Position IDs"),ia=h(),re=l("p"),Gl=n(`Contrary to RNNs that have the position of each token embedded within them, transformers are unaware of the position of each token. Therefore, the position IDs (`),xt=l("code"),Wl=n("position_ids"),Vl=n(`) are used by the model to identify each token\u2019s position in the list of tokens.`),pa=h(),ie=l("p"),Ul=n("They are an optional parameter. If no "),Pt=l("code"),Xl=n("position_ids"),Yl=n(` are passed to the model, the IDs are automatically created as absolute positional embeddings.`),ha=h(),pe=l("p"),Ql=n("Absolute positional embeddings are selected in the range "),At=l("code"),Jl=n("[0, config.max_position_embeddings - 1]"),Kl=n(`. Some models use other types of positional embeddings, such as sinusoidal position embeddings or relative position embeddings.`),da=h(),js=l("a"),ca=h(),G=l("h3"),he=l("a"),It=l("span"),m(Ue.$$.fragment),Zl=h(),Ct=l("span"),er=n("Labels"),ua=h(),qs=l("p"),sr=n(`The labels are an optional argument which can be passed in order for the model to compute the loss itself. These labels should be the expected prediction of the model: it will use the standard loss in order to compute the loss between its predictions and the expected value (the label).`),fa=h(),Ts=l("p"),tr=n("These labels are different according to the model head, for example:"),ma=h(),z=l("ul"),W=l("li"),nr=n("For sequence classification models (e.g., "),zs=l("a"),ar=n("BertForSequenceClassification"),or=n(`), the model expects a tensor of dimension `),Dt=l("code"),lr=n("(batch_size)"),rr=n(` with each value of the batch corresponding to the expected label of the entire sequence.`),ir=h(),V=l("li"),pr=n("For token classification models (e.g., "),xs=l("a"),hr=n("BertForTokenClassification"),dr=n(`), the model expects a tensor of dimension `),Mt=l("code"),cr=n("(batch_size, seq_length)"),ur=n(` with each value corresponding to the expected label of each individual token.`),fr=h(),U=l("li"),mr=n("For masked language modeling (e.g., "),Ps=l("a"),br=n("BertForMaskedLM"),_r=n(`), the model expects a tensor of dimension `),Bt=l("code"),kr=n("(batch_size, seq_length)"),gr=n(` with each value corresponding to the expected label of each individual token: the labels being the token ID for the masked token, and values to be ignored for the rest (usually -100).`),vr=h(),$=l("li"),wr=n("For sequence to sequence tasks,(e.g., "),As=l("a"),yr=n("BartForConditionalGeneration"),$r=n(`, `),Is=l("a"),Er=n("MBartForConditionalGeneration"),jr=n("), the model expects a tensor of dimension "),Lt=l("code"),qr=n("(batch_size, tgt_seq_length)"),Tr=n(` with each value corresponding to the target sequences associated with each input sequence. 
During training, both `),St=l("em"),zr=n("BART"),xr=n(" and "),Ft=l("em"),Pr=n("T5"),Ar=n(" will make the appropriate "),Nt=l("em"),Ir=n("decoder_input_ids"),Cr=n(` and decoder attention masks internally. They usually do not need to be supplied. This does not apply to models leveraging the Encoder-Decoder framework. See the documentation of each model for more information on each specific model\u2019s labels.`),ba=h(),de=l("p"),Dr=n("The base models (e.g., "),Cs=l("a"),Mr=n("BertModel"),Br=n(`) do not accept labels, as these are the base transformer models, simply outputting features.`),_a=h(),Ds=l("a"),ka=h(),X=l("h3"),ce=l("a"),Rt=l("span"),m(Xe.$$.fragment),Lr=h(),Ot=l("span"),Sr=n("Decoder input IDs"),ga=h(),Ms=l("p"),Fr=n(`This input is specific to encoder-decoder models, and contains the input IDs that will be fed to the decoder. These inputs should be used for sequence to sequence tasks, such as translation or summarization, and are usually built in a way specific to each model.`),va=h(),x=l("p"),Nr=n("Most encoder-decoder models (BART, T5) create their "),Ht=l("code"),Rr=n("decoder_input_ids"),Or=n(" on their own from the "),Gt=l("code"),Hr=n("labels"),Gr=n(`. In such models, passing the `),Wt=l("code"),Wr=n("labels"),Vr=n(" is the preferred way to handle training."),wa=h(),Bs=l("p"),Ur=n("Please check each model\u2019s docs to see how they handle these input IDs for sequence to sequence training."),ya=h(),Ls=l("a"),$a=h(),Y=l("h3"),ue=l("a"),Vt=l("span"),m(Ye.$$.fragment),Xr=h(),Ut=l("span"),Yr=n("Feed Forward Chunking"),Ea=h(),fe=l("p"),Qr=n(`In each residual attention block in transformers the self-attention layer is usually followed by 2 feed forward layers. The intermediate embedding size of the feed forward layers is often bigger than the hidden size of the model (e.g., for `),Xt=l("code"),Jr=n("bert-base-uncased"),Kr=n(")."),ja=h(),w=l("p"),Zr=n("For an input of size "),Yt=l("code"),ei=n("[batch_size, sequence_length]"),si=n(`, the memory required to store the intermediate feed forward embeddings `),Qt=l("code"),ti=n("[batch_size, sequence_length, config.intermediate_size]"),ni=n(` can account for a large fraction of the memory use. The authors of `),Qe=l("a"),ai=n("Reformer: The Efficient Transformer"),oi=n(` noticed that since the computation is independent of the `),Jt=l("code"),li=n("sequence_length"),ri=n(` dimension, it is mathematically equivalent to compute the output embeddings of both feed forward layers `),Kt=l("code"),ii=n("[batch_size, config.hidden_size]_0, ..., [batch_size, config.hidden_size]_n"),pi=n(` individually and concat them afterward to `),Zt=l("code"),hi=n("[batch_size, sequence_length, config.hidden_size]"),di=n(" with "),en=l("code"),ci=n("n = sequence_length"),ui=n(`, which trades increased computation time against reduced memory use, but yields a mathematically `),sn=l("strong"),fi=n("equivalent"),mi=n(" result."),qa=h(),P=l("p"),bi=n("For models employing the function "),Ss=l("a"),_i=n("apply_chunking_to_forward()"),ki=n(", the "),tn=l("code"),gi=n("chunk_size"),vi=n(` defines the number of output embeddings that are computed in parallel and thus defines the trade-off between memory and time complexity. 
If `),nn=l("code"),wi=n("chunk_size"),yi=n(" is set to 0, no feed forward chunking is done."),this.h()},l(e){const o=Ih('[data-svelte="svelte-1phssyn"]',document.head);I=r(o,"META",{name:!0,content:!0}),o.forEach(s),Je=d(e),q=r(e,"H1",{class:!0});var za=i(q);A=r(za,"A",{id:!0,class:!0,href:!0});var Ei=i(A);Vs=r(Ei,"SPAN",{});var ji=i(Vs);b(ge.$$.fragment,ji),ji.forEach(s),Ei.forEach(s),Ya=d(za),Us=r(za,"SPAN",{});var qi=i(Us);Qa=a(qi,"Glossary"),qi.forEach(s),za.forEach(s),an=d(e),L=r(e,"H2",{class:!0});var xa=i(L);Q=r(xa,"A",{id:!0,class:!0,href:!0});var Ti=i(Q);Xs=r(Ti,"SPAN",{});var zi=i(Xs);b(ve.$$.fragment,zi),zi.forEach(s),Ti.forEach(s),Ja=d(xa),Ys=r(xa,"SPAN",{});var xi=i(Ys);Ka=a(xi,"General terms"),xi.forEach(s),xa.forEach(s),on=d(e),u=r(e,"UL",{});var f=i(u);Qs=r(f,"LI",{});var Pi=i(Qs);Za=a(Pi,"autoencoding models: see MLM"),Pi.forEach(s),eo=d(f),Js=r(f,"LI",{});var Ai=i(Js);so=a(Ai,"autoregressive models: see CLM"),Ai.forEach(s),to=d(f),Ks=r(f,"LI",{});var Ii=i(Ks);no=a(Ii,`CLM: causal language modeling, a pretraining task where the model reads the texts in order and has to predict the next word. It\u2019s usually done by reading the whole sentence but using a mask inside the model to hide the future tokens at a certain timestep.`),Ii.forEach(s),ao=d(f),Zs=r(f,"LI",{});var Ci=i(Zs);oo=a(Ci,"deep learning: machine learning algorithms which uses neural networks with several layers."),Ci.forEach(s),lo=d(f),et=r(f,"LI",{});var Di=i(et);ro=a(Di,`MLM: masked language modeling, a pretraining task where the model sees a corrupted version of the texts, usually done by masking some tokens randomly, and has to predict the original text.`),Di.forEach(s),io=d(f),st=r(f,"LI",{});var Mi=i(st);po=a(Mi,"multimodal: a task that combines texts with another kind of inputs (for instance images)."),Mi.forEach(s),ho=d(f),tt=r(f,"LI",{});var Bi=i(tt);co=a(Bi,`NLG: natural language generation, all tasks related to generating text (for instance talk with transformers, translation).`),Bi.forEach(s),uo=d(f),nt=r(f,"LI",{});var Li=i(nt);fo=a(Li,"NLP: natural language processing, a generic way to say \u201Cdeal with texts\u201D."),Li.forEach(s),mo=d(f),at=r(f,"LI",{});var Si=i(at);bo=a(Si,`NLU: natural language understanding, all tasks related to understanding what is in a text (for instance classifying the whole text, individual words).`),Si.forEach(s),_o=d(f),ot=r(f,"LI",{});var Fi=i(ot);ko=a(Fi,`pretrained model: a model that has been pretrained on some data (for instance all of Wikipedia). 
Pretraining methods involve a self-supervised objective, which can be reading the text and trying to predict the next word (see CLM) or masking some words and trying to predict them (see MLM).`),Fi.forEach(s),go=d(f),lt=r(f,"LI",{});var Ni=i(lt);vo=a(Ni,"RNN: recurrent neural network, a type of model that uses a loop over a layer to process texts."),Ni.forEach(s),wo=d(f),rt=r(f,"LI",{});var Ri=i(rt);yo=a(Ri,"self-attention: each element of the input finds out which other elements of the input they should attend to."),Ri.forEach(s),$o=d(f),S=r(f,"LI",{});var Fs=i(S);Eo=a(Fs,`seq2seq or sequence-to-sequence: models that generate a new sequence from an input, like translation models, or summarization models (such as `),Ke=r(Fs,"A",{href:!0});var Oi=i(Ke);jo=a(Oi,"Bart"),Oi.forEach(s),qo=a(Fs," or "),Ze=r(Fs,"A",{href:!0});var Hi=i(Ze);To=a(Hi,"T5"),Hi.forEach(s),zo=a(Fs,")."),Fs.forEach(s),xo=d(f),it=r(f,"LI",{});var Gi=i(it);Po=a(Gi,`token: a part of a sentence, usually a word, but can also be a subword (non-common words are often split in subwords) or a punctuation symbol.`),Gi.forEach(s),Ao=d(f),pt=r(f,"LI",{});var Wi=i(pt);Io=a(Wi,"transformer: self-attention based deep learning model architecture."),Wi.forEach(s),f.forEach(s),ln=d(e),F=r(e,"H2",{class:!0});var Pa=i(F);J=r(Pa,"A",{id:!0,class:!0,href:!0});var Vi=i(J);ht=r(Vi,"SPAN",{});var Ui=i(ht);b(we.$$.fragment,Ui),Ui.forEach(s),Vi.forEach(s),Co=d(Pa),dt=r(Pa,"SPAN",{});var Xi=i(dt);Do=a(Xi,"Model inputs"),Xi.forEach(s),Pa.forEach(s),rn=d(e),es=r(e,"P",{});var Yi=i(es);Mo=a(Yi,`Every model is different yet bears similarities with the others. Therefore most models use the same inputs, which are detailed here alongside usage examples.`),Yi.forEach(s),pn=d(e),ss=r(e,"A",{id:!0}),i(ss).forEach(s),hn=d(e),N=r(e,"H3",{class:!0});var Aa=i(N);K=r(Aa,"A",{id:!0,class:!0,href:!0});var Qi=i(K);ct=r(Qi,"SPAN",{});var Ji=i(ct);b(ye.$$.fragment,Ji),Ji.forEach(s),Qi.forEach(s),Bo=d(Aa),ut=r(Aa,"SPAN",{});var Ki=i(ut);Lo=a(Ki,"Input IDs"),Ki.forEach(s),Aa.forEach(s),dn=d(e),Z=r(e,"P",{});var Ia=i(Z);So=a(Ia,"The input ids are often the only required parameters to be passed to the model as input. "),ft=r(Ia,"EM",{});var Zi=i(ft);Fo=a(Zi,`They are token indices, numerical representations of tokens building the sequences that will be used as input by the model`),Zi.forEach(s),No=a(Ia,"."),Ia.forEach(s),cn=d(e),b($e.$$.fragment,e),un=d(e),ee=r(e,"P",{});var Ca=i(ee);Ro=a(Ca,`Each tokenizer works differently but the underlying mechanism remains the same. Here\u2019s an example using the BERT tokenizer, which is a `),Ee=r(Ca,"A",{href:!0,rel:!0});var ep=i(Ee);Oo=a(ep,"WordPiece"),ep.forEach(s),Ho=a(Ca," tokenizer:"),Ca.forEach(s),fn=d(e),b(je.$$.fragment,e),mn=d(e),ts=r(e,"P",{});var sp=i(ts);Go=a(sp,"The tokenizer takes care of splitting the sequence into tokens available in the tokenizer vocabulary."),sp.forEach(s),bn=d(e),b(qe.$$.fragment,e),_n=d(e),ns=r(e,"P",{});var tp=i(ns);Wo=a(tp,`The tokens are either words or subwords. Here for instance, \u201CVRAM\u201D wasn\u2019t in the model vocabulary, so it\u2019s been split in \u201CV\u201D, \u201CRA\u201D and \u201CM\u201D. To indicate those tokens are not separate words but parts of the same word, a double-hash prefix is added for \u201CRA\u201D and \u201CM\u201D:`),tp.forEach(s),kn=d(e),b(Te.$$.fragment,e),gn=d(e),se=r(e,"P",{});var Da=i(se);Vo=a(Da,`These tokens can then be converted into IDs which are understandable by the model. 
This can be done by directly feeding the sentence to the tokenizer, which leverages the Rust implementation of `),ze=r(Da,"A",{href:!0,rel:!0});var np=i(ze);Uo=a(np,"\u{1F917} Tokenizers"),np.forEach(s),Xo=a(Da," for peak performance."),Da.forEach(s),vn=d(e),b(xe.$$.fragment,e),wn=d(e),as=r(e,"P",{});var ap=i(as);Yo=a(ap,`The tokenizer returns a dictionary with all the arguments necessary for its corresponding model to work properly. The token indices are under the key \u201Cinput_ids\u201D:`),ap.forEach(s),yn=d(e),b(Pe.$$.fragment,e),$n=d(e),os=r(e,"P",{});var op=i(os);Qo=a(op,`Note that the tokenizer automatically adds \u201Cspecial tokens\u201D (if the associated model relies on them) which are special IDs the model sometimes uses.`),op.forEach(s),En=d(e),ls=r(e,"P",{});var lp=i(ls);Jo=a(lp,"If we decode the previous sequence of ids,"),lp.forEach(s),jn=d(e),b(Ae.$$.fragment,e),qn=d(e),rs=r(e,"P",{});var rp=i(rs);Ko=a(rp,"we will see"),rp.forEach(s),Tn=d(e),b(Ie.$$.fragment,e),zn=d(e),te=r(e,"P",{});var Ma=i(te);Zo=a(Ma,"because this is the way a "),is=r(Ma,"A",{href:!0});var ip=i(is);el=a(ip,"BertModel"),ip.forEach(s),sl=a(Ma," is going to expect its inputs."),Ma.forEach(s),xn=d(e),ps=r(e,"A",{id:!0}),i(ps).forEach(s),Pn=d(e),R=r(e,"H3",{class:!0});var Ba=i(R);ne=r(Ba,"A",{id:!0,class:!0,href:!0});var pp=i(ne);mt=r(pp,"SPAN",{});var hp=i(mt);b(Ce.$$.fragment,hp),hp.forEach(s),pp.forEach(s),tl=d(Ba),bt=r(Ba,"SPAN",{});var dp=i(bt);nl=a(dp,"Attention mask"),dp.forEach(s),Ba.forEach(s),An=d(e),hs=r(e,"P",{});var cp=i(hs);al=a(cp,"The attention mask is an optional argument used when batching sequences together."),cp.forEach(s),In=d(e),b(De.$$.fragment,e),Cn=d(e),ds=r(e,"P",{});var up=i(ds);ol=a(up,"This argument indicates to the model which tokens should be attended to, and which should not."),up.forEach(s),Dn=d(e),cs=r(e,"P",{});var fp=i(cs);ll=a(fp,"For example, consider these two sequences:"),fp.forEach(s),Mn=d(e),b(Me.$$.fragment,e),Bn=d(e),us=r(e,"P",{});var mp=i(us);rl=a(mp,"The encoded versions have different lengths:"),mp.forEach(s),Ln=d(e),b(Be.$$.fragment,e),Sn=d(e),fs=r(e,"P",{});var bp=i(fs);il=a(bp,`Therefore, we can\u2019t put them together in the same tensor as-is. The first sequence needs to be padded up to the length of the second one, or the second one needs to be truncated down to the length of the first one.`),bp.forEach(s),Fn=d(e),ms=r(e,"P",{});var _p=i(ms);pl=a(_p,`In the first case, the list of IDs will be extended by the padding indices. We can pass a list to the tokenizer and ask it to pad like this:`),_p.forEach(s),Nn=d(e),b(Le.$$.fragment,e),Rn=d(e),bs=r(e,"P",{});var kp=i(bs);hl=a(kp,"We can see that 0s have been added on the right of the first sentence to make it the same length as the second one:"),kp.forEach(s),On=d(e),b(Se.$$.fragment,e),Hn=d(e),T=r(e,"P",{});var me=i(T);dl=a(me,`This can then be converted into a tensor in PyTorch or TensorFlow. The attention mask is a binary tensor indicating the position of the padded indices so that the model does not attend to them. For the `),_s=r(me,"A",{href:!0});var gp=i(_s);cl=a(gp,"BertTokenizer"),gp.forEach(s),ul=a(me,`, `),_t=r(me,"CODE",{});var vp=i(_t);fl=a(vp,"1"),vp.forEach(s),ml=a(me," indicates a value that should be attended to, while "),kt=r(me,"CODE",{});var wp=i(kt);bl=a(wp,"0"),wp.forEach(s),_l=a(me,` indicates a padded value. 
This attention mask is in the dictionary returned by the tokenizer under the key \u201Cattention_mask\u201D:`),me.forEach(s),Gn=d(e),b(Fe.$$.fragment,e),Wn=d(e),ks=r(e,"A",{id:!0}),i(ks).forEach(s),Vn=d(e),O=r(e,"H3",{class:!0});var La=i(O);ae=r(La,"A",{id:!0,class:!0,href:!0});var yp=i(ae);gt=r(yp,"SPAN",{});var $p=i(gt);b(Ne.$$.fragment,$p),$p.forEach(s),yp.forEach(s),kl=d(La),vt=r(La,"SPAN",{});var Ep=i(vt);gl=a(Ep,"Token Type IDs"),Ep.forEach(s),La.forEach(s),Un=d(e),gs=r(e,"P",{});var jp=i(gs);vl=a(jp,"Some models\u2019 purpose is to do classification on pairs of sentences or question answering."),jp.forEach(s),Xn=d(e),b(Re.$$.fragment,e),Yn=d(e),C=r(e,"P",{});var Ns=i(C);wl=a(Ns,`These require two different sequences to be joined in a single \u201Cinput_ids\u201D entry, which usually is performed with the help of special tokens, such as the classifier (`),wt=r(Ns,"CODE",{});var qp=i(wt);yl=a(qp,"[CLS]"),qp.forEach(s),$l=a(Ns,") and separator ("),yt=r(Ns,"CODE",{});var Tp=i(yt);El=a(Tp,"[SEP]"),Tp.forEach(s),jl=a(Ns,`) tokens. For example, the BERT model builds its two sequence input as such:`),Ns.forEach(s),Qn=d(e),b(Oe.$$.fragment,e),Jn=d(e),oe=r(e,"P",{});var Sa=i(oe);ql=a(Sa,"We can use our tokenizer to automatically generate such a sentence by passing the two sequences to "),$t=r(Sa,"CODE",{});var zp=i($t);Tl=a(zp,"tokenizer"),zp.forEach(s),zl=a(Sa,` as two arguments (and not a list, like before) like this:`),Sa.forEach(s),Kn=d(e),b(He.$$.fragment,e),Zn=d(e),vs=r(e,"P",{});var xp=i(vs);xl=a(xp,"which will return:"),xp.forEach(s),ea=d(e),b(Ge.$$.fragment,e),sa=d(e),ws=r(e,"P",{});var Pp=i(ws);Pl=a(Pp,`This is enough for some models to understand where one sequence ends and where another begins. However, other models, such as BERT, also deploy token type IDs (also called segment IDs). They are represented as a binary mask identifying the two types of sequence in the model.`),Pp.forEach(s),ta=d(e),ys=r(e,"P",{});var Ap=i(ys);Al=a(Ap,"The tokenizer returns this mask as the \u201Ctoken_type_ids\u201D entry:"),Ap.forEach(s),na=d(e),b(We.$$.fragment,e),aa=d(e),D=r(e,"P",{});var Rs=i(D);Il=a(Rs,"The first sequence, the \u201Ccontext\u201D used for the question, has all its tokens represented by a "),Et=r(Rs,"CODE",{});var Ip=i(Et);Cl=a(Ip,"0"),Ip.forEach(s),Dl=a(Rs,`, whereas the second sequence, corresponding to the \u201Cquestion\u201D, has all its tokens represented by a `),jt=r(Rs,"CODE",{});var Cp=i(jt);Ml=a(Cp,"1"),Cp.forEach(s),Bl=a(Rs,"."),Rs.forEach(s),oa=d(e),M=r(e,"P",{});var Os=i(M);Ll=a(Os,"Some models, like "),$s=r(Os,"A",{href:!0});var Dp=i($s);Sl=a(Dp,"XLNetModel"),Dp.forEach(s),Fl=a(Os," use an additional token represented by a "),qt=r(Os,"CODE",{});var Mp=i(qt);Nl=a(Mp,"2"),Mp.forEach(s),Rl=a(Os,"."),Os.forEach(s),la=d(e),Es=r(e,"A",{id:!0}),i(Es).forEach(s),ra=d(e),H=r(e,"H3",{class:!0});var Fa=i(H);le=r(Fa,"A",{id:!0,class:!0,href:!0});var Bp=i(le);Tt=r(Bp,"SPAN",{});var Lp=i(Tt);b(Ve.$$.fragment,Lp),Lp.forEach(s),Bp.forEach(s),Ol=d(Fa),zt=r(Fa,"SPAN",{});var Sp=i(zt);Hl=a(Sp,"Position IDs"),Sp.forEach(s),Fa.forEach(s),ia=d(e),re=r(e,"P",{});var Na=i(re);Gl=a(Na,`Contrary to RNNs that have the position of each token embedded within them, transformers are unaware of the position of each token. 
Therefore, the position IDs (`),xt=r(Na,"CODE",{});var Fp=i(xt);Wl=a(Fp,"position_ids"),Fp.forEach(s),Vl=a(Na,`) are used by the model to identify each token\u2019s position in the list of tokens.`),Na.forEach(s),pa=d(e),ie=r(e,"P",{});var Ra=i(ie);Ul=a(Ra,"They are an optional parameter. If no "),Pt=r(Ra,"CODE",{});var Np=i(Pt);Xl=a(Np,"position_ids"),Np.forEach(s),Yl=a(Ra,` are passed to the model, the IDs are automatically created as absolute positional embeddings.`),Ra.forEach(s),ha=d(e),pe=r(e,"P",{});var Oa=i(pe);Ql=a(Oa,"Absolute positional embeddings are selected in the range "),At=r(Oa,"CODE",{});var Rp=i(At);Jl=a(Rp,"[0, config.max_position_embeddings - 1]"),Rp.forEach(s),Kl=a(Oa,`. Some models use other types of positional embeddings, such as sinusoidal position embeddings or relative position embeddings.`),Oa.forEach(s),da=d(e),js=r(e,"A",{id:!0}),i(js).forEach(s),ca=d(e),G=r(e,"H3",{class:!0});var Ha=i(G);he=r(Ha,"A",{id:!0,class:!0,href:!0});var Op=i(he);It=r(Op,"SPAN",{});var Hp=i(It);b(Ue.$$.fragment,Hp),Hp.forEach(s),Op.forEach(s),Zl=d(Ha),Ct=r(Ha,"SPAN",{});var Gp=i(Ct);er=a(Gp,"Labels"),Gp.forEach(s),Ha.forEach(s),ua=d(e),qs=r(e,"P",{});var Wp=i(qs);sr=a(Wp,`The labels are an optional argument which can be passed in order for the model to compute the loss itself. These labels should be the expected prediction of the model: it will use the standard loss in order to compute the loss between its predictions and the expected value (the label).`),Wp.forEach(s),fa=d(e),Ts=r(e,"P",{});var Vp=i(Ts);tr=a(Vp,"These labels are different according to the model head, for example:"),Vp.forEach(s),ma=d(e),z=r(e,"UL",{});var be=i(z);W=r(be,"LI",{});var Hs=i(W);nr=a(Hs,"For sequence classification models (e.g., "),zs=r(Hs,"A",{href:!0});var Up=i(zs);ar=a(Up,"BertForSequenceClassification"),Up.forEach(s),or=a(Hs,`), the model expects a tensor of dimension `),Dt=r(Hs,"CODE",{});var Xp=i(Dt);lr=a(Xp,"(batch_size)"),Xp.forEach(s),rr=a(Hs,` with each value of the batch corresponding to the expected label of the entire sequence.`),Hs.forEach(s),ir=d(be),V=r(be,"LI",{});var Gs=i(V);pr=a(Gs,"For token classification models (e.g., "),xs=r(Gs,"A",{href:!0});var Yp=i(xs);hr=a(Yp,"BertForTokenClassification"),Yp.forEach(s),dr=a(Gs,`), the model expects a tensor of dimension `),Mt=r(Gs,"CODE",{});var Qp=i(Mt);cr=a(Qp,"(batch_size, seq_length)"),Qp.forEach(s),ur=a(Gs,` with each value corresponding to the expected label of each individual token.`),Gs.forEach(s),fr=d(be),U=r(be,"LI",{});var Ws=i(U);mr=a(Ws,"For masked language modeling (e.g., "),Ps=r(Ws,"A",{href:!0});var Jp=i(Ps);br=a(Jp,"BertForMaskedLM"),Jp.forEach(s),_r=a(Ws,`), the model expects a tensor of dimension `),Bt=r(Ws,"CODE",{});var Kp=i(Bt);kr=a(Kp,"(batch_size, seq_length)"),Kp.forEach(s),gr=a(Ws,` with each value corresponding to the expected label of each individual token: the labels being the token ID for the masked token, and values to be ignored for the rest (usually -100).`),Ws.forEach(s),vr=d(be),$=r(be,"LI",{});var j=i($);wr=a(j,"For sequence to sequence tasks,(e.g., "),As=r(j,"A",{href:!0});var Zp=i(As);yr=a(Zp,"BartForConditionalGeneration"),Zp.forEach(s),$r=a(j,`, `),Is=r(j,"A",{href:!0});var eh=i(Is);Er=a(eh,"MBartForConditionalGeneration"),eh.forEach(s),jr=a(j,"), the model expects a tensor of dimension "),Lt=r(j,"CODE",{});var sh=i(Lt);qr=a(sh,"(batch_size, tgt_seq_length)"),sh.forEach(s),Tr=a(j,` with each value corresponding to the target sequences associated with each input sequence. 
During training, both `),St=r(j,"EM",{});var th=i(St);zr=a(th,"BART"),th.forEach(s),xr=a(j," and "),Ft=r(j,"EM",{});var nh=i(Ft);Pr=a(nh,"T5"),nh.forEach(s),Ar=a(j," will make the appropriate "),Nt=r(j,"EM",{});var ah=i(Nt);Ir=a(ah,"decoder_input_ids"),ah.forEach(s),Cr=a(j,` and decoder attention masks internally. They usually do not need to be supplied. This does not apply to models leveraging the Encoder-Decoder framework. See the documentation of each model for more information on each specific model\u2019s labels.`),j.forEach(s),be.forEach(s),ba=d(e),de=r(e,"P",{});var Ga=i(de);Dr=a(Ga,"The base models (e.g., "),Cs=r(Ga,"A",{href:!0});var oh=i(Cs);Mr=a(oh,"BertModel"),oh.forEach(s),Br=a(Ga,`) do not accept labels, as these are the base transformer models, simply outputting features.`),Ga.forEach(s),_a=d(e),Ds=r(e,"A",{id:!0}),i(Ds).forEach(s),ka=d(e),X=r(e,"H3",{class:!0});var Wa=i(X);ce=r(Wa,"A",{id:!0,class:!0,href:!0});var lh=i(ce);Rt=r(lh,"SPAN",{});var rh=i(Rt);b(Xe.$$.fragment,rh),rh.forEach(s),lh.forEach(s),Lr=d(Wa),Ot=r(Wa,"SPAN",{});var ih=i(Ot);Sr=a(ih,"Decoder input IDs"),ih.forEach(s),Wa.forEach(s),ga=d(e),Ms=r(e,"P",{});var ph=i(Ms);Fr=a(ph,`This input is specific to encoder-decoder models, and contains the input IDs that will be fed to the decoder. These inputs should be used for sequence to sequence tasks, such as translation or summarization, and are usually built in a way specific to each model.`),ph.forEach(s),va=d(e),x=r(e,"P",{});var _e=i(x);Nr=a(_e,"Most encoder-decoder models (BART, T5) create their "),Ht=r(_e,"CODE",{});var hh=i(Ht);Rr=a(hh,"decoder_input_ids"),hh.forEach(s),Or=a(_e," on their own from the "),Gt=r(_e,"CODE",{});var dh=i(Gt);Hr=a(dh,"labels"),dh.forEach(s),Gr=a(_e,`. In such models, passing the `),Wt=r(_e,"CODE",{});var ch=i(Wt);Wr=a(ch,"labels"),ch.forEach(s),Vr=a(_e," is the preferred way to handle training."),_e.forEach(s),wa=d(e),Bs=r(e,"P",{});var uh=i(Bs);Ur=a(uh,"Please check each model\u2019s docs to see how they handle these input IDs for sequence to sequence training."),uh.forEach(s),ya=d(e),Ls=r(e,"A",{id:!0}),i(Ls).forEach(s),$a=d(e),Y=r(e,"H3",{class:!0});var Va=i(Y);ue=r(Va,"A",{id:!0,class:!0,href:!0});var fh=i(ue);Vt=r(fh,"SPAN",{});var mh=i(Vt);b(Ye.$$.fragment,mh),mh.forEach(s),fh.forEach(s),Xr=d(Va),Ut=r(Va,"SPAN",{});var bh=i(Ut);Yr=a(bh,"Feed Forward Chunking"),bh.forEach(s),Va.forEach(s),Ea=d(e),fe=r(e,"P",{});var Ua=i(fe);Qr=a(Ua,`In each residual attention block in transformers the self-attention layer is usually followed by 2 feed forward layers. The intermediate embedding size of the feed forward layers is often bigger than the hidden size of the model (e.g., for `),Xt=r(Ua,"CODE",{});var _h=i(Xt);Jr=a(_h,"bert-base-uncased"),_h.forEach(s),Kr=a(Ua,")."),Ua.forEach(s),ja=d(e),w=r(e,"P",{});var y=i(w);Zr=a(y,"For an input of size "),Yt=r(y,"CODE",{});var kh=i(Yt);ei=a(kh,"[batch_size, sequence_length]"),kh.forEach(s),si=a(y,`, the memory required to store the intermediate feed forward embeddings `),Qt=r(y,"CODE",{});var gh=i(Qt);ti=a(gh,"[batch_size, sequence_length, config.intermediate_size]"),gh.forEach(s),ni=a(y,` can account for a large fraction of the memory use. 
The authors of `),Qe=r(y,"A",{href:!0,rel:!0});var vh=i(Qe);ai=a(vh,"Reformer: The Efficient Transformer"),vh.forEach(s),oi=a(y,` noticed that since the computation is independent of the `),Jt=r(y,"CODE",{});var wh=i(Jt);li=a(wh,"sequence_length"),wh.forEach(s),ri=a(y,` dimension, it is mathematically equivalent to compute the output embeddings of both feed forward layers `),Kt=r(y,"CODE",{});var yh=i(Kt);ii=a(yh,"[batch_size, config.hidden_size]_0, ..., [batch_size, config.hidden_size]_n"),yh.forEach(s),pi=a(y,` individually and concat them afterward to `),Zt=r(y,"CODE",{});var $h=i(Zt);hi=a($h,"[batch_size, sequence_length, config.hidden_size]"),$h.forEach(s),di=a(y," with "),en=r(y,"CODE",{});var Eh=i(en);ci=a(Eh,"n = sequence_length"),Eh.forEach(s),ui=a(y,`, which trades increased computation time against reduced memory use, but yields a mathematically `),sn=r(y,"STRONG",{});var jh=i(sn);fi=a(jh,"equivalent"),jh.forEach(s),mi=a(y," result."),y.forEach(s),qa=d(e),P=r(e,"P",{});var ke=i(P);bi=a(ke,"For models employing the function "),Ss=r(ke,"A",{href:!0});var qh=i(Ss);_i=a(qh,"apply_chunking_to_forward()"),qh.forEach(s),ki=a(ke,", the "),tn=r(ke,"CODE",{});var Th=i(tn);gi=a(Th,"chunk_size"),Th.forEach(s),vi=a(ke,` defines the number of output embeddings that are computed in parallel and thus defines the trade-off between memory and time complexity. If `),nn=r(ke,"CODE",{});var zh=i(nn);wi=a(zh,"chunk_size"),zh.forEach(s),yi=a(ke," is set to 0, no feed forward chunking is done."),ke.forEach(s),this.h()},h(){c(I,"name","hf:doc:metadata"),c(I,"content",JSON.stringify(Mh)),c(A,"id","glossary"),c(A,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(A,"href","#glossary"),c(q,"class","relative group"),c(Q,"id","general-terms"),c(Q,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Q,"href","#general-terms"),c(L,"class","relative group"),c(Ke,"href","model_doc/bart"),c(Ze,"href","model_doc/t5"),c(J,"id","model-inputs"),c(J,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(J,"href","#model-inputs"),c(F,"class","relative group"),c(ss,"id","input-ids"),c(K,"id","input-ids"),c(K,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(K,"href","#input-ids"),c(N,"class","relative group"),c(Ee,"href","https://arxiv.org/pdf/1609.08144.pdf"),c(Ee,"rel","nofollow"),c(ze,"href","https://github.com/huggingface/tokenizers"),c(ze,"rel","nofollow"),c(is,"href","/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertModel"),c(ps,"id","attention-mask"),c(ne,"id","attention-mask"),c(ne,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(ne,"href","#attention-mask"),c(R,"class","relative group"),c(_s,"href","/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertTokenizer"),c(ks,"id","token-type-ids"),c(ae,"id","token-type-ids"),c(ae,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full"),c(ae,"href","#token-type-ids"),c(O,"class","relative group"),c($s,"href","/docs/transformers/pr_16143/en/model_doc/xlnet#transformers.XLNetModel"),c(Es,"id","position-ids"),c(le,"id","position-ids"),c(le,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(le,"href","#position-ids"),c(H,"class","relative group"),c(js,"id","labels"),c(he,"id","labels"),c(he,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(he,"href","#labels"),c(G,"class","relative group"),c(zs,"href","/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertForSequenceClassification"),c(xs,"href","/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertForTokenClassification"),c(Ps,"href","/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertForMaskedLM"),c(As,"href","/docs/transformers/pr_16143/en/model_doc/bart#transformers.BartForConditionalGeneration"),c(Is,"href","/docs/transformers/pr_16143/en/model_doc/mbart#transformers.MBartForConditionalGeneration"),c(Cs,"href","/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertModel"),c(Ds,"id","decoder-input-ids"),c(ce,"id","decoder-input-ids"),c(ce,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(ce,"href","#decoder-input-ids"),c(X,"class","relative group"),c(Ls,"id","feed-forward-chunking"),c(ue,"id","feed-forward-chunking"),c(ue,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(ue,"href","#feed-forward-chunking"),c(Y,"class","relative 
group"),c(Qe,"href","https://arxiv.org/abs/2001.04451"),c(Qe,"rel","nofollow"),c(Ss,"href","/docs/transformers/pr_16143/en/internal/modeling_utils#transformers.apply_chunking_to_forward")},m(e,o){t(document.head,I),p(e,Je,o),p(e,q,o),t(q,A),t(A,Vs),_(ge,Vs,null),t(q,Ya),t(q,Us),t(Us,Qa),p(e,an,o),p(e,L,o),t(L,Q),t(Q,Xs),_(ve,Xs,null),t(L,Ja),t(L,Ys),t(Ys,Ka),p(e,on,o),p(e,u,o),t(u,Qs),t(Qs,Za),t(u,eo),t(u,Js),t(Js,so),t(u,to),t(u,Ks),t(Ks,no),t(u,ao),t(u,Zs),t(Zs,oo),t(u,lo),t(u,et),t(et,ro),t(u,io),t(u,st),t(st,po),t(u,ho),t(u,tt),t(tt,co),t(u,uo),t(u,nt),t(nt,fo),t(u,mo),t(u,at),t(at,bo),t(u,_o),t(u,ot),t(ot,ko),t(u,go),t(u,lt),t(lt,vo),t(u,wo),t(u,rt),t(rt,yo),t(u,$o),t(u,S),t(S,Eo),t(S,Ke),t(Ke,jo),t(S,qo),t(S,Ze),t(Ze,To),t(S,zo),t(u,xo),t(u,it),t(it,Po),t(u,Ao),t(u,pt),t(pt,Io),p(e,ln,o),p(e,F,o),t(F,J),t(J,ht),_(we,ht,null),t(F,Co),t(F,dt),t(dt,Do),p(e,rn,o),p(e,es,o),t(es,Mo),p(e,pn,o),p(e,ss,o),p(e,hn,o),p(e,N,o),t(N,K),t(K,ct),_(ye,ct,null),t(N,Bo),t(N,ut),t(ut,Lo),p(e,dn,o),p(e,Z,o),t(Z,So),t(Z,ft),t(ft,Fo),t(Z,No),p(e,cn,o),_($e,e,o),p(e,un,o),p(e,ee,o),t(ee,Ro),t(ee,Ee),t(Ee,Oo),t(ee,Ho),p(e,fn,o),_(je,e,o),p(e,mn,o),p(e,ts,o),t(ts,Go),p(e,bn,o),_(qe,e,o),p(e,_n,o),p(e,ns,o),t(ns,Wo),p(e,kn,o),_(Te,e,o),p(e,gn,o),p(e,se,o),t(se,Vo),t(se,ze),t(ze,Uo),t(se,Xo),p(e,vn,o),_(xe,e,o),p(e,wn,o),p(e,as,o),t(as,Yo),p(e,yn,o),_(Pe,e,o),p(e,$n,o),p(e,os,o),t(os,Qo),p(e,En,o),p(e,ls,o),t(ls,Jo),p(e,jn,o),_(Ae,e,o),p(e,qn,o),p(e,rs,o),t(rs,Ko),p(e,Tn,o),_(Ie,e,o),p(e,zn,o),p(e,te,o),t(te,Zo),t(te,is),t(is,el),t(te,sl),p(e,xn,o),p(e,ps,o),p(e,Pn,o),p(e,R,o),t(R,ne),t(ne,mt),_(Ce,mt,null),t(R,tl),t(R,bt),t(bt,nl),p(e,An,o),p(e,hs,o),t(hs,al),p(e,In,o),_(De,e,o),p(e,Cn,o),p(e,ds,o),t(ds,ol),p(e,Dn,o),p(e,cs,o),t(cs,ll),p(e,Mn,o),_(Me,e,o),p(e,Bn,o),p(e,us,o),t(us,rl),p(e,Ln,o),_(Be,e,o),p(e,Sn,o),p(e,fs,o),t(fs,il),p(e,Fn,o),p(e,ms,o),t(ms,pl),p(e,Nn,o),_(Le,e,o),p(e,Rn,o),p(e,bs,o),t(bs,hl),p(e,On,o),_(Se,e,o),p(e,Hn,o),p(e,T,o),t(T,dl),t(T,_s),t(_s,cl),t(T,ul),t(T,_t),t(_t,fl),t(T,ml),t(T,kt),t(kt,bl),t(T,_l),p(e,Gn,o),_(Fe,e,o),p(e,Wn,o),p(e,ks,o),p(e,Vn,o),p(e,O,o),t(O,ae),t(ae,gt),_(Ne,gt,null),t(O,kl),t(O,vt),t(vt,gl),p(e,Un,o),p(e,gs,o),t(gs,vl),p(e,Xn,o),_(Re,e,o),p(e,Yn,o),p(e,C,o),t(C,wl),t(C,wt),t(wt,yl),t(C,$l),t(C,yt),t(yt,El),t(C,jl),p(e,Qn,o),_(Oe,e,o),p(e,Jn,o),p(e,oe,o),t(oe,ql),t(oe,$t),t($t,Tl),t(oe,zl),p(e,Kn,o),_(He,e,o),p(e,Zn,o),p(e,vs,o),t(vs,xl),p(e,ea,o),_(Ge,e,o),p(e,sa,o),p(e,ws,o),t(ws,Pl),p(e,ta,o),p(e,ys,o),t(ys,Al),p(e,na,o),_(We,e,o),p(e,aa,o),p(e,D,o),t(D,Il),t(D,Et),t(Et,Cl),t(D,Dl),t(D,jt),t(jt,Ml),t(D,Bl),p(e,oa,o),p(e,M,o),t(M,Ll),t(M,$s),t($s,Sl),t(M,Fl),t(M,qt),t(qt,Nl),t(M,Rl),p(e,la,o),p(e,Es,o),p(e,ra,o),p(e,H,o),t(H,le),t(le,Tt),_(Ve,Tt,null),t(H,Ol),t(H,zt),t(zt,Hl),p(e,ia,o),p(e,re,o),t(re,Gl),t(re,xt),t(xt,Wl),t(re,Vl),p(e,pa,o),p(e,ie,o),t(ie,Ul),t(ie,Pt),t(Pt,Xl),t(ie,Yl),p(e,ha,o),p(e,pe,o),t(pe,Ql),t(pe,At),t(At,Jl),t(pe,Kl),p(e,da,o),p(e,js,o),p(e,ca,o),p(e,G,o),t(G,he),t(he,It),_(Ue,It,null),t(G,Zl),t(G,Ct),t(Ct,er),p(e,ua,o),p(e,qs,o),t(qs,sr),p(e,fa,o),p(e,Ts,o),t(Ts,tr),p(e,ma,o),p(e,z,o),t(z,W),t(W,nr),t(W,zs),t(zs,ar),t(W,or),t(W,Dt),t(Dt,lr),t(W,rr),t(z,ir),t(z,V),t(V,pr),t(V,xs),t(xs,hr),t(V,dr),t(V,Mt),t(Mt,cr),t(V,ur),t(z,fr),t(z,U),t(U,mr),t(U,Ps),t(Ps,br),t(U,_r),t(U,Bt),t(Bt,kr),t(U,gr),t(z,vr),t(z,$),t($,wr),t($,As),t(As,yr),t($,$r),t($,Is),t(Is,Er),t($,jr),t($,Lt),t(Lt,qr),t($,Tr),t($,St),t(St,zr),t($,xr),t($,Ft),t(Ft,Pr),t($,Ar),t($,Nt),t(Nt,Ir),t($,Cr),p(e,ba,o),p(e,de,o),t(de,Dr),t(de,Cs),t(Cs,Mr),t(de,Br),p(e,_a,o),p(e,Ds,o
),p(e,ka,o),p(e,X,o),t(X,ce),t(ce,Rt),_(Xe,Rt,null),t(X,Lr),t(X,Ot),t(Ot,Sr),p(e,ga,o),p(e,Ms,o),t(Ms,Fr),p(e,va,o),p(e,x,o),t(x,Nr),t(x,Ht),t(Ht,Rr),t(x,Or),t(x,Gt),t(Gt,Hr),t(x,Gr),t(x,Wt),t(Wt,Wr),t(x,Vr),p(e,wa,o),p(e,Bs,o),t(Bs,Ur),p(e,ya,o),p(e,Ls,o),p(e,$a,o),p(e,Y,o),t(Y,ue),t(ue,Vt),_(Ye,Vt,null),t(Y,Xr),t(Y,Ut),t(Ut,Yr),p(e,Ea,o),p(e,fe,o),t(fe,Qr),t(fe,Xt),t(Xt,Jr),t(fe,Kr),p(e,ja,o),p(e,w,o),t(w,Zr),t(w,Yt),t(Yt,ei),t(w,si),t(w,Qt),t(Qt,ti),t(w,ni),t(w,Qe),t(Qe,ai),t(w,oi),t(w,Jt),t(Jt,li),t(w,ri),t(w,Kt),t(Kt,ii),t(w,pi),t(w,Zt),t(Zt,hi),t(w,di),t(w,en),t(en,ci),t(w,ui),t(w,sn),t(sn,fi),t(w,mi),p(e,qa,o),p(e,P,o),t(P,bi),t(P,Ss),t(Ss,_i),t(P,ki),t(P,tn),t(tn,gi),t(P,vi),t(P,nn),t(nn,wi),t(P,yi),Ta=!0},p:Ch,i(e){Ta||(k(ge.$$.fragment,e),k(ve.$$.fragment,e),k(we.$$.fragment,e),k(ye.$$.fragment,e),k($e.$$.fragment,e),k(je.$$.fragment,e),k(qe.$$.fragment,e),k(Te.$$.fragment,e),k(xe.$$.fragment,e),k(Pe.$$.fragment,e),k(Ae.$$.fragment,e),k(Ie.$$.fragment,e),k(Ce.$$.fragment,e),k(De.$$.fragment,e),k(Me.$$.fragment,e),k(Be.$$.fragment,e),k(Le.$$.fragment,e),k(Se.$$.fragment,e),k(Fe.$$.fragment,e),k(Ne.$$.fragment,e),k(Re.$$.fragment,e),k(Oe.$$.fragment,e),k(He.$$.fragment,e),k(Ge.$$.fragment,e),k(We.$$.fragment,e),k(Ve.$$.fragment,e),k(Ue.$$.fragment,e),k(Xe.$$.fragment,e),k(Ye.$$.fragment,e),Ta=!0)},o(e){g(ge.$$.fragment,e),g(ve.$$.fragment,e),g(we.$$.fragment,e),g(ye.$$.fragment,e),g($e.$$.fragment,e),g(je.$$.fragment,e),g(qe.$$.fragment,e),g(Te.$$.fragment,e),g(xe.$$.fragment,e),g(Pe.$$.fragment,e),g(Ae.$$.fragment,e),g(Ie.$$.fragment,e),g(Ce.$$.fragment,e),g(De.$$.fragment,e),g(Me.$$.fragment,e),g(Be.$$.fragment,e),g(Le.$$.fragment,e),g(Se.$$.fragment,e),g(Fe.$$.fragment,e),g(Ne.$$.fragment,e),g(Re.$$.fragment,e),g(Oe.$$.fragment,e),g(He.$$.fragment,e),g(Ge.$$.fragment,e),g(We.$$.fragment,e),g(Ve.$$.fragment,e),g(Ue.$$.fragment,e),g(Xe.$$.fragment,e),g(Ye.$$.fragment,e),Ta=!1},d(e){s(I),e&&s(Je),e&&s(q),v(ge),e&&s(an),e&&s(L),v(ve),e&&s(on),e&&s(u),e&&s(ln),e&&s(F),v(we),e&&s(rn),e&&s(es),e&&s(pn),e&&s(ss),e&&s(hn),e&&s(N),v(ye),e&&s(dn),e&&s(Z),e&&s(cn),v($e,e),e&&s(un),e&&s(ee),e&&s(fn),v(je,e),e&&s(mn),e&&s(ts),e&&s(bn),v(qe,e),e&&s(_n),e&&s(ns),e&&s(kn),v(Te,e),e&&s(gn),e&&s(se),e&&s(vn),v(xe,e),e&&s(wn),e&&s(as),e&&s(yn),v(Pe,e),e&&s($n),e&&s(os),e&&s(En),e&&s(ls),e&&s(jn),v(Ae,e),e&&s(qn),e&&s(rs),e&&s(Tn),v(Ie,e),e&&s(zn),e&&s(te),e&&s(xn),e&&s(ps),e&&s(Pn),e&&s(R),v(Ce),e&&s(An),e&&s(hs),e&&s(In),v(De,e),e&&s(Cn),e&&s(ds),e&&s(Dn),e&&s(cs),e&&s(Mn),v(Me,e),e&&s(Bn),e&&s(us),e&&s(Ln),v(Be,e),e&&s(Sn),e&&s(fs),e&&s(Fn),e&&s(ms),e&&s(Nn),v(Le,e),e&&s(Rn),e&&s(bs),e&&s(On),v(Se,e),e&&s(Hn),e&&s(T),e&&s(Gn),v(Fe,e),e&&s(Wn),e&&s(ks),e&&s(Vn),e&&s(O),v(Ne),e&&s(Un),e&&s(gs),e&&s(Xn),v(Re,e),e&&s(Yn),e&&s(C),e&&s(Qn),v(Oe,e),e&&s(Jn),e&&s(oe),e&&s(Kn),v(He,e),e&&s(Zn),e&&s(vs),e&&s(ea),v(Ge,e),e&&s(sa),e&&s(ws),e&&s(ta),e&&s(ys),e&&s(na),v(We,e),e&&s(aa),e&&s(D),e&&s(oa),e&&s(M),e&&s(la),e&&s(Es),e&&s(ra),e&&s(H),v(Ve),e&&s(ia),e&&s(re),e&&s(pa),e&&s(ie),e&&s(ha),e&&s(pe),e&&s(da),e&&s(js),e&&s(ca),e&&s(G),v(Ue),e&&s(ua),e&&s(qs),e&&s(fa),e&&s(Ts),e&&s(ma),e&&s(z),e&&s(ba),e&&s(de),e&&s(_a),e&&s(Ds),e&&s(ka),e&&s(X),v(Xe),e&&s(ga),e&&s(Ms),e&&s(va),e&&s(x),e&&s(wa),e&&s(Bs),e&&s(ya),e&&s(Ls),e&&s($a),e&&s(Y),v(Ye),e&&s(Ea),e&&s(fe),e&&s(ja),e&&s(w),e&&s(qa),e&&s(P)}}}const Mh={local:"glossary",sections:[{local:"general-terms",title:"General terms"},{local:"model-inputs",sections:[{local:"input-ids",title:"Input IDs"},{local:"attention-mask",title:"Attention 
mask"},{local:"token-type-ids",title:"Token Type IDs"},{local:"position-ids",title:"Position IDs"},{local:"labels",title:"Labels"},{local:"decoder-input-ids",title:"Decoder input IDs"},{local:"feed-forward-chunking",title:"Feed Forward Chunking"}],title:"Model inputs"}],title:"Glossary"};function Bh(Xa,I,Je){let{fw:q}=I;return Xa.$$set=A=>{"fw"in A&&Je(0,q=A.fw)},[q]}class Oh extends xh{constructor(I){super();Ph(this,I,Bh,Dh,Ah,{fw:0})}}export{Oh as default,Mh as metadata};
255
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages/autoclass_tutorial.mdx-256b03d8.js
import{S as Qs,i as Xs,s as Ys,e as r,k as c,w as $,t as l,M as Zs,c as o,d as t,m as f,a as n,x as g,h as i,b as u,F as s,g as p,y,q as A,o as j,B as E}from"../chunks/vendor-4833417e.js";import{T as ea}from"../chunks/Tip-fffd6df1.js";import{I as Ie}from"../chunks/IconCopyLink-4b81c553.js";import{C as kt}from"../chunks/CodeBlock-6a3d1b46.js";import{C as Ks}from"../chunks/CodeBlockFw-27a176a0.js";import"../chunks/CopyButton-dacfbfaf.js";function ta(_e){let h,F,m,d,T,v,J,q;return{c(){h=r("p"),F=l("Remember, architecture refers to the skeleton of the model and checkpoints are the weights for a given architecture. For example, "),m=r("a"),d=l("BERT"),T=l(" is an architecture, while "),v=r("code"),J=l("bert-base-uncased"),q=l(" is a checkpoint. Model is a general term that can mean either architecture or checkpoint."),this.h()},l(P){h=o(P,"P",{});var k=n(h);F=i(k,"Remember, architecture refers to the skeleton of the model and checkpoints are the weights for a given architecture. For example, "),m=o(k,"A",{href:!0,rel:!0});var b=n(m);d=i(b,"BERT"),b.forEach(t),T=i(k," is an architecture, while "),v=o(k,"CODE",{});var ne=n(v);J=i(ne,"bert-base-uncased"),ne.forEach(t),q=i(k," is a checkpoint. Model is a general term that can mean either architecture or checkpoint."),k.forEach(t),this.h()},h(){u(m,"href","https://huggingface.co/bert-base-uncased"),u(m,"rel","nofollow")},m(P,k){p(P,h,k),s(h,F),s(h,m),s(m,d),s(h,T),s(h,v),s(v,J),s(h,q)},d(P){P&&t(h)}}}function sa(_e){let h,F,m,d,T,v,J,q,P,k,b,ne,we,bt,_t,$e,wt,$t,Oe,S,De,le,gt,Be,_,ge,yt,At,ye,jt,Et,Ae,xt,Ft,je,Tt,He,C,N,Ee,U,qt,xe,Pt,Re,ie,Ct,Ge,I,zt,pe,Mt,Lt,Ve,K,We,ce,St,Je,Q,Ue,z,O,Fe,X,Nt,Te,It,Ke,fe,Ot,Qe,D,Dt,ue,Bt,Ht,Xe,Y,Ye,M,B,qe,Z,Rt,Pe,Gt,Ze,H,Vt,he,Wt,Jt,et,R,Ut,me,Kt,Qt,tt,ee,st,L,G,Ce,te,Xt,ze,Yt,at,x,Zt,Me,es,ts,de,ss,as,Le,rs,rt,se,ot,ve,os,nt,ae,lt,w,ns,Se,ls,is,Ne,ps,cs,ke,fs,us,it;return v=new Ie({}),S=new ea({props:{$$slots:{default:[ta]},$$scope:{ctx:_e}}}),U=new Ie({}),K=new kt({props:{code:`from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;bert-base-uncased&quot;</span>)`}}),Q=new kt({props:{code:`sequence = "In a hole in the ground there lived a hobbit." 
print(tokenizer(sequence))`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>sequence = <span class="hljs-string">&quot;In a hole in the ground there lived a hobbit.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(tokenizer(sequence)) {<span class="hljs-string">&#x27;input_ids&#x27;</span>: [<span class="hljs-number">101</span>, <span class="hljs-number">1999</span>, <span class="hljs-number">1037</span>, <span class="hljs-number">4920</span>, <span class="hljs-number">1999</span>, <span class="hljs-number">1996</span>, <span class="hljs-number">2598</span>, <span class="hljs-number">2045</span>, <span class="hljs-number">2973</span>, <span class="hljs-number">1037</span>, <span class="hljs-number">7570</span>, <span class="hljs-number">10322</span>, <span class="hljs-number">4183</span>, <span class="hljs-number">1012</span>, <span class="hljs-number">102</span>], <span class="hljs-string">&#x27;token_type_ids&#x27;</span>: [<span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>], <span class="hljs-string">&#x27;attention_mask&#x27;</span>: [<span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>]}`}}),X=new Ie({}),Y=new kt({props:{code:`from transformers import AutoFeatureExtractor feature_extractor = AutoFeatureExtractor.from_pretrained( "ehcalabres/wav2vec2-lg-xlsr-en-speech-emotion-recognition" )`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoFeatureExtractor <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = AutoFeatureExtractor.from_pretrained( <span class="hljs-meta">... </span> <span class="hljs-string">&quot;ehcalabres/wav2vec2-lg-xlsr-en-speech-emotion-recognition&quot;</span> <span class="hljs-meta">... 
</span>)`}}),Z=new Ie({}),ee=new kt({props:{code:`from transformers import AutoProcessor processor = AutoProcessor.from_pretrained("microsoft/layoutlmv2-base-uncased")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoProcessor <span class="hljs-meta">&gt;&gt;&gt; </span>processor = AutoProcessor.from_pretrained(<span class="hljs-string">&quot;microsoft/layoutlmv2-base-uncased&quot;</span>)`}}),te=new Ie({}),se=new Ks({props:{group1:{id:"pt",code:`from transformers import AutoModelForSequenceClassification model = AutoModelForSequenceClassification.from_pretrained("distilbert-base-uncased")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModelForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>)`},group2:{id:"tf",code:`from transformers import TFAutoModelForSequenceClassification model = TFAutoModelForSequenceClassification.from_pretrained("distilbert-base-uncased")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TFAutoModelForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModelForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>)`}}}),ae=new Ks({props:{group1:{id:"pt",code:`from transformers import AutoModelForTokenClassification model = AutoModelForTokenClassification.from_pretrained("distilbert-base-uncased")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModelForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForTokenClassification.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>)`},group2:{id:"tf",code:`from transformers import TFAutoModelForTokenClassification model = TFAutoModelForTokenClassification.from_pretrained("distilbert-base-uncased")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TFAutoModelForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModelForTokenClassification.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>)`}}}),{c(){h=r("meta"),F=c(),m=r("h1"),d=r("a"),T=r("span"),$(v.$$.fragment),J=c(),q=r("span"),P=l("Load pretrained instances with an AutoClass"),k=c(),b=r("p"),ne=l("With so many different Transformer architectures, it can be challenging to create one for your checkpoint. As a part of \u{1F917} Transformers core philosophy to make the library easy, simple and flexible to use, an "),we=r("code"),bt=l("AutoClass"),_t=l(" automatically infer and load the correct architecture from a given checkpoint. The "),$e=r("code"),wt=l("from_pretrained"),$t=l(" method lets you quickly load a pretrained model for any architecture so you don\u2019t have to devote time and resources to train a model from scratch. 
Producing this type of checkpoint-agnostic code means if your code works for one checkpoint, it will work with another checkpoint - as long as it was trained for a similar task - even if the architecture is different."),Oe=c(),$(S.$$.fragment),De=c(),le=r("p"),gt=l("In this tutorial, learn to:"),Be=c(),_=r("ul"),ge=r("li"),yt=l("Load a pretrained tokenizer."),At=c(),ye=r("li"),jt=l("Load a pretrained feature extractor."),Et=c(),Ae=r("li"),xt=l("Load a pretrained processor."),Ft=c(),je=r("li"),Tt=l("Load a pretrained model."),He=c(),C=r("h2"),N=r("a"),Ee=r("span"),$(U.$$.fragment),qt=c(),xe=r("span"),Pt=l("AutoTokenizer"),Re=c(),ie=r("p"),Ct=l("Nearly every NLP task begins with a tokenizer. A tokenizer converts your input into a format that can be processed by the model."),Ge=c(),I=r("p"),zt=l("Load a tokenizer with "),pe=r("a"),Mt=l("AutoTokenizer.from_pretrained()"),Lt=l(":"),Ve=c(),$(K.$$.fragment),We=c(),ce=r("p"),St=l("Then tokenize your input as shown below:"),Je=c(),$(Q.$$.fragment),Ue=c(),z=r("h2"),O=r("a"),Fe=r("span"),$(X.$$.fragment),Nt=c(),Te=r("span"),It=l("AutoFeatureExtractor"),Ke=c(),fe=r("p"),Ot=l("For audio and vision tasks, a feature extractor processes the audio signal or image into the correct input format."),Qe=c(),D=r("p"),Dt=l("Load a feature extractor with "),ue=r("a"),Bt=l("AutoFeatureExtractor.from_pretrained()"),Ht=l(":"),Xe=c(),$(Y.$$.fragment),Ye=c(),M=r("h2"),B=r("a"),qe=r("span"),$(Z.$$.fragment),Rt=c(),Pe=r("span"),Gt=l("AutoProcessor"),Ze=c(),H=r("p"),Vt=l("Multimodal tasks require a processor that combines two types of preprocessing tools. For example, the "),he=r("a"),Wt=l("LayoutLMV2"),Jt=l(" model requires a feature extractor to handle images and a tokenizer to handle text; a processor combines both of them."),et=c(),R=r("p"),Ut=l("Load a processor with "),me=r("a"),Kt=l("AutoProcessor.from_pretrained()"),Qt=l(":"),tt=c(),$(ee.$$.fragment),st=c(),L=r("h2"),G=r("a"),Ce=r("span"),$(te.$$.fragment),Xt=c(),ze=r("span"),Yt=l("AutoModel"),at=c(),x=r("p"),Zt=l("Finally, the "),Me=r("code"),es=l("AutoModelFor"),ts=l(" classes let you load a pretrained model for a given task (see "),de=r("a"),ss=l("here"),as=l(" for a complete list of available tasks). For example, load a model for sequence classification with "),Le=r("code"),rs=l("AutoModelForSequenceClassification.from_pretrained()"),rt=c(),$(se.$$.fragment),ot=c(),ve=r("p"),os=l("Easily reuse the same checkpoint to load an architecture for a different task:"),nt=c(),$(ae.$$.fragment),lt=c(),w=r("p"),ns=l("Generally, we recommend using the "),Se=r("code"),ls=l("AutoTokenizer"),is=l(" class and the "),Ne=r("code"),ps=l("AutoModelFor"),cs=l(" class to load pretrained instances of models. This will ensure you load the correct architecture every time. In the next "),ke=r("a"),fs=l("tutorial"),us=l(", learn how to use your newly loaded tokenizer, feature extractor and processor to preprocess a dataset for fine-tuning."),this.h()},l(e){const a=Zs('[data-svelte="svelte-1phssyn"]',document.head);h=o(a,"META",{name:!0,content:!0}),a.forEach(t),F=f(e),m=o(e,"H1",{class:!0});var re=n(m);d=o(re,"A",{id:!0,class:!0,href:!0});var hs=n(d);T=o(hs,"SPAN",{});var ms=n(T);g(v.$$.fragment,ms),ms.forEach(t),hs.forEach(t),J=f(re),q=o(re,"SPAN",{});var ds=n(q);P=i(ds,"Load pretrained instances with an AutoClass"),ds.forEach(t),re.forEach(t),k=f(e),b=o(e,"P",{});var be=n(b);ne=i(be,"With so many different Transformer architectures, it can be challenging to create one for your checkpoint. 
As a part of \u{1F917} Transformers core philosophy to make the library easy, simple and flexible to use, an "),we=o(be,"CODE",{});var vs=n(we);bt=i(vs,"AutoClass"),vs.forEach(t),_t=i(be," automatically infer and load the correct architecture from a given checkpoint. The "),$e=o(be,"CODE",{});var ks=n($e);wt=i(ks,"from_pretrained"),ks.forEach(t),$t=i(be," method lets you quickly load a pretrained model for any architecture so you don\u2019t have to devote time and resources to train a model from scratch. Producing this type of checkpoint-agnostic code means if your code works for one checkpoint, it will work with another checkpoint - as long as it was trained for a similar task - even if the architecture is different."),be.forEach(t),Oe=f(e),g(S.$$.fragment,e),De=f(e),le=o(e,"P",{});var bs=n(le);gt=i(bs,"In this tutorial, learn to:"),bs.forEach(t),Be=f(e),_=o(e,"UL",{});var V=n(_);ge=o(V,"LI",{});var _s=n(ge);yt=i(_s,"Load a pretrained tokenizer."),_s.forEach(t),At=f(V),ye=o(V,"LI",{});var ws=n(ye);jt=i(ws,"Load a pretrained feature extractor."),ws.forEach(t),Et=f(V),Ae=o(V,"LI",{});var $s=n(Ae);xt=i($s,"Load a pretrained processor."),$s.forEach(t),Ft=f(V),je=o(V,"LI",{});var gs=n(je);Tt=i(gs,"Load a pretrained model."),gs.forEach(t),V.forEach(t),He=f(e),C=o(e,"H2",{class:!0});var pt=n(C);N=o(pt,"A",{id:!0,class:!0,href:!0});var ys=n(N);Ee=o(ys,"SPAN",{});var As=n(Ee);g(U.$$.fragment,As),As.forEach(t),ys.forEach(t),qt=f(pt),xe=o(pt,"SPAN",{});var js=n(xe);Pt=i(js,"AutoTokenizer"),js.forEach(t),pt.forEach(t),Re=f(e),ie=o(e,"P",{});var Es=n(ie);Ct=i(Es,"Nearly every NLP task begins with a tokenizer. A tokenizer converts your input into a format that can be processed by the model."),Es.forEach(t),Ge=f(e),I=o(e,"P",{});var ct=n(I);zt=i(ct,"Load a tokenizer with "),pe=o(ct,"A",{href:!0});var xs=n(pe);Mt=i(xs,"AutoTokenizer.from_pretrained()"),xs.forEach(t),Lt=i(ct,":"),ct.forEach(t),Ve=f(e),g(K.$$.fragment,e),We=f(e),ce=o(e,"P",{});var Fs=n(ce);St=i(Fs,"Then tokenize your input as shown below:"),Fs.forEach(t),Je=f(e),g(Q.$$.fragment,e),Ue=f(e),z=o(e,"H2",{class:!0});var ft=n(z);O=o(ft,"A",{id:!0,class:!0,href:!0});var Ts=n(O);Fe=o(Ts,"SPAN",{});var qs=n(Fe);g(X.$$.fragment,qs),qs.forEach(t),Ts.forEach(t),Nt=f(ft),Te=o(ft,"SPAN",{});var Ps=n(Te);It=i(Ps,"AutoFeatureExtractor"),Ps.forEach(t),ft.forEach(t),Ke=f(e),fe=o(e,"P",{});var Cs=n(fe);Ot=i(Cs,"For audio and vision tasks, a feature extractor processes the audio signal or image into the correct input format."),Cs.forEach(t),Qe=f(e),D=o(e,"P",{});var ut=n(D);Dt=i(ut,"Load a feature extractor with "),ue=o(ut,"A",{href:!0});var zs=n(ue);Bt=i(zs,"AutoFeatureExtractor.from_pretrained()"),zs.forEach(t),Ht=i(ut,":"),ut.forEach(t),Xe=f(e),g(Y.$$.fragment,e),Ye=f(e),M=o(e,"H2",{class:!0});var ht=n(M);B=o(ht,"A",{id:!0,class:!0,href:!0});var Ms=n(B);qe=o(Ms,"SPAN",{});var Ls=n(qe);g(Z.$$.fragment,Ls),Ls.forEach(t),Ms.forEach(t),Rt=f(ht),Pe=o(ht,"SPAN",{});var Ss=n(Pe);Gt=i(Ss,"AutoProcessor"),Ss.forEach(t),ht.forEach(t),Ze=f(e),H=o(e,"P",{});var mt=n(H);Vt=i(mt,"Multimodal tasks require a processor that combines two types of preprocessing tools. 
For example, the "),he=o(mt,"A",{href:!0});var Ns=n(he);Wt=i(Ns,"LayoutLMV2"),Ns.forEach(t),Jt=i(mt," model requires a feature extractor to handle images and a tokenizer to handle text; a processor combines both of them."),mt.forEach(t),et=f(e),R=o(e,"P",{});var dt=n(R);Ut=i(dt,"Load a processor with "),me=o(dt,"A",{href:!0});var Is=n(me);Kt=i(Is,"AutoProcessor.from_pretrained()"),Is.forEach(t),Qt=i(dt,":"),dt.forEach(t),tt=f(e),g(ee.$$.fragment,e),st=f(e),L=o(e,"H2",{class:!0});var vt=n(L);G=o(vt,"A",{id:!0,class:!0,href:!0});var Os=n(G);Ce=o(Os,"SPAN",{});var Ds=n(Ce);g(te.$$.fragment,Ds),Ds.forEach(t),Os.forEach(t),Xt=f(vt),ze=o(vt,"SPAN",{});var Bs=n(ze);Yt=i(Bs,"AutoModel"),Bs.forEach(t),vt.forEach(t),at=f(e),x=o(e,"P",{});var oe=n(x);Zt=i(oe,"Finally, the "),Me=o(oe,"CODE",{});var Hs=n(Me);es=i(Hs,"AutoModelFor"),Hs.forEach(t),ts=i(oe," classes let you load a pretrained model for a given task (see "),de=o(oe,"A",{href:!0});var Rs=n(de);ss=i(Rs,"here"),Rs.forEach(t),as=i(oe," for a complete list of available tasks). For example, load a model for sequence classification with "),Le=o(oe,"CODE",{});var Gs=n(Le);rs=i(Gs,"AutoModelForSequenceClassification.from_pretrained()"),Gs.forEach(t),oe.forEach(t),rt=f(e),g(se.$$.fragment,e),ot=f(e),ve=o(e,"P",{});var Vs=n(ve);os=i(Vs,"Easily reuse the same checkpoint to load an architecture for a different task:"),Vs.forEach(t),nt=f(e),g(ae.$$.fragment,e),lt=f(e),w=o(e,"P",{});var W=n(w);ns=i(W,"Generally, we recommend using the "),Se=o(W,"CODE",{});var Ws=n(Se);ls=i(Ws,"AutoTokenizer"),Ws.forEach(t),is=i(W," class and the "),Ne=o(W,"CODE",{});var Js=n(Ne);ps=i(Js,"AutoModelFor"),Js.forEach(t),cs=i(W," class to load pretrained instances of models. This will ensure you load the correct architecture every time. 
In the next "),ke=o(W,"A",{href:!0});var Us=n(ke);fs=i(Us,"tutorial"),Us.forEach(t),us=i(W,", learn how to use your newly loaded tokenizer, feature extractor and processor to preprocess a dataset for fine-tuning."),W.forEach(t),this.h()},h(){u(h,"name","hf:doc:metadata"),u(h,"content",JSON.stringify(aa)),u(d,"id","load-pretrained-instances-with-an-autoclass"),u(d,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(d,"href","#load-pretrained-instances-with-an-autoclass"),u(m,"class","relative group"),u(N,"id","autotokenizer"),u(N,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(N,"href","#autotokenizer"),u(C,"class","relative group"),u(pe,"href","/docs/transformers/pr_16143/en/model_doc/auto#transformers.AutoTokenizer.from_pretrained"),u(O,"id","autofeatureextractor"),u(O,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(O,"href","#autofeatureextractor"),u(z,"class","relative group"),u(ue,"href","/docs/transformers/pr_16143/en/model_doc/auto#transformers.AutoFeatureExtractor.from_pretrained"),u(B,"id","autoprocessor"),u(B,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(B,"href","#autoprocessor"),u(M,"class","relative group"),u(he,"href","model_doc/layoutlmv2"),u(me,"href","/docs/transformers/pr_16143/en/model_doc/auto#transformers.AutoProcessor.from_pretrained"),u(G,"id","automodel"),u(G,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(G,"href","#automodel"),u(L,"class","relative group"),u(de,"href","model_doc/auto"),u(ke,"href","preprocessing")},m(e,a){s(document.head,h),p(e,F,a),p(e,m,a),s(m,d),s(d,T),y(v,T,null),s(m,J),s(m,q),s(q,P),p(e,k,a),p(e,b,a),s(b,ne),s(b,we),s(we,bt),s(b,_t),s(b,$e),s($e,wt),s(b,$t),p(e,Oe,a),y(S,e,a),p(e,De,a),p(e,le,a),s(le,gt),p(e,Be,a),p(e,_,a),s(_,ge),s(ge,yt),s(_,At),s(_,ye),s(ye,jt),s(_,Et),s(_,Ae),s(Ae,xt),s(_,Ft),s(_,je),s(je,Tt),p(e,He,a),p(e,C,a),s(C,N),s(N,Ee),y(U,Ee,null),s(C,qt),s(C,xe),s(xe,Pt),p(e,Re,a),p(e,ie,a),s(ie,Ct),p(e,Ge,a),p(e,I,a),s(I,zt),s(I,pe),s(pe,Mt),s(I,Lt),p(e,Ve,a),y(K,e,a),p(e,We,a),p(e,ce,a),s(ce,St),p(e,Je,a),y(Q,e,a),p(e,Ue,a),p(e,z,a),s(z,O),s(O,Fe),y(X,Fe,null),s(z,Nt),s(z,Te),s(Te,It),p(e,Ke,a),p(e,fe,a),s(fe,Ot),p(e,Qe,a),p(e,D,a),s(D,Dt),s(D,ue),s(ue,Bt),s(D,Ht),p(e,Xe,a),y(Y,e,a),p(e,Ye,a),p(e,M,a),s(M,B),s(B,qe),y(Z,qe,null),s(M,Rt),s(M,Pe),s(Pe,Gt),p(e,Ze,a),p(e,H,a),s(H,Vt),s(H,he),s(he,Wt),s(H,Jt),p(e,et,a),p(e,R,a),s(R,Ut),s(R,me),s(me,Kt),s(R,Qt),p(e,tt,a),y(ee,e,a),p(e,st,a),p(e,L,a),s(L,G),s(G,Ce),y(te,Ce,null),s(L,Xt),s(L,ze),s(ze,Yt),p(e,at,a),p(e,x,a),s(x,Zt),s(x,Me),s(Me,es),s(x,ts),s(x,de),s(de,ss),s(x,as),s(x,Le),s(Le,rs),p(e,rt,a),y(se,e,a),p(e,ot,a),p(e,ve,a),s(ve,os),p(e,nt,a),y(ae,e,a),p(e,lt,a),p(e,w,a),s(w,ns),s(w,Se),s(Se,ls),s(w,is),s(w,Ne),s(Ne,ps),s(w,cs),s(w,ke),s(ke,fs),s(w,us),it=!0},p(e,[a]){const 
re={};a&2&&(re.$$scope={dirty:a,ctx:e}),S.$set(re)},i(e){it||(A(v.$$.fragment,e),A(S.$$.fragment,e),A(U.$$.fragment,e),A(K.$$.fragment,e),A(Q.$$.fragment,e),A(X.$$.fragment,e),A(Y.$$.fragment,e),A(Z.$$.fragment,e),A(ee.$$.fragment,e),A(te.$$.fragment,e),A(se.$$.fragment,e),A(ae.$$.fragment,e),it=!0)},o(e){j(v.$$.fragment,e),j(S.$$.fragment,e),j(U.$$.fragment,e),j(K.$$.fragment,e),j(Q.$$.fragment,e),j(X.$$.fragment,e),j(Y.$$.fragment,e),j(Z.$$.fragment,e),j(ee.$$.fragment,e),j(te.$$.fragment,e),j(se.$$.fragment,e),j(ae.$$.fragment,e),it=!1},d(e){t(h),e&&t(F),e&&t(m),E(v),e&&t(k),e&&t(b),e&&t(Oe),E(S,e),e&&t(De),e&&t(le),e&&t(Be),e&&t(_),e&&t(He),e&&t(C),E(U),e&&t(Re),e&&t(ie),e&&t(Ge),e&&t(I),e&&t(Ve),E(K,e),e&&t(We),e&&t(ce),e&&t(Je),E(Q,e),e&&t(Ue),e&&t(z),E(X),e&&t(Ke),e&&t(fe),e&&t(Qe),e&&t(D),e&&t(Xe),E(Y,e),e&&t(Ye),e&&t(M),E(Z),e&&t(Ze),e&&t(H),e&&t(et),e&&t(R),e&&t(tt),E(ee,e),e&&t(st),e&&t(L),E(te),e&&t(at),e&&t(x),e&&t(rt),E(se,e),e&&t(ot),e&&t(ve),e&&t(nt),E(ae,e),e&&t(lt),e&&t(w)}}}const aa={local:"load-pretrained-instances-with-an-autoclass",sections:[{local:"autotokenizer",title:"AutoTokenizer"},{local:"autofeatureextractor",title:"AutoFeatureExtractor"},{local:"autoprocessor",title:"AutoProcessor"},{local:"automodel",title:"AutoModel"}],title:"Load pretrained instances with an AutoClass"};function ra(_e,h,F){let{fw:m}=h;return _e.$$set=d=>{"fw"in d&&F(0,m=d.fw)},[m]}class fa extends Qs{constructor(h){super();Xs(this,h,ra,sa,Ys,{fw:0})}}export{fa as default,aa as metadata};
256
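The compiled autoclass page above documents loading a matching tokenizer and model from a single checkpoint, but its embedded snippets stop at loading. As a minimal companion sketch — assuming the same distilbert-base-uncased checkpoint used in those snippets plus an ordinary PyTorch forward pass that the page itself does not show — the pieces combine as follows:

import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

# Reuse one checkpoint name for both the tokenizer and the model (illustrative choice)
checkpoint = "distilbert-base-uncased"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForSequenceClassification.from_pretrained(checkpoint)

# Tokenize a sentence and run it through the freshly loaded model
inputs = tokenizer("In a hole in the ground there lived a hobbit.", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(logits.shape)  # torch.Size([1, num_labels])

Note that the classification head of a base checkpoint like this is randomly initialized, so the logits only become meaningful after fine-tuning on a labeled task.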
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages/multilingual.mdx-0feae7c7.js
import{S as Zr,i as ei,s as ti,e as l,k as d,w as u,t as s,M as ai,c as o,d as a,m as g,a as n,x as h,h as r,b as f,F as t,g as m,y as c,L as li,q as p,o as _,B as b}from"../chunks/vendor-4833417e.js";import{I as re}from"../chunks/IconCopyLink-4b81c553.js";import{C as L}from"../chunks/CodeBlock-6a3d1b46.js";import{D as oi}from"../chunks/DocNotebookDropdown-ecff2a90.js";import"../chunks/CopyButton-dacfbfaf.js";function ni(jl){let j,ze,$,T,gt,ie,zl,ft,ql,ha,me,ca,z,Cl,ut,Dl,Pl,de,Ol,Xl,pa,D,N,ht,ge,Il,ct,Al,_a,qe,Sl,ba,P,B,pt,fe,Nl,_t,Bl,ka,Ce,Rl,va,k,De,bt,Fl,Hl,Gl,Pe,kt,Wl,Ul,Yl,Oe,vt,Jl,Kl,Ql,Xe,Et,Vl,Zl,eo,Ie,$t,to,ao,lo,Ae,wt,oo,no,so,Se,Mt,ro,io,Ea,w,mo,yt,go,fo,xt,uo,ho,Tt,co,po,$a,R,_o,Lt,bo,ko,wa,ue,Ma,F,vo,jt,Eo,$o,ya,he,xa,Ne,wo,Ta,ce,La,M,Mo,zt,yo,xo,qt,To,Lo,Ct,jo,zo,ja,pe,za,H,qo,Dt,Co,Do,qa,_e,Ca,q,Po,be,Oo,Xo,Pt,Io,Ao,Da,O,G,Ot,ke,So,Xt,No,Pa,Be,Bo,Oa,W,Re,It,Ro,Fo,Ho,Fe,At,Go,Wo,Xa,He,Uo,Ia,X,U,St,ve,Yo,Nt,Jo,Aa,Ge,Ko,Sa,Y,We,Bt,Qo,Vo,Zo,Ue,Rt,en,tn,Na,Ye,an,Ba,I,J,Ft,Ee,ln,Ht,on,Ra,Je,nn,Fa,K,Ke,Gt,sn,rn,mn,Qe,Wt,dn,gn,Ha,Ve,fn,Ga,A,Q,Ut,$e,un,Yt,hn,Wa,Ze,cn,Ua,V,et,Jt,pn,_n,bn,tt,Kt,kn,vn,Ya,Z,En,Qt,$n,wn,Ja,we,Ka,at,Mn,Qa,Me,Va,y,yn,Vt,xn,Tn,Zt,Ln,jn,ea,zn,qn,Za,ye,el,S,ee,ta,xe,Cn,aa,Dn,tl,lt,Pn,al,v,ot,la,On,Xn,In,nt,oa,An,Sn,Nn,st,na,Bn,Rn,Fn,rt,sa,Hn,Gn,Wn,ra,ia,Un,ll,te,Yn,ma,Jn,Kn,ol,Te,nl,it,Qn,sl,Le,rl,x,Vn,da,Zn,es,ga,ts,as,fa,ls,os,il,je,ml,ae,ns,ua,ss,rs,dl;return ie=new re({}),me=new oi({props:{classNames:"absolute z-10 right-0 top-0",options:[{label:"Mixed",value:"https://colab.research.google.com/github/huggingface/notebooks/blob/master/transformers_doc/multilingual.ipynb"},{label:"PyTorch",value:"https://colab.research.google.com/github/huggingface/notebooks/blob/master/transformers_doc/pytorch/multilingual.ipynb"},{label:"TensorFlow",value:"https://colab.research.google.com/github/huggingface/notebooks/blob/master/transformers_doc/tensorflow/multilingual.ipynb"},{label:"Mixed",value:"https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/transformers_doc/multilingual.ipynb"},{label:"PyTorch",value:"https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/transformers_doc/pytorch/multilingual.ipynb"},{label:"TensorFlow",value:"https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/transformers_doc/tensorflow/multilingual.ipynb"}]}}),ge=new re({}),fe=new re({}),ue=new L({props:{code:`import torch from transformers import XLMTokenizer, XLMWithLMHeadModel tokenizer = XLMTokenizer.from_pretrained("xlm-clm-enfr-1024") model = XLMWithLMHeadModel.from_pretrained("xlm-clm-enfr-1024")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> XLMTokenizer, XLMWithLMHeadModel <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = XLMTokenizer.from_pretrained(<span class="hljs-string">&quot;xlm-clm-enfr-1024&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = XLMWithLMHeadModel.from_pretrained(<span class="hljs-string">&quot;xlm-clm-enfr-1024&quot;</span>)`}}),he=new L({props:{code:"print(tokenizer.lang2id)",highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(tokenizer.lang2id) {<span class="hljs-string">&#x27;en&#x27;</span>: <span class="hljs-number">0</span>, <span class="hljs-string">&#x27;fr&#x27;</span>: <span class="hljs-number">1</span>}`}}),ce=new 
L({props:{code:'input_ids = torch.tensor([tokenizer.encode("Wikipedia was used to")]) # batch size of 1',highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = torch.tensor([tokenizer.encode(<span class="hljs-string">&quot;Wikipedia was used to&quot;</span>)]) <span class="hljs-comment"># batch size of 1</span>'}}),pe=new L({props:{code:`language_id = tokenizer.lang2id["en"] # 0 langs = torch.tensor([language_id] * input_ids.shape[1]) # torch.tensor([0, 0, 0, ..., 0]) # We reshape it to be of size (batch_size, sequence_length) langs = langs.view(1, -1) # is now of shape [1, sequence_length] (we have a batch size of 1)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>language_id = tokenizer.lang2id[<span class="hljs-string">&quot;en&quot;</span>] <span class="hljs-comment"># 0</span> <span class="hljs-meta">&gt;&gt;&gt; </span>langs = torch.tensor([language_id] * input_ids.shape[<span class="hljs-number">1</span>]) <span class="hljs-comment"># torch.tensor([0, 0, 0, ..., 0])</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># We reshape it to be of size (batch_size, sequence_length)</span> <span class="hljs-meta">&gt;&gt;&gt; </span>langs = langs.view(<span class="hljs-number">1</span>, -<span class="hljs-number">1</span>) <span class="hljs-comment"># is now of shape [1, sequence_length] (we have a batch size of 1)</span>`}}),_e=new L({props:{code:"outputs = model(input_ids, langs=langs)",highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_ids, langs=langs)'}}),ke=new re({}),ve=new re({}),Ee=new re({}),$e=new re({}),we=new L({props:{code:`from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer en_text = "Do not meddle in the affairs of wizards, for they are subtle and quick to anger." chinese_text = "\u4E0D\u8981\u63D2\u624B\u5DEB\u5E2B\u7684\u4E8B\u52D9, \u56E0\u70BA\u4ED6\u5011\u662F\u5FAE\u5999\u7684, \u5F88\u5FEB\u5C31\u6703\u767C\u6012." 
tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="zh") model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> M2M100ForConditionalGeneration, M2M100Tokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>en_text = <span class="hljs-string">&quot;Do not meddle in the affairs of wizards, for they are subtle and quick to anger.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>chinese_text = <span class="hljs-string">&quot;\u4E0D\u8981\u63D2\u624B\u5DEB\u5E2B\u7684\u4E8B\u52D9, \u56E0\u70BA\u4ED6\u5011\u662F\u5FAE\u5999\u7684, \u5F88\u5FEB\u5C31\u6703\u767C\u6012.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = M2M100Tokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/m2m100_418M&quot;</span>, src_lang=<span class="hljs-string">&quot;zh&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = M2M100ForConditionalGeneration.from_pretrained(<span class="hljs-string">&quot;facebook/m2m100_418M&quot;</span>)`}}),Me=new L({props:{code:'encoded_zh = tokenizer(chinese_text, return_tensors="pt")',highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>encoded_zh = tokenizer(chinese_text, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>)'}}),ye=new L({props:{code:`generated_tokens = model.generate(**encoded_zh, forced_bos_token_id=tokenizer.get_lang_id("en")) tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>generated_tokens = model.generate(**encoded_zh, forced_bos_token_id=tokenizer.get_lang_id(<span class="hljs-string">&quot;en&quot;</span>)) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.batch_decode(generated_tokens, skip_special_tokens=<span class="hljs-literal">True</span>) <span class="hljs-string">&#x27;Do not interfere with the matters of the witches, because they are delicate and will soon be angry.&#x27;</span>`}}),xe=new re({}),Te=new L({props:{code:`from transformers import AutoTokenizer, AutoModelForSeq2SeqLM en_text = "Do not meddle in the affairs of wizards, for they are subtle and quick to anger." fi_text = "\xC4l\xE4 sekaannu velhojen asioihin, sill\xE4 ne ovat hienovaraisia ja nopeasti vihaisia." 
tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-50-many-to-many-mmt", src_lang="fi_FI") model = AutoModelForSeq2SeqLM.from_pretrained("facebook/mbart-large-50-many-to-many-mmt")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer, AutoModelForSeq2SeqLM <span class="hljs-meta">&gt;&gt;&gt; </span>en_text = <span class="hljs-string">&quot;Do not meddle in the affairs of wizards, for they are subtle and quick to anger.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>fi_text = <span class="hljs-string">&quot;\xC4l\xE4 sekaannu velhojen asioihin, sill\xE4 ne ovat hienovaraisia ja nopeasti vihaisia.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/mbart-large-50-many-to-many-mmt&quot;</span>, src_lang=<span class="hljs-string">&quot;fi_FI&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForSeq2SeqLM.from_pretrained(<span class="hljs-string">&quot;facebook/mbart-large-50-many-to-many-mmt&quot;</span>)`}}),Le=new L({props:{code:'encoded_en = tokenizer(en_text, return_tensors="pt")',highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>encoded_en = tokenizer(en_text, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>)'}}),je=new L({props:{code:`generated_tokens = model.generate(**encoded_en, forced_bos_token_id=tokenizer.lang_code_to_id("en_XX")) tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>generated_tokens = model.generate(**encoded_en, forced_bos_token_id=tokenizer.lang_code_to_id(<span class="hljs-string">&quot;en_XX&quot;</span>)) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.batch_decode(generated_tokens, skip_special_tokens=<span class="hljs-literal">True</span>) <span class="hljs-string">&quot;Don&#x27;t interfere with the wizard&#x27;s affairs, because they are subtle, will soon get angry.&quot;</span>`}}),{c(){j=l("meta"),ze=d(),$=l("h1"),T=l("a"),gt=l("span"),u(ie.$$.fragment),zl=d(),ft=l("span"),ql=s("Multilingual models for inference"),ha=d(),u(me.$$.fragment),ca=d(),z=l("p"),Cl=s("There are several multilingual models in \u{1F917} Transformers, and their inference usage differs from monolingual models. Not "),ut=l("em"),Dl=s("all"),Pl=s(" multilingual model usage is different though. Some models, like "),de=l("a"),Ol=s("bert-base-multilingual-uncased"),Xl=s(", can be used just like a monolingual model. This guide will show you how to use multilingual models whose usage differs for inference."),pa=d(),D=l("h2"),N=l("a"),ht=l("span"),u(ge.$$.fragment),Il=d(),ct=l("span"),Al=s("XLM"),_a=d(),qe=l("p"),Sl=s("XLM has ten different checkpoints, only one of which is monolingual. 
The nine remaining model checkpoints can be split into two categories: the checkpoints that use language embeddings and those that don\u2019t."),ba=d(),P=l("h3"),B=l("a"),pt=l("span"),u(fe.$$.fragment),Nl=d(),_t=l("span"),Bl=s("XLM with language embeddings"),ka=d(),Ce=l("p"),Rl=s("The following XLM models use language embeddings to specify the language used at inference:"),va=d(),k=l("ul"),De=l("li"),bt=l("code"),Fl=s("xlm-mlm-ende-1024"),Hl=s(" (Masked language modeling, English-German)"),Gl=d(),Pe=l("li"),kt=l("code"),Wl=s("xlm-mlm-enfr-1024"),Ul=s(" (Masked language modeling, English-French)"),Yl=d(),Oe=l("li"),vt=l("code"),Jl=s("xlm-mlm-enro-1024"),Kl=s(" (Masked language modeling, English-Romanian)"),Ql=d(),Xe=l("li"),Et=l("code"),Vl=s("xlm-mlm-xnli15-1024"),Zl=s(" (Masked language modeling, XNLI languages)"),eo=d(),Ie=l("li"),$t=l("code"),to=s("xlm-mlm-tlm-xnli15-1024"),ao=s(" (Masked language modeling + translation, XNLI languages)"),lo=d(),Ae=l("li"),wt=l("code"),oo=s("xlm-clm-enfr-1024"),no=s(" (Causal language modeling, English-French)"),so=d(),Se=l("li"),Mt=l("code"),ro=s("xlm-clm-ende-1024"),io=s(" (Causal language modeling, English-German)"),Ea=d(),w=l("p"),mo=s("Language embeddings are represented as a tensor of the same shape as the "),yt=l("code"),go=s("input_ids"),fo=s(" passed to the model. The values in these tensors depend on the language used and are identified by the tokenizer\u2019s "),xt=l("code"),uo=s("lang2id"),ho=s(" and "),Tt=l("code"),co=s("id2lang"),po=s(" attributes."),$a=d(),R=l("p"),_o=s("In this example, load the "),Lt=l("code"),bo=s("xlm-clm-enfr-1024"),ko=s(" checkpoint (Causal language modeling, English-French):"),wa=d(),u(ue.$$.fragment),Ma=d(),F=l("p"),vo=s("The "),jt=l("code"),Eo=s("lang2id"),$o=s(" attribute of the tokenizer displays this model\u2019s languages and their ids:"),ya=d(),u(he.$$.fragment),xa=d(),Ne=l("p"),wo=s("Next, create an example input:"),Ta=d(),u(ce.$$.fragment),La=d(),M=l("p"),Mo=s("Set the language id as "),zt=l("code"),yo=s('"en"'),xo=s(" and use it to define the language embedding. The language embedding is a tensor filled with "),qt=l("code"),To=s("0"),Lo=s(" since that is the language id for English. 
This tensor should be the same size as "),Ct=l("code"),jo=s("input_ids"),zo=s("."),ja=d(),u(pe.$$.fragment),za=d(),H=l("p"),qo=s("Now you can pass the "),Dt=l("code"),Co=s("input_ids"),Do=s(" and language embedding to the model:"),qa=d(),u(_e.$$.fragment),Ca=d(),q=l("p"),Po=s("The "),be=l("a"),Oo=s("run_generation.py"),Xo=s(" script can generate text with language embeddings using the "),Pt=l("code"),Io=s("xlm-clm"),Ao=s(" checkpoints."),Da=d(),O=l("h3"),G=l("a"),Ot=l("span"),u(ke.$$.fragment),So=d(),Xt=l("span"),No=s("XLM without language embeddings"),Pa=d(),Be=l("p"),Bo=s("The following XLM models do not require language embeddings during inference:"),Oa=d(),W=l("ul"),Re=l("li"),It=l("code"),Ro=s("xlm-mlm-17-1280"),Fo=s(" (Masked language modeling, 17 languages)"),Ho=d(),Fe=l("li"),At=l("code"),Go=s("xlm-mlm-100-1280"),Wo=s(" (Masked language modeling, 100 languages)"),Xa=d(),He=l("p"),Uo=s("These models are used for generic sentence representations, unlike the previous XLM checkpoints."),Ia=d(),X=l("h2"),U=l("a"),St=l("span"),u(ve.$$.fragment),Yo=d(),Nt=l("span"),Jo=s("BERT"),Aa=d(),Ge=l("p"),Ko=s("The following BERT models can be used for multilingual tasks:"),Sa=d(),Y=l("ul"),We=l("li"),Bt=l("code"),Qo=s("bert-base-multilingual-uncased"),Vo=s(" (Masked language modeling + Next sentence prediction, 102 languages)"),Zo=d(),Ue=l("li"),Rt=l("code"),en=s("bert-base-multilingual-cased"),tn=s(" (Masked language modeling + Next sentence prediction, 104 languages)"),Na=d(),Ye=l("p"),an=s(`These models do not require language embeddings during inference. They should identify the language from the context and infer accordingly.`),Ba=d(),I=l("h2"),J=l("a"),Ft=l("span"),u(Ee.$$.fragment),ln=d(),Ht=l("span"),on=s("XLM-RoBERTa"),Ra=d(),Je=l("p"),nn=s("The following XLM-RoBERTa models can be used for multilingual tasks:"),Fa=d(),K=l("ul"),Ke=l("li"),Gt=l("code"),sn=s("xlm-roberta-base"),rn=s(" (Masked language modeling, 100 languages)"),mn=d(),Qe=l("li"),Wt=l("code"),dn=s("xlm-roberta-large"),gn=s(" (Masked language modeling, 100 languages)"),Ha=d(),Ve=l("p"),fn=s("XLM-RoBERTa was trained on 2.5TB of newly created and cleaned CommonCrawl data in 100 languages. It provides strong gains over previously released multilingual models like mBERT or XLM on downstream tasks like classification, sequence labeling, and question answering."),Ga=d(),A=l("h2"),Q=l("a"),Ut=l("span"),u($e.$$.fragment),un=d(),Yt=l("span"),hn=s("M2M100"),Wa=d(),Ze=l("p"),cn=s("The following M2M100 models can be used for multilingual translation:"),Ua=d(),V=l("ul"),et=l("li"),Jt=l("code"),pn=s("facebook/m2m100_418M"),_n=s(" (Translation)"),bn=d(),tt=l("li"),Kt=l("code"),kn=s("facebook/m2m100_1.2B"),vn=s(" (Translation)"),Ya=d(),Z=l("p"),En=s("In this example, load the "),Qt=l("code"),$n=s("facebook/m2m100_418M"),wn=s(" checkpoint to translate from Chinese to English. You can set the source language in the tokenizer:"),Ja=d(),u(we.$$.fragment),Ka=d(),at=l("p"),Mn=s("Tokenize the text:"),Qa=d(),u(Me.$$.fragment),Va=d(),y=l("p"),yn=s("M2M100 forces the target language id as the first generated token to translate to the target language. 
Set the "),Vt=l("code"),xn=s("forced_bos_token_id"),Tn=s(" to "),Zt=l("code"),Ln=s("en"),jn=s(" in the "),ea=l("code"),zn=s("generate"),qn=s(" method to translate to English:"),Za=d(),u(ye.$$.fragment),el=d(),S=l("h2"),ee=l("a"),ta=l("span"),u(xe.$$.fragment),Cn=d(),aa=l("span"),Dn=s("MBart"),tl=d(),lt=l("p"),Pn=s("The following MBart models can be used for multilingual translation:"),al=d(),v=l("ul"),ot=l("li"),la=l("code"),On=s("facebook/mbart-large-50-one-to-many-mmt"),Xn=s(" (One-to-many multilingual machine translation, 50 languages)"),In=d(),nt=l("li"),oa=l("code"),An=s("facebook/mbart-large-50-many-to-many-mmt"),Sn=s(" (Many-to-many multilingual machine translation, 50 languages)"),Nn=d(),st=l("li"),na=l("code"),Bn=s("facebook/mbart-large-50-many-to-one-mmt"),Rn=s(" (Many-to-one multilingual machine translation, 50 languages)"),Fn=d(),rt=l("li"),sa=l("code"),Hn=s("facebook/mbart-large-50"),Gn=s(" (Multilingual translation, 50 languages)"),Wn=d(),ra=l("li"),ia=l("code"),Un=s("facebook/mbart-large-cc25"),ll=d(),te=l("p"),Yn=s("In this example, load the "),ma=l("code"),Jn=s("facebook/mbart-large-50-many-to-many-mmt"),Kn=s(" checkpoint to translate Finnish to English. You can set the source language in the tokenizer:"),ol=d(),u(Te.$$.fragment),nl=d(),it=l("p"),Qn=s("Tokenize the text:"),sl=d(),u(Le.$$.fragment),rl=d(),x=l("p"),Vn=s("MBart forces the target language id as the first generated token to translate to the target language. Set the "),da=l("code"),Zn=s("forced_bos_token_id"),es=s(" to "),ga=l("code"),ts=s("en"),as=s(" in the "),fa=l("code"),ls=s("generate"),os=s(" method to translate to English:"),il=d(),u(je.$$.fragment),ml=d(),ae=l("p"),ns=s("If you are using the "),ua=l("code"),ss=s("facebook/mbart-large-50-many-to-one-mmt"),rs=s(" checkpoint, you don\u2019t need to force the target language id as the first generated token otherwise the usage is the same."),this.h()},l(e){const i=ai('[data-svelte="svelte-1phssyn"]',document.head);j=o(i,"META",{name:!0,content:!0}),i.forEach(a),ze=g(e),$=o(e,"H1",{class:!0});var gl=n($);T=o(gl,"A",{id:!0,class:!0,href:!0});var Ts=n(T);gt=o(Ts,"SPAN",{});var Ls=n(gt);h(ie.$$.fragment,Ls),Ls.forEach(a),Ts.forEach(a),zl=g(gl),ft=o(gl,"SPAN",{});var js=n(ft);ql=r(js,"Multilingual models for inference"),js.forEach(a),gl.forEach(a),ha=g(e),h(me.$$.fragment,e),ca=g(e),z=o(e,"P",{});var mt=n(z);Cl=r(mt,"There are several multilingual models in \u{1F917} Transformers, and their inference usage differs from monolingual models. Not "),ut=o(mt,"EM",{});var zs=n(ut);Dl=r(zs,"all"),zs.forEach(a),Pl=r(mt," multilingual model usage is different though. Some models, like "),de=o(mt,"A",{href:!0,rel:!0});var qs=n(de);Ol=r(qs,"bert-base-multilingual-uncased"),qs.forEach(a),Xl=r(mt,", can be used just like a monolingual model. This guide will show you how to use multilingual models whose usage differs for inference."),mt.forEach(a),pa=g(e),D=o(e,"H2",{class:!0});var fl=n(D);N=o(fl,"A",{id:!0,class:!0,href:!0});var Cs=n(N);ht=o(Cs,"SPAN",{});var Ds=n(ht);h(ge.$$.fragment,Ds),Ds.forEach(a),Cs.forEach(a),Il=g(fl),ct=o(fl,"SPAN",{});var Ps=n(ct);Al=r(Ps,"XLM"),Ps.forEach(a),fl.forEach(a),_a=g(e),qe=o(e,"P",{});var Os=n(qe);Sl=r(Os,"XLM has ten different checkpoints, only one of which is monolingual. 
The nine remaining model checkpoints can be split into two categories: the checkpoints that use language embeddings and those that don\u2019t."),Os.forEach(a),ba=g(e),P=o(e,"H3",{class:!0});var ul=n(P);B=o(ul,"A",{id:!0,class:!0,href:!0});var Xs=n(B);pt=o(Xs,"SPAN",{});var Is=n(pt);h(fe.$$.fragment,Is),Is.forEach(a),Xs.forEach(a),Nl=g(ul),_t=o(ul,"SPAN",{});var As=n(_t);Bl=r(As,"XLM with language embeddings"),As.forEach(a),ul.forEach(a),ka=g(e),Ce=o(e,"P",{});var Ss=n(Ce);Rl=r(Ss,"The following XLM models use language embeddings to specify the language used at inference:"),Ss.forEach(a),va=g(e),k=o(e,"UL",{});var E=n(k);De=o(E,"LI",{});var is=n(De);bt=o(is,"CODE",{});var Ns=n(bt);Fl=r(Ns,"xlm-mlm-ende-1024"),Ns.forEach(a),Hl=r(is," (Masked language modeling, English-German)"),is.forEach(a),Gl=g(E),Pe=o(E,"LI",{});var ms=n(Pe);kt=o(ms,"CODE",{});var Bs=n(kt);Wl=r(Bs,"xlm-mlm-enfr-1024"),Bs.forEach(a),Ul=r(ms," (Masked language modeling, English-French)"),ms.forEach(a),Yl=g(E),Oe=o(E,"LI",{});var ds=n(Oe);vt=o(ds,"CODE",{});var Rs=n(vt);Jl=r(Rs,"xlm-mlm-enro-1024"),Rs.forEach(a),Kl=r(ds," (Masked language modeling, English-Romanian)"),ds.forEach(a),Ql=g(E),Xe=o(E,"LI",{});var gs=n(Xe);Et=o(gs,"CODE",{});var Fs=n(Et);Vl=r(Fs,"xlm-mlm-xnli15-1024"),Fs.forEach(a),Zl=r(gs," (Masked language modeling, XNLI languages)"),gs.forEach(a),eo=g(E),Ie=o(E,"LI",{});var fs=n(Ie);$t=o(fs,"CODE",{});var Hs=n($t);to=r(Hs,"xlm-mlm-tlm-xnli15-1024"),Hs.forEach(a),ao=r(fs," (Masked language modeling + translation, XNLI languages)"),fs.forEach(a),lo=g(E),Ae=o(E,"LI",{});var us=n(Ae);wt=o(us,"CODE",{});var Gs=n(wt);oo=r(Gs,"xlm-clm-enfr-1024"),Gs.forEach(a),no=r(us," (Causal language modeling, English-French)"),us.forEach(a),so=g(E),Se=o(E,"LI",{});var hs=n(Se);Mt=o(hs,"CODE",{});var Ws=n(Mt);ro=r(Ws,"xlm-clm-ende-1024"),Ws.forEach(a),io=r(hs," (Causal language modeling, English-German)"),hs.forEach(a),E.forEach(a),Ea=g(e),w=o(e,"P",{});var le=n(w);mo=r(le,"Language embeddings are represented as a tensor of the same shape as the "),yt=o(le,"CODE",{});var Us=n(yt);go=r(Us,"input_ids"),Us.forEach(a),fo=r(le," passed to the model. The values in these tensors depend on the language used and are identified by the tokenizer\u2019s "),xt=o(le,"CODE",{});var Ys=n(xt);uo=r(Ys,"lang2id"),Ys.forEach(a),ho=r(le," and "),Tt=o(le,"CODE",{});var Js=n(Tt);co=r(Js,"id2lang"),Js.forEach(a),po=r(le," attributes."),le.forEach(a),$a=g(e),R=o(e,"P",{});var hl=n(R);_o=r(hl,"In this example, load the "),Lt=o(hl,"CODE",{});var Ks=n(Lt);bo=r(Ks,"xlm-clm-enfr-1024"),Ks.forEach(a),ko=r(hl," checkpoint (Causal language modeling, English-French):"),hl.forEach(a),wa=g(e),h(ue.$$.fragment,e),Ma=g(e),F=o(e,"P",{});var cl=n(F);vo=r(cl,"The "),jt=o(cl,"CODE",{});var Qs=n(jt);Eo=r(Qs,"lang2id"),Qs.forEach(a),$o=r(cl," attribute of the tokenizer displays this model\u2019s languages and their ids:"),cl.forEach(a),ya=g(e),h(he.$$.fragment,e),xa=g(e),Ne=o(e,"P",{});var Vs=n(Ne);wo=r(Vs,"Next, create an example input:"),Vs.forEach(a),Ta=g(e),h(ce.$$.fragment,e),La=g(e),M=o(e,"P",{});var oe=n(M);Mo=r(oe,"Set the language id as "),zt=o(oe,"CODE",{});var Zs=n(zt);yo=r(Zs,'"en"'),Zs.forEach(a),xo=r(oe," and use it to define the language embedding. The language embedding is a tensor filled with "),qt=o(oe,"CODE",{});var er=n(qt);To=r(er,"0"),er.forEach(a),Lo=r(oe," since that is the language id for English. 
This tensor should be the same size as "),Ct=o(oe,"CODE",{});var tr=n(Ct);jo=r(tr,"input_ids"),tr.forEach(a),zo=r(oe,"."),oe.forEach(a),ja=g(e),h(pe.$$.fragment,e),za=g(e),H=o(e,"P",{});var pl=n(H);qo=r(pl,"Now you can pass the "),Dt=o(pl,"CODE",{});var ar=n(Dt);Co=r(ar,"input_ids"),ar.forEach(a),Do=r(pl," and language embedding to the model:"),pl.forEach(a),qa=g(e),h(_e.$$.fragment,e),Ca=g(e),q=o(e,"P",{});var dt=n(q);Po=r(dt,"The "),be=o(dt,"A",{href:!0,rel:!0});var lr=n(be);Oo=r(lr,"run_generation.py"),lr.forEach(a),Xo=r(dt," script can generate text with language embeddings using the "),Pt=o(dt,"CODE",{});var or=n(Pt);Io=r(or,"xlm-clm"),or.forEach(a),Ao=r(dt," checkpoints."),dt.forEach(a),Da=g(e),O=o(e,"H3",{class:!0});var _l=n(O);G=o(_l,"A",{id:!0,class:!0,href:!0});var nr=n(G);Ot=o(nr,"SPAN",{});var sr=n(Ot);h(ke.$$.fragment,sr),sr.forEach(a),nr.forEach(a),So=g(_l),Xt=o(_l,"SPAN",{});var rr=n(Xt);No=r(rr,"XLM without language embeddings"),rr.forEach(a),_l.forEach(a),Pa=g(e),Be=o(e,"P",{});var ir=n(Be);Bo=r(ir,"The following XLM models do not require language embeddings during inference:"),ir.forEach(a),Oa=g(e),W=o(e,"UL",{});var bl=n(W);Re=o(bl,"LI",{});var cs=n(Re);It=o(cs,"CODE",{});var mr=n(It);Ro=r(mr,"xlm-mlm-17-1280"),mr.forEach(a),Fo=r(cs," (Masked language modeling, 17 languages)"),cs.forEach(a),Ho=g(bl),Fe=o(bl,"LI",{});var ps=n(Fe);At=o(ps,"CODE",{});var dr=n(At);Go=r(dr,"xlm-mlm-100-1280"),dr.forEach(a),Wo=r(ps," (Masked language modeling, 100 languages)"),ps.forEach(a),bl.forEach(a),Xa=g(e),He=o(e,"P",{});var gr=n(He);Uo=r(gr,"These models are used for generic sentence representations, unlike the previous XLM checkpoints."),gr.forEach(a),Ia=g(e),X=o(e,"H2",{class:!0});var kl=n(X);U=o(kl,"A",{id:!0,class:!0,href:!0});var fr=n(U);St=o(fr,"SPAN",{});var ur=n(St);h(ve.$$.fragment,ur),ur.forEach(a),fr.forEach(a),Yo=g(kl),Nt=o(kl,"SPAN",{});var hr=n(Nt);Jo=r(hr,"BERT"),hr.forEach(a),kl.forEach(a),Aa=g(e),Ge=o(e,"P",{});var cr=n(Ge);Ko=r(cr,"The following BERT models can be used for multilingual tasks:"),cr.forEach(a),Sa=g(e),Y=o(e,"UL",{});var vl=n(Y);We=o(vl,"LI",{});var _s=n(We);Bt=o(_s,"CODE",{});var pr=n(Bt);Qo=r(pr,"bert-base-multilingual-uncased"),pr.forEach(a),Vo=r(_s," (Masked language modeling + Next sentence prediction, 102 languages)"),_s.forEach(a),Zo=g(vl),Ue=o(vl,"LI",{});var bs=n(Ue);Rt=o(bs,"CODE",{});var _r=n(Rt);en=r(_r,"bert-base-multilingual-cased"),_r.forEach(a),tn=r(bs," (Masked language modeling + Next sentence prediction, 104 languages)"),bs.forEach(a),vl.forEach(a),Na=g(e),Ye=o(e,"P",{});var br=n(Ye);an=r(br,`These models do not require language embeddings during inference. 
They should identify the language from the context and infer accordingly.`),br.forEach(a),Ba=g(e),I=o(e,"H2",{class:!0});var El=n(I);J=o(El,"A",{id:!0,class:!0,href:!0});var kr=n(J);Ft=o(kr,"SPAN",{});var vr=n(Ft);h(Ee.$$.fragment,vr),vr.forEach(a),kr.forEach(a),ln=g(El),Ht=o(El,"SPAN",{});var Er=n(Ht);on=r(Er,"XLM-RoBERTa"),Er.forEach(a),El.forEach(a),Ra=g(e),Je=o(e,"P",{});var $r=n(Je);nn=r($r,"The following XLM-RoBERTa models can be used for multilingual tasks:"),$r.forEach(a),Fa=g(e),K=o(e,"UL",{});var $l=n(K);Ke=o($l,"LI",{});var ks=n(Ke);Gt=o(ks,"CODE",{});var wr=n(Gt);sn=r(wr,"xlm-roberta-base"),wr.forEach(a),rn=r(ks," (Masked language modeling, 100 languages)"),ks.forEach(a),mn=g($l),Qe=o($l,"LI",{});var vs=n(Qe);Wt=o(vs,"CODE",{});var Mr=n(Wt);dn=r(Mr,"xlm-roberta-large"),Mr.forEach(a),gn=r(vs," (Masked language modeling, 100 languages)"),vs.forEach(a),$l.forEach(a),Ha=g(e),Ve=o(e,"P",{});var yr=n(Ve);fn=r(yr,"XLM-RoBERTa was trained on 2.5TB of newly created and cleaned CommonCrawl data in 100 languages. It provides strong gains over previously released multilingual models like mBERT or XLM on downstream tasks like classification, sequence labeling, and question answering."),yr.forEach(a),Ga=g(e),A=o(e,"H2",{class:!0});var wl=n(A);Q=o(wl,"A",{id:!0,class:!0,href:!0});var xr=n(Q);Ut=o(xr,"SPAN",{});var Tr=n(Ut);h($e.$$.fragment,Tr),Tr.forEach(a),xr.forEach(a),un=g(wl),Yt=o(wl,"SPAN",{});var Lr=n(Yt);hn=r(Lr,"M2M100"),Lr.forEach(a),wl.forEach(a),Wa=g(e),Ze=o(e,"P",{});var jr=n(Ze);cn=r(jr,"The following M2M100 models can be used for multilingual translation:"),jr.forEach(a),Ua=g(e),V=o(e,"UL",{});var Ml=n(V);et=o(Ml,"LI",{});var Es=n(et);Jt=o(Es,"CODE",{});var zr=n(Jt);pn=r(zr,"facebook/m2m100_418M"),zr.forEach(a),_n=r(Es," (Translation)"),Es.forEach(a),bn=g(Ml),tt=o(Ml,"LI",{});var $s=n(tt);Kt=o($s,"CODE",{});var qr=n(Kt);kn=r(qr,"facebook/m2m100_1.2B"),qr.forEach(a),vn=r($s," (Translation)"),$s.forEach(a),Ml.forEach(a),Ya=g(e),Z=o(e,"P",{});var yl=n(Z);En=r(yl,"In this example, load the "),Qt=o(yl,"CODE",{});var Cr=n(Qt);$n=r(Cr,"facebook/m2m100_418M"),Cr.forEach(a),wn=r(yl," checkpoint to translate from Chinese to English. You can set the source language in the tokenizer:"),yl.forEach(a),Ja=g(e),h(we.$$.fragment,e),Ka=g(e),at=o(e,"P",{});var Dr=n(at);Mn=r(Dr,"Tokenize the text:"),Dr.forEach(a),Qa=g(e),h(Me.$$.fragment,e),Va=g(e),y=o(e,"P",{});var ne=n(y);yn=r(ne,"M2M100 forces the target language id as the first generated token to translate to the target language. 
Set the "),Vt=o(ne,"CODE",{});var Pr=n(Vt);xn=r(Pr,"forced_bos_token_id"),Pr.forEach(a),Tn=r(ne," to "),Zt=o(ne,"CODE",{});var Or=n(Zt);Ln=r(Or,"en"),Or.forEach(a),jn=r(ne," in the "),ea=o(ne,"CODE",{});var Xr=n(ea);zn=r(Xr,"generate"),Xr.forEach(a),qn=r(ne," method to translate to English:"),ne.forEach(a),Za=g(e),h(ye.$$.fragment,e),el=g(e),S=o(e,"H2",{class:!0});var xl=n(S);ee=o(xl,"A",{id:!0,class:!0,href:!0});var Ir=n(ee);ta=o(Ir,"SPAN",{});var Ar=n(ta);h(xe.$$.fragment,Ar),Ar.forEach(a),Ir.forEach(a),Cn=g(xl),aa=o(xl,"SPAN",{});var Sr=n(aa);Dn=r(Sr,"MBart"),Sr.forEach(a),xl.forEach(a),tl=g(e),lt=o(e,"P",{});var Nr=n(lt);Pn=r(Nr,"The following MBart models can be used for multilingual translation:"),Nr.forEach(a),al=g(e),v=o(e,"UL",{});var C=n(v);ot=o(C,"LI",{});var ws=n(ot);la=o(ws,"CODE",{});var Br=n(la);On=r(Br,"facebook/mbart-large-50-one-to-many-mmt"),Br.forEach(a),Xn=r(ws," (One-to-many multilingual machine translation, 50 languages)"),ws.forEach(a),In=g(C),nt=o(C,"LI",{});var Ms=n(nt);oa=o(Ms,"CODE",{});var Rr=n(oa);An=r(Rr,"facebook/mbart-large-50-many-to-many-mmt"),Rr.forEach(a),Sn=r(Ms," (Many-to-many multilingual machine translation, 50 languages)"),Ms.forEach(a),Nn=g(C),st=o(C,"LI",{});var ys=n(st);na=o(ys,"CODE",{});var Fr=n(na);Bn=r(Fr,"facebook/mbart-large-50-many-to-one-mmt"),Fr.forEach(a),Rn=r(ys," (Many-to-one multilingual machine translation, 50 languages)"),ys.forEach(a),Fn=g(C),rt=o(C,"LI",{});var xs=n(rt);sa=o(xs,"CODE",{});var Hr=n(sa);Hn=r(Hr,"facebook/mbart-large-50"),Hr.forEach(a),Gn=r(xs," (Multilingual translation, 50 languages)"),xs.forEach(a),Wn=g(C),ra=o(C,"LI",{});var Gr=n(ra);ia=o(Gr,"CODE",{});var Wr=n(ia);Un=r(Wr,"facebook/mbart-large-cc25"),Wr.forEach(a),Gr.forEach(a),C.forEach(a),ll=g(e),te=o(e,"P",{});var Tl=n(te);Yn=r(Tl,"In this example, load the "),ma=o(Tl,"CODE",{});var Ur=n(ma);Jn=r(Ur,"facebook/mbart-large-50-many-to-many-mmt"),Ur.forEach(a),Kn=r(Tl," checkpoint to translate Finnish to English. You can set the source language in the tokenizer:"),Tl.forEach(a),ol=g(e),h(Te.$$.fragment,e),nl=g(e),it=o(e,"P",{});var Yr=n(it);Qn=r(Yr,"Tokenize the text:"),Yr.forEach(a),sl=g(e),h(Le.$$.fragment,e),rl=g(e),x=o(e,"P",{});var se=n(x);Vn=r(se,"MBart forces the target language id as the first generated token to translate to the target language. 
Set the "),da=o(se,"CODE",{});var Jr=n(da);Zn=r(Jr,"forced_bos_token_id"),Jr.forEach(a),es=r(se," to "),ga=o(se,"CODE",{});var Kr=n(ga);ts=r(Kr,"en"),Kr.forEach(a),as=r(se," in the "),fa=o(se,"CODE",{});var Qr=n(fa);ls=r(Qr,"generate"),Qr.forEach(a),os=r(se," method to translate to English:"),se.forEach(a),il=g(e),h(je.$$.fragment,e),ml=g(e),ae=o(e,"P",{});var Ll=n(ae);ns=r(Ll,"If you are using the "),ua=o(Ll,"CODE",{});var Vr=n(ua);ss=r(Vr,"facebook/mbart-large-50-many-to-one-mmt"),Vr.forEach(a),rs=r(Ll," checkpoint, you don\u2019t need to force the target language id as the first generated token otherwise the usage is the same."),Ll.forEach(a),this.h()},h(){f(j,"name","hf:doc:metadata"),f(j,"content",JSON.stringify(si)),f(T,"id","multilingual-models-for-inference"),f(T,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(T,"href","#multilingual-models-for-inference"),f($,"class","relative group"),f(de,"href","https://huggingface.co/bert-base-multilingual-uncased"),f(de,"rel","nofollow"),f(N,"id","xlm"),f(N,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(N,"href","#xlm"),f(D,"class","relative group"),f(B,"id","xlm-with-language-embeddings"),f(B,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(B,"href","#xlm-with-language-embeddings"),f(P,"class","relative group"),f(be,"href","https://github.com/huggingface/transformers/tree/master/examples/pytorch/text-generation/run_generation.py"),f(be,"rel","nofollow"),f(G,"id","xlm-without-language-embeddings"),f(G,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(G,"href","#xlm-without-language-embeddings"),f(O,"class","relative group"),f(U,"id","bert"),f(U,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(U,"href","#bert"),f(X,"class","relative group"),f(J,"id","xlmroberta"),f(J,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(J,"href","#xlmroberta"),f(I,"class","relative group"),f(Q,"id","m2m100"),f(Q,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(Q,"href","#m2m100"),f(A,"class","relative group"),f(ee,"id","mbart"),f(ee,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(ee,"href","#mbart"),f(S,"class","relative 
group")},m(e,i){t(document.head,j),m(e,ze,i),m(e,$,i),t($,T),t(T,gt),c(ie,gt,null),t($,zl),t($,ft),t(ft,ql),m(e,ha,i),c(me,e,i),m(e,ca,i),m(e,z,i),t(z,Cl),t(z,ut),t(ut,Dl),t(z,Pl),t(z,de),t(de,Ol),t(z,Xl),m(e,pa,i),m(e,D,i),t(D,N),t(N,ht),c(ge,ht,null),t(D,Il),t(D,ct),t(ct,Al),m(e,_a,i),m(e,qe,i),t(qe,Sl),m(e,ba,i),m(e,P,i),t(P,B),t(B,pt),c(fe,pt,null),t(P,Nl),t(P,_t),t(_t,Bl),m(e,ka,i),m(e,Ce,i),t(Ce,Rl),m(e,va,i),m(e,k,i),t(k,De),t(De,bt),t(bt,Fl),t(De,Hl),t(k,Gl),t(k,Pe),t(Pe,kt),t(kt,Wl),t(Pe,Ul),t(k,Yl),t(k,Oe),t(Oe,vt),t(vt,Jl),t(Oe,Kl),t(k,Ql),t(k,Xe),t(Xe,Et),t(Et,Vl),t(Xe,Zl),t(k,eo),t(k,Ie),t(Ie,$t),t($t,to),t(Ie,ao),t(k,lo),t(k,Ae),t(Ae,wt),t(wt,oo),t(Ae,no),t(k,so),t(k,Se),t(Se,Mt),t(Mt,ro),t(Se,io),m(e,Ea,i),m(e,w,i),t(w,mo),t(w,yt),t(yt,go),t(w,fo),t(w,xt),t(xt,uo),t(w,ho),t(w,Tt),t(Tt,co),t(w,po),m(e,$a,i),m(e,R,i),t(R,_o),t(R,Lt),t(Lt,bo),t(R,ko),m(e,wa,i),c(ue,e,i),m(e,Ma,i),m(e,F,i),t(F,vo),t(F,jt),t(jt,Eo),t(F,$o),m(e,ya,i),c(he,e,i),m(e,xa,i),m(e,Ne,i),t(Ne,wo),m(e,Ta,i),c(ce,e,i),m(e,La,i),m(e,M,i),t(M,Mo),t(M,zt),t(zt,yo),t(M,xo),t(M,qt),t(qt,To),t(M,Lo),t(M,Ct),t(Ct,jo),t(M,zo),m(e,ja,i),c(pe,e,i),m(e,za,i),m(e,H,i),t(H,qo),t(H,Dt),t(Dt,Co),t(H,Do),m(e,qa,i),c(_e,e,i),m(e,Ca,i),m(e,q,i),t(q,Po),t(q,be),t(be,Oo),t(q,Xo),t(q,Pt),t(Pt,Io),t(q,Ao),m(e,Da,i),m(e,O,i),t(O,G),t(G,Ot),c(ke,Ot,null),t(O,So),t(O,Xt),t(Xt,No),m(e,Pa,i),m(e,Be,i),t(Be,Bo),m(e,Oa,i),m(e,W,i),t(W,Re),t(Re,It),t(It,Ro),t(Re,Fo),t(W,Ho),t(W,Fe),t(Fe,At),t(At,Go),t(Fe,Wo),m(e,Xa,i),m(e,He,i),t(He,Uo),m(e,Ia,i),m(e,X,i),t(X,U),t(U,St),c(ve,St,null),t(X,Yo),t(X,Nt),t(Nt,Jo),m(e,Aa,i),m(e,Ge,i),t(Ge,Ko),m(e,Sa,i),m(e,Y,i),t(Y,We),t(We,Bt),t(Bt,Qo),t(We,Vo),t(Y,Zo),t(Y,Ue),t(Ue,Rt),t(Rt,en),t(Ue,tn),m(e,Na,i),m(e,Ye,i),t(Ye,an),m(e,Ba,i),m(e,I,i),t(I,J),t(J,Ft),c(Ee,Ft,null),t(I,ln),t(I,Ht),t(Ht,on),m(e,Ra,i),m(e,Je,i),t(Je,nn),m(e,Fa,i),m(e,K,i),t(K,Ke),t(Ke,Gt),t(Gt,sn),t(Ke,rn),t(K,mn),t(K,Qe),t(Qe,Wt),t(Wt,dn),t(Qe,gn),m(e,Ha,i),m(e,Ve,i),t(Ve,fn),m(e,Ga,i),m(e,A,i),t(A,Q),t(Q,Ut),c($e,Ut,null),t(A,un),t(A,Yt),t(Yt,hn),m(e,Wa,i),m(e,Ze,i),t(Ze,cn),m(e,Ua,i),m(e,V,i),t(V,et),t(et,Jt),t(Jt,pn),t(et,_n),t(V,bn),t(V,tt),t(tt,Kt),t(Kt,kn),t(tt,vn),m(e,Ya,i),m(e,Z,i),t(Z,En),t(Z,Qt),t(Qt,$n),t(Z,wn),m(e,Ja,i),c(we,e,i),m(e,Ka,i),m(e,at,i),t(at,Mn),m(e,Qa,i),c(Me,e,i),m(e,Va,i),m(e,y,i),t(y,yn),t(y,Vt),t(Vt,xn),t(y,Tn),t(y,Zt),t(Zt,Ln),t(y,jn),t(y,ea),t(ea,zn),t(y,qn),m(e,Za,i),c(ye,e,i),m(e,el,i),m(e,S,i),t(S,ee),t(ee,ta),c(xe,ta,null),t(S,Cn),t(S,aa),t(aa,Dn),m(e,tl,i),m(e,lt,i),t(lt,Pn),m(e,al,i),m(e,v,i),t(v,ot),t(ot,la),t(la,On),t(ot,Xn),t(v,In),t(v,nt),t(nt,oa),t(oa,An),t(nt,Sn),t(v,Nn),t(v,st),t(st,na),t(na,Bn),t(st,Rn),t(v,Fn),t(v,rt),t(rt,sa),t(sa,Hn),t(rt,Gn),t(v,Wn),t(v,ra),t(ra,ia),t(ia,Un),m(e,ll,i),m(e,te,i),t(te,Yn),t(te,ma),t(ma,Jn),t(te,Kn),m(e,ol,i),c(Te,e,i),m(e,nl,i),m(e,it,i),t(it,Qn),m(e,sl,i),c(Le,e,i),m(e,rl,i),m(e,x,i),t(x,Vn),t(x,da),t(da,Zn),t(x,es),t(x,ga),t(ga,ts),t(x,as),t(x,fa),t(fa,ls),t(x,os),m(e,il,i),c(je,e,i),m(e,ml,i),m(e,ae,i),t(ae,ns),t(ae,ua),t(ua,ss),t(ae,rs),dl=!0},p:li,i(e){dl||(p(ie.$$.fragment,e),p(me.$$.fragment,e),p(ge.$$.fragment,e),p(fe.$$.fragment,e),p(ue.$$.fragment,e),p(he.$$.fragment,e),p(ce.$$.fragment,e),p(pe.$$.fragment,e),p(_e.$$.fragment,e),p(ke.$$.fragment,e),p(ve.$$.fragment,e),p(Ee.$$.fragment,e),p($e.$$.fragment,e),p(we.$$.fragment,e),p(Me.$$.fragment,e),p(ye.$$.fragment,e),p(xe.$$.fragment,e),p(Te.$$.fragment,e),p(Le.$$.fragment,e),p(je.$$.fragment,e),dl=!0)},o(e){_(ie.$$.fragment,e),_(me.$$.fragment,e),_(ge.$$.fragment,e),_(fe.$$.fragment,e),_(u
e.$$.fragment,e),_(he.$$.fragment,e),_(ce.$$.fragment,e),_(pe.$$.fragment,e),_(_e.$$.fragment,e),_(ke.$$.fragment,e),_(ve.$$.fragment,e),_(Ee.$$.fragment,e),_($e.$$.fragment,e),_(we.$$.fragment,e),_(Me.$$.fragment,e),_(ye.$$.fragment,e),_(xe.$$.fragment,e),_(Te.$$.fragment,e),_(Le.$$.fragment,e),_(je.$$.fragment,e),dl=!1},d(e){a(j),e&&a(ze),e&&a($),b(ie),e&&a(ha),b(me,e),e&&a(ca),e&&a(z),e&&a(pa),e&&a(D),b(ge),e&&a(_a),e&&a(qe),e&&a(ba),e&&a(P),b(fe),e&&a(ka),e&&a(Ce),e&&a(va),e&&a(k),e&&a(Ea),e&&a(w),e&&a($a),e&&a(R),e&&a(wa),b(ue,e),e&&a(Ma),e&&a(F),e&&a(ya),b(he,e),e&&a(xa),e&&a(Ne),e&&a(Ta),b(ce,e),e&&a(La),e&&a(M),e&&a(ja),b(pe,e),e&&a(za),e&&a(H),e&&a(qa),b(_e,e),e&&a(Ca),e&&a(q),e&&a(Da),e&&a(O),b(ke),e&&a(Pa),e&&a(Be),e&&a(Oa),e&&a(W),e&&a(Xa),e&&a(He),e&&a(Ia),e&&a(X),b(ve),e&&a(Aa),e&&a(Ge),e&&a(Sa),e&&a(Y),e&&a(Na),e&&a(Ye),e&&a(Ba),e&&a(I),b(Ee),e&&a(Ra),e&&a(Je),e&&a(Fa),e&&a(K),e&&a(Ha),e&&a(Ve),e&&a(Ga),e&&a(A),b($e),e&&a(Wa),e&&a(Ze),e&&a(Ua),e&&a(V),e&&a(Ya),e&&a(Z),e&&a(Ja),b(we,e),e&&a(Ka),e&&a(at),e&&a(Qa),b(Me,e),e&&a(Va),e&&a(y),e&&a(Za),b(ye,e),e&&a(el),e&&a(S),b(xe),e&&a(tl),e&&a(lt),e&&a(al),e&&a(v),e&&a(ll),e&&a(te),e&&a(ol),b(Te,e),e&&a(nl),e&&a(it),e&&a(sl),b(Le,e),e&&a(rl),e&&a(x),e&&a(il),b(je,e),e&&a(ml),e&&a(ae)}}}const si={local:"multilingual-models-for-inference",sections:[{local:"xlm",sections:[{local:"xlm-with-language-embeddings",title:"XLM with language embeddings"},{local:"xlm-without-language-embeddings",title:"XLM without language embeddings"}],title:"XLM"},{local:"bert",title:"BERT"},{local:"xlmroberta",title:"XLM-RoBERTa"},{local:"m2m100",title:"M2M100"},{local:"mbart",title:"MBart"}],title:"Multilingual models for inference"};function ri(jl,j,ze){let{fw:$}=j;return jl.$$set=T=>{"fw"in T&&ze(0,$=T.fw)},[$]}class ui extends Zr{constructor(j){super();ei(this,j,ri,ni,ti,{fw:0})}}export{ui as default,si as metadata};
257
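The compiled multilingual page above embeds the M2M100 translation steps (load, tokenize, generate) as separate strings. Consolidated into one runnable sketch, and assuming nothing beyond what those embedded snippets already show, Chinese-to-English translation with facebook/m2m100_418M looks like this:

from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer

chinese_text = "不要插手巫師的事務, 因為他們是微妙的, 很快就會發怒."
tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="zh")
model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")

encoded_zh = tokenizer(chinese_text, return_tensors="pt")
# M2M100 forces the target language id as the first generated token
generated_tokens = model.generate(**encoded_zh, forced_bos_token_id=tokenizer.get_lang_id("en"))
print(tokenizer.batch_decode(generated_tokens, skip_special_tokens=True))

For the MBart checkpoints covered on the same page the pattern is identical, except the forced token comes from tokenizer.lang_code_to_id("en_XX") rather than get_lang_id, as the embedded mbart snippet shows.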
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages/run_scripts.mdx-dea66c26.js
import{S as Fh,i as Rh,s as Hh,e as a,k as h,w as _,t as s,M as Mh,c as o,d as r,m,a as i,x as d,h as l,b as p,F as t,g as u,y as v,L as Gh,q as g,o as y,B as w}from"../chunks/vendor-4833417e.js";import{I as R}from"../chunks/IconCopyLink-4b81c553.js";import{C as $}from"../chunks/CodeBlock-6a3d1b46.js";import{C as jh}from"../chunks/CodeBlockFw-27a176a0.js";import"../chunks/CopyButton-dacfbfaf.js";function Bh(pi){let q,ft,z,P,pr,ge,ui,ur,hi,Ca,b,mi,ct,fi,ci,ye,_i,di,we,vi,gi,Ee,yi,wi,Da,I,Ei,$e,$i,bi,be,xi,zi,Na,_t,Ti,Oa,L,ki,xe,Ai,Pi,ze,qi,Ii,ja,S,Li,Te,Si,Ui,ke,Ci,Di,Fa,H,K,hr,Ae,Ni,mr,Oi,Ra,Q,ji,fr,Fi,Ri,Ha,Pe,Ma,dt,Hi,Ga,Z,cr,Mi,Gi,f,_r,vt,Bi,Ji,dr,gt,Wi,Xi,vr,yt,Yi,Vi,gr,wt,Ki,Qi,yr,Et,Zi,es,wr,$t,ts,rs,Er,bt,as,os,$r,xt,is,ss,br,zt,ls,ns,xr,Tt,ps,us,zr,kt,hs,ms,Tr,At,fs,cs,kr,Pt,_s,ds,Ar,qt,vs,gs,Pr,It,ys,ws,qr,Lt,Es,$s,Ir,St,bs,xs,Lr,Ut,zs,Ts,Sr,Ct,ks,As,Ur,Dt,Ps,qs,Cr,Nt,Is,Ls,Dr,Ot,Ss,Us,Nr,jt,Cs,Ds,Or,Ft,Ns,Os,jr,Rt,js,Fs,Fr,Ht,Rs,Hs,Rr,Mt,Ms,Ba,Gt,Gs,Ja,qe,Wa,Bt,Bs,Xa,Ie,Ya,M,ee,Hr,Le,Js,Mr,Ws,Va,E,Xs,Se,Ys,Vs,Ue,Ks,Qs,Ce,Zs,el,De,tl,rl,Gr,al,ol,Ka,Ne,Qa,G,te,Br,Oe,il,Jr,sl,Za,re,ll,je,nl,pl,eo,ae,Fe,ul,Wr,hl,ml,fl,Re,cl,Xr,_l,dl,to,He,ro,oe,vl,Me,Yr,gl,yl,ao,B,ie,Vr,Ge,wl,Kr,El,oo,x,$l,Be,bl,xl,Je,zl,Tl,Qr,kl,Al,Zr,Pl,ql,io,U,Il,We,ea,Ll,Sl,ta,Ul,Cl,so,Xe,lo,J,se,ra,Ye,Dl,aa,Nl,no,le,Ol,Ve,jl,Fl,po,Ke,uo,T,Rl,oa,Hl,Ml,ia,Gl,Bl,sa,Jl,Wl,ho,Qe,mo,Jt,Xl,fo,Ze,co,Wt,Yl,_o,et,vo,W,ne,la,tt,Vl,na,Kl,go,Xt,Ql,yo,C,pe,pa,Zl,en,ua,tn,rn,an,Yt,ha,on,sn,ln,Vt,ma,nn,pn,wo,Kt,un,Eo,rt,$o,X,ue,fa,at,hn,ca,mn,bo,Qt,fn,xo,D,_a,da,cn,_n,va,ga,dn,vn,ya,wa,gn,zo,ot,To,N,yn,Ea,wn,En,$a,$n,bn,ko,it,Ao,Y,he,ba,st,xn,xa,zn,Po,Zt,Tn,qo,k,kn,za,An,Pn,Ta,qn,In,ka,Ln,Sn,Io,lt,Lo,me,Un,Aa,Cn,Dn,So,nt,Uo,V,fe,Pa,pt,Nn,qa,On,Co,ce,jn,ut,Fn,Rn,Do,ht,No,O,Hn,Ia,Mn,Gn,La,Bn,Jn,Oo,_e,Wn,Sa,Xn,Yn,jo,er,Vn,Fo,mt,Ro;return ge=new R({}),Ae=new R({}),Pe=new $({props:{code:`git clone https://github.com/huggingface/transformers cd transformers pip install .`,highlighted:`git <span class="hljs-built_in">clone</span> https://github.com/huggingface/transformers <span class="hljs-built_in">cd</span> transformers pip install .`}}),qe=new $({props:{code:"git checkout tags/v3.5.1",highlighted:"git checkout tags/v3.5.1"}}),Ie=new $({props:{code:"pip install -r requirements.txt",highlighted:"pip install -r requirements.txt"}}),Le=new R({}),Ne=new jh({props:{group1:{id:"pt",code:`python examples/pytorch/summarization/run_summarization.py \\ --model_name_or_path t5-small \\ --do_train \\ --do_eval \\ --dataset_name cnn_dailymail \\ --dataset_config "3.0.0" \\ --source_prefix "summarize: " \\ --output_dir /tmp/tst-summarization \\ --per_device_train_batch_size=4 \\ --per_device_eval_batch_size=4 \\ --overwrite_output_dir \\ --predict_with_generate`,highlighted:`python examples/pytorch/summarization/run_summarization.py \\ --model_name_or_path t5-small \\ --do_train \\ --do_eval \\ --dataset_name cnn_dailymail \\ --dataset_config <span class="hljs-string">&quot;3.0.0&quot;</span> \\ --source_prefix <span class="hljs-string">&quot;summarize: &quot;</span> \\ --output_dir /tmp/tst-summarization \\ --per_device_train_batch_size=4 \\ --per_device_eval_batch_size=4 \\ --overwrite_output_dir \\ --predict_with_generate`},group2:{id:"tf",code:`python examples/tensorflow/summarization/run_summarization.py \\ --model_name_or_path t5-small \\ --dataset_name cnn_dailymail \\ --dataset_config "3.0.0" \\ --output_dir /tmp/tst-summarization \\ --per_device_train_batch_size 8 \\ --per_device_eval_batch_size 16 \\ --num_train_epochs 3 \\ --do_train 
\\ --do_eval`,highlighted:`python examples/tensorflow/summarization/run_summarization.py \\ --model_name_or_path t5-small \\ --dataset_name cnn_dailymail \\ --dataset_config <span class="hljs-string">&quot;3.0.0&quot;</span> \\ --output_dir /tmp/tst-summarization \\ --per_device_train_batch_size 8 \\ --per_device_eval_batch_size 16 \\ --num_train_epochs 3 \\ --do_train \\ --do_eval`}}}),Oe=new R({}),He=new $({props:{code:`python -m torch.distributed.launch \\ --nproc_per_node 8 pytorch/summarization/run_summarization.py \\ --fp16 \\ --model_name_or_path t5-small \\ --do_train \\ --do_eval \\ --dataset_name cnn_dailymail \\ --dataset_config "3.0.0" \\ --source_prefix "summarize: " \\ --output_dir /tmp/tst-summarization \\ --per_device_train_batch_size=4 \\ --per_device_eval_batch_size=4 \\ --overwrite_output_dir \\ --predict_with_generate`,highlighted:`python -m torch.distributed.launch \\ --nproc_per_node 8 pytorch/summarization/run_summarization.py \\ --fp16 \\ --model_name_or_path t5-small \\ --do_train \\ --do_eval \\ --dataset_name cnn_dailymail \\ --dataset_config <span class="hljs-string">&quot;3.0.0&quot;</span> \\ --source_prefix <span class="hljs-string">&quot;summarize: &quot;</span> \\ --output_dir /tmp/tst-summarization \\ --per_device_train_batch_size=4 \\ --per_device_eval_batch_size=4 \\ --overwrite_output_dir \\ --predict_with_generate`}}),Ge=new R({}),Xe=new jh({props:{group1:{id:"pt",code:`python xla_spawn.py --num_cores 8 \\ summarization/run_summarization.py \\ --model_name_or_path t5-small \\ --do_train \\ --do_eval \\ --dataset_name cnn_dailymail \\ --dataset_config "3.0.0" \\ --source_prefix "summarize: " \\ --output_dir /tmp/tst-summarization \\ --per_device_train_batch_size=4 \\ --per_device_eval_batch_size=4 \\ --overwrite_output_dir \\ --predict_with_generate`,highlighted:`python xla_spawn.py --num_cores 8 \\ summarization/run_summarization.py \\ --model_name_or_path t5-small \\ --do_train \\ --do_eval \\ --dataset_name cnn_dailymail \\ --dataset_config <span class="hljs-string">&quot;3.0.0&quot;</span> \\ --source_prefix <span class="hljs-string">&quot;summarize: &quot;</span> \\ --output_dir /tmp/tst-summarization \\ --per_device_train_batch_size=4 \\ --per_device_eval_batch_size=4 \\ --overwrite_output_dir \\ --predict_with_generate`},group2:{id:"tf",code:`python run_summarization.py \\ --tpu name_of_tpu_resource \\ --model_name_or_path t5-small \\ --dataset_name cnn_dailymail \\ --dataset_config "3.0.0" \\ --output_dir /tmp/tst-summarization \\ --per_device_train_batch_size 8 \\ --per_device_eval_batch_size 16 \\ --num_train_epochs 3 \\ --do_train \\ --do_eval`,highlighted:`python run_summarization.py \\ --tpu name_of_tpu_resource \\ --model_name_or_path t5-small \\ --dataset_name cnn_dailymail \\ --dataset_config <span class="hljs-string">&quot;3.0.0&quot;</span> \\ --output_dir /tmp/tst-summarization \\ --per_device_train_batch_size 8 \\ --per_device_eval_batch_size 16 \\ --num_train_epochs 3 \\ --do_train \\ --do_eval`}}}),Ye=new R({}),Ke=new $({props:{code:"pip install accelerate",highlighted:"pip install accelerate"}}),Qe=new $({props:{code:"accelerate config",highlighted:"accelerate config"}}),Ze=new $({props:{code:"accelerate test",highlighted:'accelerate <span class="hljs-built_in">test</span>'}}),et=new $({props:{code:`accelerate launch run_summarization_no_trainer.py \\ --model_name_or_path t5-small \\ --dataset_name cnn_dailymail \\ --dataset_config "3.0.0" \\ --source_prefix "summarize: " \\ --output_dir 
~/tmp/tst-summarization`,highlighted:`accelerate launch run_summarization_no_trainer.py \\ --model_name_or_path t5-small \\ --dataset_name cnn_dailymail \\ --dataset_config <span class="hljs-string">&quot;3.0.0&quot;</span> \\ --source_prefix <span class="hljs-string">&quot;summarize: &quot;</span> \\ --output_dir ~/tmp/tst-summarization`}}),tt=new R({}),rt=new $({props:{code:`python examples/pytorch/summarization/run_summarization.py \\ --model_name_or_path t5-small \\ --do_train \\ --do_eval \\ --train_file path_to_csv_or_jsonlines_file \\ --validation_file path_to_csv_or_jsonlines_file \\ --text_column text_column_name \\ --summary_column summary_column_name \\ --source_prefix "summarize: " \\ --output_dir /tmp/tst-summarization \\ --overwrite_output_dir \\ --per_device_train_batch_size=4 \\ --per_device_eval_batch_size=4 \\ --predict_with_generate`,highlighted:`python examples/pytorch/summarization/run_summarization.py \\ --model_name_or_path t5-small \\ --do_train \\ --do_eval \\ --train_file path_to_csv_or_jsonlines_file \\ --validation_file path_to_csv_or_jsonlines_file \\ --text_column text_column_name \\ --summary_column summary_column_name \\ --source_prefix <span class="hljs-string">&quot;summarize: &quot;</span> \\ --output_dir /tmp/tst-summarization \\ --overwrite_output_dir \\ --per_device_train_batch_size=4 \\ --per_device_eval_batch_size=4 \\ --predict_with_generate`}}),at=new R({}),ot=new $({props:{code:`python examples/pytorch/summarization/run_summarization.py \\ --model_name_or_path t5-small \\ --max_train_samples 50 \\ --max_eval_samples 50 \\ --max_predict_samples 50 \\ --do_train \\ --do_eval \\ --dataset_name cnn_dailymail \\ --dataset_config "3.0.0" \\ --source_prefix "summarize: " \\ --output_dir /tmp/tst-summarization \\ --per_device_train_batch_size=4 \\ --per_device_eval_batch_size=4 \\ --overwrite_output_dir \\ --predict_with_generate`,highlighted:`python examples/pytorch/summarization/run_summarization.py \\ --model_name_or_path t5-small \\ --max_train_samples 50 \\ --max_eval_samples 50 \\ --max_predict_samples 50 \\ --do_train \\ --do_eval \\ --dataset_name cnn_dailymail \\ --dataset_config <span class="hljs-string">&quot;3.0.0&quot;</span> \\ --source_prefix <span class="hljs-string">&quot;summarize: &quot;</span> \\ --output_dir /tmp/tst-summarization \\ --per_device_train_batch_size=4 \\ --per_device_eval_batch_size=4 \\ --overwrite_output_dir \\ --predict_with_generate`}}),it=new $({props:{code:"examples/pytorch/summarization/run_summarization.py -h",highlighted:"examples/pytorch/summarization/run_summarization.py -h"}}),st=new R({}),lt=new $({props:{code:`python examples/pytorch/summarization/run_summarization.py --model_name_or_path t5-small \\ --do_train \\ --do_eval \\ --dataset_name cnn_dailymail \\ --dataset_config "3.0.0" \\ --source_prefix "summarize: " \\ --output_dir /tmp/tst-summarization \\ --per_device_train_batch_size=4 \\ --per_device_eval_batch_size=4 \\ --output_dir previous_output_dir \\ --predict_with_generate`,highlighted:`python examples/pytorch/summarization/run_summarization.py --model_name_or_path t5-small \\ --do_train \\ --do_eval \\ --dataset_name cnn_dailymail \\ --dataset_config <span class="hljs-string">&quot;3.0.0&quot;</span> \\ --source_prefix <span class="hljs-string">&quot;summarize: &quot;</span> \\ --output_dir /tmp/tst-summarization \\ --per_device_train_batch_size=4 \\ --per_device_eval_batch_size=4 \\ --output_dir previous_output_dir \\ --predict_with_generate`}}),nt=new $({props:{code:`python 
examples/pytorch/summarization/run_summarization.py --model_name_or_path t5-small \\ --do_train \\ --do_eval \\ --dataset_name cnn_dailymail \\ --dataset_config "3.0.0" \\ --source_prefix "summarize: " \\ --output_dir /tmp/tst-summarization \\ --per_device_train_batch_size=4 \\ --per_device_eval_batch_size=4 \\ --overwrite_output_dir \\ --resume_from_checkpoint path_to_specific_checkpoint \\ --predict_with_generate`,highlighted:`python examples/pytorch/summarization/run_summarization.py --model_name_or_path t5-small \\ --do_train \\ --do_eval \\ --dataset_name cnn_dailymail \\ --dataset_config <span class="hljs-string">&quot;3.0.0&quot;</span> \\ --source_prefix <span class="hljs-string">&quot;summarize: &quot;</span> \\ --output_dir /tmp/tst-summarization \\ --per_device_train_batch_size=4 \\ --per_device_eval_batch_size=4 \\ --overwrite_output_dir \\ --resume_from_checkpoint path_to_specific_checkpoint \\ --predict_with_generate`}}),pt=new R({}),ht=new $({props:{code:"huggingface-cli login",highlighted:"huggingface-cli login"}}),mt=new $({props:{code:`python examples/pytorch/summarization/run_summarization.py --model_name_or_path t5-small \\ --do_train \\ --do_eval \\ --dataset_name cnn_dailymail \\ --dataset_config "3.0.0" \\ --source_prefix "summarize: " \\ --push_to_hub \\ --push_to_hub_model_id finetuned-t5-cnn_dailymail \\ --output_dir /tmp/tst-summarization \\ --per_device_train_batch_size=4 \\ --per_device_eval_batch_size=4 \\ --overwrite_output_dir \\ --predict_with_generate`,highlighted:`python examples/pytorch/summarization/run_summarization.py --model_name_or_path t5-small \\ --do_train \\ --do_eval \\ --dataset_name cnn_dailymail \\ --dataset_config <span class="hljs-string">&quot;3.0.0&quot;</span> \\ --source_prefix <span class="hljs-string">&quot;summarize: &quot;</span> \\ --push_to_hub \\ --push_to_hub_model_id finetuned-t5-cnn_dailymail \\ --output_dir /tmp/tst-summarization \\ --per_device_train_batch_size=4 \\ --per_device_eval_batch_size=4 \\ --overwrite_output_dir \\ --predict_with_generate`}}),{c(){q=a("meta"),ft=h(),z=a("h1"),P=a("a"),pr=a("span"),_(ge.$$.fragment),ui=h(),ur=a("span"),hi=s("Train with a script"),Ca=h(),b=a("p"),mi=s("Along with the \u{1F917} Transformers "),ct=a("a"),fi=s("notebooks"),ci=s(", there are also example scripts demonstrating how to train a model for a task with "),ye=a("a"),_i=s("PyTorch"),di=s(", "),we=a("a"),vi=s("TensorFlow"),gi=s(", or "),Ee=a("a"),yi=s("JAX/Flax"),wi=s("."),Da=h(),I=a("p"),Ei=s("You will also find scripts we\u2019ve used in our "),$e=a("a"),$i=s("research projects"),bi=s(" and "),be=a("a"),xi=s("legacy examples"),zi=s(" which are mostly community contributed. These scripts are not actively maintained and require a specific version of \u{1F917} Transformers that will most likely be incompatible with the latest version of the library."),Na=h(),_t=a("p"),Ti=s("The example scripts are not expected to work out-of-the-box on every problem, and you may need to adapt the script to the problem you\u2019re trying to solve. To help you with this, most of the scripts fully expose how data is preprocessed, allowing you to edit it as necessary for your use case."),Oa=h(),L=a("p"),ki=s("For any feature you\u2019d like to implement in an example script, please discuss it on the "),xe=a("a"),Ai=s("forum"),Pi=s(" or in an "),ze=a("a"),qi=s("issue"),Ii=s(" before submitting a Pull Request. 
While we welcome bug fixes, it is unlikely we will merge a Pull Request that adds more functionality at the cost of readability."),ja=h(),S=a("p"),Li=s("This guide will show you how to run an example summarization training script in "),Te=a("a"),Si=s("PyTorch"),Ui=s(" and "),ke=a("a"),Ci=s("TensorFlow"),Di=s(". All examples are expected to work with both frameworks unless otherwise specified."),Fa=h(),H=a("h2"),K=a("a"),hr=a("span"),_(Ae.$$.fragment),Ni=h(),mr=a("span"),Oi=s("Setup"),Ra=h(),Q=a("p"),ji=s("To successfully run the latest version of the example scripts, you have to "),fr=a("strong"),Fi=s("install \u{1F917} Transformers from source"),Ri=s(" in a new virtual environment:"),Ha=h(),_(Pe.$$.fragment),Ma=h(),dt=a("p"),Hi=s("For older versions of the example scripts, click on the toggle below:"),Ga=h(),Z=a("details"),cr=a("summary"),Mi=s("Examples for older versions of \u{1F917} Transformers"),Gi=h(),f=a("ul"),_r=a("li"),vt=a("a"),Bi=s("v4.5.1"),Ji=h(),dr=a("li"),gt=a("a"),Wi=s("v4.4.2"),Xi=h(),vr=a("li"),yt=a("a"),Yi=s("v4.3.3"),Vi=h(),gr=a("li"),wt=a("a"),Ki=s("v4.2.2"),Qi=h(),yr=a("li"),Et=a("a"),Zi=s("v4.1.1"),es=h(),wr=a("li"),$t=a("a"),ts=s("v4.0.1"),rs=h(),Er=a("li"),bt=a("a"),as=s("v3.5.1"),os=h(),$r=a("li"),xt=a("a"),is=s("v3.4.0"),ss=h(),br=a("li"),zt=a("a"),ls=s("v3.3.1"),ns=h(),xr=a("li"),Tt=a("a"),ps=s("v3.2.0"),us=h(),zr=a("li"),kt=a("a"),hs=s("v3.1.0"),ms=h(),Tr=a("li"),At=a("a"),fs=s("v3.0.2"),cs=h(),kr=a("li"),Pt=a("a"),_s=s("v2.11.0"),ds=h(),Ar=a("li"),qt=a("a"),vs=s("v2.10.0"),gs=h(),Pr=a("li"),It=a("a"),ys=s("v2.9.1"),ws=h(),qr=a("li"),Lt=a("a"),Es=s("v2.8.0"),$s=h(),Ir=a("li"),St=a("a"),bs=s("v2.7.0"),xs=h(),Lr=a("li"),Ut=a("a"),zs=s("v2.6.0"),Ts=h(),Sr=a("li"),Ct=a("a"),ks=s("v2.5.1"),As=h(),Ur=a("li"),Dt=a("a"),Ps=s("v2.4.0"),qs=h(),Cr=a("li"),Nt=a("a"),Is=s("v2.3.0"),Ls=h(),Dr=a("li"),Ot=a("a"),Ss=s("v2.2.0"),Us=h(),Nr=a("li"),jt=a("a"),Cs=s("v2.1.1"),Ds=h(),Or=a("li"),Ft=a("a"),Ns=s("v2.0.0"),Os=h(),jr=a("li"),Rt=a("a"),js=s("v1.2.0"),Fs=h(),Fr=a("li"),Ht=a("a"),Rs=s("v1.1.0"),Hs=h(),Rr=a("li"),Mt=a("a"),Ms=s("v1.0.0"),Ba=h(),Gt=a("p"),Gs=s("Then switch your current clone of \u{1F917} Transformers to a specific version, like v3.5.1 for example:"),Ja=h(),_(qe.$$.fragment),Wa=h(),Bt=a("p"),Bs=s("After you\u2019ve setup the correct library version, navigate to the example folder of your choice and install the example specific requirements:"),Xa=h(),_(Ie.$$.fragment),Ya=h(),M=a("h2"),ee=a("a"),Hr=a("span"),_(Le.$$.fragment),Js=h(),Mr=a("span"),Ws=s("Run a script"),Va=h(),E=a("p"),Xs=s("The example script downloads and preprocesses a dataset from the \u{1F917} "),Se=a("a"),Ys=s("Datasets"),Vs=s(" library. Then the script fine-tunes a dataset with the "),Ue=a("a"),Ks=s("Trainer"),Qs=s(" on an architecture that supports summarization. The following example shows how to fine-tune "),Ce=a("a"),Zs=s("T5-small"),el=s(" on the "),De=a("a"),tl=s("CNN/DailyMail"),rl=s(" dataset. The T5 model requires an additional "),Gr=a("code"),al=s("source_prefix"),ol=s(" argument due to how it was trained. This prompt lets T5 know this is a summarization task."),Ka=h(),_(Ne.$$.fragment),Qa=h(),G=a("h2"),te=a("a"),Br=a("span"),_(Oe.$$.fragment),il=h(),Jr=a("span"),sl=s("Distributed training and mixed precision"),Za=h(),re=a("p"),ll=s("The "),je=a("a"),nl=s("Trainer"),pl=s(" supports distributed training and mixed precision, which means you can also use it in a script. 
To enable both of these features:"),eo=h(),ae=a("ul"),Fe=a("li"),ul=s("Add the "),Wr=a("code"),hl=s("fp16"),ml=s(" argument to enable mixed precision."),fl=h(),Re=a("li"),cl=s("Set the number of GPUs to use with the "),Xr=a("code"),_l=s("nproc_per_node"),dl=s(" argument."),to=h(),_(He.$$.fragment),ro=h(),oe=a("p"),vl=s("TensorFlow scripts utilize a "),Me=a("a"),Yr=a("code"),gl=s("MirroredStrategy"),yl=s(" for distributed training, and you don\u2019t need to add any additional arguments to the training script. The TensorFlow script will use multiple GPUs by default if they are available."),ao=h(),B=a("h2"),ie=a("a"),Vr=a("span"),_(Ge.$$.fragment),wl=h(),Kr=a("span"),El=s("Run a script on a TPU"),oo=h(),x=a("p"),$l=s("Tensor Processing Units (TPUs) are specifically designed to accelerate performance. PyTorch supports TPUs with the "),Be=a("a"),bl=s("XLA"),xl=s(" deep learning compiler (see "),Je=a("a"),zl=s("here"),Tl=s(" for more details). To use a TPU, launch the "),Qr=a("code"),kl=s("xla_spawn.py"),Al=s(" script and use the "),Zr=a("code"),Pl=s("num_cores"),ql=s(" argument to set the number of TPU cores you want to use."),io=h(),U=a("p"),Il=s("TensorFlow scripts utilize a "),We=a("a"),ea=a("code"),Ll=s("TPUStrategy"),Sl=s(" for training on TPUs. To use a TPU, pass the name of the TPU resource to the "),ta=a("code"),Ul=s("tpu"),Cl=s(" argument."),so=h(),_(Xe.$$.fragment),lo=h(),J=a("h2"),se=a("a"),ra=a("span"),_(Ye.$$.fragment),Dl=h(),aa=a("span"),Nl=s("Run a script with \u{1F917} Accelerate"),no=h(),le=a("p"),Ol=s("\u{1F917} "),Ve=a("a"),jl=s("Accelerate"),Fl=s(" is a PyTorch-only library that offers a unified method for training a model on several types of setups (CPU-only, multiple GPUs, TPUs) while maintaining complete visibility into the PyTorch training loop. Make sure you have \u{1F917} Accelerate installed if you don\u2019t already have it:"),po=h(),_(Ke.$$.fragment),uo=h(),T=a("p"),Rl=s("Instead of the "),oa=a("code"),Hl=s("run_summarization.py"),Ml=s(" script, you need to use the "),ia=a("code"),Gl=s("run_summarization_no_trainer.py"),Bl=s(" script. \u{1F917} Accelerate supported scripts will have a "),sa=a("code"),Jl=s("task_no_trainer.py"),Wl=s(" file in the folder. Begin by running the following command to create and save a configuration file:"),ho=h(),_(Qe.$$.fragment),mo=h(),Jt=a("p"),Xl=s("Test your setup to make sure it is configured correctly:"),fo=h(),_(Ze.$$.fragment),co=h(),Wt=a("p"),Yl=s("Now you are ready to launch the training:"),_o=h(),_(et.$$.fragment),vo=h(),W=a("h2"),ne=a("a"),la=a("span"),_(tt.$$.fragment),Vl=h(),na=a("span"),Kl=s("Use a custom dataset"),go=h(),Xt=a("p"),Ql=s("The summarization script supports custom datasets as long as they are a CSV or JSON Line file. 
When you use your own dataset, you need to specify several additional arguments:"),yo=h(),C=a("ul"),pe=a("li"),pa=a("code"),Zl=s("train_file"),en=s(" and "),ua=a("code"),tn=s("validation_file"),rn=s(" specify the path to your training and validation files."),an=h(),Yt=a("li"),ha=a("code"),on=s("text_column"),sn=s(" is the input text to summarize."),ln=h(),Vt=a("li"),ma=a("code"),nn=s("summary_column"),pn=s(" is the target text to output."),wo=h(),Kt=a("p"),un=s("A summarization script using a custom dataset would look like this:"),Eo=h(),_(rt.$$.fragment),$o=h(),X=a("h2"),ue=a("a"),fa=a("span"),_(at.$$.fragment),hn=h(),ca=a("span"),mn=s("Test a script"),bo=h(),Qt=a("p"),fn=s("It is often a good idea to run your script on a smaller number of dataset examples to ensure everything works as expected before committing to an entire dataset which may take hours to complete. Use the following arguments to truncate the dataset to a maximum number of samples:"),xo=h(),D=a("ul"),_a=a("li"),da=a("code"),cn=s("max_train_samples"),_n=h(),va=a("li"),ga=a("code"),dn=s("max_eval_samples"),vn=h(),ya=a("li"),wa=a("code"),gn=s("max_predict_samples"),zo=h(),_(ot.$$.fragment),To=h(),N=a("p"),yn=s("Not all example scripts support the "),Ea=a("code"),wn=s("max_predict_samples"),En=s(" argument. If you aren\u2019t sure whether your script supports this argument, add the "),$a=a("code"),$n=s("-h"),bn=s(" argument to check:"),ko=h(),_(it.$$.fragment),Ao=h(),Y=a("h2"),he=a("a"),ba=a("span"),_(st.$$.fragment),xn=h(),xa=a("span"),zn=s("Resume training from checkpoint"),Po=h(),Zt=a("p"),Tn=s("Another helpful option to enable is resuming training from a previous checkpoint. This will ensure you can pick up where you left off without starting over if your training gets interrupted. There are two methods to resume training from a checkpoint."),qo=h(),k=a("p"),kn=s("The first method uses the "),za=a("code"),An=s("output_dir previous_output_dir"),Pn=s(" argument to resume training from the latest checkpoint stored in "),Ta=a("code"),qn=s("output_dir"),In=s(". In this case, you should remove "),ka=a("code"),Ln=s("overwrite_output_dir"),Sn=s(":"),Io=h(),_(lt.$$.fragment),Lo=h(),me=a("p"),Un=s("The second method uses the "),Aa=a("code"),Cn=s("resume_from_checkpoint path_to_specific_checkpoint"),Dn=s(" argument to resume training from a specific checkpoint folder."),So=h(),_(nt.$$.fragment),Uo=h(),V=a("h2"),fe=a("a"),Pa=a("span"),_(pt.$$.fragment),Nn=h(),qa=a("span"),On=s("Share your model"),Co=h(),ce=a("p"),jn=s("All scripts can upload your final model to the "),ut=a("a"),Fn=s("Model Hub"),Rn=s(". Make sure you are logged into Hugging Face before you begin:"),Do=h(),_(ht.$$.fragment),No=h(),O=a("p"),Hn=s("Then add the "),Ia=a("code"),Mn=s("push_to_hub"),Gn=s(" argument to the script. This argument will create a repository with your Hugging Face username and the folder name specified in "),La=a("code"),Bn=s("output_dir"),Jn=s("."),Oo=h(),_e=a("p"),Wn=s("To give your repository a specific name, use the "),Sa=a("code"),Xn=s("push_to_hub_model_id"),Yn=s(" argument to add it. 
The repository will be automatically listed under your namespace."),jo=h(),er=a("p"),Vn=s("The following example shows how to upload a model with a specific repository name:"),Fo=h(),_(mt.$$.fragment),this.h()},l(e){const n=Mh('[data-svelte="svelte-1phssyn"]',document.head);q=o(n,"META",{name:!0,content:!0}),n.forEach(r),ft=m(e),z=o(e,"H1",{class:!0});var Ho=i(z);P=o(Ho,"A",{id:!0,class:!0,href:!0});var Zn=i(P);pr=o(Zn,"SPAN",{});var ep=i(pr);d(ge.$$.fragment,ep),ep.forEach(r),Zn.forEach(r),ui=m(Ho),ur=o(Ho,"SPAN",{});var tp=i(ur);hi=l(tp,"Train with a script"),tp.forEach(r),Ho.forEach(r),Ca=m(e),b=o(e,"P",{});var j=i(b);mi=l(j,"Along with the \u{1F917} Transformers "),ct=o(j,"A",{href:!0});var rp=i(ct);fi=l(rp,"notebooks"),rp.forEach(r),ci=l(j,", there are also example scripts demonstrating how to train a model for a task with "),ye=o(j,"A",{href:!0,rel:!0});var ap=i(ye);_i=l(ap,"PyTorch"),ap.forEach(r),di=l(j,", "),we=o(j,"A",{href:!0,rel:!0});var op=i(we);vi=l(op,"TensorFlow"),op.forEach(r),gi=l(j,", or "),Ee=o(j,"A",{href:!0,rel:!0});var ip=i(Ee);yi=l(ip,"JAX/Flax"),ip.forEach(r),wi=l(j,"."),j.forEach(r),Da=m(e),I=o(e,"P",{});var tr=i(I);Ei=l(tr,"You will also find scripts we\u2019ve used in our "),$e=o(tr,"A",{href:!0,rel:!0});var sp=i($e);$i=l(sp,"research projects"),sp.forEach(r),bi=l(tr," and "),be=o(tr,"A",{href:!0,rel:!0});var lp=i(be);xi=l(lp,"legacy examples"),lp.forEach(r),zi=l(tr," which are mostly community contributed. These scripts are not actively maintained and require a specific version of \u{1F917} Transformers that will most likely be incompatible with the latest version of the library."),tr.forEach(r),Na=m(e),_t=o(e,"P",{});var np=i(_t);Ti=l(np,"The example scripts are not expected to work out-of-the-box on every problem, and you may need to adapt the script to the problem you\u2019re trying to solve. To help you with this, most of the scripts fully expose how data is preprocessed, allowing you to edit it as necessary for your use case."),np.forEach(r),Oa=m(e),L=o(e,"P",{});var rr=i(L);ki=l(rr,"For any feature you\u2019d like to implement in an example script, please discuss it on the "),xe=o(rr,"A",{href:!0,rel:!0});var pp=i(xe);Ai=l(pp,"forum"),pp.forEach(r),Pi=l(rr," or in an "),ze=o(rr,"A",{href:!0,rel:!0});var up=i(ze);qi=l(up,"issue"),up.forEach(r),Ii=l(rr," before submitting a Pull Request. While we welcome bug fixes, it is unlikely we will merge a Pull Request that adds more functionality at the cost of readability."),rr.forEach(r),ja=m(e),S=o(e,"P",{});var ar=i(S);Li=l(ar,"This guide will show you how to run an example summarization training script in "),Te=o(ar,"A",{href:!0,rel:!0});var hp=i(Te);Si=l(hp,"PyTorch"),hp.forEach(r),Ui=l(ar," and "),ke=o(ar,"A",{href:!0,rel:!0});var mp=i(ke);Ci=l(mp,"TensorFlow"),mp.forEach(r),Di=l(ar,". 
All examples are expected to work with both frameworks unless otherwise specified."),ar.forEach(r),Fa=m(e),H=o(e,"H2",{class:!0});var Mo=i(H);K=o(Mo,"A",{id:!0,class:!0,href:!0});var fp=i(K);hr=o(fp,"SPAN",{});var cp=i(hr);d(Ae.$$.fragment,cp),cp.forEach(r),fp.forEach(r),Ni=m(Mo),mr=o(Mo,"SPAN",{});var _p=i(mr);Oi=l(_p,"Setup"),_p.forEach(r),Mo.forEach(r),Ra=m(e),Q=o(e,"P",{});var Go=i(Q);ji=l(Go,"To successfully run the latest version of the example scripts, you have to "),fr=o(Go,"STRONG",{});var dp=i(fr);Fi=l(dp,"install \u{1F917} Transformers from source"),dp.forEach(r),Ri=l(Go," in a new virtual environment:"),Go.forEach(r),Ha=m(e),d(Pe.$$.fragment,e),Ma=m(e),dt=o(e,"P",{});var vp=i(dt);Hi=l(vp,"For older versions of the example scripts, click on the toggle below:"),vp.forEach(r),Ga=m(e),Z=o(e,"DETAILS",{});var Bo=i(Z);cr=o(Bo,"SUMMARY",{});var gp=i(cr);Mi=l(gp,"Examples for older versions of \u{1F917} Transformers"),gp.forEach(r),Gi=m(Bo),f=o(Bo,"UL",{});var c=i(f);_r=o(c,"LI",{});var yp=i(_r);vt=o(yp,"A",{href:!0});var wp=i(vt);Bi=l(wp,"v4.5.1"),wp.forEach(r),yp.forEach(r),Ji=m(c),dr=o(c,"LI",{});var Ep=i(dr);gt=o(Ep,"A",{href:!0});var $p=i(gt);Wi=l($p,"v4.4.2"),$p.forEach(r),Ep.forEach(r),Xi=m(c),vr=o(c,"LI",{});var bp=i(vr);yt=o(bp,"A",{href:!0});var xp=i(yt);Yi=l(xp,"v4.3.3"),xp.forEach(r),bp.forEach(r),Vi=m(c),gr=o(c,"LI",{});var zp=i(gr);wt=o(zp,"A",{href:!0});var Tp=i(wt);Ki=l(Tp,"v4.2.2"),Tp.forEach(r),zp.forEach(r),Qi=m(c),yr=o(c,"LI",{});var kp=i(yr);Et=o(kp,"A",{href:!0});var Ap=i(Et);Zi=l(Ap,"v4.1.1"),Ap.forEach(r),kp.forEach(r),es=m(c),wr=o(c,"LI",{});var Pp=i(wr);$t=o(Pp,"A",{href:!0});var qp=i($t);ts=l(qp,"v4.0.1"),qp.forEach(r),Pp.forEach(r),rs=m(c),Er=o(c,"LI",{});var Ip=i(Er);bt=o(Ip,"A",{href:!0});var Lp=i(bt);as=l(Lp,"v3.5.1"),Lp.forEach(r),Ip.forEach(r),os=m(c),$r=o(c,"LI",{});var Sp=i($r);xt=o(Sp,"A",{href:!0});var Up=i(xt);is=l(Up,"v3.4.0"),Up.forEach(r),Sp.forEach(r),ss=m(c),br=o(c,"LI",{});var Cp=i(br);zt=o(Cp,"A",{href:!0});var Dp=i(zt);ls=l(Dp,"v3.3.1"),Dp.forEach(r),Cp.forEach(r),ns=m(c),xr=o(c,"LI",{});var Np=i(xr);Tt=o(Np,"A",{href:!0});var Op=i(Tt);ps=l(Op,"v3.2.0"),Op.forEach(r),Np.forEach(r),us=m(c),zr=o(c,"LI",{});var jp=i(zr);kt=o(jp,"A",{href:!0});var Fp=i(kt);hs=l(Fp,"v3.1.0"),Fp.forEach(r),jp.forEach(r),ms=m(c),Tr=o(c,"LI",{});var Rp=i(Tr);At=o(Rp,"A",{href:!0});var Hp=i(At);fs=l(Hp,"v3.0.2"),Hp.forEach(r),Rp.forEach(r),cs=m(c),kr=o(c,"LI",{});var Mp=i(kr);Pt=o(Mp,"A",{href:!0});var Gp=i(Pt);_s=l(Gp,"v2.11.0"),Gp.forEach(r),Mp.forEach(r),ds=m(c),Ar=o(c,"LI",{});var Bp=i(Ar);qt=o(Bp,"A",{href:!0});var Jp=i(qt);vs=l(Jp,"v2.10.0"),Jp.forEach(r),Bp.forEach(r),gs=m(c),Pr=o(c,"LI",{});var Wp=i(Pr);It=o(Wp,"A",{href:!0});var Xp=i(It);ys=l(Xp,"v2.9.1"),Xp.forEach(r),Wp.forEach(r),ws=m(c),qr=o(c,"LI",{});var Yp=i(qr);Lt=o(Yp,"A",{href:!0});var Vp=i(Lt);Es=l(Vp,"v2.8.0"),Vp.forEach(r),Yp.forEach(r),$s=m(c),Ir=o(c,"LI",{});var Kp=i(Ir);St=o(Kp,"A",{href:!0});var Qp=i(St);bs=l(Qp,"v2.7.0"),Qp.forEach(r),Kp.forEach(r),xs=m(c),Lr=o(c,"LI",{});var Zp=i(Lr);Ut=o(Zp,"A",{href:!0});var eu=i(Ut);zs=l(eu,"v2.6.0"),eu.forEach(r),Zp.forEach(r),Ts=m(c),Sr=o(c,"LI",{});var tu=i(Sr);Ct=o(tu,"A",{href:!0});var ru=i(Ct);ks=l(ru,"v2.5.1"),ru.forEach(r),tu.forEach(r),As=m(c),Ur=o(c,"LI",{});var au=i(Ur);Dt=o(au,"A",{href:!0});var ou=i(Dt);Ps=l(ou,"v2.4.0"),ou.forEach(r),au.forEach(r),qs=m(c),Cr=o(c,"LI",{});var iu=i(Cr);Nt=o(iu,"A",{href:!0});var su=i(Nt);Is=l(su,"v2.3.0"),su.forEach(r),iu.forEach(r),Ls=m(c),Dr=o(c,"LI",{});var lu=i(Dr);Ot=o(lu,"A",{href:!0});var 
nu=i(Ot);Ss=l(nu,"v2.2.0"),nu.forEach(r),lu.forEach(r),Us=m(c),Nr=o(c,"LI",{});var pu=i(Nr);jt=o(pu,"A",{href:!0});var uu=i(jt);Cs=l(uu,"v2.1.1"),uu.forEach(r),pu.forEach(r),Ds=m(c),Or=o(c,"LI",{});var hu=i(Or);Ft=o(hu,"A",{href:!0});var mu=i(Ft);Ns=l(mu,"v2.0.0"),mu.forEach(r),hu.forEach(r),Os=m(c),jr=o(c,"LI",{});var fu=i(jr);Rt=o(fu,"A",{href:!0});var cu=i(Rt);js=l(cu,"v1.2.0"),cu.forEach(r),fu.forEach(r),Fs=m(c),Fr=o(c,"LI",{});var _u=i(Fr);Ht=o(_u,"A",{href:!0});var du=i(Ht);Rs=l(du,"v1.1.0"),du.forEach(r),_u.forEach(r),Hs=m(c),Rr=o(c,"LI",{});var vu=i(Rr);Mt=o(vu,"A",{href:!0});var gu=i(Mt);Ms=l(gu,"v1.0.0"),gu.forEach(r),vu.forEach(r),c.forEach(r),Bo.forEach(r),Ba=m(e),Gt=o(e,"P",{});var yu=i(Gt);Gs=l(yu,"Then switch your current clone of \u{1F917} Transformers to a specific version, like v3.5.1 for example:"),yu.forEach(r),Ja=m(e),d(qe.$$.fragment,e),Wa=m(e),Bt=o(e,"P",{});var wu=i(Bt);Bs=l(wu,"After you\u2019ve setup the correct library version, navigate to the example folder of your choice and install the example specific requirements:"),wu.forEach(r),Xa=m(e),d(Ie.$$.fragment,e),Ya=m(e),M=o(e,"H2",{class:!0});var Jo=i(M);ee=o(Jo,"A",{id:!0,class:!0,href:!0});var Eu=i(ee);Hr=o(Eu,"SPAN",{});var $u=i(Hr);d(Le.$$.fragment,$u),$u.forEach(r),Eu.forEach(r),Js=m(Jo),Mr=o(Jo,"SPAN",{});var bu=i(Mr);Ws=l(bu,"Run a script"),bu.forEach(r),Jo.forEach(r),Va=m(e),E=o(e,"P",{});var A=i(E);Xs=l(A,"The example script downloads and preprocesses a dataset from the \u{1F917} "),Se=o(A,"A",{href:!0,rel:!0});var xu=i(Se);Ys=l(xu,"Datasets"),xu.forEach(r),Vs=l(A," library. Then the script fine-tunes a dataset with the "),Ue=o(A,"A",{href:!0,rel:!0});var zu=i(Ue);Ks=l(zu,"Trainer"),zu.forEach(r),Qs=l(A," on an architecture that supports summarization. The following example shows how to fine-tune "),Ce=o(A,"A",{href:!0,rel:!0});var Tu=i(Ce);Zs=l(Tu,"T5-small"),Tu.forEach(r),el=l(A," on the "),De=o(A,"A",{href:!0,rel:!0});var ku=i(De);tl=l(ku,"CNN/DailyMail"),ku.forEach(r),rl=l(A," dataset. The T5 model requires an additional "),Gr=o(A,"CODE",{});var Au=i(Gr);al=l(Au,"source_prefix"),Au.forEach(r),ol=l(A," argument due to how it was trained. This prompt lets T5 know this is a summarization task."),A.forEach(r),Ka=m(e),d(Ne.$$.fragment,e),Qa=m(e),G=o(e,"H2",{class:!0});var Wo=i(G);te=o(Wo,"A",{id:!0,class:!0,href:!0});var Pu=i(te);Br=o(Pu,"SPAN",{});var qu=i(Br);d(Oe.$$.fragment,qu),qu.forEach(r),Pu.forEach(r),il=m(Wo),Jr=o(Wo,"SPAN",{});var Iu=i(Jr);sl=l(Iu,"Distributed training and mixed precision"),Iu.forEach(r),Wo.forEach(r),Za=m(e),re=o(e,"P",{});var Xo=i(re);ll=l(Xo,"The "),je=o(Xo,"A",{href:!0,rel:!0});var Lu=i(je);nl=l(Lu,"Trainer"),Lu.forEach(r),pl=l(Xo," supports distributed training and mixed precision, which means you can also use it in a script. 
To enable both of these features:"),Xo.forEach(r),eo=m(e),ae=o(e,"UL",{});var Yo=i(ae);Fe=o(Yo,"LI",{});var Vo=i(Fe);ul=l(Vo,"Add the "),Wr=o(Vo,"CODE",{});var Su=i(Wr);hl=l(Su,"fp16"),Su.forEach(r),ml=l(Vo," argument to enable mixed precision."),Vo.forEach(r),fl=m(Yo),Re=o(Yo,"LI",{});var Ko=i(Re);cl=l(Ko,"Set the number of GPUs to use with the "),Xr=o(Ko,"CODE",{});var Uu=i(Xr);_l=l(Uu,"nproc_per_node"),Uu.forEach(r),dl=l(Ko," argument."),Ko.forEach(r),Yo.forEach(r),to=m(e),d(He.$$.fragment,e),ro=m(e),oe=o(e,"P",{});var Qo=i(oe);vl=l(Qo,"TensorFlow scripts utilize a "),Me=o(Qo,"A",{href:!0,rel:!0});var Cu=i(Me);Yr=o(Cu,"CODE",{});var Du=i(Yr);gl=l(Du,"MirroredStrategy"),Du.forEach(r),Cu.forEach(r),yl=l(Qo," for distributed training, and you don\u2019t need to add any additional arguments to the training script. The TensorFlow script will use multiple GPUs by default if they are available."),Qo.forEach(r),ao=m(e),B=o(e,"H2",{class:!0});var Zo=i(B);ie=o(Zo,"A",{id:!0,class:!0,href:!0});var Nu=i(ie);Vr=o(Nu,"SPAN",{});var Ou=i(Vr);d(Ge.$$.fragment,Ou),Ou.forEach(r),Nu.forEach(r),wl=m(Zo),Kr=o(Zo,"SPAN",{});var ju=i(Kr);El=l(ju,"Run a script on a TPU"),ju.forEach(r),Zo.forEach(r),oo=m(e),x=o(e,"P",{});var F=i(x);$l=l(F,"Tensor Processing Units (TPUs) are specifically designed to accelerate performance. PyTorch supports TPUs with the "),Be=o(F,"A",{href:!0,rel:!0});var Fu=i(Be);bl=l(Fu,"XLA"),Fu.forEach(r),xl=l(F," deep learning compiler (see "),Je=o(F,"A",{href:!0,rel:!0});var Ru=i(Je);zl=l(Ru,"here"),Ru.forEach(r),Tl=l(F," for more details). To use a TPU, launch the "),Qr=o(F,"CODE",{});var Hu=i(Qr);kl=l(Hu,"xla_spawn.py"),Hu.forEach(r),Al=l(F," script and use the "),Zr=o(F,"CODE",{});var Mu=i(Zr);Pl=l(Mu,"num_cores"),Mu.forEach(r),ql=l(F," argument to set the number of TPU cores you want to use."),F.forEach(r),io=m(e),U=o(e,"P",{});var or=i(U);Il=l(or,"TensorFlow scripts utilize a "),We=o(or,"A",{href:!0,rel:!0});var Gu=i(We);ea=o(Gu,"CODE",{});var Bu=i(ea);Ll=l(Bu,"TPUStrategy"),Bu.forEach(r),Gu.forEach(r),Sl=l(or," for training on TPUs. To use a TPU, pass the name of the TPU resource to the "),ta=o(or,"CODE",{});var Ju=i(ta);Ul=l(Ju,"tpu"),Ju.forEach(r),Cl=l(or," argument."),or.forEach(r),so=m(e),d(Xe.$$.fragment,e),lo=m(e),J=o(e,"H2",{class:!0});var ei=i(J);se=o(ei,"A",{id:!0,class:!0,href:!0});var Wu=i(se);ra=o(Wu,"SPAN",{});var Xu=i(ra);d(Ye.$$.fragment,Xu),Xu.forEach(r),Wu.forEach(r),Dl=m(ei),aa=o(ei,"SPAN",{});var Yu=i(aa);Nl=l(Yu,"Run a script with \u{1F917} Accelerate"),Yu.forEach(r),ei.forEach(r),no=m(e),le=o(e,"P",{});var ti=i(le);Ol=l(ti,"\u{1F917} "),Ve=o(ti,"A",{href:!0,rel:!0});var Vu=i(Ve);jl=l(Vu,"Accelerate"),Vu.forEach(r),Fl=l(ti," is a PyTorch-only library that offers a unified method for training a model on several types of setups (CPU-only, multiple GPUs, TPUs) while maintaining complete visibility into the PyTorch training loop. Make sure you have \u{1F917} Accelerate installed if you don\u2019t already have it:"),ti.forEach(r),po=m(e),d(Ke.$$.fragment,e),uo=m(e),T=o(e,"P",{});var de=i(T);Rl=l(de,"Instead of the "),oa=o(de,"CODE",{});var Ku=i(oa);Hl=l(Ku,"run_summarization.py"),Ku.forEach(r),Ml=l(de," script, you need to use the "),ia=o(de,"CODE",{});var Qu=i(ia);Gl=l(Qu,"run_summarization_no_trainer.py"),Qu.forEach(r),Bl=l(de," script. \u{1F917} Accelerate supported scripts will have a "),sa=o(de,"CODE",{});var Zu=i(sa);Jl=l(Zu,"task_no_trainer.py"),Zu.forEach(r),Wl=l(de," file in the folder. 
Begin by running the following command to create and save a configuration file:"),de.forEach(r),ho=m(e),d(Qe.$$.fragment,e),mo=m(e),Jt=o(e,"P",{});var eh=i(Jt);Xl=l(eh,"Test your setup to make sure it is configured correctly:"),eh.forEach(r),fo=m(e),d(Ze.$$.fragment,e),co=m(e),Wt=o(e,"P",{});var th=i(Wt);Yl=l(th,"Now you are ready to launch the training:"),th.forEach(r),_o=m(e),d(et.$$.fragment,e),vo=m(e),W=o(e,"H2",{class:!0});var ri=i(W);ne=o(ri,"A",{id:!0,class:!0,href:!0});var rh=i(ne);la=o(rh,"SPAN",{});var ah=i(la);d(tt.$$.fragment,ah),ah.forEach(r),rh.forEach(r),Vl=m(ri),na=o(ri,"SPAN",{});var oh=i(na);Kl=l(oh,"Use a custom dataset"),oh.forEach(r),ri.forEach(r),go=m(e),Xt=o(e,"P",{});var ih=i(Xt);Ql=l(ih,"The summarization script supports custom datasets as long as they are a CSV or JSON Line file. When you use your own dataset, you need to specify several additional arguments:"),ih.forEach(r),yo=m(e),C=o(e,"UL",{});var ir=i(C);pe=o(ir,"LI",{});var Ua=i(pe);pa=o(Ua,"CODE",{});var sh=i(pa);Zl=l(sh,"train_file"),sh.forEach(r),en=l(Ua," and "),ua=o(Ua,"CODE",{});var lh=i(ua);tn=l(lh,"validation_file"),lh.forEach(r),rn=l(Ua," specify the path to your training and validation files."),Ua.forEach(r),an=m(ir),Yt=o(ir,"LI",{});var Kn=i(Yt);ha=o(Kn,"CODE",{});var nh=i(ha);on=l(nh,"text_column"),nh.forEach(r),sn=l(Kn," is the input text to summarize."),Kn.forEach(r),ln=m(ir),Vt=o(ir,"LI",{});var Qn=i(Vt);ma=o(Qn,"CODE",{});var ph=i(ma);nn=l(ph,"summary_column"),ph.forEach(r),pn=l(Qn," is the target text to output."),Qn.forEach(r),ir.forEach(r),wo=m(e),Kt=o(e,"P",{});var uh=i(Kt);un=l(uh,"A summarization script using a custom dataset would look like this:"),uh.forEach(r),Eo=m(e),d(rt.$$.fragment,e),$o=m(e),X=o(e,"H2",{class:!0});var ai=i(X);ue=o(ai,"A",{id:!0,class:!0,href:!0});var hh=i(ue);fa=o(hh,"SPAN",{});var mh=i(fa);d(at.$$.fragment,mh),mh.forEach(r),hh.forEach(r),hn=m(ai),ca=o(ai,"SPAN",{});var fh=i(ca);mn=l(fh,"Test a script"),fh.forEach(r),ai.forEach(r),bo=m(e),Qt=o(e,"P",{});var ch=i(Qt);fn=l(ch,"It is often a good idea to run your script on a smaller number of dataset examples to ensure everything works as expected before committing to an entire dataset which may take hours to complete. Use the following arguments to truncate the dataset to a maximum number of samples:"),ch.forEach(r),xo=m(e),D=o(e,"UL",{});var sr=i(D);_a=o(sr,"LI",{});var _h=i(_a);da=o(_h,"CODE",{});var dh=i(da);cn=l(dh,"max_train_samples"),dh.forEach(r),_h.forEach(r),_n=m(sr),va=o(sr,"LI",{});var vh=i(va);ga=o(vh,"CODE",{});var gh=i(ga);dn=l(gh,"max_eval_samples"),gh.forEach(r),vh.forEach(r),vn=m(sr),ya=o(sr,"LI",{});var yh=i(ya);wa=o(yh,"CODE",{});var wh=i(wa);gn=l(wh,"max_predict_samples"),wh.forEach(r),yh.forEach(r),sr.forEach(r),zo=m(e),d(ot.$$.fragment,e),To=m(e),N=o(e,"P",{});var lr=i(N);yn=l(lr,"Not all example scripts support the "),Ea=o(lr,"CODE",{});var Eh=i(Ea);wn=l(Eh,"max_predict_samples"),Eh.forEach(r),En=l(lr," argument. 
If you aren\u2019t sure whether your script supports this argument, add the "),$a=o(lr,"CODE",{});var $h=i($a);$n=l($h,"-h"),$h.forEach(r),bn=l(lr," argument to check:"),lr.forEach(r),ko=m(e),d(it.$$.fragment,e),Ao=m(e),Y=o(e,"H2",{class:!0});var oi=i(Y);he=o(oi,"A",{id:!0,class:!0,href:!0});var bh=i(he);ba=o(bh,"SPAN",{});var xh=i(ba);d(st.$$.fragment,xh),xh.forEach(r),bh.forEach(r),xn=m(oi),xa=o(oi,"SPAN",{});var zh=i(xa);zn=l(zh,"Resume training from checkpoint"),zh.forEach(r),oi.forEach(r),Po=m(e),Zt=o(e,"P",{});var Th=i(Zt);Tn=l(Th,"Another helpful option to enable is resuming training from a previous checkpoint. This will ensure you can pick up where you left off without starting over if your training gets interrupted. There are two methods to resume training from a checkpoint."),Th.forEach(r),qo=m(e),k=o(e,"P",{});var ve=i(k);kn=l(ve,"The first method uses the "),za=o(ve,"CODE",{});var kh=i(za);An=l(kh,"output_dir previous_output_dir"),kh.forEach(r),Pn=l(ve," argument to resume training from the latest checkpoint stored in "),Ta=o(ve,"CODE",{});var Ah=i(Ta);qn=l(Ah,"output_dir"),Ah.forEach(r),In=l(ve,". In this case, you should remove "),ka=o(ve,"CODE",{});var Ph=i(ka);Ln=l(Ph,"overwrite_output_dir"),Ph.forEach(r),Sn=l(ve,":"),ve.forEach(r),Io=m(e),d(lt.$$.fragment,e),Lo=m(e),me=o(e,"P",{});var ii=i(me);Un=l(ii,"The second method uses the "),Aa=o(ii,"CODE",{});var qh=i(Aa);Cn=l(qh,"resume_from_checkpoint path_to_specific_checkpoint"),qh.forEach(r),Dn=l(ii," argument to resume training from a specific checkpoint folder."),ii.forEach(r),So=m(e),d(nt.$$.fragment,e),Uo=m(e),V=o(e,"H2",{class:!0});var si=i(V);fe=o(si,"A",{id:!0,class:!0,href:!0});var Ih=i(fe);Pa=o(Ih,"SPAN",{});var Lh=i(Pa);d(pt.$$.fragment,Lh),Lh.forEach(r),Ih.forEach(r),Nn=m(si),qa=o(si,"SPAN",{});var Sh=i(qa);On=l(Sh,"Share your model"),Sh.forEach(r),si.forEach(r),Co=m(e),ce=o(e,"P",{});var li=i(ce);jn=l(li,"All scripts can upload your final model to the "),ut=o(li,"A",{href:!0,rel:!0});var Uh=i(ut);Fn=l(Uh,"Model Hub"),Uh.forEach(r),Rn=l(li,". Make sure you are logged into Hugging Face before you begin:"),li.forEach(r),Do=m(e),d(ht.$$.fragment,e),No=m(e),O=o(e,"P",{});var nr=i(O);Hn=l(nr,"Then add the "),Ia=o(nr,"CODE",{});var Ch=i(Ia);Mn=l(Ch,"push_to_hub"),Ch.forEach(r),Gn=l(nr," argument to the script. This argument will create a repository with your Hugging Face username and the folder name specified in "),La=o(nr,"CODE",{});var Dh=i(La);Bn=l(Dh,"output_dir"),Dh.forEach(r),Jn=l(nr,"."),nr.forEach(r),Oo=m(e),_e=o(e,"P",{});var ni=i(_e);Wn=l(ni,"To give your repository a specific name, use the "),Sa=o(ni,"CODE",{});var Nh=i(Sa);Xn=l(Nh,"push_to_hub_model_id"),Nh.forEach(r),Yn=l(ni," argument to add it. 
The repository will be automatically listed under your namespace."),ni.forEach(r),jo=m(e),er=o(e,"P",{});var Oh=i(er);Vn=l(Oh,"The following example shows how to upload a model with a specific repository name:"),Oh.forEach(r),Fo=m(e),d(mt.$$.fragment,e),this.h()},h(){p(q,"name","hf:doc:metadata"),p(q,"content",JSON.stringify(Jh)),p(P,"id","train-with-a-script"),p(P,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(P,"href","#train-with-a-script"),p(z,"class","relative group"),p(ct,"href","./notebooks/README"),p(ye,"href","https://github.com/huggingface/transformers/tree/master/examples/pytorch"),p(ye,"rel","nofollow"),p(we,"href","https://github.com/huggingface/transformers/tree/master/examples/tensorflow"),p(we,"rel","nofollow"),p(Ee,"href","https://github.com/huggingface/transformers/tree/master/examples/flax"),p(Ee,"rel","nofollow"),p($e,"href","https://github.com/huggingface/transformers/tree/master/examples/research_projects"),p($e,"rel","nofollow"),p(be,"href","https://github.com/huggingface/transformers/tree/master/examples/legacy"),p(be,"rel","nofollow"),p(xe,"href","https://discuss.huggingface.co/"),p(xe,"rel","nofollow"),p(ze,"href","https://github.com/huggingface/transformers/issues"),p(ze,"rel","nofollow"),p(Te,"href","https://github.com/huggingface/transformers/tree/master/examples/pytorch/summarization"),p(Te,"rel","nofollow"),p(ke,"href","https://github.com/huggingface/transformers/tree/master/examples/tensorflow/summarization"),p(ke,"rel","nofollow"),p(K,"id","setup"),p(K,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(K,"href","#setup"),p(H,"class","relative 
group"),p(vt,"href","https://github.com/huggingface/transformers/tree/v4.5.1/examples"),p(gt,"href","https://github.com/huggingface/transformers/tree/v4.4.2/examples"),p(yt,"href","https://github.com/huggingface/transformers/tree/v4.3.3/examples"),p(wt,"href","https://github.com/huggingface/transformers/tree/v4.2.2/examples"),p(Et,"href","https://github.com/huggingface/transformers/tree/v4.1.1/examples"),p($t,"href","https://github.com/huggingface/transformers/tree/v4.0.1/examples"),p(bt,"href","https://github.com/huggingface/transformers/tree/v3.5.1/examples"),p(xt,"href","https://github.com/huggingface/transformers/tree/v3.4.0/examples"),p(zt,"href","https://github.com/huggingface/transformers/tree/v3.3.1/examples"),p(Tt,"href","https://github.com/huggingface/transformers/tree/v3.2.0/examples"),p(kt,"href","https://github.com/huggingface/transformers/tree/v3.1.0/examples"),p(At,"href","https://github.com/huggingface/transformers/tree/v3.0.2/examples"),p(Pt,"href","https://github.com/huggingface/transformers/tree/v2.11.0/examples"),p(qt,"href","https://github.com/huggingface/transformers/tree/v2.10.0/examples"),p(It,"href","https://github.com/huggingface/transformers/tree/v2.9.1/examples"),p(Lt,"href","https://github.com/huggingface/transformers/tree/v2.8.0/examples"),p(St,"href","https://github.com/huggingface/transformers/tree/v2.7.0/examples"),p(Ut,"href","https://github.com/huggingface/transformers/tree/v2.6.0/examples"),p(Ct,"href","https://github.com/huggingface/transformers/tree/v2.5.1/examples"),p(Dt,"href","https://github.com/huggingface/transformers/tree/v2.4.0/examples"),p(Nt,"href","https://github.com/huggingface/transformers/tree/v2.3.0/examples"),p(Ot,"href","https://github.com/huggingface/transformers/tree/v2.2.0/examples"),p(jt,"href","https://github.com/huggingface/transformers/tree/v2.1.0/examples"),p(Ft,"href","https://github.com/huggingface/transformers/tree/v2.0.0/examples"),p(Rt,"href","https://github.com/huggingface/transformers/tree/v1.2.0/examples"),p(Ht,"href","https://github.com/huggingface/transformers/tree/v1.1.0/examples"),p(Mt,"href","https://github.com/huggingface/transformers/tree/v1.0.0/examples"),p(ee,"id","run-a-script"),p(ee,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(ee,"href","#run-a-script"),p(M,"class","relative group"),p(Se,"href","https://huggingface.co/docs/datasets/"),p(Se,"rel","nofollow"),p(Ue,"href","https://huggingface.co/docs/transformers/main_classes/trainer"),p(Ue,"rel","nofollow"),p(Ce,"href","https://huggingface.co/t5-small"),p(Ce,"rel","nofollow"),p(De,"href","https://huggingface.co/datasets/cnn_dailymail"),p(De,"rel","nofollow"),p(te,"id","distributed-training-and-mixed-precision"),p(te,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(te,"href","#distributed-training-and-mixed-precision"),p(G,"class","relative group"),p(je,"href","https://huggingface.co/docs/transformers/main_classes/trainer"),p(je,"rel","nofollow"),p(Me,"href","https://www.tensorflow.org/guide/distributed_training#mirroredstrategy"),p(Me,"rel","nofollow"),p(ie,"id","run-a-script-on-a-tpu"),p(ie,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full"),p(ie,"href","#run-a-script-on-a-tpu"),p(B,"class","relative group"),p(Be,"href","https://www.tensorflow.org/xla"),p(Be,"rel","nofollow"),p(Je,"href","https://github.com/pytorch/xla/blob/master/README.md"),p(Je,"rel","nofollow"),p(We,"href","https://www.tensorflow.org/guide/distributed_training#tpustrategy"),p(We,"rel","nofollow"),p(se,"id","run-a-script-with-accelerate"),p(se,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(se,"href","#run-a-script-with-accelerate"),p(J,"class","relative group"),p(Ve,"href","https://huggingface.co/docs/accelerate/index.html"),p(Ve,"rel","nofollow"),p(ne,"id","use-a-custom-dataset"),p(ne,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(ne,"href","#use-a-custom-dataset"),p(W,"class","relative group"),p(ue,"id","test-a-script"),p(ue,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(ue,"href","#test-a-script"),p(X,"class","relative group"),p(he,"id","resume-training-from-checkpoint"),p(he,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(he,"href","#resume-training-from-checkpoint"),p(Y,"class","relative group"),p(fe,"id","share-your-model"),p(fe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(fe,"href","#share-your-model"),p(V,"class","relative 
group"),p(ut,"href","https://huggingface.co/models"),p(ut,"rel","nofollow")},m(e,n){t(document.head,q),u(e,ft,n),u(e,z,n),t(z,P),t(P,pr),v(ge,pr,null),t(z,ui),t(z,ur),t(ur,hi),u(e,Ca,n),u(e,b,n),t(b,mi),t(b,ct),t(ct,fi),t(b,ci),t(b,ye),t(ye,_i),t(b,di),t(b,we),t(we,vi),t(b,gi),t(b,Ee),t(Ee,yi),t(b,wi),u(e,Da,n),u(e,I,n),t(I,Ei),t(I,$e),t($e,$i),t(I,bi),t(I,be),t(be,xi),t(I,zi),u(e,Na,n),u(e,_t,n),t(_t,Ti),u(e,Oa,n),u(e,L,n),t(L,ki),t(L,xe),t(xe,Ai),t(L,Pi),t(L,ze),t(ze,qi),t(L,Ii),u(e,ja,n),u(e,S,n),t(S,Li),t(S,Te),t(Te,Si),t(S,Ui),t(S,ke),t(ke,Ci),t(S,Di),u(e,Fa,n),u(e,H,n),t(H,K),t(K,hr),v(Ae,hr,null),t(H,Ni),t(H,mr),t(mr,Oi),u(e,Ra,n),u(e,Q,n),t(Q,ji),t(Q,fr),t(fr,Fi),t(Q,Ri),u(e,Ha,n),v(Pe,e,n),u(e,Ma,n),u(e,dt,n),t(dt,Hi),u(e,Ga,n),u(e,Z,n),t(Z,cr),t(cr,Mi),t(Z,Gi),t(Z,f),t(f,_r),t(_r,vt),t(vt,Bi),t(f,Ji),t(f,dr),t(dr,gt),t(gt,Wi),t(f,Xi),t(f,vr),t(vr,yt),t(yt,Yi),t(f,Vi),t(f,gr),t(gr,wt),t(wt,Ki),t(f,Qi),t(f,yr),t(yr,Et),t(Et,Zi),t(f,es),t(f,wr),t(wr,$t),t($t,ts),t(f,rs),t(f,Er),t(Er,bt),t(bt,as),t(f,os),t(f,$r),t($r,xt),t(xt,is),t(f,ss),t(f,br),t(br,zt),t(zt,ls),t(f,ns),t(f,xr),t(xr,Tt),t(Tt,ps),t(f,us),t(f,zr),t(zr,kt),t(kt,hs),t(f,ms),t(f,Tr),t(Tr,At),t(At,fs),t(f,cs),t(f,kr),t(kr,Pt),t(Pt,_s),t(f,ds),t(f,Ar),t(Ar,qt),t(qt,vs),t(f,gs),t(f,Pr),t(Pr,It),t(It,ys),t(f,ws),t(f,qr),t(qr,Lt),t(Lt,Es),t(f,$s),t(f,Ir),t(Ir,St),t(St,bs),t(f,xs),t(f,Lr),t(Lr,Ut),t(Ut,zs),t(f,Ts),t(f,Sr),t(Sr,Ct),t(Ct,ks),t(f,As),t(f,Ur),t(Ur,Dt),t(Dt,Ps),t(f,qs),t(f,Cr),t(Cr,Nt),t(Nt,Is),t(f,Ls),t(f,Dr),t(Dr,Ot),t(Ot,Ss),t(f,Us),t(f,Nr),t(Nr,jt),t(jt,Cs),t(f,Ds),t(f,Or),t(Or,Ft),t(Ft,Ns),t(f,Os),t(f,jr),t(jr,Rt),t(Rt,js),t(f,Fs),t(f,Fr),t(Fr,Ht),t(Ht,Rs),t(f,Hs),t(f,Rr),t(Rr,Mt),t(Mt,Ms),u(e,Ba,n),u(e,Gt,n),t(Gt,Gs),u(e,Ja,n),v(qe,e,n),u(e,Wa,n),u(e,Bt,n),t(Bt,Bs),u(e,Xa,n),v(Ie,e,n),u(e,Ya,n),u(e,M,n),t(M,ee),t(ee,Hr),v(Le,Hr,null),t(M,Js),t(M,Mr),t(Mr,Ws),u(e,Va,n),u(e,E,n),t(E,Xs),t(E,Se),t(Se,Ys),t(E,Vs),t(E,Ue),t(Ue,Ks),t(E,Qs),t(E,Ce),t(Ce,Zs),t(E,el),t(E,De),t(De,tl),t(E,rl),t(E,Gr),t(Gr,al),t(E,ol),u(e,Ka,n),v(Ne,e,n),u(e,Qa,n),u(e,G,n),t(G,te),t(te,Br),v(Oe,Br,null),t(G,il),t(G,Jr),t(Jr,sl),u(e,Za,n),u(e,re,n),t(re,ll),t(re,je),t(je,nl),t(re,pl),u(e,eo,n),u(e,ae,n),t(ae,Fe),t(Fe,ul),t(Fe,Wr),t(Wr,hl),t(Fe,ml),t(ae,fl),t(ae,Re),t(Re,cl),t(Re,Xr),t(Xr,_l),t(Re,dl),u(e,to,n),v(He,e,n),u(e,ro,n),u(e,oe,n),t(oe,vl),t(oe,Me),t(Me,Yr),t(Yr,gl),t(oe,yl),u(e,ao,n),u(e,B,n),t(B,ie),t(ie,Vr),v(Ge,Vr,null),t(B,wl),t(B,Kr),t(Kr,El),u(e,oo,n),u(e,x,n),t(x,$l),t(x,Be),t(Be,bl),t(x,xl),t(x,Je),t(Je,zl),t(x,Tl),t(x,Qr),t(Qr,kl),t(x,Al),t(x,Zr),t(Zr,Pl),t(x,ql),u(e,io,n),u(e,U,n),t(U,Il),t(U,We),t(We,ea),t(ea,Ll),t(U,Sl),t(U,ta),t(ta,Ul),t(U,Cl),u(e,so,n),v(Xe,e,n),u(e,lo,n),u(e,J,n),t(J,se),t(se,ra),v(Ye,ra,null),t(J,Dl),t(J,aa),t(aa,Nl),u(e,no,n),u(e,le,n),t(le,Ol),t(le,Ve),t(Ve,jl),t(le,Fl),u(e,po,n),v(Ke,e,n),u(e,uo,n),u(e,T,n),t(T,Rl),t(T,oa),t(oa,Hl),t(T,Ml),t(T,ia),t(ia,Gl),t(T,Bl),t(T,sa),t(sa,Jl),t(T,Wl),u(e,ho,n),v(Qe,e,n),u(e,mo,n),u(e,Jt,n),t(Jt,Xl),u(e,fo,n),v(Ze,e,n),u(e,co,n),u(e,Wt,n),t(Wt,Yl),u(e,_o,n),v(et,e,n),u(e,vo,n),u(e,W,n),t(W,ne),t(ne,la),v(tt,la,null),t(W,Vl),t(W,na),t(na,Kl),u(e,go,n),u(e,Xt,n),t(Xt,Ql),u(e,yo,n),u(e,C,n),t(C,pe),t(pe,pa),t(pa,Zl),t(pe,en),t(pe,ua),t(ua,tn),t(pe,rn),t(C,an),t(C,Yt),t(Yt,ha),t(ha,on),t(Yt,sn),t(C,ln),t(C,Vt),t(Vt,ma),t(ma,nn),t(Vt,pn),u(e,wo,n),u(e,Kt,n),t(Kt,un),u(e,Eo,n),v(rt,e,n),u(e,$o,n),u(e,X,n),t(X,ue),t(ue,fa),v(at,fa,null),t(X,hn),t(X,ca),t(ca,mn),u(e,bo,n),u(e,Qt,n),t(Qt,fn),u(e,xo,n),u(e,D,n),t(D,_a),t(_a,da),t(da,cn),t(D,_n),t(D,va),t(va,ga),t(ga
,dn),t(D,vn),t(D,ya),t(ya,wa),t(wa,gn),u(e,zo,n),v(ot,e,n),u(e,To,n),u(e,N,n),t(N,yn),t(N,Ea),t(Ea,wn),t(N,En),t(N,$a),t($a,$n),t(N,bn),u(e,ko,n),v(it,e,n),u(e,Ao,n),u(e,Y,n),t(Y,he),t(he,ba),v(st,ba,null),t(Y,xn),t(Y,xa),t(xa,zn),u(e,Po,n),u(e,Zt,n),t(Zt,Tn),u(e,qo,n),u(e,k,n),t(k,kn),t(k,za),t(za,An),t(k,Pn),t(k,Ta),t(Ta,qn),t(k,In),t(k,ka),t(ka,Ln),t(k,Sn),u(e,Io,n),v(lt,e,n),u(e,Lo,n),u(e,me,n),t(me,Un),t(me,Aa),t(Aa,Cn),t(me,Dn),u(e,So,n),v(nt,e,n),u(e,Uo,n),u(e,V,n),t(V,fe),t(fe,Pa),v(pt,Pa,null),t(V,Nn),t(V,qa),t(qa,On),u(e,Co,n),u(e,ce,n),t(ce,jn),t(ce,ut),t(ut,Fn),t(ce,Rn),u(e,Do,n),v(ht,e,n),u(e,No,n),u(e,O,n),t(O,Hn),t(O,Ia),t(Ia,Mn),t(O,Gn),t(O,La),t(La,Bn),t(O,Jn),u(e,Oo,n),u(e,_e,n),t(_e,Wn),t(_e,Sa),t(Sa,Xn),t(_e,Yn),u(e,jo,n),u(e,er,n),t(er,Vn),u(e,Fo,n),v(mt,e,n),Ro=!0},p:Gh,i(e){Ro||(g(ge.$$.fragment,e),g(Ae.$$.fragment,e),g(Pe.$$.fragment,e),g(qe.$$.fragment,e),g(Ie.$$.fragment,e),g(Le.$$.fragment,e),g(Ne.$$.fragment,e),g(Oe.$$.fragment,e),g(He.$$.fragment,e),g(Ge.$$.fragment,e),g(Xe.$$.fragment,e),g(Ye.$$.fragment,e),g(Ke.$$.fragment,e),g(Qe.$$.fragment,e),g(Ze.$$.fragment,e),g(et.$$.fragment,e),g(tt.$$.fragment,e),g(rt.$$.fragment,e),g(at.$$.fragment,e),g(ot.$$.fragment,e),g(it.$$.fragment,e),g(st.$$.fragment,e),g(lt.$$.fragment,e),g(nt.$$.fragment,e),g(pt.$$.fragment,e),g(ht.$$.fragment,e),g(mt.$$.fragment,e),Ro=!0)},o(e){y(ge.$$.fragment,e),y(Ae.$$.fragment,e),y(Pe.$$.fragment,e),y(qe.$$.fragment,e),y(Ie.$$.fragment,e),y(Le.$$.fragment,e),y(Ne.$$.fragment,e),y(Oe.$$.fragment,e),y(He.$$.fragment,e),y(Ge.$$.fragment,e),y(Xe.$$.fragment,e),y(Ye.$$.fragment,e),y(Ke.$$.fragment,e),y(Qe.$$.fragment,e),y(Ze.$$.fragment,e),y(et.$$.fragment,e),y(tt.$$.fragment,e),y(rt.$$.fragment,e),y(at.$$.fragment,e),y(ot.$$.fragment,e),y(it.$$.fragment,e),y(st.$$.fragment,e),y(lt.$$.fragment,e),y(nt.$$.fragment,e),y(pt.$$.fragment,e),y(ht.$$.fragment,e),y(mt.$$.fragment,e),Ro=!1},d(e){r(q),e&&r(ft),e&&r(z),w(ge),e&&r(Ca),e&&r(b),e&&r(Da),e&&r(I),e&&r(Na),e&&r(_t),e&&r(Oa),e&&r(L),e&&r(ja),e&&r(S),e&&r(Fa),e&&r(H),w(Ae),e&&r(Ra),e&&r(Q),e&&r(Ha),w(Pe,e),e&&r(Ma),e&&r(dt),e&&r(Ga),e&&r(Z),e&&r(Ba),e&&r(Gt),e&&r(Ja),w(qe,e),e&&r(Wa),e&&r(Bt),e&&r(Xa),w(Ie,e),e&&r(Ya),e&&r(M),w(Le),e&&r(Va),e&&r(E),e&&r(Ka),w(Ne,e),e&&r(Qa),e&&r(G),w(Oe),e&&r(Za),e&&r(re),e&&r(eo),e&&r(ae),e&&r(to),w(He,e),e&&r(ro),e&&r(oe),e&&r(ao),e&&r(B),w(Ge),e&&r(oo),e&&r(x),e&&r(io),e&&r(U),e&&r(so),w(Xe,e),e&&r(lo),e&&r(J),w(Ye),e&&r(no),e&&r(le),e&&r(po),w(Ke,e),e&&r(uo),e&&r(T),e&&r(ho),w(Qe,e),e&&r(mo),e&&r(Jt),e&&r(fo),w(Ze,e),e&&r(co),e&&r(Wt),e&&r(_o),w(et,e),e&&r(vo),e&&r(W),w(tt),e&&r(go),e&&r(Xt),e&&r(yo),e&&r(C),e&&r(wo),e&&r(Kt),e&&r(Eo),w(rt,e),e&&r($o),e&&r(X),w(at),e&&r(bo),e&&r(Qt),e&&r(xo),e&&r(D),e&&r(zo),w(ot,e),e&&r(To),e&&r(N),e&&r(ko),w(it,e),e&&r(Ao),e&&r(Y),w(st),e&&r(Po),e&&r(Zt),e&&r(qo),e&&r(k),e&&r(Io),w(lt,e),e&&r(Lo),e&&r(me),e&&r(So),w(nt,e),e&&r(Uo),e&&r(V),w(pt),e&&r(Co),e&&r(ce),e&&r(Do),w(ht,e),e&&r(No),e&&r(O),e&&r(Oo),e&&r(_e),e&&r(jo),e&&r(er),e&&r(Fo),w(mt,e)}}}const Jh={local:"train-with-a-script",sections:[{local:"setup",title:"Setup"},{local:"run-a-script",title:"Run a script"},{local:"distributed-training-and-mixed-precision",title:"Distributed training and mixed precision"},{local:"run-a-script-on-a-tpu",title:"Run a script on a TPU"},{local:"run-a-script-with-accelerate",title:"Run a script with \u{1F917} Accelerate"},{local:"use-a-custom-dataset",title:"Use a custom dataset"},{local:"test-a-script",title:"Test a script"},{local:"resume-training-from-checkpoint",title:"Resume 
training from checkpoint"},{local:"share-your-model",title:"Share your model"}],title:"Train with a script"};function Wh(pi,q,ft){let{fw:z}=q;return pi.$$set=P=>{"fw"in P&&ft(0,z=P.fw)},[z]}class Zh extends Fh{constructor(q){super();Rh(this,q,Wh,Bh,Hh,{fw:0})}}export{Zh as default,Jh as metadata};
258
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages/task_summary.mdx-1d811e69.js
import{S as Dg,i as Og,s as Bg,e as l,k as h,w as m,t,M as Rg,c as o,d as a,m as c,a as r,x as g,h as n,b as u,F as e,g as p,y as d,q as f,o as j,B as _}from"../chunks/vendor-4833417e.js";import{T as Hg}from"../chunks/Tip-fffd6df1.js";import{I as es}from"../chunks/IconCopyLink-4b81c553.js";import{C as H}from"../chunks/CodeBlock-6a3d1b46.js";import{C as Hs}from"../chunks/CodeBlockFw-27a176a0.js";import{D as Gg}from"../chunks/DocNotebookDropdown-ecff2a90.js";import"../chunks/CopyButton-dacfbfaf.js";function Ug(Ma){let T,G,E,b,as;return{c(){T=l("p"),G=t(`All tasks presented here leverage pre-trained checkpoints that were fine-tuned on specific tasks. Loading a checkpoint that was not fine-tuned on a specific task would load only the base transformer layers and not the additional head that is used for the task, initializing the weights of that head randomly.`),E=h(),b=l("p"),as=t("This would produce random output.")},l(y){T=o(y,"P",{});var U=r(T);G=n(U,`All tasks presented here leverage pre-trained checkpoints that were fine-tuned on specific tasks. Loading a checkpoint that was not fine-tuned on a specific task would load only the base transformer layers and not the additional head that is used for the task, initializing the weights of that head randomly.`),U.forEach(a),E=c(y),b=o(y,"P",{});var fs=r(b);as=n(fs,"This would produce random output."),fs.forEach(a)},m(y,U){p(y,T,U),e(T,G),p(y,E,U),p(y,b,U),e(b,as)},d(y){y&&a(T),y&&a(E),y&&a(b)}}}function Qg(Ma){let T,G,E,b,as,y,U,fs,Mo,yn,Gs,bn,Qe,Lo,vn,js,Po,Ye,Fo,No,qn,We,Co,Tn,_s,ts,So,La,Do,Oo,Us,Bo,Ro,Ho,Qs,Go,Ys,Uo,Qo,En,Xe,Yo,An,ks,Pa,Wo,Xo,Fa,Vo,zn,Ve,Jo,$n,ws,In,ns,xs,Na,Ws,Ko,Ca,Zo,Mn,M,sr,Xs,er,ar,Vs,tr,nr,Js,lr,or,Ks,rr,ir,Ln,Je,pr,Pn,Ke,hr,Fn,Zs,Nn,Ze,cr,Cn,L,Sa,ur,mr,Da,gr,dr,Oa,fr,jr,Ba,_r,kr,Ra,wr,Sn,se,Dn,ls,ys,Ha,ee,xr,Ga,yr,On,Q,br,ae,vr,qr,te,Tr,Er,Bn,sa,Ar,Rn,ne,Hn,ea,zr,Gn,le,Un,aa,$r,Qn,v,Ua,Ir,Mr,Qa,Lr,Pr,Ya,Fr,Nr,Wa,Cr,Sr,Xa,Dr,Or,Va,Br,Rr,Ja,Hr,Yn,oe,Wn,os,bs,Ka,re,Gr,Za,Ur,Xn,ta,Qr,Vn,vs,Yr,ie,Wr,Xr,Jn,rs,qs,st,pe,Vr,et,Jr,Kn,Y,Kr,he,Zr,si,ce,ei,ai,Zn,na,ti,sl,ue,el,la,ni,al,me,tl,oa,li,nl,A,at,oi,ri,ge,ii,tt,pi,hi,ci,nt,ui,mi,lt,gi,di,is,fi,ot,ji,_i,rt,ki,wi,xi,it,yi,ll,de,ol,ra,bi,rl,ps,Ts,pt,fe,vi,ht,qi,il,Es,Ti,je,Ei,Ai,pl,ia,zi,hl,As,$i,ct,Ii,Mi,cl,_e,ul,W,Li,ut,Pi,Fi,mt,Ni,Ci,ml,zs,Si,pa,Di,Oi,gl,hs,$s,gt,ke,Bi,dt,Ri,dl,z,Hi,ft,Gi,Ui,jt,Qi,Yi,_t,Wi,Xi,kt,Vi,Ji,we,Ki,Zi,fl,xe,jl,$,sp,wt,ep,ap,xt,tp,np,ha,lp,op,yt,rp,ip,bt,pp,hp,_l,X,cp,vt,up,mp,qt,gp,dp,kl,ye,wl,k,fp,Tt,jp,_p,Et,kp,wp,At,xp,yp,zt,bp,vp,$t,qp,Tp,It,Ep,Ap,Mt,zp,$p,Lt,Ip,Mp,Pt,Lp,Pp,xl,Is,Fp,be,Np,Cp,yl,cs,Ms,Ft,ve,Sp,Nt,Dp,bl,Ls,Op,qe,Bp,Rp,vl,ca,Hp,ql,w,Ct,Gp,Up,St,Qp,Yp,Dt,Wp,Xp,Ot,Vp,Jp,Bt,Kp,Zp,Rt,sh,eh,Ht,ah,th,Gt,nh,lh,Ut,oh,Tl,V,rh,Te,ih,ph,Ee,hh,ch,El,Ae,Al,ua,uh,zl,ze,$l,ma,mh,Il,ga,gh,Ml,I,Qt,dh,fh,Yt,jh,_h,Wt,kh,wh,Xt,xh,yh,Vt,bh,vh,Jt,qh,Ll,$e,Pl,da,Th,Fl,J,Eh,Kt,Ah,zh,Zt,$h,Ih,Nl,Ie,Cl,us,Ps,sn,Me,Mh,en,Lh,Sl,Fs,Ph,Le,Fh,Nh,Dl,Ns,Ch,Pe,Sh,Dh,Ol,fa,Oh,Bl,Fe,Rl,P,Bh,an,Rh,Hh,tn,Gh,Uh,nn,Qh,Yh,ln,Wh,Xh,Hl,Ne,Gl,ja,Vh,Ul,N,ms,Jh,on,Kh,Zh,rn,sc,ec,ac,pn,tc,nc,hn,lc,oc,Ce,rc,cn,ic,pc,Ql,_a,hc,Yl,Se,Wl,gs,Cs,un,De,cc,mn,uc,Xl,Ss,mc,Oe,gc,dc,Vl,Ds,fc,Be,jc,_c,Jl,ka,kc,Kl,Re,Zl,C,wc,gn,xc,yc,dn,bc,vc,fn,qc,Tc,so,wa,Ec,eo,S,ds,Ac,jn,zc,$c,_n,Ic,Mc,Lc,kn,Pc,Fc,wn,Nc,Cc,He,Sc,xn,Dc,Oc,ao,Ge,to,xa,Bc,no;return y=new es({}),Gs=new Gg({props:{classNames:"absolute z-10 right-0 
top-0",options:[{label:"Mixed",value:"https://colab.research.google.com/github/huggingface/notebooks/blob/master/transformers_doc/task_summary.ipynb"},{label:"PyTorch",value:"https://colab.research.google.com/github/huggingface/notebooks/blob/master/transformers_doc/pytorch/task_summary.ipynb"},{label:"TensorFlow",value:"https://colab.research.google.com/github/huggingface/notebooks/blob/master/transformers_doc/tensorflow/task_summary.ipynb"},{label:"Mixed",value:"https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/transformers_doc/task_summary.ipynb"},{label:"PyTorch",value:"https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/transformers_doc/pytorch/task_summary.ipynb"},{label:"TensorFlow",value:"https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/transformers_doc/tensorflow/task_summary.ipynb"}]}}),ws=new Hg({props:{$$slots:{default:[Ug]},$$scope:{ctx:Ma}}}),Ws=new es({}),Zs=new H({props:{code:`from transformers import pipeline classifier = pipeline("sentiment-analysis") result = classifier("I hate you")[0] print(f"label: {result['label']}, with score: {round(result['score'], 4)}") result = classifier("I love you")[0] print(f"label: {result['label']}, with score: {round(result['score'], 4)}")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> pipeline <span class="hljs-meta">&gt;&gt;&gt; </span>classifier = pipeline(<span class="hljs-string">&quot;sentiment-analysis&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>result = classifier(<span class="hljs-string">&quot;I hate you&quot;</span>)[<span class="hljs-number">0</span>] <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(<span class="hljs-string">f&quot;label: <span class="hljs-subst">{result[<span class="hljs-string">&#x27;label&#x27;</span>]}</span>, with score: <span class="hljs-subst">{<span class="hljs-built_in">round</span>(result[<span class="hljs-string">&#x27;score&#x27;</span>], <span class="hljs-number">4</span>)}</span>&quot;</span>) label: NEGATIVE, <span class="hljs-keyword">with</span> score: <span class="hljs-number">0.9991</span> <span class="hljs-meta">&gt;&gt;&gt; </span>result = classifier(<span class="hljs-string">&quot;I love you&quot;</span>)[<span class="hljs-number">0</span>] <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(<span class="hljs-string">f&quot;label: <span class="hljs-subst">{result[<span class="hljs-string">&#x27;label&#x27;</span>]}</span>, with score: <span class="hljs-subst">{<span class="hljs-built_in">round</span>(result[<span class="hljs-string">&#x27;score&#x27;</span>], <span class="hljs-number">4</span>)}</span>&quot;</span>) label: POSITIVE, <span class="hljs-keyword">with</span> score: <span class="hljs-number">0.9999</span>`}}),se=new Hs({props:{group1:{id:"pt",code:`from transformers import AutoTokenizer, AutoModelForSequenceClassification import torch tokenizer = AutoTokenizer.from_pretrained("bert-base-cased-finetuned-mrpc") model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased-finetuned-mrpc") classes = ["not paraphrase", "is paraphrase"] sequence_0 = "The company HuggingFace is based in New York City" sequence_1 = "Apples are especially bad for your health" sequence_2 = "HuggingFace's headquarters are situated in Manhattan" # The tokenizer will automatically add any model specific separators (i.e. 
<CLS> and <SEP>) and tokens to # the sequence, as well as compute the attention masks. paraphrase = tokenizer(sequence_0, sequence_2, return_tensors="pt") not_paraphrase = tokenizer(sequence_0, sequence_1, return_tensors="pt") paraphrase_classification_logits = model(**paraphrase).logits not_paraphrase_classification_logits = model(**not_paraphrase).logits paraphrase_results = torch.softmax(paraphrase_classification_logits, dim=1).tolist()[0] not_paraphrase_results = torch.softmax(not_paraphrase_classification_logits, dim=1).tolist()[0] # Should be paraphrase for i in range(len(classes)): print(f"{classes[i]}: {int(round(paraphrase_results[i] * 100))}%") # Should not be paraphrase for i in range(len(classes)): print(f"{classes[i]}: {int(round(not_paraphrase_results[i] * 100))}%")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer, AutoModelForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;bert-base-cased-finetuned-mrpc&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;bert-base-cased-finetuned-mrpc&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>classes = [<span class="hljs-string">&quot;not paraphrase&quot;</span>, <span class="hljs-string">&quot;is paraphrase&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>sequence_0 = <span class="hljs-string">&quot;The company HuggingFace is based in New York City&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>sequence_1 = <span class="hljs-string">&quot;Apples are especially bad for your health&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>sequence_2 = <span class="hljs-string">&quot;HuggingFace&#x27;s headquarters are situated in Manhattan&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># The tokenizer will automatically add any model specific separators (i.e. 
&lt;CLS&gt; and &lt;SEP&gt;) and tokens to</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># the sequence, as well as compute the attention masks.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>paraphrase = tokenizer(sequence_0, sequence_2, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>not_paraphrase = tokenizer(sequence_0, sequence_1, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>paraphrase_classification_logits = model(**paraphrase).logits <span class="hljs-meta">&gt;&gt;&gt; </span>not_paraphrase_classification_logits = model(**not_paraphrase).logits <span class="hljs-meta">&gt;&gt;&gt; </span>paraphrase_results = torch.softmax(paraphrase_classification_logits, dim=<span class="hljs-number">1</span>).tolist()[<span class="hljs-number">0</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>not_paraphrase_results = torch.softmax(not_paraphrase_classification_logits, dim=<span class="hljs-number">1</span>).tolist()[<span class="hljs-number">0</span>] <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Should be paraphrase</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">for</span> i <span class="hljs-keyword">in</span> <span class="hljs-built_in">range</span>(<span class="hljs-built_in">len</span>(classes)): <span class="hljs-meta">... </span> <span class="hljs-built_in">print</span>(<span class="hljs-string">f&quot;<span class="hljs-subst">{classes[i]}</span>: <span class="hljs-subst">{<span class="hljs-built_in">int</span>(<span class="hljs-built_in">round</span>(paraphrase_results[i] * <span class="hljs-number">100</span>))}</span>%&quot;</span>) <span class="hljs-keyword">not</span> paraphrase: <span class="hljs-number">10</span>% <span class="hljs-keyword">is</span> paraphrase: <span class="hljs-number">90</span>% <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Should not be paraphrase</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">for</span> i <span class="hljs-keyword">in</span> <span class="hljs-built_in">range</span>(<span class="hljs-built_in">len</span>(classes)): <span class="hljs-meta">... </span> <span class="hljs-built_in">print</span>(<span class="hljs-string">f&quot;<span class="hljs-subst">{classes[i]}</span>: <span class="hljs-subst">{<span class="hljs-built_in">int</span>(<span class="hljs-built_in">round</span>(not_paraphrase_results[i] * <span class="hljs-number">100</span>))}</span>%&quot;</span>) <span class="hljs-keyword">not</span> paraphrase: <span class="hljs-number">94</span>% <span class="hljs-keyword">is</span> paraphrase: <span class="hljs-number">6</span>%`},group2:{id:"tf",code:`from transformers import AutoTokenizer, TFAutoModelForSequenceClassification import tensorflow as tf tokenizer = AutoTokenizer.from_pretrained("bert-base-cased-finetuned-mrpc") model = TFAutoModelForSequenceClassification.from_pretrained("bert-base-cased-finetuned-mrpc") classes = ["not paraphrase", "is paraphrase"] sequence_0 = "The company HuggingFace is based in New York City" sequence_1 = "Apples are especially bad for your health" sequence_2 = "HuggingFace's headquarters are situated in Manhattan" # The tokenizer will automatically add any model specific separators (i.e. <CLS> and <SEP>) and tokens to # the sequence, as well as compute the attention masks. 
paraphrase = tokenizer(sequence_0, sequence_2, return_tensors="tf") not_paraphrase = tokenizer(sequence_0, sequence_1, return_tensors="tf") paraphrase_classification_logits = model(paraphrase).logits not_paraphrase_classification_logits = model(not_paraphrase).logits paraphrase_results = tf.nn.softmax(paraphrase_classification_logits, axis=1).numpy()[0] not_paraphrase_results = tf.nn.softmax(not_paraphrase_classification_logits, axis=1).numpy()[0] # Should be paraphrase for i in range(len(classes)): print(f"{classes[i]}: {int(round(paraphrase_results[i] * 100))}%") # Should not be paraphrase for i in range(len(classes)): print(f"{classes[i]}: {int(round(not_paraphrase_results[i] * 100))}%")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer, TFAutoModelForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;bert-base-cased-finetuned-mrpc&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModelForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;bert-base-cased-finetuned-mrpc&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>classes = [<span class="hljs-string">&quot;not paraphrase&quot;</span>, <span class="hljs-string">&quot;is paraphrase&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>sequence_0 = <span class="hljs-string">&quot;The company HuggingFace is based in New York City&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>sequence_1 = <span class="hljs-string">&quot;Apples are especially bad for your health&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>sequence_2 = <span class="hljs-string">&quot;HuggingFace&#x27;s headquarters are situated in Manhattan&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># The tokenizer will automatically add any model specific separators (i.e. 
&lt;CLS&gt; and &lt;SEP&gt;) and tokens to</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># the sequence, as well as compute the attention masks.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>paraphrase = tokenizer(sequence_0, sequence_2, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>not_paraphrase = tokenizer(sequence_0, sequence_1, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>paraphrase_classification_logits = model(paraphrase).logits <span class="hljs-meta">&gt;&gt;&gt; </span>not_paraphrase_classification_logits = model(not_paraphrase).logits <span class="hljs-meta">&gt;&gt;&gt; </span>paraphrase_results = tf.nn.softmax(paraphrase_classification_logits, axis=<span class="hljs-number">1</span>).numpy()[<span class="hljs-number">0</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>not_paraphrase_results = tf.nn.softmax(not_paraphrase_classification_logits, axis=<span class="hljs-number">1</span>).numpy()[<span class="hljs-number">0</span>] <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Should be paraphrase</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">for</span> i <span class="hljs-keyword">in</span> <span class="hljs-built_in">range</span>(<span class="hljs-built_in">len</span>(classes)): <span class="hljs-meta">... </span> <span class="hljs-built_in">print</span>(<span class="hljs-string">f&quot;<span class="hljs-subst">{classes[i]}</span>: <span class="hljs-subst">{<span class="hljs-built_in">int</span>(<span class="hljs-built_in">round</span>(paraphrase_results[i] * <span class="hljs-number">100</span>))}</span>%&quot;</span>) <span class="hljs-keyword">not</span> paraphrase: <span class="hljs-number">10</span>% <span class="hljs-keyword">is</span> paraphrase: <span class="hljs-number">90</span>% <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Should not be paraphrase</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">for</span> i <span class="hljs-keyword">in</span> <span class="hljs-built_in">range</span>(<span class="hljs-built_in">len</span>(classes)): <span class="hljs-meta">... </span> <span class="hljs-built_in">print</span>(<span class="hljs-string">f&quot;<span class="hljs-subst">{classes[i]}</span>: <span class="hljs-subst">{<span class="hljs-built_in">int</span>(<span class="hljs-built_in">round</span>(not_paraphrase_results[i] * <span class="hljs-number">100</span>))}</span>%&quot;</span>) <span class="hljs-keyword">not</span> paraphrase: <span class="hljs-number">94</span>% <span class="hljs-keyword">is</span> paraphrase: <span class="hljs-number">6</span>%`}}}),ee=new es({}),ne=new H({props:{code:`from transformers import pipeline question_answerer = pipeline("question-answering") context = r""" Extractive Question Answering is the task of extracting an answer from a text given a question. An example of a question answering dataset is the SQuAD dataset, which is entirely based on that task. If you would like to fine-tune a model on a SQuAD task, you may leverage the examples/pytorch/question-answering/run_squad.py script. 
"""`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> pipeline <span class="hljs-meta">&gt;&gt;&gt; </span>question_answerer = pipeline(<span class="hljs-string">&quot;question-answering&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>context = <span class="hljs-string">r&quot;&quot;&quot; <span class="hljs-meta">... </span>Extractive Question Answering is the task of extracting an answer from a text given a question. An example of a <span class="hljs-meta">... </span>question answering dataset is the SQuAD dataset, which is entirely based on that task. If you would like to fine-tune <span class="hljs-meta">... </span>a model on a SQuAD task, you may leverage the examples/pytorch/question-answering/run_squad.py script. <span class="hljs-meta">... </span>&quot;&quot;&quot;</span>`}}),le=new H({props:{code:`result = question_answerer(question="What is extractive question answering?", context=context) print( f"Answer: '{result['answer']}', score: {round(result['score'], 4)}, start: {result['start']}, end: {result['end']}" ) result = question_answerer(question="What is a good example of a question answering dataset?", context=context) print( f"Answer: '{result['answer']}', score: {round(result['score'], 4)}, start: {result['start']}, end: {result['end']}" )`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>result = question_answerer(question=<span class="hljs-string">&quot;What is extractive question answering?&quot;</span>, context=context) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>( <span class="hljs-meta">... </span> <span class="hljs-string">f&quot;Answer: &#x27;<span class="hljs-subst">{result[<span class="hljs-string">&#x27;answer&#x27;</span>]}</span>&#x27;, score: <span class="hljs-subst">{<span class="hljs-built_in">round</span>(result[<span class="hljs-string">&#x27;score&#x27;</span>], <span class="hljs-number">4</span>)}</span>, start: <span class="hljs-subst">{result[<span class="hljs-string">&#x27;start&#x27;</span>]}</span>, end: <span class="hljs-subst">{result[<span class="hljs-string">&#x27;end&#x27;</span>]}</span>&quot;</span> <span class="hljs-meta">... </span>) Answer: <span class="hljs-string">&#x27;the task of extracting an answer from a text given a question&#x27;</span>, score: <span class="hljs-number">0.6177</span>, start: <span class="hljs-number">34</span>, end: <span class="hljs-number">95</span> <span class="hljs-meta">&gt;&gt;&gt; </span>result = question_answerer(question=<span class="hljs-string">&quot;What is a good example of a question answering dataset?&quot;</span>, context=context) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>( <span class="hljs-meta">... </span> <span class="hljs-string">f&quot;Answer: &#x27;<span class="hljs-subst">{result[<span class="hljs-string">&#x27;answer&#x27;</span>]}</span>&#x27;, score: <span class="hljs-subst">{<span class="hljs-built_in">round</span>(result[<span class="hljs-string">&#x27;score&#x27;</span>], <span class="hljs-number">4</span>)}</span>, start: <span class="hljs-subst">{result[<span class="hljs-string">&#x27;start&#x27;</span>]}</span>, end: <span class="hljs-subst">{result[<span class="hljs-string">&#x27;end&#x27;</span>]}</span>&quot;</span> <span class="hljs-meta">... 
</span>) Answer: <span class="hljs-string">&#x27;SQuAD dataset&#x27;</span>, score: <span class="hljs-number">0.5152</span>, start: <span class="hljs-number">147</span>, end: <span class="hljs-number">160</span>`}}),oe=new Hs({props:{group1:{id:"pt",code:`from transformers import AutoTokenizer, AutoModelForQuestionAnswering import torch tokenizer = AutoTokenizer.from_pretrained("bert-large-uncased-whole-word-masking-finetuned-squad") model = AutoModelForQuestionAnswering.from_pretrained("bert-large-uncased-whole-word-masking-finetuned-squad") text = r""" \u{1F917} Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides general-purpose architectures (BERT, GPT-2, RoBERTa, XLM, DistilBert, XLNet\u2026) for Natural Language Understanding (NLU) and Natural Language Generation (NLG) with over 32+ pretrained models in 100+ languages and deep interoperability between TensorFlow 2.0 and PyTorch. """ questions = [ "How many pretrained models are available in \u{1F917} Transformers?", "What does \u{1F917} Transformers provide?", "\u{1F917} Transformers provides interoperability between which frameworks?", ] for question in questions: inputs = tokenizer(question, text, add_special_tokens=True, return_tensors="pt") input_ids = inputs["input_ids"].tolist()[0] outputs = model(**inputs) answer_start_scores = outputs.start_logits answer_end_scores = outputs.end_logits # Get the most likely beginning of answer with the argmax of the score answer_start = torch.argmax(answer_start_scores) # Get the most likely end of answer with the argmax of the score answer_end = torch.argmax(answer_end_scores) + 1 answer = tokenizer.convert_tokens_to_string( tokenizer.convert_ids_to_tokens(input_ids[answer_start:answer_end]) ) print(f"Question: {question}") print(f"Answer: {answer}")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer, AutoModelForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;bert-large-uncased-whole-word-masking-finetuned-squad&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForQuestionAnswering.from_pretrained(<span class="hljs-string">&quot;bert-large-uncased-whole-word-masking-finetuned-squad&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>text = <span class="hljs-string">r&quot;&quot;&quot; <span class="hljs-meta">... </span>\u{1F917} Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides general-purpose <span class="hljs-meta">... </span>architectures (BERT, GPT-2, RoBERTa, XLM, DistilBert, XLNet\u2026) for Natural Language Understanding (NLU) and Natural <span class="hljs-meta">... </span>Language Generation (NLG) with over 32+ pretrained models in 100+ languages and deep interoperability between <span class="hljs-meta">... </span>TensorFlow 2.0 and PyTorch. <span class="hljs-meta">... </span>&quot;&quot;&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>questions = [ <span class="hljs-meta">... </span> <span class="hljs-string">&quot;How many pretrained models are available in \u{1F917} Transformers?&quot;</span>, <span class="hljs-meta">... </span> <span class="hljs-string">&quot;What does \u{1F917} Transformers provide?&quot;</span>, <span class="hljs-meta">... 
</span> <span class="hljs-string">&quot;\u{1F917} Transformers provides interoperability between which frameworks?&quot;</span>, <span class="hljs-meta">... </span>] <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">for</span> question <span class="hljs-keyword">in</span> questions: <span class="hljs-meta">... </span> inputs = tokenizer(question, text, add_special_tokens=<span class="hljs-literal">True</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">... </span> input_ids = inputs[<span class="hljs-string">&quot;input_ids&quot;</span>].tolist()[<span class="hljs-number">0</span>] <span class="hljs-meta">... </span> outputs = model(**inputs) <span class="hljs-meta">... </span> answer_start_scores = outputs.start_logits <span class="hljs-meta">... </span> answer_end_scores = outputs.end_logits <span class="hljs-meta">... </span> <span class="hljs-comment"># Get the most likely beginning of answer with the argmax of the score</span> <span class="hljs-meta">... </span> answer_start = torch.argmax(answer_start_scores) <span class="hljs-meta">... </span> <span class="hljs-comment"># Get the most likely end of answer with the argmax of the score</span> <span class="hljs-meta">... </span> answer_end = torch.argmax(answer_end_scores) + <span class="hljs-number">1</span> <span class="hljs-meta">... </span> answer = tokenizer.convert_tokens_to_string( <span class="hljs-meta">... </span> tokenizer.convert_ids_to_tokens(input_ids[answer_start:answer_end]) <span class="hljs-meta">... </span> ) <span class="hljs-meta">... </span> <span class="hljs-built_in">print</span>(<span class="hljs-string">f&quot;Question: <span class="hljs-subst">{question}</span>&quot;</span>) <span class="hljs-meta">... </span> <span class="hljs-built_in">print</span>(<span class="hljs-string">f&quot;Answer: <span class="hljs-subst">{answer}</span>&quot;</span>) Question: How many pretrained models are available <span class="hljs-keyword">in</span> \u{1F917} Transformers? Answer: over <span class="hljs-number">32</span> + Question: What does \u{1F917} Transformers provide? Answer: general - purpose architectures Question: \u{1F917} Transformers provides interoperability between which frameworks? Answer: tensorflow <span class="hljs-number">2.</span> <span class="hljs-number">0</span> <span class="hljs-keyword">and</span> pytorch`},group2:{id:"tf",code:`from transformers import AutoTokenizer, TFAutoModelForQuestionAnswering import tensorflow as tf tokenizer = AutoTokenizer.from_pretrained("bert-large-uncased-whole-word-masking-finetuned-squad") model = TFAutoModelForQuestionAnswering.from_pretrained("bert-large-uncased-whole-word-masking-finetuned-squad") text = r""" \u{1F917} Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides general-purpose architectures (BERT, GPT-2, RoBERTa, XLM, DistilBert, XLNet\u2026) for Natural Language Understanding (NLU) and Natural Language Generation (NLG) with over 32+ pretrained models in 100+ languages and deep interoperability between TensorFlow 2.0 and PyTorch. 
""" questions = [ "How many pretrained models are available in \u{1F917} Transformers?", "What does \u{1F917} Transformers provide?", "\u{1F917} Transformers provides interoperability between which frameworks?", ] for question in questions: inputs = tokenizer(question, text, add_special_tokens=True, return_tensors="tf") input_ids = inputs["input_ids"].numpy()[0] outputs = model(inputs) answer_start_scores = outputs.start_logits answer_end_scores = outputs.end_logits # Get the most likely beginning of answer with the argmax of the score answer_start = tf.argmax(answer_start_scores, axis=1).numpy()[0] # Get the most likely end of answer with the argmax of the score answer_end = tf.argmax(answer_end_scores, axis=1).numpy()[0] + 1 answer = tokenizer.convert_tokens_to_string( tokenizer.convert_ids_to_tokens(input_ids[answer_start:answer_end]) ) print(f"Question: {question}") print(f"Answer: {answer}")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer, TFAutoModelForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;bert-large-uncased-whole-word-masking-finetuned-squad&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModelForQuestionAnswering.from_pretrained(<span class="hljs-string">&quot;bert-large-uncased-whole-word-masking-finetuned-squad&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>text = <span class="hljs-string">r&quot;&quot;&quot; <span class="hljs-meta">... </span>\u{1F917} Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides general-purpose <span class="hljs-meta">... </span>architectures (BERT, GPT-2, RoBERTa, XLM, DistilBert, XLNet\u2026) for Natural Language Understanding (NLU) and Natural <span class="hljs-meta">... </span>Language Generation (NLG) with over 32+ pretrained models in 100+ languages and deep interoperability between <span class="hljs-meta">... </span>TensorFlow 2.0 and PyTorch. <span class="hljs-meta">... </span>&quot;&quot;&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>questions = [ <span class="hljs-meta">... </span> <span class="hljs-string">&quot;How many pretrained models are available in \u{1F917} Transformers?&quot;</span>, <span class="hljs-meta">... </span> <span class="hljs-string">&quot;What does \u{1F917} Transformers provide?&quot;</span>, <span class="hljs-meta">... </span> <span class="hljs-string">&quot;\u{1F917} Transformers provides interoperability between which frameworks?&quot;</span>, <span class="hljs-meta">... </span>] <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">for</span> question <span class="hljs-keyword">in</span> questions: <span class="hljs-meta">... </span> inputs = tokenizer(question, text, add_special_tokens=<span class="hljs-literal">True</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">... </span> input_ids = inputs[<span class="hljs-string">&quot;input_ids&quot;</span>].numpy()[<span class="hljs-number">0</span>] <span class="hljs-meta">... </span> outputs = model(inputs) <span class="hljs-meta">... </span> answer_start_scores = outputs.start_logits <span class="hljs-meta">... 
</span> answer_end_scores = outputs.end_logits <span class="hljs-meta">... </span> <span class="hljs-comment"># Get the most likely beginning of answer with the argmax of the score</span> <span class="hljs-meta">... </span> answer_start = tf.argmax(answer_start_scores, axis=<span class="hljs-number">1</span>).numpy()[<span class="hljs-number">0</span>] <span class="hljs-meta">... </span> <span class="hljs-comment"># Get the most likely end of answer with the argmax of the score</span> <span class="hljs-meta">... </span> answer_end = tf.argmax(answer_end_scores, axis=<span class="hljs-number">1</span>).numpy()[<span class="hljs-number">0</span>] + <span class="hljs-number">1</span> <span class="hljs-meta">... </span> answer = tokenizer.convert_tokens_to_string( <span class="hljs-meta">... </span> tokenizer.convert_ids_to_tokens(input_ids[answer_start:answer_end]) <span class="hljs-meta">... </span> ) <span class="hljs-meta">... </span> <span class="hljs-built_in">print</span>(<span class="hljs-string">f&quot;Question: <span class="hljs-subst">{question}</span>&quot;</span>) <span class="hljs-meta">... </span> <span class="hljs-built_in">print</span>(<span class="hljs-string">f&quot;Answer: <span class="hljs-subst">{answer}</span>&quot;</span>) Question: How many pretrained models are available <span class="hljs-keyword">in</span> \u{1F917} Transformers? Answer: over <span class="hljs-number">32</span> + Question: What does \u{1F917} Transformers provide? Answer: general - purpose architectures Question: \u{1F917} Transformers provides interoperability between which frameworks? Answer: tensorflow <span class="hljs-number">2.</span> <span class="hljs-number">0</span> <span class="hljs-keyword">and</span> pytorch`}}}),re=new es({}),pe=new es({}),ue=new H({props:{code:`from transformers import pipeline unmasker = pipeline("fill-mask")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> pipeline <span class="hljs-meta">&gt;&gt;&gt; </span>unmasker = pipeline(<span class="hljs-string">&quot;fill-mask&quot;</span>)`}}),me=new H({props:{code:`from pprint import pprint pprint( unmasker( f"HuggingFace is creating a {unmasker.tokenizer.mask_token} that the community uses to solve NLP tasks." ) )`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> pprint <span class="hljs-keyword">import</span> pprint <span class="hljs-meta">&gt;&gt;&gt; </span>pprint( <span class="hljs-meta">... </span> unmasker( <span class="hljs-meta">... </span> <span class="hljs-string">f&quot;HuggingFace is creating a <span class="hljs-subst">{unmasker.tokenizer.mask_token}</span> that the community uses to solve NLP tasks.&quot;</span> <span class="hljs-meta">... </span> ) <span class="hljs-meta">... 
</span>) [{<span class="hljs-string">&#x27;score&#x27;</span>: <span class="hljs-number">0.1793</span>, <span class="hljs-string">&#x27;sequence&#x27;</span>: <span class="hljs-string">&#x27;HuggingFace is creating a tool that the community uses to solve &#x27;</span> <span class="hljs-string">&#x27;NLP tasks.&#x27;</span>, <span class="hljs-string">&#x27;token&#x27;</span>: <span class="hljs-number">3944</span>, <span class="hljs-string">&#x27;token_str&#x27;</span>: <span class="hljs-string">&#x27; tool&#x27;</span>}, {<span class="hljs-string">&#x27;score&#x27;</span>: <span class="hljs-number">0.1135</span>, <span class="hljs-string">&#x27;sequence&#x27;</span>: <span class="hljs-string">&#x27;HuggingFace is creating a framework that the community uses to &#x27;</span> <span class="hljs-string">&#x27;solve NLP tasks.&#x27;</span>, <span class="hljs-string">&#x27;token&#x27;</span>: <span class="hljs-number">7208</span>, <span class="hljs-string">&#x27;token_str&#x27;</span>: <span class="hljs-string">&#x27; framework&#x27;</span>}, {<span class="hljs-string">&#x27;score&#x27;</span>: <span class="hljs-number">0.0524</span>, <span class="hljs-string">&#x27;sequence&#x27;</span>: <span class="hljs-string">&#x27;HuggingFace is creating a library that the community uses to &#x27;</span> <span class="hljs-string">&#x27;solve NLP tasks.&#x27;</span>, <span class="hljs-string">&#x27;token&#x27;</span>: <span class="hljs-number">5560</span>, <span class="hljs-string">&#x27;token_str&#x27;</span>: <span class="hljs-string">&#x27; library&#x27;</span>}, {<span class="hljs-string">&#x27;score&#x27;</span>: <span class="hljs-number">0.0349</span>, <span class="hljs-string">&#x27;sequence&#x27;</span>: <span class="hljs-string">&#x27;HuggingFace is creating a database that the community uses to &#x27;</span> <span class="hljs-string">&#x27;solve NLP tasks.&#x27;</span>, <span class="hljs-string">&#x27;token&#x27;</span>: <span class="hljs-number">8503</span>, <span class="hljs-string">&#x27;token_str&#x27;</span>: <span class="hljs-string">&#x27; database&#x27;</span>}, {<span class="hljs-string">&#x27;score&#x27;</span>: <span class="hljs-number">0.0286</span>, <span class="hljs-string">&#x27;sequence&#x27;</span>: <span class="hljs-string">&#x27;HuggingFace is creating a prototype that the community uses to &#x27;</span> <span class="hljs-string">&#x27;solve NLP tasks.&#x27;</span>, <span class="hljs-string">&#x27;token&#x27;</span>: <span class="hljs-number">17715</span>, <span class="hljs-string">&#x27;token_str&#x27;</span>: <span class="hljs-string">&#x27; prototype&#x27;</span>}]`}}),de=new Hs({props:{group1:{id:"pt",code:`from transformers import AutoModelForMaskedLM, AutoTokenizer import torch tokenizer = AutoTokenizer.from_pretrained("distilbert-base-cased") model = AutoModelForMaskedLM.from_pretrained("distilbert-base-cased") sequence = ( "Distilled models are smaller than the models they mimic. Using them instead of the large " f"versions would help {tokenizer.mask_token} our carbon footprint." 
) inputs = tokenizer(sequence, return_tensors="pt") mask_token_index = torch.where(inputs["input_ids"] == tokenizer.mask_token_id)[1] token_logits = model(**inputs).logits mask_token_logits = token_logits[0, mask_token_index, :] top_5_tokens = torch.topk(mask_token_logits, 5, dim=1).indices[0].tolist() for token in top_5_tokens: print(sequence.replace(tokenizer.mask_token, tokenizer.decode([token])))`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModelForMaskedLM, AutoTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;distilbert-base-cased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForMaskedLM.from_pretrained(<span class="hljs-string">&quot;distilbert-base-cased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>sequence = ( <span class="hljs-meta">... </span> <span class="hljs-string">&quot;Distilled models are smaller than the models they mimic. Using them instead of the large &quot;</span> <span class="hljs-meta">... </span> <span class="hljs-string">f&quot;versions would help <span class="hljs-subst">{tokenizer.mask_token}</span> our carbon footprint.&quot;</span> <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(sequence, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>mask_token_index = torch.where(inputs[<span class="hljs-string">&quot;input_ids&quot;</span>] == tokenizer.mask_token_id)[<span class="hljs-number">1</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>token_logits = model(**inputs).logits <span class="hljs-meta">&gt;&gt;&gt; </span>mask_token_logits = token_logits[<span class="hljs-number">0</span>, mask_token_index, :] <span class="hljs-meta">&gt;&gt;&gt; </span>top_5_tokens = torch.topk(mask_token_logits, <span class="hljs-number">5</span>, dim=<span class="hljs-number">1</span>).indices[<span class="hljs-number">0</span>].tolist() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">for</span> token <span class="hljs-keyword">in</span> top_5_tokens: <span class="hljs-meta">... </span> <span class="hljs-built_in">print</span>(sequence.replace(tokenizer.mask_token, tokenizer.decode([token]))) Distilled models are smaller than the models they mimic. Using them instead of the large versions would <span class="hljs-built_in">help</span> reduce our carbon footprint. Distilled models are smaller than the models they mimic. Using them instead of the large versions would <span class="hljs-built_in">help</span> increase our carbon footprint. Distilled models are smaller than the models they mimic. Using them instead of the large versions would <span class="hljs-built_in">help</span> decrease our carbon footprint. Distilled models are smaller than the models they mimic. Using them instead of the large versions would <span class="hljs-built_in">help</span> offset our carbon footprint. Distilled models are smaller than the models they mimic. 
Using them instead of the large versions would <span class="hljs-built_in">help</span> improve our carbon footprint.`},group2:{id:"tf",code:`from transformers import TFAutoModelForMaskedLM, AutoTokenizer import tensorflow as tf tokenizer = AutoTokenizer.from_pretrained("distilbert-base-cased") model = TFAutoModelForMaskedLM.from_pretrained("distilbert-base-cased") sequence = ( "Distilled models are smaller than the models they mimic. Using them instead of the large " f"versions would help {tokenizer.mask_token} our carbon footprint." ) inputs = tokenizer(sequence, return_tensors="tf") mask_token_index = tf.where(inputs["input_ids"] == tokenizer.mask_token_id)[0, 1] token_logits = model(**inputs).logits mask_token_logits = token_logits[0, mask_token_index, :] top_5_tokens = tf.math.top_k(mask_token_logits, 5).indices.numpy() for token in top_5_tokens: print(sequence.replace(tokenizer.mask_token, tokenizer.decode([token])))`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TFAutoModelForMaskedLM, AutoTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;distilbert-base-cased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModelForMaskedLM.from_pretrained(<span class="hljs-string">&quot;distilbert-base-cased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>sequence = ( <span class="hljs-meta">... </span> <span class="hljs-string">&quot;Distilled models are smaller than the models they mimic. Using them instead of the large &quot;</span> <span class="hljs-meta">... </span> <span class="hljs-string">f&quot;versions would help <span class="hljs-subst">{tokenizer.mask_token}</span> our carbon footprint.&quot;</span> <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(sequence, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>mask_token_index = tf.where(inputs[<span class="hljs-string">&quot;input_ids&quot;</span>] == tokenizer.mask_token_id)[<span class="hljs-number">0</span>, <span class="hljs-number">1</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>token_logits = model(**inputs).logits <span class="hljs-meta">&gt;&gt;&gt; </span>mask_token_logits = token_logits[<span class="hljs-number">0</span>, mask_token_index, :] <span class="hljs-meta">&gt;&gt;&gt; </span>top_5_tokens = tf.math.top_k(mask_token_logits, <span class="hljs-number">5</span>).indices.numpy() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">for</span> token <span class="hljs-keyword">in</span> top_5_tokens: <span class="hljs-meta">... </span> <span class="hljs-built_in">print</span>(sequence.replace(tokenizer.mask_token, tokenizer.decode([token]))) Distilled models are smaller than the models they mimic. Using them instead of the large versions would <span class="hljs-built_in">help</span> reduce our carbon footprint. Distilled models are smaller than the models they mimic. Using them instead of the large versions would <span class="hljs-built_in">help</span> increase our carbon footprint. Distilled models are smaller than the models they mimic. 
Using them instead of the large versions would <span class="hljs-built_in">help</span> decrease our carbon footprint. Distilled models are smaller than the models they mimic. Using them instead of the large versions would <span class="hljs-built_in">help</span> offset our carbon footprint. Distilled models are smaller than the models they mimic. Using them instead of the large versions would <span class="hljs-built_in">help</span> improve our carbon footprint.`}}}),fe=new es({}),_e=new Hs({props:{group1:{id:"pt",code:`from transformers import AutoModelForCausalLM, AutoTokenizer, top_k_top_p_filtering import torch from torch import nn tokenizer = AutoTokenizer.from_pretrained("gpt2") model = AutoModelForCausalLM.from_pretrained("gpt2") sequence = f"Hugging Face is based in DUMBO, New York City, and" inputs = tokenizer(sequence, return_tensors="pt") input_ids = inputs["input_ids"] # get logits of last hidden state next_token_logits = model(**inputs).logits[:, -1, :] # filter filtered_next_token_logits = top_k_top_p_filtering(next_token_logits, top_k=50, top_p=1.0) # sample probs = nn.functional.softmax(filtered_next_token_logits, dim=-1) next_token = torch.multinomial(probs, num_samples=1) generated = torch.cat([input_ids, next_token], dim=-1) resulting_string = tokenizer.decode(generated.tolist()[0]) print(resulting_string)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModelForCausalLM, AutoTokenizer, top_k_top_p_filtering <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> torch <span class="hljs-keyword">import</span> nn <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;gpt2&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForCausalLM.from_pretrained(<span class="hljs-string">&quot;gpt2&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>sequence = <span class="hljs-string">f&quot;Hugging Face is based in DUMBO, New York City, and&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(sequence, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = inputs[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># get logits of last hidden state</span> <span class="hljs-meta">&gt;&gt;&gt; </span>next_token_logits = model(**inputs).logits[:, -<span class="hljs-number">1</span>, :] <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># filter</span> <span class="hljs-meta">&gt;&gt;&gt; </span>filtered_next_token_logits = top_k_top_p_filtering(next_token_logits, top_k=<span class="hljs-number">50</span>, top_p=<span class="hljs-number">1.0</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># sample</span> <span class="hljs-meta">&gt;&gt;&gt; </span>probs = nn.functional.softmax(filtered_next_token_logits, dim=-<span class="hljs-number">1</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>next_token = torch.multinomial(probs, num_samples=<span class="hljs-number">1</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>generated = torch.cat([input_ids, next_token], dim=-<span class="hljs-number">1</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>resulting_string = 
tokenizer.decode(generated.tolist()[<span class="hljs-number">0</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(resulting_string) Hugging Face <span class="hljs-keyword">is</span> based <span class="hljs-keyword">in</span> DUMBO, New York City, <span class="hljs-keyword">and</span> ...`},group2:{id:"tf",code:`from transformers import TFAutoModelForCausalLM, AutoTokenizer, tf_top_k_top_p_filtering import tensorflow as tf tokenizer = AutoTokenizer.from_pretrained("gpt2") model = TFAutoModelForCausalLM.from_pretrained("gpt2") sequence = f"Hugging Face is based in DUMBO, New York City, and" inputs = tokenizer(sequence, return_tensors="tf") input_ids = inputs["input_ids"] # get logits of last hidden state next_token_logits = model(**inputs).logits[:, -1, :] # filter filtered_next_token_logits = tf_top_k_top_p_filtering(next_token_logits, top_k=50, top_p=1.0) # sample next_token = tf.random.categorical(filtered_next_token_logits, dtype=tf.int32, num_samples=1) generated = tf.concat([input_ids, next_token], axis=1) resulting_string = tokenizer.decode(generated.numpy().tolist()[0]) print(resulting_string)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TFAutoModelForCausalLM, AutoTokenizer, tf_top_k_top_p_filtering <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;gpt2&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModelForCausalLM.from_pretrained(<span class="hljs-string">&quot;gpt2&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>sequence = <span class="hljs-string">f&quot;Hugging Face is based in DUMBO, New York City, and&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(sequence, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = inputs[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># get logits of last hidden state</span> <span class="hljs-meta">&gt;&gt;&gt; </span>next_token_logits = model(**inputs).logits[:, -<span class="hljs-number">1</span>, :] <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># filter</span> <span class="hljs-meta">&gt;&gt;&gt; </span>filtered_next_token_logits = tf_top_k_top_p_filtering(next_token_logits, top_k=<span class="hljs-number">50</span>, top_p=<span class="hljs-number">1.0</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># sample</span> <span class="hljs-meta">&gt;&gt;&gt; </span>next_token = tf.random.categorical(filtered_next_token_logits, dtype=tf.int32, num_samples=<span class="hljs-number">1</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>generated = tf.concat([input_ids, next_token], axis=<span class="hljs-number">1</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>resulting_string = tokenizer.decode(generated.numpy().tolist()[<span class="hljs-number">0</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(resulting_string) Hugging Face <span class="hljs-keyword">is</span> based <span class="hljs-keyword">in</span> DUMBO, New York City, <span class="hljs-keyword">and</span> ...`}}}),ke=new es({}),xe=new 
H({props:{code:`from transformers import pipeline text_generator = pipeline("text-generation") print(text_generator("As far as I am concerned, I will", max_length=50, do_sample=False))`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> pipeline <span class="hljs-meta">&gt;&gt;&gt; </span>text_generator = pipeline(<span class="hljs-string">&quot;text-generation&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(text_generator(<span class="hljs-string">&quot;As far as I am concerned, I will&quot;</span>, max_length=<span class="hljs-number">50</span>, do_sample=<span class="hljs-literal">False</span>)) [{<span class="hljs-string">&#x27;generated_text&#x27;</span>: <span class="hljs-string">&#x27;As far as I am concerned, I will be the first to admit that I am not a fan of the idea of a &quot;free market.&quot; I think that the idea of a free market is a bit of a stretch. I think that the idea&#x27;</span>}]`}}),ye=new Hs({props:{group1:{id:"pt",code:`from transformers import AutoModelForCausalLM, AutoTokenizer model = AutoModelForCausalLM.from_pretrained("xlnet-base-cased") tokenizer = AutoTokenizer.from_pretrained("xlnet-base-cased") # Padding text helps XLNet with short prompts - proposed by Aman Rusia in https://github.com/rusiaaman/XLNet-gen#methodology PADDING_TEXT = """In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision and denounces one of the men as a horse thief. Although his father initially slaps him for making such an accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop, begging for his blessing. <eod> </s> <eos>""" prompt = "Today the weather is really nice and I am planning on " inputs = tokenizer(PADDING_TEXT + prompt, add_special_tokens=False, return_tensors="pt")["input_ids"] prompt_length = len(tokenizer.decode(inputs[0])) outputs = model.generate(inputs, max_length=250, do_sample=True, top_p=0.95, top_k=60) generated = prompt + tokenizer.decode(outputs[0])[prompt_length + 1 :] print(generated)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModelForCausalLM, AutoTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForCausalLM.from_pretrained(<span class="hljs-string">&quot;xlnet-base-cased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;xlnet-base-cased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Padding text helps XLNet with short prompts - proposed by Aman Rusia in https://github.com/rusiaaman/XLNet-gen#methodology</span> <span class="hljs-meta">&gt;&gt;&gt; </span>PADDING_TEXT = <span class="hljs-string">&quot;&quot;&quot;In 1991, the remains of Russian Tsar Nicholas II and his family <span class="hljs-meta">... </span>(except for Alexei and Maria) are discovered. <span class="hljs-meta">... 
</span>The voice of Nicholas&#x27;s young son, Tsarevich Alexei Nikolaevich, narrates the <span class="hljs-meta">... </span>remainder of the story. 1883 Western Siberia, <span class="hljs-meta">... </span>a young Grigori Rasputin is asked by his father and a group of men to perform magic. <span class="hljs-meta">... </span>Rasputin has a vision and denounces one of the men as a horse thief. Although his <span class="hljs-meta">... </span>father initially slaps him for making such an accusation, Rasputin watches as the <span class="hljs-meta">... </span>man is chased outside and beaten. Twenty years later, Rasputin sees a vision of <span class="hljs-meta">... </span>the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, <span class="hljs-meta">... </span>with people, even a bishop, begging for his blessing. &lt;eod&gt; &lt;/s&gt; &lt;eos&gt;&quot;&quot;&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;Today the weather is really nice and I am planning on &quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(PADDING_TEXT + prompt, add_special_tokens=<span class="hljs-literal">False</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>)[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>prompt_length = <span class="hljs-built_in">len</span>(tokenizer.decode(inputs[<span class="hljs-number">0</span>])) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model.generate(inputs, max_length=<span class="hljs-number">250</span>, do_sample=<span class="hljs-literal">True</span>, top_p=<span class="hljs-number">0.95</span>, top_k=<span class="hljs-number">60</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>generated = prompt + tokenizer.decode(outputs[<span class="hljs-number">0</span>])[prompt_length + <span class="hljs-number">1</span> :] <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(generated) Today the weather <span class="hljs-keyword">is</span> really nice <span class="hljs-keyword">and</span> I am planning ...`},group2:{id:"tf",code:`from transformers import TFAutoModelForCausalLM, AutoTokenizer model = TFAutoModelForCausalLM.from_pretrained("xlnet-base-cased") tokenizer = AutoTokenizer.from_pretrained("xlnet-base-cased") # Padding text helps XLNet with short prompts - proposed by Aman Rusia in https://github.com/rusiaaman/XLNet-gen#methodology PADDING_TEXT = """In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision and denounces one of the men as a horse thief. Although his father initially slaps him for making such an accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop, begging for his blessing. 
<eod> </s> <eos>""" prompt = "Today the weather is really nice and I am planning on " inputs = tokenizer(PADDING_TEXT + prompt, add_special_tokens=False, return_tensors="tf")["input_ids"] prompt_length = len(tokenizer.decode(inputs[0])) outputs = model.generate(inputs, max_length=250, do_sample=True, top_p=0.95, top_k=60) generated = prompt + tokenizer.decode(outputs[0])[prompt_length + 1 :] print(generated)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TFAutoModelForCausalLM, AutoTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModelForCausalLM.from_pretrained(<span class="hljs-string">&quot;xlnet-base-cased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;xlnet-base-cased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Padding text helps XLNet with short prompts - proposed by Aman Rusia in https://github.com/rusiaaman/XLNet-gen#methodology</span> <span class="hljs-meta">&gt;&gt;&gt; </span>PADDING_TEXT = <span class="hljs-string">&quot;&quot;&quot;In 1991, the remains of Russian Tsar Nicholas II and his family <span class="hljs-meta">... </span>(except for Alexei and Maria) are discovered. <span class="hljs-meta">... </span>The voice of Nicholas&#x27;s young son, Tsarevich Alexei Nikolaevich, narrates the <span class="hljs-meta">... </span>remainder of the story. 1883 Western Siberia, <span class="hljs-meta">... </span>a young Grigori Rasputin is asked by his father and a group of men to perform magic. <span class="hljs-meta">... </span>Rasputin has a vision and denounces one of the men as a horse thief. Although his <span class="hljs-meta">... </span>father initially slaps him for making such an accusation, Rasputin watches as the <span class="hljs-meta">... </span>man is chased outside and beaten. Twenty years later, Rasputin sees a vision of <span class="hljs-meta">... </span>the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, <span class="hljs-meta">... </span>with people, even a bishop, begging for his blessing. 
&lt;eod&gt; &lt;/s&gt; &lt;eos&gt;&quot;&quot;&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;Today the weather is really nice and I am planning on &quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(PADDING_TEXT + prompt, add_special_tokens=<span class="hljs-literal">False</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>)[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>prompt_length = <span class="hljs-built_in">len</span>(tokenizer.decode(inputs[<span class="hljs-number">0</span>])) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model.generate(inputs, max_length=<span class="hljs-number">250</span>, do_sample=<span class="hljs-literal">True</span>, top_p=<span class="hljs-number">0.95</span>, top_k=<span class="hljs-number">60</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>generated = prompt + tokenizer.decode(outputs[<span class="hljs-number">0</span>])[prompt_length + <span class="hljs-number">1</span> :] <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(generated) Today the weather <span class="hljs-keyword">is</span> really nice <span class="hljs-keyword">and</span> I am planning ...`}}}),ve=new es({}),Ae=new H({props:{code:`from transformers import pipeline ner_pipe = pipeline("ner") sequence = """Hugging Face Inc. is a company based in New York City. Its headquarters are in DUMBO, therefore very close to the Manhattan Bridge which is visible from the window."""`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> pipeline <span class="hljs-meta">&gt;&gt;&gt; </span>ner_pipe = pipeline(<span class="hljs-string">&quot;ner&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>sequence = <span class="hljs-string">&quot;&quot;&quot;Hugging Face Inc. is a company based in New York City. Its headquarters are in DUMBO, <span class="hljs-meta">... </span>therefore very close to the Manhattan Bridge which is visible from the window.&quot;&quot;&quot;</span>`}}),ze=new H({props:{code:`for entity in ner_pipe(sequence): print(entity)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">for</span> entity <span class="hljs-keyword">in</span> ner_pipe(sequence): <span class="hljs-meta">... 
</span> <span class="hljs-built_in">print</span>(entity) {<span class="hljs-string">&#x27;entity&#x27;</span>: <span class="hljs-string">&#x27;I-ORG&#x27;</span>, <span class="hljs-string">&#x27;score&#x27;</span>: <span class="hljs-number">0.9996</span>, <span class="hljs-string">&#x27;index&#x27;</span>: <span class="hljs-number">1</span>, <span class="hljs-string">&#x27;word&#x27;</span>: <span class="hljs-string">&#x27;Hu&#x27;</span>, <span class="hljs-string">&#x27;start&#x27;</span>: <span class="hljs-number">0</span>, <span class="hljs-string">&#x27;end&#x27;</span>: <span class="hljs-number">2</span>} {<span class="hljs-string">&#x27;entity&#x27;</span>: <span class="hljs-string">&#x27;I-ORG&#x27;</span>, <span class="hljs-string">&#x27;score&#x27;</span>: <span class="hljs-number">0.9910</span>, <span class="hljs-string">&#x27;index&#x27;</span>: <span class="hljs-number">2</span>, <span class="hljs-string">&#x27;word&#x27;</span>: <span class="hljs-string">&#x27;##gging&#x27;</span>, <span class="hljs-string">&#x27;start&#x27;</span>: <span class="hljs-number">2</span>, <span class="hljs-string">&#x27;end&#x27;</span>: <span class="hljs-number">7</span>} {<span class="hljs-string">&#x27;entity&#x27;</span>: <span class="hljs-string">&#x27;I-ORG&#x27;</span>, <span class="hljs-string">&#x27;score&#x27;</span>: <span class="hljs-number">0.9982</span>, <span class="hljs-string">&#x27;index&#x27;</span>: <span class="hljs-number">3</span>, <span class="hljs-string">&#x27;word&#x27;</span>: <span class="hljs-string">&#x27;Face&#x27;</span>, <span class="hljs-string">&#x27;start&#x27;</span>: <span class="hljs-number">8</span>, <span class="hljs-string">&#x27;end&#x27;</span>: <span class="hljs-number">12</span>} {<span class="hljs-string">&#x27;entity&#x27;</span>: <span class="hljs-string">&#x27;I-ORG&#x27;</span>, <span class="hljs-string">&#x27;score&#x27;</span>: <span class="hljs-number">0.9995</span>, <span class="hljs-string">&#x27;index&#x27;</span>: <span class="hljs-number">4</span>, <span class="hljs-string">&#x27;word&#x27;</span>: <span class="hljs-string">&#x27;Inc&#x27;</span>, <span class="hljs-string">&#x27;start&#x27;</span>: <span class="hljs-number">13</span>, <span class="hljs-string">&#x27;end&#x27;</span>: <span class="hljs-number">16</span>} {<span class="hljs-string">&#x27;entity&#x27;</span>: <span class="hljs-string">&#x27;I-LOC&#x27;</span>, <span class="hljs-string">&#x27;score&#x27;</span>: <span class="hljs-number">0.9994</span>, <span class="hljs-string">&#x27;index&#x27;</span>: <span class="hljs-number">11</span>, <span class="hljs-string">&#x27;word&#x27;</span>: <span class="hljs-string">&#x27;New&#x27;</span>, <span class="hljs-string">&#x27;start&#x27;</span>: <span class="hljs-number">40</span>, <span class="hljs-string">&#x27;end&#x27;</span>: <span class="hljs-number">43</span>} {<span class="hljs-string">&#x27;entity&#x27;</span>: <span class="hljs-string">&#x27;I-LOC&#x27;</span>, <span class="hljs-string">&#x27;score&#x27;</span>: <span class="hljs-number">0.9993</span>, <span class="hljs-string">&#x27;index&#x27;</span>: <span class="hljs-number">12</span>, <span class="hljs-string">&#x27;word&#x27;</span>: <span class="hljs-string">&#x27;York&#x27;</span>, <span class="hljs-string">&#x27;start&#x27;</span>: <span class="hljs-number">44</span>, <span class="hljs-string">&#x27;end&#x27;</span>: <span class="hljs-number">48</span>} {<span class="hljs-string">&#x27;entity&#x27;</span>: <span class="hljs-string">&#x27;I-LOC&#x27;</span>, <span 
class="hljs-string">&#x27;score&#x27;</span>: <span class="hljs-number">0.9994</span>, <span class="hljs-string">&#x27;index&#x27;</span>: <span class="hljs-number">13</span>, <span class="hljs-string">&#x27;word&#x27;</span>: <span class="hljs-string">&#x27;City&#x27;</span>, <span class="hljs-string">&#x27;start&#x27;</span>: <span class="hljs-number">49</span>, <span class="hljs-string">&#x27;end&#x27;</span>: <span class="hljs-number">53</span>} {<span class="hljs-string">&#x27;entity&#x27;</span>: <span class="hljs-string">&#x27;I-LOC&#x27;</span>, <span class="hljs-string">&#x27;score&#x27;</span>: <span class="hljs-number">0.9863</span>, <span class="hljs-string">&#x27;index&#x27;</span>: <span class="hljs-number">19</span>, <span class="hljs-string">&#x27;word&#x27;</span>: <span class="hljs-string">&#x27;D&#x27;</span>, <span class="hljs-string">&#x27;start&#x27;</span>: <span class="hljs-number">79</span>, <span class="hljs-string">&#x27;end&#x27;</span>: <span class="hljs-number">80</span>} {<span class="hljs-string">&#x27;entity&#x27;</span>: <span class="hljs-string">&#x27;I-LOC&#x27;</span>, <span class="hljs-string">&#x27;score&#x27;</span>: <span class="hljs-number">0.9514</span>, <span class="hljs-string">&#x27;index&#x27;</span>: <span class="hljs-number">20</span>, <span class="hljs-string">&#x27;word&#x27;</span>: <span class="hljs-string">&#x27;##UM&#x27;</span>, <span class="hljs-string">&#x27;start&#x27;</span>: <span class="hljs-number">80</span>, <span class="hljs-string">&#x27;end&#x27;</span>: <span class="hljs-number">82</span>} {<span class="hljs-string">&#x27;entity&#x27;</span>: <span class="hljs-string">&#x27;I-LOC&#x27;</span>, <span class="hljs-string">&#x27;score&#x27;</span>: <span class="hljs-number">0.9337</span>, <span class="hljs-string">&#x27;index&#x27;</span>: <span class="hljs-number">21</span>, <span class="hljs-string">&#x27;word&#x27;</span>: <span class="hljs-string">&#x27;##BO&#x27;</span>, <span class="hljs-string">&#x27;start&#x27;</span>: <span class="hljs-number">82</span>, <span class="hljs-string">&#x27;end&#x27;</span>: <span class="hljs-number">84</span>} {<span class="hljs-string">&#x27;entity&#x27;</span>: <span class="hljs-string">&#x27;I-LOC&#x27;</span>, <span class="hljs-string">&#x27;score&#x27;</span>: <span class="hljs-number">0.9762</span>, <span class="hljs-string">&#x27;index&#x27;</span>: <span class="hljs-number">28</span>, <span class="hljs-string">&#x27;word&#x27;</span>: <span class="hljs-string">&#x27;Manhattan&#x27;</span>, <span class="hljs-string">&#x27;start&#x27;</span>: <span class="hljs-number">114</span>, <span class="hljs-string">&#x27;end&#x27;</span>: <span class="hljs-number">123</span>} {<span class="hljs-string">&#x27;entity&#x27;</span>: <span class="hljs-string">&#x27;I-LOC&#x27;</span>, <span class="hljs-string">&#x27;score&#x27;</span>: <span class="hljs-number">0.9915</span>, <span class="hljs-string">&#x27;index&#x27;</span>: <span class="hljs-number">29</span>, <span class="hljs-string">&#x27;word&#x27;</span>: <span class="hljs-string">&#x27;Bridge&#x27;</span>, <span class="hljs-string">&#x27;start&#x27;</span>: <span class="hljs-number">124</span>, <span class="hljs-string">&#x27;end&#x27;</span>: <span class="hljs-number">130</span>}`}}),$e=new Hs({props:{group1:{id:"pt",code:`from transformers import AutoModelForTokenClassification, AutoTokenizer import torch model = AutoModelForTokenClassification.from_pretrained("dbmdz/bert-large-cased-finetuned-conll03-english") tokenizer = 
AutoTokenizer.from_pretrained("bert-base-cased") sequence = ( "Hugging Face Inc. is a company based in New York City. Its headquarters are in DUMBO, " "therefore very close to the Manhattan Bridge." ) inputs = tokenizer(sequence, return_tensors="pt") tokens = inputs.tokens() outputs = model(**inputs).logits predictions = torch.argmax(outputs, dim=2)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModelForTokenClassification, AutoTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForTokenClassification.from_pretrained(<span class="hljs-string">&quot;dbmdz/bert-large-cased-finetuned-conll03-english&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;bert-base-cased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>sequence = ( <span class="hljs-meta">... </span> <span class="hljs-string">&quot;Hugging Face Inc. is a company based in New York City. Its headquarters are in DUMBO, &quot;</span> <span class="hljs-meta">... </span> <span class="hljs-string">&quot;therefore very close to the Manhattan Bridge.&quot;</span> <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(sequence, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokens = inputs.tokens() <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs).logits <span class="hljs-meta">&gt;&gt;&gt; </span>predictions = torch.argmax(outputs, dim=<span class="hljs-number">2</span>)`},group2:{id:"tf",code:`from transformers import TFAutoModelForTokenClassification, AutoTokenizer import tensorflow as tf model = TFAutoModelForTokenClassification.from_pretrained("dbmdz/bert-large-cased-finetuned-conll03-english") tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") sequence = ( "Hugging Face Inc. is a company based in New York City. Its headquarters are in DUMBO, " "therefore very close to the Manhattan Bridge." ) inputs = tokenizer(sequence, return_tensors="tf") tokens = inputs.tokens() outputs = model(**inputs)[0] predictions = tf.argmax(outputs, axis=2)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TFAutoModelForTokenClassification, AutoTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModelForTokenClassification.from_pretrained(<span class="hljs-string">&quot;dbmdz/bert-large-cased-finetuned-conll03-english&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;bert-base-cased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>sequence = ( <span class="hljs-meta">... </span> <span class="hljs-string">&quot;Hugging Face Inc. is a company based in New York City. Its headquarters are in DUMBO, &quot;</span> <span class="hljs-meta">... </span> <span class="hljs-string">&quot;therefore very close to the Manhattan Bridge.&quot;</span> <span class="hljs-meta">... 
</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(sequence, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokens = inputs.tokens() <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs)[<span class="hljs-number">0</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>predictions = tf.argmax(outputs, axis=<span class="hljs-number">2</span>)`}}}),Ie=new H({props:{code:`for token, prediction in zip(tokens, predictions[0].numpy()): print((token, model.config.id2label[prediction]))`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">for</span> token, prediction <span class="hljs-keyword">in</span> <span class="hljs-built_in">zip</span>(tokens, predictions[<span class="hljs-number">0</span>].numpy()): <span class="hljs-meta">... </span> <span class="hljs-built_in">print</span>((token, model.config.id2label[prediction])) (<span class="hljs-string">&#x27;[CLS]&#x27;</span>, <span class="hljs-string">&#x27;O&#x27;</span>) (<span class="hljs-string">&#x27;Hu&#x27;</span>, <span class="hljs-string">&#x27;I-ORG&#x27;</span>) (<span class="hljs-string">&#x27;##gging&#x27;</span>, <span class="hljs-string">&#x27;I-ORG&#x27;</span>) (<span class="hljs-string">&#x27;Face&#x27;</span>, <span class="hljs-string">&#x27;I-ORG&#x27;</span>) (<span class="hljs-string">&#x27;Inc&#x27;</span>, <span class="hljs-string">&#x27;I-ORG&#x27;</span>) (<span class="hljs-string">&#x27;.&#x27;</span>, <span class="hljs-string">&#x27;O&#x27;</span>) (<span class="hljs-string">&#x27;is&#x27;</span>, <span class="hljs-string">&#x27;O&#x27;</span>) (<span class="hljs-string">&#x27;a&#x27;</span>, <span class="hljs-string">&#x27;O&#x27;</span>) (<span class="hljs-string">&#x27;company&#x27;</span>, <span class="hljs-string">&#x27;O&#x27;</span>) (<span class="hljs-string">&#x27;based&#x27;</span>, <span class="hljs-string">&#x27;O&#x27;</span>) (<span class="hljs-string">&#x27;in&#x27;</span>, <span class="hljs-string">&#x27;O&#x27;</span>) (<span class="hljs-string">&#x27;New&#x27;</span>, <span class="hljs-string">&#x27;I-LOC&#x27;</span>) (<span class="hljs-string">&#x27;York&#x27;</span>, <span class="hljs-string">&#x27;I-LOC&#x27;</span>) (<span class="hljs-string">&#x27;City&#x27;</span>, <span class="hljs-string">&#x27;I-LOC&#x27;</span>) (<span class="hljs-string">&#x27;.&#x27;</span>, <span class="hljs-string">&#x27;O&#x27;</span>) (<span class="hljs-string">&#x27;Its&#x27;</span>, <span class="hljs-string">&#x27;O&#x27;</span>) (<span class="hljs-string">&#x27;headquarters&#x27;</span>, <span class="hljs-string">&#x27;O&#x27;</span>) (<span class="hljs-string">&#x27;are&#x27;</span>, <span class="hljs-string">&#x27;O&#x27;</span>) (<span class="hljs-string">&#x27;in&#x27;</span>, <span class="hljs-string">&#x27;O&#x27;</span>) (<span class="hljs-string">&#x27;D&#x27;</span>, <span class="hljs-string">&#x27;I-LOC&#x27;</span>) (<span class="hljs-string">&#x27;##UM&#x27;</span>, <span class="hljs-string">&#x27;I-LOC&#x27;</span>) (<span class="hljs-string">&#x27;##BO&#x27;</span>, <span class="hljs-string">&#x27;I-LOC&#x27;</span>) (<span class="hljs-string">&#x27;,&#x27;</span>, <span class="hljs-string">&#x27;O&#x27;</span>) (<span class="hljs-string">&#x27;therefore&#x27;</span>, <span class="hljs-string">&#x27;O&#x27;</span>) (<span class="hljs-string">&#x27;very&#x27;</span>, <span class="hljs-string">&#x27;O&#x27;</span>) (<span class="hljs-string">&#x27;close&#x27;</span>, <span 
class="hljs-string">&#x27;O&#x27;</span>) (<span class="hljs-string">&#x27;to&#x27;</span>, <span class="hljs-string">&#x27;O&#x27;</span>) (<span class="hljs-string">&#x27;the&#x27;</span>, <span class="hljs-string">&#x27;O&#x27;</span>) (<span class="hljs-string">&#x27;Manhattan&#x27;</span>, <span class="hljs-string">&#x27;I-LOC&#x27;</span>) (<span class="hljs-string">&#x27;Bridge&#x27;</span>, <span class="hljs-string">&#x27;I-LOC&#x27;</span>) (<span class="hljs-string">&#x27;.&#x27;</span>, <span class="hljs-string">&#x27;O&#x27;</span>) (<span class="hljs-string">&#x27;[SEP]&#x27;</span>, <span class="hljs-string">&#x27;O&#x27;</span>)`}}),Me=new es({}),Fe=new H({props:{code:`from transformers import pipeline summarizer = pipeline("summarization") ARTICLE = """ New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County, New York. A year later, she got married again in Westchester County, but to a different man and without divorcing her first husband. Only 18 days after that marriage, she got hitched yet again. Then, Barrientos declared "I do" five more times, sometimes only within two weeks of each other. In 2010, she married once more, this time in the Bronx. In an application for a marriage license, she stated it was her "first and only" marriage. Barrientos, now 39, is facing two criminal counts of "offering a false instrument for filing in the first degree," referring to her false statements on the 2010 marriage license application, according to court documents. Prosecutors said the marriages were part of an immigration scam. On Friday, she pleaded not guilty at State Supreme Court in the Bronx, according to her attorney, Christopher Wright, who declined to comment further. After leaving court, Barrientos was arrested and charged with theft of service and criminal trespass for allegedly sneaking into the New York subway through an emergency exit, said Detective Annette Markowski, a police spokeswoman. In total, Barrientos has been married 10 times, with nine of her marriages occurring between 1999 and 2002. All occurred either in Westchester County, Long Island, New Jersey or the Bronx. She is believed to still be married to four men, and at one time, she was married to eight men at once, prosecutors say. Prosecutors said the immigration scam involved some of her husbands, who filed for permanent residence status shortly after the marriages. Any divorces happened only after such filings were approved. It was unclear whether any of the men will be prosecuted. The case was referred to the Bronx District Attorney\\'s Office by Immigration and Customs Enforcement and the Department of Homeland Security\\'s Investigation Division. Seven of the men are from so-called "red-flagged" countries, including Egypt, Turkey, Georgia, Pakistan and Mali. Her eighth husband, Rashid Rajput, was deported in 2006 to his native Pakistan after an investigation by the Joint Terrorism Task Force. If convicted, Barrientos faces up to four years in prison. Her next court appearance is scheduled for May 18. 
"""`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> pipeline <span class="hljs-meta">&gt;&gt;&gt; </span>summarizer = pipeline(<span class="hljs-string">&quot;summarization&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>ARTICLE = <span class="hljs-string">&quot;&quot;&quot; New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County, New York. <span class="hljs-meta">... </span>A year later, she got married again in Westchester County, but to a different man and without divorcing her first husband. <span class="hljs-meta">... </span>Only 18 days after that marriage, she got hitched yet again. Then, Barrientos declared &quot;I do&quot; five more times, sometimes only within two weeks of each other. <span class="hljs-meta">... </span>In 2010, she married once more, this time in the Bronx. In an application for a marriage license, she stated it was her &quot;first and only&quot; marriage. <span class="hljs-meta">... </span>Barrientos, now 39, is facing two criminal counts of &quot;offering a false instrument for filing in the first degree,&quot; referring to her false statements on the <span class="hljs-meta">... </span>2010 marriage license application, according to court documents. <span class="hljs-meta">... </span>Prosecutors said the marriages were part of an immigration scam. <span class="hljs-meta">... </span>On Friday, she pleaded not guilty at State Supreme Court in the Bronx, according to her attorney, Christopher Wright, who declined to comment further. <span class="hljs-meta">... </span>After leaving court, Barrientos was arrested and charged with theft of service and criminal trespass for allegedly sneaking into the New York subway through an emergency exit, said Detective <span class="hljs-meta">... </span>Annette Markowski, a police spokeswoman. In total, Barrientos has been married 10 times, with nine of her marriages occurring between 1999 and 2002. <span class="hljs-meta">... </span>All occurred either in Westchester County, Long Island, New Jersey or the Bronx. She is believed to still be married to four men, and at one time, she was married to eight men at once, prosecutors say. <span class="hljs-meta">... </span>Prosecutors said the immigration scam involved some of her husbands, who filed for permanent residence status shortly after the marriages. <span class="hljs-meta">... </span>Any divorces happened only after such filings were approved. It was unclear whether any of the men will be prosecuted. <span class="hljs-meta">... </span>The case was referred to the Bronx District Attorney\\&#x27;s Office by Immigration and Customs Enforcement and the Department of Homeland Security\\&#x27;s <span class="hljs-meta">... </span>Investigation Division. Seven of the men are from so-called &quot;red-flagged&quot; countries, including Egypt, Turkey, Georgia, Pakistan and Mali. <span class="hljs-meta">... </span>Her eighth husband, Rashid Rajput, was deported in 2006 to his native Pakistan after an investigation by the Joint Terrorism Task Force. <span class="hljs-meta">... </span>If convicted, Barrientos faces up to four years in prison. Her next court appearance is scheduled for May 18. <span class="hljs-meta">... 
</span>&quot;&quot;&quot;</span>`}}),Ne=new H({props:{code:"print(summarizer(ARTICLE, max_length=130, min_length=30, do_sample=False))",highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(summarizer(ARTICLE, max_length=<span class="hljs-number">130</span>, min_length=<span class="hljs-number">30</span>, do_sample=<span class="hljs-literal">False</span>)) [{<span class="hljs-string">&#x27;summary_text&#x27;</span>: <span class="hljs-string">&#x27; Liana Barrientos, 39, is charged with two counts of &quot;offering a false instrument for filing in the first degree&quot; In total, she has been married 10 times, with nine of her marriages occurring between 1999 and 2002 . At one time, she was married to eight men at once, prosecutors say .&#x27;</span>}]`}}),Se=new Hs({props:{group1:{id:"pt",code:`from transformers import AutoModelForSeq2SeqLM, AutoTokenizer model = AutoModelForSeq2SeqLM.from_pretrained("t5-base") tokenizer = AutoTokenizer.from_pretrained("t5-base") # T5 uses a max_length of 512 so we cut the article to 512 tokens. inputs = tokenizer("summarize: " + ARTICLE, return_tensors="pt", max_length=512, truncation=True) outputs = model.generate( inputs["input_ids"], max_length=150, min_length=40, length_penalty=2.0, num_beams=4, early_stopping=True ) print(tokenizer.decode(outputs[0]))`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModelForSeq2SeqLM, AutoTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForSeq2SeqLM.from_pretrained(<span class="hljs-string">&quot;t5-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;t5-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># T5 uses a max_length of 512 so we cut the article to 512 tokens.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;summarize: &quot;</span> + ARTICLE, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>, max_length=<span class="hljs-number">512</span>, truncation=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model.generate( <span class="hljs-meta">... </span> inputs[<span class="hljs-string">&quot;input_ids&quot;</span>], max_length=<span class="hljs-number">150</span>, min_length=<span class="hljs-number">40</span>, length_penalty=<span class="hljs-number">2.0</span>, num_beams=<span class="hljs-number">4</span>, early_stopping=<span class="hljs-literal">True</span> <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(tokenizer.decode(outputs[<span class="hljs-number">0</span>])) &lt;pad&gt; prosecutors say the marriages were part of an immigration scam. 
<span class="hljs-keyword">if</span> convicted, barrientos faces two criminal counts of <span class="hljs-string">&quot;offering a false instrument for filing in the first degree&quot;</span> she has been married <span class="hljs-number">10</span> times, nine of them between <span class="hljs-number">1999</span> <span class="hljs-keyword">and</span> <span class="hljs-number">2002.</span>&lt;/s&gt;`},group2:{id:"tf",code:`from transformers import TFAutoModelForSeq2SeqLM, AutoTokenizer model = TFAutoModelForSeq2SeqLM.from_pretrained("t5-base") tokenizer = AutoTokenizer.from_pretrained("t5-base") # T5 uses a max_length of 512 so we cut the article to 512 tokens. inputs = tokenizer("summarize: " + ARTICLE, return_tensors="tf", max_length=512) outputs = model.generate( inputs["input_ids"], max_length=150, min_length=40, length_penalty=2.0, num_beams=4, early_stopping=True ) print(tokenizer.decode(outputs[0]))`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TFAutoModelForSeq2SeqLM, AutoTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModelForSeq2SeqLM.from_pretrained(<span class="hljs-string">&quot;t5-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;t5-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># T5 uses a max_length of 512 so we cut the article to 512 tokens.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;summarize: &quot;</span> + ARTICLE, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>, max_length=<span class="hljs-number">512</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model.generate( <span class="hljs-meta">... </span> inputs[<span class="hljs-string">&quot;input_ids&quot;</span>], max_length=<span class="hljs-number">150</span>, min_length=<span class="hljs-number">40</span>, length_penalty=<span class="hljs-number">2.0</span>, num_beams=<span class="hljs-number">4</span>, early_stopping=<span class="hljs-literal">True</span> <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(tokenizer.decode(outputs[<span class="hljs-number">0</span>])) &lt;pad&gt; prosecutors say the marriages were part of an immigration scam. 
<span class="hljs-keyword">if</span> convicted, barrientos faces two criminal counts of <span class="hljs-string">&quot;offering a false instrument for filing in the first degree&quot;</span> she has been married <span class="hljs-number">10</span> times, nine of them between <span class="hljs-number">1999</span> <span class="hljs-keyword">and</span> <span class="hljs-number">2002.</span>`}}}),De=new es({}),Re=new H({props:{code:`from transformers import pipeline translator = pipeline("translation_en_to_de") print(translator("Hugging Face is a technology company based in New York and Paris", max_length=40))`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> pipeline <span class="hljs-meta">&gt;&gt;&gt; </span>translator = pipeline(<span class="hljs-string">&quot;translation_en_to_de&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(translator(<span class="hljs-string">&quot;Hugging Face is a technology company based in New York and Paris&quot;</span>, max_length=<span class="hljs-number">40</span>)) [{<span class="hljs-string">&#x27;translation_text&#x27;</span>: <span class="hljs-string">&#x27;Hugging Face ist ein Technologieunternehmen mit Sitz in New York und Paris.&#x27;</span>}]`}}),Ge=new Hs({props:{group1:{id:"pt",code:`from transformers import AutoModelForSeq2SeqLM, AutoTokenizer model = AutoModelForSeq2SeqLM.from_pretrained("t5-base") tokenizer = AutoTokenizer.from_pretrained("t5-base") inputs = tokenizer( "translate English to German: Hugging Face is a technology company based in New York and Paris", return_tensors="pt", ) outputs = model.generate(inputs["input_ids"], max_length=40, num_beams=4, early_stopping=True) print(tokenizer.decode(outputs[0]))`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModelForSeq2SeqLM, AutoTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForSeq2SeqLM.from_pretrained(<span class="hljs-string">&quot;t5-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;t5-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer( <span class="hljs-meta">... </span> <span class="hljs-string">&quot;translate English to German: Hugging Face is a technology company based in New York and Paris&quot;</span>, <span class="hljs-meta">... </span> return_tensors=<span class="hljs-string">&quot;pt&quot;</span>, <span class="hljs-meta">... 
</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model.generate(inputs[<span class="hljs-string">&quot;input_ids&quot;</span>], max_length=<span class="hljs-number">40</span>, num_beams=<span class="hljs-number">4</span>, early_stopping=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(tokenizer.decode(outputs[<span class="hljs-number">0</span>])) &lt;pad&gt; Hugging Face ist ein Technologieunternehmen mit Sitz <span class="hljs-keyword">in</span> New York und Paris.&lt;/s&gt;`},group2:{id:"tf",code:`from transformers import TFAutoModelForSeq2SeqLM, AutoTokenizer model = TFAutoModelForSeq2SeqLM.from_pretrained("t5-base") tokenizer = AutoTokenizer.from_pretrained("t5-base") inputs = tokenizer( "translate English to German: Hugging Face is a technology company based in New York and Paris", return_tensors="tf", ) outputs = model.generate(inputs["input_ids"], max_length=40, num_beams=4, early_stopping=True) print(tokenizer.decode(outputs[0]))`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TFAutoModelForSeq2SeqLM, AutoTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModelForSeq2SeqLM.from_pretrained(<span class="hljs-string">&quot;t5-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;t5-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer( <span class="hljs-meta">... </span> <span class="hljs-string">&quot;translate English to German: Hugging Face is a technology company based in New York and Paris&quot;</span>, <span class="hljs-meta">... </span> return_tensors=<span class="hljs-string">&quot;tf&quot;</span>, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model.generate(inputs[<span class="hljs-string">&quot;input_ids&quot;</span>], max_length=<span class="hljs-number">40</span>, num_beams=<span class="hljs-number">4</span>, early_stopping=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(tokenizer.decode(outputs[<span class="hljs-number">0</span>])) &lt;pad&gt; Hugging Face ist ein Technologieunternehmen mit Sitz <span class="hljs-keyword">in</span> New York und Paris.`}}}),{c(){T=l("meta"),G=h(),E=l("h1"),b=l("a"),as=l("span"),m(y.$$.fragment),U=h(),fs=l("span"),Mo=t("Summary of the tasks"),yn=h(),m(Gs.$$.fragment),bn=h(),Qe=l("p"),Lo=t(`This page shows the most frequent use-cases when using the library. The models available allow for many different configurations and a great versatility in use-cases. The most simple ones are presented here, showcasing usage for tasks such as question answering, sequence classification, named entity recognition and others.`),vn=h(),js=l("p"),Po=t(`These examples leverage auto-models, which are classes that will instantiate a model according to a given checkpoint, automatically selecting the correct model architecture. Please check the `),Ye=l("a"),Fo=t("AutoModel"),No=t(` documentation for more information. Feel free to modify the code to be more specific and adapt it to your specific use-case.`),qn=h(),We=l("p"),Co=t(`In order for a model to perform well on a task, it must be loaded from a checkpoint corresponding to that task. 
These checkpoints are usually pre-trained on a large corpus of data and fine-tuned on a specific task. This means the following:`),Tn=h(),_s=l("ul"),ts=l("li"),So=t(`Not all models were fine-tuned on all tasks. If you want to fine-tune a model on a specific task, you can leverage one of the `),La=l("em"),Do=t("run_$TASK.py"),Oo=t(" scripts in the "),Us=l("a"),Bo=t("examples"),Ro=t(" directory."),Ho=h(),Qs=l("li"),Go=t(`Fine-tuned models were fine-tuned on a specific dataset. This dataset may or may not overlap with your use-case and domain. As mentioned previously, you may leverage the `),Ys=l("a"),Uo=t("examples"),Qo=t(` scripts to fine-tune your model, or you may create your own training script.`),En=h(),Xe=l("p"),Yo=t("In order to do an inference on a task, several mechanisms are made available by the library:"),An=h(),ks=l("ul"),Pa=l("li"),Wo=t("Pipelines: very easy-to-use abstractions, which require as little as two lines of code."),Xo=h(),Fa=l("li"),Vo=t(`Direct model use: Less abstractions, but more flexibility and power via a direct access to a tokenizer (PyTorch/TensorFlow) and full inference capacity.`),zn=h(),Ve=l("p"),Jo=t("Both approaches are showcased here."),$n=h(),m(ws.$$.fragment),In=h(),ns=l("h2"),xs=l("a"),Na=l("span"),m(Ws.$$.fragment),Ko=h(),Ca=l("span"),Zo=t("Sequence Classification"),Mn=h(),M=l("p"),sr=t(`Sequence classification is the task of classifying sequences according to a given number of classes. An example of sequence classification is the GLUE dataset, which is entirely based on that task. If you would like to fine-tune a model on a GLUE sequence classification task, you may leverage the `),Xs=l("a"),er=t("run_glue.py"),ar=t(", "),Vs=l("a"),tr=t("run_tf_glue.py"),nr=t(", "),Js=l("a"),lr=t("run_tf_text_classification.py"),or=t(" or "),Ks=l("a"),rr=t("run_xnli.py"),ir=t(" scripts."),Ln=h(),Je=l("p"),pr=t(`Here is an example of using pipelines to do sentiment analysis: identifying if a sequence is positive or negative. It leverages a fine-tuned model on sst2, which is a GLUE task.`),Pn=h(),Ke=l("p"),hr=t("This returns a label (\u201CPOSITIVE\u201D or \u201CNEGATIVE\u201D) alongside a score, as follows:"),Fn=h(),m(Zs.$$.fragment),Nn=h(),Ze=l("p"),cr=t(`Here is an example of doing a sequence classification using a model to determine if two sequences are paraphrases of each other. The process is the following:`),Cn=h(),L=l("ol"),Sa=l("li"),ur=t(`Instantiate a tokenizer and a model from the checkpoint name. The model is identified as a BERT model and loads it with the weights stored in the checkpoint.`),mr=h(),Da=l("li"),gr=t(`Build a sequence from the two sentences, with the correct model-specific separators, token type ids and attention masks (which will be created automatically by the tokenizer).`),dr=h(),Oa=l("li"),fr=t(`Pass this sequence through the model so that it is classified in one of the two available classes: 0 (not a paraphrase) and 1 (is a paraphrase).`),jr=h(),Ba=l("li"),_r=t("Compute the softmax of the result to get probabilities over the classes."),kr=h(),Ra=l("li"),wr=t("Print the results."),Sn=h(),m(se.$$.fragment),Dn=h(),ls=l("h2"),ys=l("a"),Ha=l("span"),m(ee.$$.fragment),xr=h(),Ga=l("span"),yr=t("Extractive Question Answering"),On=h(),Q=l("p"),br=t(`Extractive Question Answering is the task of extracting an answer from a text given a question. An example of a question answering dataset is the SQuAD dataset, which is entirely based on that task. 
If you would like to fine-tune a model on a SQuAD task, you may leverage the `),ae=l("a"),vr=t("run_qa.py"),qr=t(` and `),te=l("a"),Tr=t("run_tf_squad.py"),Er=t(` scripts.`),Bn=h(),sa=l("p"),Ar=t(`Here is an example of using pipelines to do question answering: extracting an answer from a text given a question. It leverages a fine-tuned model on SQuAD.`),Rn=h(),m(ne.$$.fragment),Hn=h(),ea=l("p"),zr=t(`This returns an answer extracted from the text, a confidence score, alongside \u201Cstart\u201D and \u201Cend\u201D values, which are the positions of the extracted answer in the text.`),Gn=h(),m(le.$$.fragment),Un=h(),aa=l("p"),$r=t("Here is an example of question answering using a model and a tokenizer. The process is the following:"),Qn=h(),v=l("ol"),Ua=l("li"),Ir=t(`Instantiate a tokenizer and a model from the checkpoint name. The model is identified as a BERT model and loads it with the weights stored in the checkpoint.`),Mr=h(),Qa=l("li"),Lr=t("Define a text and a few questions."),Pr=h(),Ya=l("li"),Fr=t(`Iterate over the questions and build a sequence from the text and the current question, with the correct model-specific separators token type ids and attention masks.`),Nr=h(),Wa=l("li"),Cr=t(`Pass this sequence through the model. This outputs a range of scores across the entire sequence tokens (question and text), for both the start and end positions.`),Sr=h(),Xa=l("li"),Dr=t("Compute the softmax of the result to get probabilities over the tokens."),Or=h(),Va=l("li"),Br=t("Fetch the tokens from the identified start and stop values, convert those tokens to a string."),Rr=h(),Ja=l("li"),Hr=t("Print the results."),Yn=h(),m(oe.$$.fragment),Wn=h(),os=l("h2"),bs=l("a"),Ka=l("span"),m(re.$$.fragment),Gr=h(),Za=l("span"),Ur=t("Language Modeling"),Xn=h(),ta=l("p"),Qr=t(`Language modeling is the task of fitting a model to a corpus, which can be domain specific. All popular transformer-based models are trained using a variant of language modeling, e.g. BERT with masked language modeling, GPT-2 with causal language modeling.`),Vn=h(),vs=l("p"),Yr=t(`Language modeling can be useful outside of pretraining as well, for example to shift the model distribution to be domain-specific: using a language model trained over a very large corpus, and then fine-tuning it to a news dataset or on scientific papers e.g. `),ie=l("a"),Wr=t("LysandreJik/arxiv-nlp"),Xr=t("."),Jn=h(),rs=l("h3"),qs=l("a"),st=l("span"),m(pe.$$.fragment),Vr=h(),et=l("span"),Jr=t("Masked Language Modeling"),Kn=h(),Y=l("p"),Kr=t(`Masked language modeling is the task of masking tokens in a sequence with a masking token, and prompting the model to fill that mask with an appropriate token. This allows the model to attend to both the right context (tokens on the right of the mask) and the left context (tokens on the left of the mask). Such a training creates a strong basis for downstream tasks requiring bi-directional context, such as SQuAD (question answering, see `),he=l("a"),Zr=t("Lewis, Lui, Goyal et al."),si=t(`, part 4.2). 
If you would like to fine-tune a model on a masked language modeling task, you may leverage the `),ce=l("a"),ei=t("run_mlm.py"),ai=t(" script."),Zn=h(),na=l("p"),ti=t("Here is an example of using pipelines to replace a mask from a sequence:"),sl=h(),m(ue.$$.fragment),el=h(),la=l("p"),ni=t("This outputs the sequences with the mask filled, the confidence score, and the token id in the tokenizer vocabulary:"),al=h(),m(me.$$.fragment),tl=h(),oa=l("p"),li=t("Here is an example of doing masked language modeling using a model and a tokenizer. The process is the following:"),nl=h(),A=l("ol"),at=l("li"),oi=t(`Instantiate a tokenizer and a model from the checkpoint name. The model is identified as a DistilBERT model and loads it with the weights stored in the checkpoint.`),ri=h(),ge=l("li"),ii=t("Define a sequence with a masked token, placing the "),tt=l("code"),pi=t("tokenizer.mask_token"),hi=t(" instead of a word."),ci=h(),nt=l("li"),ui=t("Encode that sequence into a list of IDs and find the position of the masked token in that list."),mi=h(),lt=l("li"),gi=t(`Retrieve the predictions at the index of the mask token: this tensor has the same size as the vocabulary, and the values are the scores attributed to each token. The model gives higher score to tokens it deems probable in that context.`),di=h(),is=l("li"),fi=t("Retrieve the top 5 tokens using the PyTorch "),ot=l("code"),ji=t("topk"),_i=t(" or TensorFlow "),rt=l("code"),ki=t("top_k"),wi=t(" methods."),xi=h(),it=l("li"),yi=t("Replace the mask token by the tokens and print the results"),ll=h(),m(de.$$.fragment),ol=h(),ra=l("p"),bi=t("This prints five sequences, with the top 5 tokens predicted by the model."),rl=h(),ps=l("h3"),Ts=l("a"),pt=l("span"),m(fe.$$.fragment),vi=h(),ht=l("span"),qi=t("Causal Language Modeling"),il=h(),Es=l("p"),Ti=t(`Causal language modeling is the task of predicting the token following a sequence of tokens. In this situation, the model only attends to the left context (tokens on the left of the mask). Such a training is particularly interesting for generation tasks. If you would like to fine-tune a model on a causal language modeling task, you may leverage the `),je=l("a"),Ei=t("run_clm.py"),Ai=t(" script."),pl=h(),ia=l("p"),zi=t(`Usually, the next token is predicted by sampling from the logits of the last hidden state the model produces from the input sequence.`),hl=h(),As=l("p"),$i=t(`Here is an example of using the tokenizer and model and leveraging the `),ct=l("code"),Ii=t("PreTrainedModel.top_k_top_p_filtering"),Mi=t(` method to sample the next token following an input sequence of tokens.`),cl=h(),m(_e.$$.fragment),ul=h(),W=l("p"),Li=t("This outputs a (hopefully) coherent next token following the original sequence, which in our case is the word "),ut=l("em"),Pi=t("is"),Fi=t(` or `),mt=l("em"),Ni=t("features"),Ci=t("."),ml=h(),zs=l("p"),Si=t("In the next section, we show how "),pa=l("a"),Di=t("generation_utils.GenerationMixin.generate()"),Oi=t(` can be used to generate multiple tokens up to a specified length instead of one token at a time.`),gl=h(),hs=l("h3"),$s=l("a"),gt=l("span"),m(ke.$$.fragment),Bi=h(),dt=l("span"),Ri=t("Text Generation"),dl=h(),z=l("p"),Hi=t("In text generation ("),ft=l("em"),Gi=t("a.k.a"),Ui=h(),jt=l("em"),Qi=t("open-ended text generation"),Yi=t(`) the goal is to create a coherent portion of text that is a continuation from the given context. The following example shows how `),_t=l("em"),Wi=t("GPT-2"),Xi=t(` can be used in pipelines to generate text. 
As a default all models apply `),kt=l("em"),Vi=t("Top-K"),Ji=t(` sampling when used in pipelines, as configured in their respective configurations (see `),we=l("a"),Ki=t("gpt-2 config"),Zi=t(" for example)."),fl=h(),m(xe.$$.fragment),jl=h(),$=l("p"),sp=t("Here, the model generates a random text with a total maximal length of "),wt=l("em"),ep=t("50"),ap=t(" tokens from context "),xt=l("em"),tp=t(`\u201CAs far as I am concerned, I will\u201D`),np=t(`. Behind the scenes, the pipeline object calls the method `),ha=l("a"),lp=t("PreTrainedModel.generate()"),op=t(` to generate text. The default arguments for this method can be overridden in the pipeline, as is shown above for the arguments `),yt=l("code"),rp=t("max_length"),ip=t(" and "),bt=l("code"),pp=t("do_sample"),hp=t("."),_l=h(),X=l("p"),cp=t("Below is an example of text generation using "),vt=l("code"),up=t("XLNet"),mp=t(" and its tokenizer, which includes calling "),qt=l("code"),gp=t("generate()"),dp=t(" directly:"),kl=h(),m(ye.$$.fragment),wl=h(),k=l("p"),fp=t("Text generation is currently possible with "),Tt=l("em"),jp=t("GPT-2"),_p=t(", "),Et=l("em"),kp=t("OpenAi-GPT"),wp=t(", "),At=l("em"),xp=t("CTRL"),yp=t(", "),zt=l("em"),bp=t("XLNet"),vp=t(", "),$t=l("em"),qp=t("Transfo-XL"),Tp=t(" and "),It=l("em"),Ep=t("Reformer"),Ap=t(` in PyTorch and for most models in Tensorflow as well. As can be seen in the example above `),Mt=l("em"),zp=t("XLNet"),$p=t(" and "),Lt=l("em"),Ip=t("Transfo-XL"),Mp=t(` often need to be padded to work well. GPT-2 is usually a good choice for `),Pt=l("em"),Lp=t("open-ended text generation"),Pp=t(` because it was trained on millions of webpages with a causal language modeling objective.`),xl=h(),Is=l("p"),Fp=t(`For more information on how to apply different decoding strategies for text generation, please also refer to our text generation blog post `),be=l("a"),Np=t("here"),Cp=t("."),yl=h(),cs=l("h2"),Ms=l("a"),Ft=l("span"),m(ve.$$.fragment),Sp=h(),Nt=l("span"),Dp=t("Named Entity Recognition"),bl=h(),Ls=l("p"),Op=t(`Named Entity Recognition (NER) is the task of classifying tokens according to a class, for example, identifying a token as a person, an organisation or a location. An example of a named entity recognition dataset is the CoNLL-2003 dataset, which is entirely based on that task. 
If you would like to fine-tune a model on an NER task, you may leverage the `),qe=l("a"),Bp=t("run_ner.py"),Rp=t(" script."),vl=h(),ca=l("p"),Hp=t(`Here is an example of using pipelines to do named entity recognition, specifically, trying to identify tokens as belonging to one of 9 classes:`),ql=h(),w=l("ul"),Ct=l("li"),Gp=t("O, Outside of a named entity"),Up=h(),St=l("li"),Qp=t("B-MIS, Beginning of a miscellaneous entity right after another miscellaneous entity"),Yp=h(),Dt=l("li"),Wp=t("I-MIS, Miscellaneous entity"),Xp=h(),Ot=l("li"),Vp=t("B-PER, Beginning of a person\u2019s name right after another person\u2019s name"),Jp=h(),Bt=l("li"),Kp=t("I-PER, Person\u2019s name"),Zp=h(),Rt=l("li"),sh=t("B-ORG, Beginning of an organisation right after another organisation"),eh=h(),Ht=l("li"),ah=t("I-ORG, Organisation"),th=h(),Gt=l("li"),nh=t("B-LOC, Beginning of a location right after another location"),lh=h(),Ut=l("li"),oh=t("I-LOC, Location"),Tl=h(),V=l("p"),rh=t("It leverages a fine-tuned model on CoNLL-2003, fine-tuned by "),Te=l("a"),ih=t("@stefan-it"),ph=t(" from "),Ee=l("a"),hh=t("dbmdz"),ch=t("."),El=h(),m(Ae.$$.fragment),Al=h(),ua=l("p"),uh=t(`This outputs a list of all words that have been identified as one of the entities from the 9 classes defined above. Here are the expected results:`),zl=h(),m(ze.$$.fragment),$l=h(),ma=l("p"),mh=t(`Note how the tokens of the sequence \u201CHugging Face\u201D have been identified as an organisation, and \u201CNew York City\u201D, \u201CDUMBO\u201D and \u201CManhattan Bridge\u201D have been identified as locations.`),Il=h(),ga=l("p"),gh=t("Here is an example of doing named entity recognition, using a model and a tokenizer. The process is the following:"),Ml=h(),I=l("ol"),Qt=l("li"),dh=t(`Instantiate a tokenizer and a model from the checkpoint name. The model is identified as a BERT model and loads it with the weights stored in the checkpoint.`),fh=h(),Yt=l("li"),jh=t("Define a sequence with known entities, such as \u201CHugging Face\u201D as an organisation and \u201CNew York City\u201D as a location."),_h=h(),Wt=l("li"),kh=t(`Split words into tokens so that they can be mapped to predictions. We use a small hack by, first, completely encoding and decoding the sequence, so that we\u2019re left with a string that contains the special tokens.`),wh=h(),Xt=l("li"),xh=t("Encode that sequence into IDs (special tokens are added automatically)."),yh=h(),Vt=l("li"),bh=t(`Retrieve the predictions by passing the input to the model and getting the first output. This results in a distribution over the 9 possible classes for each token. We take the argmax to retrieve the most likely class for each token.`),vh=h(),Jt=l("li"),qh=t("Zip together each token with its prediction and print it."),Ll=h(),m($e.$$.fragment),Pl=h(),da=l("p"),Th=t(`This outputs a list of each token mapped to its corresponding prediction. Differently from the pipeline, here every token has a prediction as we didn\u2019t remove the \u201C0\u201Dth class, which means that no particular entity was found on that token.`),Fl=h(),J=l("p"),Eh=t("In the above example, "),Kt=l("code"),Ah=t("predictions"),zh=t(` is an integer that corresponds to the predicted class. 
We can use the `),Zt=l("code"),$h=t("model.config.id2label"),Ih=t(` property in order to recover the class name corresponding to the class number, which is illustrated below:`),Nl=h(),m(Ie.$$.fragment),Cl=h(),us=l("h2"),Ps=l("a"),sn=l("span"),m(Me.$$.fragment),Mh=h(),en=l("span"),Lh=t("Summarization"),Sl=h(),Fs=l("p"),Ph=t(`Summarization is the task of summarizing a document or an article into a shorter text. If you would like to fine-tune a model on a summarization task, you may leverage the `),Le=l("a"),Fh=t("run_summarization.py"),Nh=t(` script.`),Dl=h(),Ns=l("p"),Ch=t(`An example of a summarization dataset is the CNN / Daily Mail dataset, which consists of long news articles and was created for the task of summarization. If you would like to fine-tune a model on a summarization task, various approaches are described in this `),Pe=l("a"),Sh=t("document"),Dh=t("."),Ol=h(),fa=l("p"),Oh=t(`Here is an example of using the pipelines to do summarization. It leverages a Bart model that was fine-tuned on the CNN / Daily Mail data set.`),Bl=h(),m(Fe.$$.fragment),Rl=h(),P=l("p"),Bh=t("Because the summarization pipeline depends on the "),an=l("code"),Rh=t("PreTrainedModel.generate()"),Hh=t(` method, we can override the default arguments of `),tn=l("code"),Gh=t("PreTrainedModel.generate()"),Uh=t(" directly in the pipeline for "),nn=l("code"),Qh=t("max_length"),Yh=t(" and "),ln=l("code"),Wh=t("min_length"),Xh=t(` as shown below. This outputs the following summary:`),Hl=h(),m(Ne.$$.fragment),Gl=h(),ja=l("p"),Vh=t("Here is an example of doing summarization using a model and a tokenizer. The process is the following:"),Ul=h(),N=l("ol"),ms=l("li"),Jh=t(`Instantiate a tokenizer and a model from the checkpoint name. Summarization is usually done using an encoder-decoder model, such as `),on=l("code"),Kh=t("Bart"),Zh=t(" or "),rn=l("code"),sc=t("T5"),ec=t("."),ac=h(),pn=l("li"),tc=t("Define the article that should be summarized."),nc=h(),hn=l("li"),lc=t("Add the T5 specific prefix \u201Csummarize: \u201C."),oc=h(),Ce=l("li"),rc=t("Use the "),cn=l("code"),ic=t("PreTrainedModel.generate()"),pc=t(" method to generate the summary."),Ql=h(),_a=l("p"),hc=t(`In this example we use Google\u2019s T5 model. Even though it was pre-trained only on a multi-task mixed dataset (including CNN / Daily Mail), it yields very good results.`),Yl=h(),m(Se.$$.fragment),Wl=h(),gs=l("h2"),Cs=l("a"),un=l("span"),m(De.$$.fragment),cc=h(),mn=l("span"),uc=t("Translation"),Xl=h(),Ss=l("p"),mc=t(`Translation is the task of translating a text from one language to another. If you would like to fine-tune a model on a translation task, you may leverage the `),Oe=l("a"),gc=t("run_translation.py"),dc=t(" script."),Vl=h(),Ds=l("p"),fc=t(`An example of a translation dataset is the WMT English to German dataset, which has sentences in English as the input data and the corresponding sentences in German as the target data. If you would like to fine-tune a model on a translation task, various approaches are described in this `),Be=l("a"),jc=t("document"),_c=t("."),Jl=h(),ka=l("p"),kc=t(`Here is an example of using the pipelines to do translation. 
It leverages a T5 model that was only pre-trained on a multi-task mixture dataset (including WMT), yet, yielding impressive translation results.`),Kl=h(),m(Re.$$.fragment),Zl=h(),C=l("p"),wc=t("Because the translation pipeline depends on the "),gn=l("code"),xc=t("PreTrainedModel.generate()"),yc=t(` method, we can override the default arguments of `),dn=l("code"),bc=t("PreTrainedModel.generate()"),vc=t(" directly in the pipeline as is shown for "),fn=l("code"),qc=t("max_length"),Tc=t(" above."),so=h(),wa=l("p"),Ec=t("Here is an example of doing translation using a model and a tokenizer. The process is the following:"),eo=h(),S=l("ol"),ds=l("li"),Ac=t(`Instantiate a tokenizer and a model from the checkpoint name. Summarization is usually done using an encoder-decoder model, such as `),jn=l("code"),zc=t("Bart"),$c=t(" or "),_n=l("code"),Ic=t("T5"),Mc=t("."),Lc=h(),kn=l("li"),Pc=t("Define the article that should be summarized."),Fc=h(),wn=l("li"),Nc=t("Add the T5 specific prefix \u201Ctranslate English to German: \u201D"),Cc=h(),He=l("li"),Sc=t("Use the "),xn=l("code"),Dc=t("PreTrainedModel.generate()"),Oc=t(" method to perform the translation."),ao=h(),m(Ge.$$.fragment),to=h(),xa=l("p"),Bc=t("We get the same translation as with the pipeline example."),this.h()},l(s){const i=Rg('[data-svelte="svelte-1phssyn"]',document.head);T=o(i,"META",{name:!0,content:!0}),i.forEach(a),G=c(s),E=o(s,"H1",{class:!0});var Ue=r(E);b=o(Ue,"A",{id:!0,class:!0,href:!0});var Rc=r(b);as=o(Rc,"SPAN",{});var Hc=r(as);g(y.$$.fragment,Hc),Hc.forEach(a),Rc.forEach(a),U=c(Ue),fs=o(Ue,"SPAN",{});var Gc=r(fs);Mo=n(Gc,"Summary of the tasks"),Gc.forEach(a),Ue.forEach(a),yn=c(s),g(Gs.$$.fragment,s),bn=c(s),Qe=o(s,"P",{});var Uc=r(Qe);Lo=n(Uc,`This page shows the most frequent use-cases when using the library. The models available allow for many different configurations and a great versatility in use-cases. The most simple ones are presented here, showcasing usage for tasks such as question answering, sequence classification, named entity recognition and others.`),Uc.forEach(a),vn=c(s),js=o(s,"P",{});var lo=r(js);Po=n(lo,`These examples leverage auto-models, which are classes that will instantiate a model according to a given checkpoint, automatically selecting the correct model architecture. Please check the `),Ye=o(lo,"A",{href:!0});var Qc=r(Ye);Fo=n(Qc,"AutoModel"),Qc.forEach(a),No=n(lo,` documentation for more information. Feel free to modify the code to be more specific and adapt it to your specific use-case.`),lo.forEach(a),qn=c(s),We=o(s,"P",{});var Yc=r(We);Co=n(Yc,`In order for a model to perform well on a task, it must be loaded from a checkpoint corresponding to that task. These checkpoints are usually pre-trained on a large corpus of data and fine-tuned on a specific task. This means the following:`),Yc.forEach(a),Tn=c(s),_s=o(s,"UL",{});var oo=r(_s);ts=o(oo,"LI",{});var ya=r(ts);So=n(ya,`Not all models were fine-tuned on all tasks. If you want to fine-tune a model on a specific task, you can leverage one of the `),La=o(ya,"EM",{});var Wc=r(La);Do=n(Wc,"run_$TASK.py"),Wc.forEach(a),Oo=n(ya," scripts in the "),Us=o(ya,"A",{href:!0,rel:!0});var Xc=r(Us);Bo=n(Xc,"examples"),Xc.forEach(a),Ro=n(ya," directory."),ya.forEach(a),Ho=c(oo),Qs=o(oo,"LI",{});var ro=r(Qs);Go=n(ro,`Fine-tuned models were fine-tuned on a specific dataset. This dataset may or may not overlap with your use-case and domain. 
As mentioned previously, you may leverage the `),Ys=o(ro,"A",{href:!0,rel:!0});var Vc=r(Ys);Uo=n(Vc,"examples"),Vc.forEach(a),Qo=n(ro,` scripts to fine-tune your model, or you may create your own training script.`),ro.forEach(a),oo.forEach(a),En=c(s),Xe=o(s,"P",{});var Jc=r(Xe);Yo=n(Jc,"In order to do an inference on a task, several mechanisms are made available by the library:"),Jc.forEach(a),An=c(s),ks=o(s,"UL",{});var io=r(ks);Pa=o(io,"LI",{});var Kc=r(Pa);Wo=n(Kc,"Pipelines: very easy-to-use abstractions, which require as little as two lines of code."),Kc.forEach(a),Xo=c(io),Fa=o(io,"LI",{});var Zc=r(Fa);Vo=n(Zc,`Direct model use: Less abstractions, but more flexibility and power via a direct access to a tokenizer (PyTorch/TensorFlow) and full inference capacity.`),Zc.forEach(a),io.forEach(a),zn=c(s),Ve=o(s,"P",{});var su=r(Ve);Jo=n(su,"Both approaches are showcased here."),su.forEach(a),$n=c(s),g(ws.$$.fragment,s),In=c(s),ns=o(s,"H2",{class:!0});var po=r(ns);xs=o(po,"A",{id:!0,class:!0,href:!0});var eu=r(xs);Na=o(eu,"SPAN",{});var au=r(Na);g(Ws.$$.fragment,au),au.forEach(a),eu.forEach(a),Ko=c(po),Ca=o(po,"SPAN",{});var tu=r(Ca);Zo=n(tu,"Sequence Classification"),tu.forEach(a),po.forEach(a),Mn=c(s),M=o(s,"P",{});var K=r(M);sr=n(K,`Sequence classification is the task of classifying sequences according to a given number of classes. An example of sequence classification is the GLUE dataset, which is entirely based on that task. If you would like to fine-tune a model on a GLUE sequence classification task, you may leverage the `),Xs=o(K,"A",{href:!0,rel:!0});var nu=r(Xs);er=n(nu,"run_glue.py"),nu.forEach(a),ar=n(K,", "),Vs=o(K,"A",{href:!0,rel:!0});var lu=r(Vs);tr=n(lu,"run_tf_glue.py"),lu.forEach(a),nr=n(K,", "),Js=o(K,"A",{href:!0,rel:!0});var ou=r(Js);lr=n(ou,"run_tf_text_classification.py"),ou.forEach(a),or=n(K," or "),Ks=o(K,"A",{href:!0,rel:!0});var ru=r(Ks);rr=n(ru,"run_xnli.py"),ru.forEach(a),ir=n(K," scripts."),K.forEach(a),Ln=c(s),Je=o(s,"P",{});var iu=r(Je);pr=n(iu,`Here is an example of using pipelines to do sentiment analysis: identifying if a sequence is positive or negative. It leverages a fine-tuned model on sst2, which is a GLUE task.`),iu.forEach(a),Pn=c(s),Ke=o(s,"P",{});var pu=r(Ke);hr=n(pu,"This returns a label (\u201CPOSITIVE\u201D or \u201CNEGATIVE\u201D) alongside a score, as follows:"),pu.forEach(a),Fn=c(s),g(Zs.$$.fragment,s),Nn=c(s),Ze=o(s,"P",{});var hu=r(Ze);cr=n(hu,`Here is an example of doing a sequence classification using a model to determine if two sequences are paraphrases of each other. The process is the following:`),hu.forEach(a),Cn=c(s),L=o(s,"OL",{});var Z=r(L);Sa=o(Z,"LI",{});var cu=r(Sa);ur=n(cu,`Instantiate a tokenizer and a model from the checkpoint name. 
The model is identified as a BERT model and loads it with the weights stored in the checkpoint.`),cu.forEach(a),mr=c(Z),Da=o(Z,"LI",{});var uu=r(Da);gr=n(uu,`Build a sequence from the two sentences, with the correct model-specific separators, token type ids and attention masks (which will be created automatically by the tokenizer).`),uu.forEach(a),dr=c(Z),Oa=o(Z,"LI",{});var mu=r(Oa);fr=n(mu,`Pass this sequence through the model so that it is classified in one of the two available classes: 0 (not a paraphrase) and 1 (is a paraphrase).`),mu.forEach(a),jr=c(Z),Ba=o(Z,"LI",{});var gu=r(Ba);_r=n(gu,"Compute the softmax of the result to get probabilities over the classes."),gu.forEach(a),kr=c(Z),Ra=o(Z,"LI",{});var du=r(Ra);wr=n(du,"Print the results."),du.forEach(a),Z.forEach(a),Sn=c(s),g(se.$$.fragment,s),Dn=c(s),ls=o(s,"H2",{class:!0});var ho=r(ls);ys=o(ho,"A",{id:!0,class:!0,href:!0});var fu=r(ys);Ha=o(fu,"SPAN",{});var ju=r(Ha);g(ee.$$.fragment,ju),ju.forEach(a),fu.forEach(a),xr=c(ho),Ga=o(ho,"SPAN",{});var _u=r(Ga);yr=n(_u,"Extractive Question Answering"),_u.forEach(a),ho.forEach(a),On=c(s),Q=o(s,"P",{});var ba=r(Q);br=n(ba,`Extractive Question Answering is the task of extracting an answer from a text given a question. An example of a question answering dataset is the SQuAD dataset, which is entirely based on that task. If you would like to fine-tune a model on a SQuAD task, you may leverage the `),ae=o(ba,"A",{href:!0,rel:!0});var ku=r(ae);vr=n(ku,"run_qa.py"),ku.forEach(a),qr=n(ba,` and `),te=o(ba,"A",{href:!0,rel:!0});var wu=r(te);Tr=n(wu,"run_tf_squad.py"),wu.forEach(a),Er=n(ba,` scripts.`),ba.forEach(a),Bn=c(s),sa=o(s,"P",{});var xu=r(sa);Ar=n(xu,`Here is an example of using pipelines to do question answering: extracting an answer from a text given a question. It leverages a fine-tuned model on SQuAD.`),xu.forEach(a),Rn=c(s),g(ne.$$.fragment,s),Hn=c(s),ea=o(s,"P",{});var yu=r(ea);zr=n(yu,`This returns an answer extracted from the text, a confidence score, alongside \u201Cstart\u201D and \u201Cend\u201D values, which are the positions of the extracted answer in the text.`),yu.forEach(a),Gn=c(s),g(le.$$.fragment,s),Un=c(s),aa=o(s,"P",{});var bu=r(aa);$r=n(bu,"Here is an example of question answering using a model and a tokenizer. The process is the following:"),bu.forEach(a),Qn=c(s),v=o(s,"OL",{});var F=r(v);Ua=o(F,"LI",{});var vu=r(Ua);Ir=n(vu,`Instantiate a tokenizer and a model from the checkpoint name. The model is identified as a BERT model and loads it with the weights stored in the checkpoint.`),vu.forEach(a),Mr=c(F),Qa=o(F,"LI",{});var qu=r(Qa);Lr=n(qu,"Define a text and a few questions."),qu.forEach(a),Pr=c(F),Ya=o(F,"LI",{});var Tu=r(Ya);Fr=n(Tu,`Iterate over the questions and build a sequence from the text and the current question, with the correct model-specific separators token type ids and attention masks.`),Tu.forEach(a),Nr=c(F),Wa=o(F,"LI",{});var Eu=r(Wa);Cr=n(Eu,`Pass this sequence through the model. 
This outputs a range of scores across the entire sequence tokens (question and text), for both the start and end positions.`),Eu.forEach(a),Sr=c(F),Xa=o(F,"LI",{});var Au=r(Xa);Dr=n(Au,"Compute the softmax of the result to get probabilities over the tokens."),Au.forEach(a),Or=c(F),Va=o(F,"LI",{});var zu=r(Va);Br=n(zu,"Fetch the tokens from the identified start and stop values, convert those tokens to a string."),zu.forEach(a),Rr=c(F),Ja=o(F,"LI",{});var $u=r(Ja);Hr=n($u,"Print the results."),$u.forEach(a),F.forEach(a),Yn=c(s),g(oe.$$.fragment,s),Wn=c(s),os=o(s,"H2",{class:!0});var co=r(os);bs=o(co,"A",{id:!0,class:!0,href:!0});var Iu=r(bs);Ka=o(Iu,"SPAN",{});var Mu=r(Ka);g(re.$$.fragment,Mu),Mu.forEach(a),Iu.forEach(a),Gr=c(co),Za=o(co,"SPAN",{});var Lu=r(Za);Ur=n(Lu,"Language Modeling"),Lu.forEach(a),co.forEach(a),Xn=c(s),ta=o(s,"P",{});var Pu=r(ta);Qr=n(Pu,`Language modeling is the task of fitting a model to a corpus, which can be domain specific. All popular transformer-based models are trained using a variant of language modeling, e.g. BERT with masked language modeling, GPT-2 with causal language modeling.`),Pu.forEach(a),Vn=c(s),vs=o(s,"P",{});var uo=r(vs);Yr=n(uo,`Language modeling can be useful outside of pretraining as well, for example to shift the model distribution to be domain-specific: using a language model trained over a very large corpus, and then fine-tuning it to a news dataset or on scientific papers e.g. `),ie=o(uo,"A",{href:!0,rel:!0});var Fu=r(ie);Wr=n(Fu,"LysandreJik/arxiv-nlp"),Fu.forEach(a),Xr=n(uo,"."),uo.forEach(a),Jn=c(s),rs=o(s,"H3",{class:!0});var mo=r(rs);qs=o(mo,"A",{id:!0,class:!0,href:!0});var Nu=r(qs);st=o(Nu,"SPAN",{});var Cu=r(st);g(pe.$$.fragment,Cu),Cu.forEach(a),Nu.forEach(a),Vr=c(mo),et=o(mo,"SPAN",{});var Su=r(et);Jr=n(Su,"Masked Language Modeling"),Su.forEach(a),mo.forEach(a),Kn=c(s),Y=o(s,"P",{});var va=r(Y);Kr=n(va,`Masked language modeling is the task of masking tokens in a sequence with a masking token, and prompting the model to fill that mask with an appropriate token. This allows the model to attend to both the right context (tokens on the right of the mask) and the left context (tokens on the left of the mask). Such a training creates a strong basis for downstream tasks requiring bi-directional context, such as SQuAD (question answering, see `),he=o(va,"A",{href:!0,rel:!0});var Du=r(he);Zr=n(Du,"Lewis, Lui, Goyal et al."),Du.forEach(a),si=n(va,`, part 4.2). If you would like to fine-tune a model on a masked language modeling task, you may leverage the `),ce=o(va,"A",{href:!0,rel:!0});var Ou=r(ce);ei=n(Ou,"run_mlm.py"),Ou.forEach(a),ai=n(va," script."),va.forEach(a),Zn=c(s),na=o(s,"P",{});var Bu=r(na);ti=n(Bu,"Here is an example of using pipelines to replace a mask from a sequence:"),Bu.forEach(a),sl=c(s),g(ue.$$.fragment,s),el=c(s),la=o(s,"P",{});var Ru=r(la);ni=n(Ru,"This outputs the sequences with the mask filled, the confidence score, and the token id in the tokenizer vocabulary:"),Ru.forEach(a),al=c(s),g(me.$$.fragment,s),tl=c(s),oa=o(s,"P",{});var Hu=r(oa);li=n(Hu,"Here is an example of doing masked language modeling using a model and a tokenizer. The process is the following:"),Hu.forEach(a),nl=c(s),A=o(s,"OL",{});var D=r(A);at=o(D,"LI",{});var Gu=r(at);oi=n(Gu,`Instantiate a tokenizer and a model from the checkpoint name. 
The model is identified as a DistilBERT model and loads it with the weights stored in the checkpoint.`),Gu.forEach(a),ri=c(D),ge=o(D,"LI",{});var go=r(ge);ii=n(go,"Define a sequence with a masked token, placing the "),tt=o(go,"CODE",{});var Uu=r(tt);pi=n(Uu,"tokenizer.mask_token"),Uu.forEach(a),hi=n(go," instead of a word."),go.forEach(a),ci=c(D),nt=o(D,"LI",{});var Qu=r(nt);ui=n(Qu,"Encode that sequence into a list of IDs and find the position of the masked token in that list."),Qu.forEach(a),mi=c(D),lt=o(D,"LI",{});var Yu=r(lt);gi=n(Yu,`Retrieve the predictions at the index of the mask token: this tensor has the same size as the vocabulary, and the values are the scores attributed to each token. The model gives higher score to tokens it deems probable in that context.`),Yu.forEach(a),di=c(D),is=o(D,"LI",{});var qa=r(is);fi=n(qa,"Retrieve the top 5 tokens using the PyTorch "),ot=o(qa,"CODE",{});var Wu=r(ot);ji=n(Wu,"topk"),Wu.forEach(a),_i=n(qa," or TensorFlow "),rt=o(qa,"CODE",{});var Xu=r(rt);ki=n(Xu,"top_k"),Xu.forEach(a),wi=n(qa," methods."),qa.forEach(a),xi=c(D),it=o(D,"LI",{});var Vu=r(it);yi=n(Vu,"Replace the mask token by the tokens and print the results"),Vu.forEach(a),D.forEach(a),ll=c(s),g(de.$$.fragment,s),ol=c(s),ra=o(s,"P",{});var Ju=r(ra);bi=n(Ju,"This prints five sequences, with the top 5 tokens predicted by the model."),Ju.forEach(a),rl=c(s),ps=o(s,"H3",{class:!0});var fo=r(ps);Ts=o(fo,"A",{id:!0,class:!0,href:!0});var Ku=r(Ts);pt=o(Ku,"SPAN",{});var Zu=r(pt);g(fe.$$.fragment,Zu),Zu.forEach(a),Ku.forEach(a),vi=c(fo),ht=o(fo,"SPAN",{});var sm=r(ht);qi=n(sm,"Causal Language Modeling"),sm.forEach(a),fo.forEach(a),il=c(s),Es=o(s,"P",{});var jo=r(Es);Ti=n(jo,`Causal language modeling is the task of predicting the token following a sequence of tokens. In this situation, the model only attends to the left context (tokens on the left of the mask). Such a training is particularly interesting for generation tasks. 
If you would like to fine-tune a model on a causal language modeling task, you may leverage the `),je=o(jo,"A",{href:!0,rel:!0});var em=r(je);Ei=n(em,"run_clm.py"),em.forEach(a),Ai=n(jo," script."),jo.forEach(a),pl=c(s),ia=o(s,"P",{});var am=r(ia);zi=n(am,`Usually, the next token is predicted by sampling from the logits of the last hidden state the model produces from the input sequence.`),am.forEach(a),hl=c(s),As=o(s,"P",{});var _o=r(As);$i=n(_o,`Here is an example of using the tokenizer and model and leveraging the `),ct=o(_o,"CODE",{});var tm=r(ct);Ii=n(tm,"PreTrainedModel.top_k_top_p_filtering"),tm.forEach(a),Mi=n(_o,` method to sample the next token following an input sequence of tokens.`),_o.forEach(a),cl=c(s),g(_e.$$.fragment,s),ul=c(s),W=o(s,"P",{});var Ta=r(W);Li=n(Ta,"This outputs a (hopefully) coherent next token following the original sequence, which in our case is the word "),ut=o(Ta,"EM",{});var nm=r(ut);Pi=n(nm,"is"),nm.forEach(a),Fi=n(Ta,` or `),mt=o(Ta,"EM",{});var lm=r(mt);Ni=n(lm,"features"),lm.forEach(a),Ci=n(Ta,"."),Ta.forEach(a),ml=c(s),zs=o(s,"P",{});var ko=r(zs);Si=n(ko,"In the next section, we show how "),pa=o(ko,"A",{href:!0});var om=r(pa);Di=n(om,"generation_utils.GenerationMixin.generate()"),om.forEach(a),Oi=n(ko,` can be used to generate multiple tokens up to a specified length instead of one token at a time.`),ko.forEach(a),gl=c(s),hs=o(s,"H3",{class:!0});var wo=r(hs);$s=o(wo,"A",{id:!0,class:!0,href:!0});var rm=r($s);gt=o(rm,"SPAN",{});var im=r(gt);g(ke.$$.fragment,im),im.forEach(a),rm.forEach(a),Bi=c(wo),dt=o(wo,"SPAN",{});var pm=r(dt);Ri=n(pm,"Text Generation"),pm.forEach(a),wo.forEach(a),dl=c(s),z=o(s,"P",{});var O=r(z);Hi=n(O,"In text generation ("),ft=o(O,"EM",{});var hm=r(ft);Gi=n(hm,"a.k.a"),hm.forEach(a),Ui=c(O),jt=o(O,"EM",{});var cm=r(jt);Qi=n(cm,"open-ended text generation"),cm.forEach(a),Yi=n(O,`) the goal is to create a coherent portion of text that is a continuation from the given context. The following example shows how `),_t=o(O,"EM",{});var um=r(_t);Wi=n(um,"GPT-2"),um.forEach(a),Xi=n(O,` can be used in pipelines to generate text. As a default all models apply `),kt=o(O,"EM",{});var mm=r(kt);Vi=n(mm,"Top-K"),mm.forEach(a),Ji=n(O,` sampling when used in pipelines, as configured in their respective configurations (see `),we=o(O,"A",{href:!0,rel:!0});var gm=r(we);Ki=n(gm,"gpt-2 config"),gm.forEach(a),Zi=n(O," for example)."),O.forEach(a),fl=c(s),g(xe.$$.fragment,s),jl=c(s),$=o(s,"P",{});var B=r($);sp=n(B,"Here, the model generates a random text with a total maximal length of "),wt=o(B,"EM",{});var dm=r(wt);ep=n(dm,"50"),dm.forEach(a),ap=n(B," tokens from context "),xt=o(B,"EM",{});var fm=r(xt);tp=n(fm,`\u201CAs far as I am concerned, I will\u201D`),fm.forEach(a),np=n(B,`. Behind the scenes, the pipeline object calls the method `),ha=o(B,"A",{href:!0});var jm=r(ha);lp=n(jm,"PreTrainedModel.generate()"),jm.forEach(a),op=n(B,` to generate text. 
The default arguments for this method can be overridden in the pipeline, as is shown above for the arguments `),yt=o(B,"CODE",{});var _m=r(yt);rp=n(_m,"max_length"),_m.forEach(a),ip=n(B," and "),bt=o(B,"CODE",{});var km=r(bt);pp=n(km,"do_sample"),km.forEach(a),hp=n(B,"."),B.forEach(a),_l=c(s),X=o(s,"P",{});var Ea=r(X);cp=n(Ea,"Below is an example of text generation using "),vt=o(Ea,"CODE",{});var wm=r(vt);up=n(wm,"XLNet"),wm.forEach(a),mp=n(Ea," and its tokenizer, which includes calling "),qt=o(Ea,"CODE",{});var xm=r(qt);gp=n(xm,"generate()"),xm.forEach(a),dp=n(Ea," directly:"),Ea.forEach(a),kl=c(s),g(ye.$$.fragment,s),wl=c(s),k=o(s,"P",{});var x=r(k);fp=n(x,"Text generation is currently possible with "),Tt=o(x,"EM",{});var ym=r(Tt);jp=n(ym,"GPT-2"),ym.forEach(a),_p=n(x,", "),Et=o(x,"EM",{});var bm=r(Et);kp=n(bm,"OpenAi-GPT"),bm.forEach(a),wp=n(x,", "),At=o(x,"EM",{});var vm=r(At);xp=n(vm,"CTRL"),vm.forEach(a),yp=n(x,", "),zt=o(x,"EM",{});var qm=r(zt);bp=n(qm,"XLNet"),qm.forEach(a),vp=n(x,", "),$t=o(x,"EM",{});var Tm=r($t);qp=n(Tm,"Transfo-XL"),Tm.forEach(a),Tp=n(x," and "),It=o(x,"EM",{});var Em=r(It);Ep=n(Em,"Reformer"),Em.forEach(a),Ap=n(x,` in PyTorch and for most models in Tensorflow as well. As can be seen in the example above `),Mt=o(x,"EM",{});var Am=r(Mt);zp=n(Am,"XLNet"),Am.forEach(a),$p=n(x," and "),Lt=o(x,"EM",{});var zm=r(Lt);Ip=n(zm,"Transfo-XL"),zm.forEach(a),Mp=n(x,` often need to be padded to work well. GPT-2 is usually a good choice for `),Pt=o(x,"EM",{});var $m=r(Pt);Lp=n($m,"open-ended text generation"),$m.forEach(a),Pp=n(x,` because it was trained on millions of webpages with a causal language modeling objective.`),x.forEach(a),xl=c(s),Is=o(s,"P",{});var xo=r(Is);Fp=n(xo,`For more information on how to apply different decoding strategies for text generation, please also refer to our text generation blog post `),be=o(xo,"A",{href:!0,rel:!0});var Im=r(be);Np=n(Im,"here"),Im.forEach(a),Cp=n(xo,"."),xo.forEach(a),yl=c(s),cs=o(s,"H2",{class:!0});var yo=r(cs);Ms=o(yo,"A",{id:!0,class:!0,href:!0});var Mm=r(Ms);Ft=o(Mm,"SPAN",{});var Lm=r(Ft);g(ve.$$.fragment,Lm),Lm.forEach(a),Mm.forEach(a),Sp=c(yo),Nt=o(yo,"SPAN",{});var Pm=r(Nt);Dp=n(Pm,"Named Entity Recognition"),Pm.forEach(a),yo.forEach(a),bl=c(s),Ls=o(s,"P",{});var bo=r(Ls);Op=n(bo,`Named Entity Recognition (NER) is the task of classifying tokens according to a class, for example, identifying a token as a person, an organisation or a location. An example of a named entity recognition dataset is the CoNLL-2003 dataset, which is entirely based on that task. 
If you would like to fine-tune a model on an NER task, you may leverage the `),qe=o(bo,"A",{href:!0,rel:!0});var Fm=r(qe);Bp=n(Fm,"run_ner.py"),Fm.forEach(a),Rp=n(bo," script."),bo.forEach(a),vl=c(s),ca=o(s,"P",{});var Nm=r(ca);Hp=n(Nm,`Here is an example of using pipelines to do named entity recognition, specifically, trying to identify tokens as belonging to one of 9 classes:`),Nm.forEach(a),ql=c(s),w=o(s,"UL",{});var q=r(w);Ct=o(q,"LI",{});var Cm=r(Ct);Gp=n(Cm,"O, Outside of a named entity"),Cm.forEach(a),Up=c(q),St=o(q,"LI",{});var Sm=r(St);Qp=n(Sm,"B-MIS, Beginning of a miscellaneous entity right after another miscellaneous entity"),Sm.forEach(a),Yp=c(q),Dt=o(q,"LI",{});var Dm=r(Dt);Wp=n(Dm,"I-MIS, Miscellaneous entity"),Dm.forEach(a),Xp=c(q),Ot=o(q,"LI",{});var Om=r(Ot);Vp=n(Om,"B-PER, Beginning of a person\u2019s name right after another person\u2019s name"),Om.forEach(a),Jp=c(q),Bt=o(q,"LI",{});var Bm=r(Bt);Kp=n(Bm,"I-PER, Person\u2019s name"),Bm.forEach(a),Zp=c(q),Rt=o(q,"LI",{});var Rm=r(Rt);sh=n(Rm,"B-ORG, Beginning of an organisation right after another organisation"),Rm.forEach(a),eh=c(q),Ht=o(q,"LI",{});var Hm=r(Ht);ah=n(Hm,"I-ORG, Organisation"),Hm.forEach(a),th=c(q),Gt=o(q,"LI",{});var Gm=r(Gt);nh=n(Gm,"B-LOC, Beginning of a location right after another location"),Gm.forEach(a),lh=c(q),Ut=o(q,"LI",{});var Um=r(Ut);oh=n(Um,"I-LOC, Location"),Um.forEach(a),q.forEach(a),Tl=c(s),V=o(s,"P",{});var Aa=r(V);rh=n(Aa,"It leverages a fine-tuned model on CoNLL-2003, fine-tuned by "),Te=o(Aa,"A",{href:!0,rel:!0});var Qm=r(Te);ih=n(Qm,"@stefan-it"),Qm.forEach(a),ph=n(Aa," from "),Ee=o(Aa,"A",{href:!0,rel:!0});var Ym=r(Ee);hh=n(Ym,"dbmdz"),Ym.forEach(a),ch=n(Aa,"."),Aa.forEach(a),El=c(s),g(Ae.$$.fragment,s),Al=c(s),ua=o(s,"P",{});var Wm=r(ua);uh=n(Wm,`This outputs a list of all words that have been identified as one of the entities from the 9 classes defined above. Here are the expected results:`),Wm.forEach(a),zl=c(s),g(ze.$$.fragment,s),$l=c(s),ma=o(s,"P",{});var Xm=r(ma);mh=n(Xm,`Note how the tokens of the sequence \u201CHugging Face\u201D have been identified as an organisation, and \u201CNew York City\u201D, \u201CDUMBO\u201D and \u201CManhattan Bridge\u201D have been identified as locations.`),Xm.forEach(a),Il=c(s),ga=o(s,"P",{});var Vm=r(ga);gh=n(Vm,"Here is an example of doing named entity recognition, using a model and a tokenizer. The process is the following:"),Vm.forEach(a),Ml=c(s),I=o(s,"OL",{});var R=r(I);Qt=o(R,"LI",{});var Jm=r(Qt);dh=n(Jm,`Instantiate a tokenizer and a model from the checkpoint name. The model is identified as a BERT model and loads it with the weights stored in the checkpoint.`),Jm.forEach(a),fh=c(R),Yt=o(R,"LI",{});var Km=r(Yt);jh=n(Km,"Define a sequence with known entities, such as \u201CHugging Face\u201D as an organisation and \u201CNew York City\u201D as a location."),Km.forEach(a),_h=c(R),Wt=o(R,"LI",{});var Zm=r(Wt);kh=n(Zm,`Split words into tokens so that they can be mapped to predictions. We use a small hack by, first, completely encoding and decoding the sequence, so that we\u2019re left with a string that contains the special tokens.`),Zm.forEach(a),wh=c(R),Xt=o(R,"LI",{});var sg=r(Xt);xh=n(sg,"Encode that sequence into IDs (special tokens are added automatically)."),sg.forEach(a),yh=c(R),Vt=o(R,"LI",{});var eg=r(Vt);bh=n(eg,`Retrieve the predictions by passing the input to the model and getting the first output. This results in a distribution over the 9 possible classes for each token. 
We take the argmax to retrieve the most likely class for each token.`),eg.forEach(a),vh=c(R),Jt=o(R,"LI",{});var ag=r(Jt);qh=n(ag,"Zip together each token with its prediction and print it."),ag.forEach(a),R.forEach(a),Ll=c(s),g($e.$$.fragment,s),Pl=c(s),da=o(s,"P",{});var tg=r(da);Th=n(tg,`This outputs a list of each token mapped to its corresponding prediction. Differently from the pipeline, here every token has a prediction as we didn\u2019t remove the \u201C0\u201Dth class, which means that no particular entity was found on that token.`),tg.forEach(a),Fl=c(s),J=o(s,"P",{});var za=r(J);Eh=n(za,"In the above example, "),Kt=o(za,"CODE",{});var ng=r(Kt);Ah=n(ng,"predictions"),ng.forEach(a),zh=n(za,` is an integer that corresponds to the predicted class. We can use the `),Zt=o(za,"CODE",{});var lg=r(Zt);$h=n(lg,"model.config.id2label"),lg.forEach(a),Ih=n(za,` property in order to recover the class name corresponding to the class number, which is illustrated below:`),za.forEach(a),Nl=c(s),g(Ie.$$.fragment,s),Cl=c(s),us=o(s,"H2",{class:!0});var vo=r(us);Ps=o(vo,"A",{id:!0,class:!0,href:!0});var og=r(Ps);sn=o(og,"SPAN",{});var rg=r(sn);g(Me.$$.fragment,rg),rg.forEach(a),og.forEach(a),Mh=c(vo),en=o(vo,"SPAN",{});var ig=r(en);Lh=n(ig,"Summarization"),ig.forEach(a),vo.forEach(a),Sl=c(s),Fs=o(s,"P",{});var qo=r(Fs);Ph=n(qo,`Summarization is the task of summarizing a document or an article into a shorter text. If you would like to fine-tune a model on a summarization task, you may leverage the `),Le=o(qo,"A",{href:!0,rel:!0});var pg=r(Le);Fh=n(pg,"run_summarization.py"),pg.forEach(a),Nh=n(qo,` script.`),qo.forEach(a),Dl=c(s),Ns=o(s,"P",{});var To=r(Ns);Ch=n(To,`An example of a summarization dataset is the CNN / Daily Mail dataset, which consists of long news articles and was created for the task of summarization. If you would like to fine-tune a model on a summarization task, various approaches are described in this `),Pe=o(To,"A",{href:!0,rel:!0});var hg=r(Pe);Sh=n(hg,"document"),hg.forEach(a),Dh=n(To,"."),To.forEach(a),Ol=c(s),fa=o(s,"P",{});var cg=r(fa);Oh=n(cg,`Here is an example of using the pipelines to do summarization. It leverages a Bart model that was fine-tuned on the CNN / Daily Mail data set.`),cg.forEach(a),Bl=c(s),g(Fe.$$.fragment,s),Rl=c(s),P=o(s,"P",{});var ss=r(P);Bh=n(ss,"Because the summarization pipeline depends on the "),an=o(ss,"CODE",{});var ug=r(an);Rh=n(ug,"PreTrainedModel.generate()"),ug.forEach(a),Hh=n(ss,` method, we can override the default arguments of `),tn=o(ss,"CODE",{});var mg=r(tn);Gh=n(mg,"PreTrainedModel.generate()"),mg.forEach(a),Uh=n(ss," directly in the pipeline for "),nn=o(ss,"CODE",{});var gg=r(nn);Qh=n(gg,"max_length"),gg.forEach(a),Yh=n(ss," and "),ln=o(ss,"CODE",{});var dg=r(ln);Wh=n(dg,"min_length"),dg.forEach(a),Xh=n(ss,` as shown below. This outputs the following summary:`),ss.forEach(a),Hl=c(s),g(Ne.$$.fragment,s),Gl=c(s),ja=o(s,"P",{});var fg=r(ja);Vh=n(fg,"Here is an example of doing summarization using a model and a tokenizer. The process is the following:"),fg.forEach(a),Ul=c(s),N=o(s,"OL",{});var Os=r(N);ms=o(Os,"LI",{});var $a=r(ms);Jh=n($a,`Instantiate a tokenizer and a model from the checkpoint name. 
Summarization is usually done using an encoder-decoder model, such as `),on=o($a,"CODE",{});var jg=r(on);Kh=n(jg,"Bart"),jg.forEach(a),Zh=n($a," or "),rn=o($a,"CODE",{});var _g=r(rn);sc=n(_g,"T5"),_g.forEach(a),ec=n($a,"."),$a.forEach(a),ac=c(Os),pn=o(Os,"LI",{});var kg=r(pn);tc=n(kg,"Define the article that should be summarized."),kg.forEach(a),nc=c(Os),hn=o(Os,"LI",{});var wg=r(hn);lc=n(wg,"Add the T5 specific prefix \u201Csummarize: \u201C."),wg.forEach(a),oc=c(Os),Ce=o(Os,"LI",{});var Eo=r(Ce);rc=n(Eo,"Use the "),cn=o(Eo,"CODE",{});var xg=r(cn);ic=n(xg,"PreTrainedModel.generate()"),xg.forEach(a),pc=n(Eo," method to generate the summary."),Eo.forEach(a),Os.forEach(a),Ql=c(s),_a=o(s,"P",{});var yg=r(_a);hc=n(yg,`In this example we use Google\u2019s T5 model. Even though it was pre-trained only on a multi-task mixed dataset (including CNN / Daily Mail), it yields very good results.`),yg.forEach(a),Yl=c(s),g(Se.$$.fragment,s),Wl=c(s),gs=o(s,"H2",{class:!0});var Ao=r(gs);Cs=o(Ao,"A",{id:!0,class:!0,href:!0});var bg=r(Cs);un=o(bg,"SPAN",{});var vg=r(un);g(De.$$.fragment,vg),vg.forEach(a),bg.forEach(a),cc=c(Ao),mn=o(Ao,"SPAN",{});var qg=r(mn);uc=n(qg,"Translation"),qg.forEach(a),Ao.forEach(a),Xl=c(s),Ss=o(s,"P",{});var zo=r(Ss);mc=n(zo,`Translation is the task of translating a text from one language to another. If you would like to fine-tune a model on a translation task, you may leverage the `),Oe=o(zo,"A",{href:!0,rel:!0});var Tg=r(Oe);gc=n(Tg,"run_translation.py"),Tg.forEach(a),dc=n(zo," script."),zo.forEach(a),Vl=c(s),Ds=o(s,"P",{});var $o=r(Ds);fc=n($o,`An example of a translation dataset is the WMT English to German dataset, which has sentences in English as the input data and the corresponding sentences in German as the target data. If you would like to fine-tune a model on a translation task, various approaches are described in this `),Be=o($o,"A",{href:!0,rel:!0});var Eg=r(Be);jc=n(Eg,"document"),Eg.forEach(a),_c=n($o,"."),$o.forEach(a),Jl=c(s),ka=o(s,"P",{});var Ag=r(ka);kc=n(Ag,`Here is an example of using the pipelines to do translation. It leverages a T5 model that was only pre-trained on a multi-task mixture dataset (including WMT), yet, yielding impressive translation results.`),Ag.forEach(a),Kl=c(s),g(Re.$$.fragment,s),Zl=c(s),C=o(s,"P",{});var Bs=r(C);wc=n(Bs,"Because the translation pipeline depends on the "),gn=o(Bs,"CODE",{});var zg=r(gn);xc=n(zg,"PreTrainedModel.generate()"),zg.forEach(a),yc=n(Bs,` method, we can override the default arguments of `),dn=o(Bs,"CODE",{});var $g=r(dn);bc=n($g,"PreTrainedModel.generate()"),$g.forEach(a),vc=n(Bs," directly in the pipeline as is shown for "),fn=o(Bs,"CODE",{});var Ig=r(fn);qc=n(Ig,"max_length"),Ig.forEach(a),Tc=n(Bs," above."),Bs.forEach(a),so=c(s),wa=o(s,"P",{});var Mg=r(wa);Ec=n(Mg,"Here is an example of doing translation using a model and a tokenizer. The process is the following:"),Mg.forEach(a),eo=c(s),S=o(s,"OL",{});var Rs=r(S);ds=o(Rs,"LI",{});var Ia=r(ds);Ac=n(Ia,`Instantiate a tokenizer and a model from the checkpoint name. 
Summarization is usually done using an encoder-decoder model, such as `),jn=o(Ia,"CODE",{});var Lg=r(jn);zc=n(Lg,"Bart"),Lg.forEach(a),$c=n(Ia," or "),_n=o(Ia,"CODE",{});var Pg=r(_n);Ic=n(Pg,"T5"),Pg.forEach(a),Mc=n(Ia,"."),Ia.forEach(a),Lc=c(Rs),kn=o(Rs,"LI",{});var Fg=r(kn);Pc=n(Fg,"Define the article that should be summarized."),Fg.forEach(a),Fc=c(Rs),wn=o(Rs,"LI",{});var Ng=r(wn);Nc=n(Ng,"Add the T5 specific prefix \u201Ctranslate English to German: \u201D"),Ng.forEach(a),Cc=c(Rs),He=o(Rs,"LI",{});var Io=r(He);Sc=n(Io,"Use the "),xn=o(Io,"CODE",{});var Cg=r(xn);Dc=n(Cg,"PreTrainedModel.generate()"),Cg.forEach(a),Oc=n(Io," method to perform the translation."),Io.forEach(a),Rs.forEach(a),ao=c(s),g(Ge.$$.fragment,s),to=c(s),xa=o(s,"P",{});var Sg=r(xa);Bc=n(Sg,"We get the same translation as with the pipeline example."),Sg.forEach(a),this.h()},h(){u(T,"name","hf:doc:metadata"),u(T,"content",JSON.stringify(Yg)),u(b,"id","summary-of-the-tasks"),u(b,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(b,"href","#summary-of-the-tasks"),u(E,"class","relative group"),u(Ye,"href","/docs/transformers/pr_16143/en/model_doc/auto#transformers.AutoModel"),u(Us,"href","https://github.com/huggingface/transformers/tree/master/examples"),u(Us,"rel","nofollow"),u(Ys,"href","https://github.com/huggingface/transformers/tree/master/examples"),u(Ys,"rel","nofollow"),u(xs,"id","sequence-classification"),u(xs,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(xs,"href","#sequence-classification"),u(ns,"class","relative group"),u(Xs,"href","https://github.com/huggingface/transformers/tree/master/examples/pytorch/text-classification/run_glue.py"),u(Xs,"rel","nofollow"),u(Vs,"href","https://github.com/huggingface/transformers/tree/master/examples/tensorflow/text-classification/run_tf_glue.py"),u(Vs,"rel","nofollow"),u(Js,"href","https://github.com/huggingface/transformers/tree/master/examples/tensorflow/text-classification/run_tf_text_classification.py"),u(Js,"rel","nofollow"),u(Ks,"href","https://github.com/huggingface/transformers/tree/master/examples/pytorch/text-classification/run_xnli.py"),u(Ks,"rel","nofollow"),u(ys,"id","extractive-question-answering"),u(ys,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(ys,"href","#extractive-question-answering"),u(ls,"class","relative group"),u(ae,"href","https://github.com/huggingface/transformers/tree/master/examples/pytorch/question-answering/run_qa.py"),u(ae,"rel","nofollow"),u(te,"href","https://github.com/huggingface/transformers/tree/master/examples/tensorflow/question-answering/run_tf_squad.py"),u(te,"rel","nofollow"),u(bs,"id","language-modeling"),u(bs,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(bs,"href","#language-modeling"),u(os,"class","relative group"),u(ie,"href","https://huggingface.co/lysandre/arxiv-nlp"),u(ie,"rel","nofollow"),u(qs,"id","masked-language-modeling"),u(qs,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full"),u(qs,"href","#masked-language-modeling"),u(rs,"class","relative group"),u(he,"href","https://arxiv.org/abs/1910.13461"),u(he,"rel","nofollow"),u(ce,"href","https://github.com/huggingface/transformers/tree/master/examples/pytorch/language-modeling/run_mlm.py"),u(ce,"rel","nofollow"),u(Ts,"id","causal-language-modeling"),u(Ts,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(Ts,"href","#causal-language-modeling"),u(ps,"class","relative group"),u(je,"href","https://github.com/huggingface/transformers/tree/master/examples/pytorch/language-modeling/run_clm.py"),u(je,"rel","nofollow"),u(pa,"href","/docs/transformers/pr_16143/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.generate"),u($s,"id","text-generation"),u($s,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u($s,"href","#text-generation"),u(hs,"class","relative group"),u(we,"href","https://huggingface.co/gpt2/blob/main/config.json"),u(we,"rel","nofollow"),u(ha,"href","/docs/transformers/pr_16143/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.generate"),u(be,"href","https://huggingface.co/blog/how-to-generate"),u(be,"rel","nofollow"),u(Ms,"id","named-entity-recognition"),u(Ms,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(Ms,"href","#named-entity-recognition"),u(cs,"class","relative group"),u(qe,"href","https://github.com/huggingface/transformers/tree/master/examples/pytorch/token-classification/run_ner.py"),u(qe,"rel","nofollow"),u(Te,"href","https://github.com/stefan-it"),u(Te,"rel","nofollow"),u(Ee,"href","https://github.com/dbmdz"),u(Ee,"rel","nofollow"),u(Ps,"id","summarization"),u(Ps,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(Ps,"href","#summarization"),u(us,"class","relative group"),u(Le,"href","https://github.com/huggingface/transformers/tree/master/examples/pytorch/summarization/run_summarization.py"),u(Le,"rel","nofollow"),u(Pe,"href","https://github.com/huggingface/transformers/tree/master/examples/pytorch/summarization/README.md"),u(Pe,"rel","nofollow"),u(Cs,"id","translation"),u(Cs,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(Cs,"href","#translation"),u(gs,"class","relative 
group"),u(Oe,"href","https://github.com/huggingface/transformers/tree/master/examples/pytorch/translation/run_translation.py"),u(Oe,"rel","nofollow"),u(Be,"href","https://github.com/huggingface/transformers/tree/master/examples/pytorch/translation/README.md"),u(Be,"rel","nofollow")},m(s,i){e(document.head,T),p(s,G,i),p(s,E,i),e(E,b),e(b,as),d(y,as,null),e(E,U),e(E,fs),e(fs,Mo),p(s,yn,i),d(Gs,s,i),p(s,bn,i),p(s,Qe,i),e(Qe,Lo),p(s,vn,i),p(s,js,i),e(js,Po),e(js,Ye),e(Ye,Fo),e(js,No),p(s,qn,i),p(s,We,i),e(We,Co),p(s,Tn,i),p(s,_s,i),e(_s,ts),e(ts,So),e(ts,La),e(La,Do),e(ts,Oo),e(ts,Us),e(Us,Bo),e(ts,Ro),e(_s,Ho),e(_s,Qs),e(Qs,Go),e(Qs,Ys),e(Ys,Uo),e(Qs,Qo),p(s,En,i),p(s,Xe,i),e(Xe,Yo),p(s,An,i),p(s,ks,i),e(ks,Pa),e(Pa,Wo),e(ks,Xo),e(ks,Fa),e(Fa,Vo),p(s,zn,i),p(s,Ve,i),e(Ve,Jo),p(s,$n,i),d(ws,s,i),p(s,In,i),p(s,ns,i),e(ns,xs),e(xs,Na),d(Ws,Na,null),e(ns,Ko),e(ns,Ca),e(Ca,Zo),p(s,Mn,i),p(s,M,i),e(M,sr),e(M,Xs),e(Xs,er),e(M,ar),e(M,Vs),e(Vs,tr),e(M,nr),e(M,Js),e(Js,lr),e(M,or),e(M,Ks),e(Ks,rr),e(M,ir),p(s,Ln,i),p(s,Je,i),e(Je,pr),p(s,Pn,i),p(s,Ke,i),e(Ke,hr),p(s,Fn,i),d(Zs,s,i),p(s,Nn,i),p(s,Ze,i),e(Ze,cr),p(s,Cn,i),p(s,L,i),e(L,Sa),e(Sa,ur),e(L,mr),e(L,Da),e(Da,gr),e(L,dr),e(L,Oa),e(Oa,fr),e(L,jr),e(L,Ba),e(Ba,_r),e(L,kr),e(L,Ra),e(Ra,wr),p(s,Sn,i),d(se,s,i),p(s,Dn,i),p(s,ls,i),e(ls,ys),e(ys,Ha),d(ee,Ha,null),e(ls,xr),e(ls,Ga),e(Ga,yr),p(s,On,i),p(s,Q,i),e(Q,br),e(Q,ae),e(ae,vr),e(Q,qr),e(Q,te),e(te,Tr),e(Q,Er),p(s,Bn,i),p(s,sa,i),e(sa,Ar),p(s,Rn,i),d(ne,s,i),p(s,Hn,i),p(s,ea,i),e(ea,zr),p(s,Gn,i),d(le,s,i),p(s,Un,i),p(s,aa,i),e(aa,$r),p(s,Qn,i),p(s,v,i),e(v,Ua),e(Ua,Ir),e(v,Mr),e(v,Qa),e(Qa,Lr),e(v,Pr),e(v,Ya),e(Ya,Fr),e(v,Nr),e(v,Wa),e(Wa,Cr),e(v,Sr),e(v,Xa),e(Xa,Dr),e(v,Or),e(v,Va),e(Va,Br),e(v,Rr),e(v,Ja),e(Ja,Hr),p(s,Yn,i),d(oe,s,i),p(s,Wn,i),p(s,os,i),e(os,bs),e(bs,Ka),d(re,Ka,null),e(os,Gr),e(os,Za),e(Za,Ur),p(s,Xn,i),p(s,ta,i),e(ta,Qr),p(s,Vn,i),p(s,vs,i),e(vs,Yr),e(vs,ie),e(ie,Wr),e(vs,Xr),p(s,Jn,i),p(s,rs,i),e(rs,qs),e(qs,st),d(pe,st,null),e(rs,Vr),e(rs,et),e(et,Jr),p(s,Kn,i),p(s,Y,i),e(Y,Kr),e(Y,he),e(he,Zr),e(Y,si),e(Y,ce),e(ce,ei),e(Y,ai),p(s,Zn,i),p(s,na,i),e(na,ti),p(s,sl,i),d(ue,s,i),p(s,el,i),p(s,la,i),e(la,ni),p(s,al,i),d(me,s,i),p(s,tl,i),p(s,oa,i),e(oa,li),p(s,nl,i),p(s,A,i),e(A,at),e(at,oi),e(A,ri),e(A,ge),e(ge,ii),e(ge,tt),e(tt,pi),e(ge,hi),e(A,ci),e(A,nt),e(nt,ui),e(A,mi),e(A,lt),e(lt,gi),e(A,di),e(A,is),e(is,fi),e(is,ot),e(ot,ji),e(is,_i),e(is,rt),e(rt,ki),e(is,wi),e(A,xi),e(A,it),e(it,yi),p(s,ll,i),d(de,s,i),p(s,ol,i),p(s,ra,i),e(ra,bi),p(s,rl,i),p(s,ps,i),e(ps,Ts),e(Ts,pt),d(fe,pt,null),e(ps,vi),e(ps,ht),e(ht,qi),p(s,il,i),p(s,Es,i),e(Es,Ti),e(Es,je),e(je,Ei),e(Es,Ai),p(s,pl,i),p(s,ia,i),e(ia,zi),p(s,hl,i),p(s,As,i),e(As,$i),e(As,ct),e(ct,Ii),e(As,Mi),p(s,cl,i),d(_e,s,i),p(s,ul,i),p(s,W,i),e(W,Li),e(W,ut),e(ut,Pi),e(W,Fi),e(W,mt),e(mt,Ni),e(W,Ci),p(s,ml,i),p(s,zs,i),e(zs,Si),e(zs,pa),e(pa,Di),e(zs,Oi),p(s,gl,i),p(s,hs,i),e(hs,$s),e($s,gt),d(ke,gt,null),e(hs,Bi),e(hs,dt),e(dt,Ri),p(s,dl,i),p(s,z,i),e(z,Hi),e(z,ft),e(ft,Gi),e(z,Ui),e(z,jt),e(jt,Qi),e(z,Yi),e(z,_t),e(_t,Wi),e(z,Xi),e(z,kt),e(kt,Vi),e(z,Ji),e(z,we),e(we,Ki),e(z,Zi),p(s,fl,i),d(xe,s,i),p(s,jl,i),p(s,$,i),e($,sp),e($,wt),e(wt,ep),e($,ap),e($,xt),e(xt,tp),e($,np),e($,ha),e(ha,lp),e($,op),e($,yt),e(yt,rp),e($,ip),e($,bt),e(bt,pp),e($,hp),p(s,_l,i),p(s,X,i),e(X,cp),e(X,vt),e(vt,up),e(X,mp),e(X,qt),e(qt,gp),e(X,dp),p(s,kl,i),d(ye,s,i),p(s,wl,i),p(s,k,i),e(k,fp),e(k,Tt),e(Tt,jp),e(k,_p),e(k,Et),e(Et,kp),e(k,wp),e(k,At),e(At,xp),e(k,yp),e(k,zt),e(zt,bp),e(k,vp),e(k,$t),e($t,qp),e(k,Tp),e(k,It),e(It,Ep),e(k,Ap),e(k,Mt
),e(Mt,zp),e(k,$p),e(k,Lt),e(Lt,Ip),e(k,Mp),e(k,Pt),e(Pt,Lp),e(k,Pp),p(s,xl,i),p(s,Is,i),e(Is,Fp),e(Is,be),e(be,Np),e(Is,Cp),p(s,yl,i),p(s,cs,i),e(cs,Ms),e(Ms,Ft),d(ve,Ft,null),e(cs,Sp),e(cs,Nt),e(Nt,Dp),p(s,bl,i),p(s,Ls,i),e(Ls,Op),e(Ls,qe),e(qe,Bp),e(Ls,Rp),p(s,vl,i),p(s,ca,i),e(ca,Hp),p(s,ql,i),p(s,w,i),e(w,Ct),e(Ct,Gp),e(w,Up),e(w,St),e(St,Qp),e(w,Yp),e(w,Dt),e(Dt,Wp),e(w,Xp),e(w,Ot),e(Ot,Vp),e(w,Jp),e(w,Bt),e(Bt,Kp),e(w,Zp),e(w,Rt),e(Rt,sh),e(w,eh),e(w,Ht),e(Ht,ah),e(w,th),e(w,Gt),e(Gt,nh),e(w,lh),e(w,Ut),e(Ut,oh),p(s,Tl,i),p(s,V,i),e(V,rh),e(V,Te),e(Te,ih),e(V,ph),e(V,Ee),e(Ee,hh),e(V,ch),p(s,El,i),d(Ae,s,i),p(s,Al,i),p(s,ua,i),e(ua,uh),p(s,zl,i),d(ze,s,i),p(s,$l,i),p(s,ma,i),e(ma,mh),p(s,Il,i),p(s,ga,i),e(ga,gh),p(s,Ml,i),p(s,I,i),e(I,Qt),e(Qt,dh),e(I,fh),e(I,Yt),e(Yt,jh),e(I,_h),e(I,Wt),e(Wt,kh),e(I,wh),e(I,Xt),e(Xt,xh),e(I,yh),e(I,Vt),e(Vt,bh),e(I,vh),e(I,Jt),e(Jt,qh),p(s,Ll,i),d($e,s,i),p(s,Pl,i),p(s,da,i),e(da,Th),p(s,Fl,i),p(s,J,i),e(J,Eh),e(J,Kt),e(Kt,Ah),e(J,zh),e(J,Zt),e(Zt,$h),e(J,Ih),p(s,Nl,i),d(Ie,s,i),p(s,Cl,i),p(s,us,i),e(us,Ps),e(Ps,sn),d(Me,sn,null),e(us,Mh),e(us,en),e(en,Lh),p(s,Sl,i),p(s,Fs,i),e(Fs,Ph),e(Fs,Le),e(Le,Fh),e(Fs,Nh),p(s,Dl,i),p(s,Ns,i),e(Ns,Ch),e(Ns,Pe),e(Pe,Sh),e(Ns,Dh),p(s,Ol,i),p(s,fa,i),e(fa,Oh),p(s,Bl,i),d(Fe,s,i),p(s,Rl,i),p(s,P,i),e(P,Bh),e(P,an),e(an,Rh),e(P,Hh),e(P,tn),e(tn,Gh),e(P,Uh),e(P,nn),e(nn,Qh),e(P,Yh),e(P,ln),e(ln,Wh),e(P,Xh),p(s,Hl,i),d(Ne,s,i),p(s,Gl,i),p(s,ja,i),e(ja,Vh),p(s,Ul,i),p(s,N,i),e(N,ms),e(ms,Jh),e(ms,on),e(on,Kh),e(ms,Zh),e(ms,rn),e(rn,sc),e(ms,ec),e(N,ac),e(N,pn),e(pn,tc),e(N,nc),e(N,hn),e(hn,lc),e(N,oc),e(N,Ce),e(Ce,rc),e(Ce,cn),e(cn,ic),e(Ce,pc),p(s,Ql,i),p(s,_a,i),e(_a,hc),p(s,Yl,i),d(Se,s,i),p(s,Wl,i),p(s,gs,i),e(gs,Cs),e(Cs,un),d(De,un,null),e(gs,cc),e(gs,mn),e(mn,uc),p(s,Xl,i),p(s,Ss,i),e(Ss,mc),e(Ss,Oe),e(Oe,gc),e(Ss,dc),p(s,Vl,i),p(s,Ds,i),e(Ds,fc),e(Ds,Be),e(Be,jc),e(Ds,_c),p(s,Jl,i),p(s,ka,i),e(ka,kc),p(s,Kl,i),d(Re,s,i),p(s,Zl,i),p(s,C,i),e(C,wc),e(C,gn),e(gn,xc),e(C,yc),e(C,dn),e(dn,bc),e(C,vc),e(C,fn),e(fn,qc),e(C,Tc),p(s,so,i),p(s,wa,i),e(wa,Ec),p(s,eo,i),p(s,S,i),e(S,ds),e(ds,Ac),e(ds,jn),e(jn,zc),e(ds,$c),e(ds,_n),e(_n,Ic),e(ds,Mc),e(S,Lc),e(S,kn),e(kn,Pc),e(S,Fc),e(S,wn),e(wn,Nc),e(S,Cc),e(S,He),e(He,Sc),e(He,xn),e(xn,Dc),e(He,Oc),p(s,ao,i),d(Ge,s,i),p(s,to,i),p(s,xa,i),e(xa,Bc),no=!0},p(s,[i]){const 
Ue={};i&2&&(Ue.$$scope={dirty:i,ctx:s}),ws.$set(Ue)},i(s){no||(f(y.$$.fragment,s),f(Gs.$$.fragment,s),f(ws.$$.fragment,s),f(Ws.$$.fragment,s),f(Zs.$$.fragment,s),f(se.$$.fragment,s),f(ee.$$.fragment,s),f(ne.$$.fragment,s),f(le.$$.fragment,s),f(oe.$$.fragment,s),f(re.$$.fragment,s),f(pe.$$.fragment,s),f(ue.$$.fragment,s),f(me.$$.fragment,s),f(de.$$.fragment,s),f(fe.$$.fragment,s),f(_e.$$.fragment,s),f(ke.$$.fragment,s),f(xe.$$.fragment,s),f(ye.$$.fragment,s),f(ve.$$.fragment,s),f(Ae.$$.fragment,s),f(ze.$$.fragment,s),f($e.$$.fragment,s),f(Ie.$$.fragment,s),f(Me.$$.fragment,s),f(Fe.$$.fragment,s),f(Ne.$$.fragment,s),f(Se.$$.fragment,s),f(De.$$.fragment,s),f(Re.$$.fragment,s),f(Ge.$$.fragment,s),no=!0)},o(s){j(y.$$.fragment,s),j(Gs.$$.fragment,s),j(ws.$$.fragment,s),j(Ws.$$.fragment,s),j(Zs.$$.fragment,s),j(se.$$.fragment,s),j(ee.$$.fragment,s),j(ne.$$.fragment,s),j(le.$$.fragment,s),j(oe.$$.fragment,s),j(re.$$.fragment,s),j(pe.$$.fragment,s),j(ue.$$.fragment,s),j(me.$$.fragment,s),j(de.$$.fragment,s),j(fe.$$.fragment,s),j(_e.$$.fragment,s),j(ke.$$.fragment,s),j(xe.$$.fragment,s),j(ye.$$.fragment,s),j(ve.$$.fragment,s),j(Ae.$$.fragment,s),j(ze.$$.fragment,s),j($e.$$.fragment,s),j(Ie.$$.fragment,s),j(Me.$$.fragment,s),j(Fe.$$.fragment,s),j(Ne.$$.fragment,s),j(Se.$$.fragment,s),j(De.$$.fragment,s),j(Re.$$.fragment,s),j(Ge.$$.fragment,s),no=!1},d(s){a(T),s&&a(G),s&&a(E),_(y),s&&a(yn),_(Gs,s),s&&a(bn),s&&a(Qe),s&&a(vn),s&&a(js),s&&a(qn),s&&a(We),s&&a(Tn),s&&a(_s),s&&a(En),s&&a(Xe),s&&a(An),s&&a(ks),s&&a(zn),s&&a(Ve),s&&a($n),_(ws,s),s&&a(In),s&&a(ns),_(Ws),s&&a(Mn),s&&a(M),s&&a(Ln),s&&a(Je),s&&a(Pn),s&&a(Ke),s&&a(Fn),_(Zs,s),s&&a(Nn),s&&a(Ze),s&&a(Cn),s&&a(L),s&&a(Sn),_(se,s),s&&a(Dn),s&&a(ls),_(ee),s&&a(On),s&&a(Q),s&&a(Bn),s&&a(sa),s&&a(Rn),_(ne,s),s&&a(Hn),s&&a(ea),s&&a(Gn),_(le,s),s&&a(Un),s&&a(aa),s&&a(Qn),s&&a(v),s&&a(Yn),_(oe,s),s&&a(Wn),s&&a(os),_(re),s&&a(Xn),s&&a(ta),s&&a(Vn),s&&a(vs),s&&a(Jn),s&&a(rs),_(pe),s&&a(Kn),s&&a(Y),s&&a(Zn),s&&a(na),s&&a(sl),_(ue,s),s&&a(el),s&&a(la),s&&a(al),_(me,s),s&&a(tl),s&&a(oa),s&&a(nl),s&&a(A),s&&a(ll),_(de,s),s&&a(ol),s&&a(ra),s&&a(rl),s&&a(ps),_(fe),s&&a(il),s&&a(Es),s&&a(pl),s&&a(ia),s&&a(hl),s&&a(As),s&&a(cl),_(_e,s),s&&a(ul),s&&a(W),s&&a(ml),s&&a(zs),s&&a(gl),s&&a(hs),_(ke),s&&a(dl),s&&a(z),s&&a(fl),_(xe,s),s&&a(jl),s&&a($),s&&a(_l),s&&a(X),s&&a(kl),_(ye,s),s&&a(wl),s&&a(k),s&&a(xl),s&&a(Is),s&&a(yl),s&&a(cs),_(ve),s&&a(bl),s&&a(Ls),s&&a(vl),s&&a(ca),s&&a(ql),s&&a(w),s&&a(Tl),s&&a(V),s&&a(El),_(Ae,s),s&&a(Al),s&&a(ua),s&&a(zl),_(ze,s),s&&a($l),s&&a(ma),s&&a(Il),s&&a(ga),s&&a(Ml),s&&a(I),s&&a(Ll),_($e,s),s&&a(Pl),s&&a(da),s&&a(Fl),s&&a(J),s&&a(Nl),_(Ie,s),s&&a(Cl),s&&a(us),_(Me),s&&a(Sl),s&&a(Fs),s&&a(Dl),s&&a(Ns),s&&a(Ol),s&&a(fa),s&&a(Bl),_(Fe,s),s&&a(Rl),s&&a(P),s&&a(Hl),_(Ne,s),s&&a(Gl),s&&a(ja),s&&a(Ul),s&&a(N),s&&a(Ql),s&&a(_a),s&&a(Yl),_(Se,s),s&&a(Wl),s&&a(gs),_(De),s&&a(Xl),s&&a(Ss),s&&a(Vl),s&&a(Ds),s&&a(Jl),s&&a(ka),s&&a(Kl),_(Re,s),s&&a(Zl),s&&a(C),s&&a(so),s&&a(wa),s&&a(eo),s&&a(S),s&&a(ao),_(Ge,s),s&&a(to),s&&a(xa)}}}const Yg={local:"summary-of-the-tasks",sections:[{local:"sequence-classification",title:"Sequence Classification"},{local:"extractive-question-answering",title:"Extractive Question Answering"},{local:"language-modeling",sections:[{local:"masked-language-modeling",title:"Masked Language Modeling"},{local:"causal-language-modeling",title:"Causal Language Modeling"},{local:"text-generation",title:"Text Generation"}],title:"Language Modeling"},{local:"named-entity-recognition",title:"Named Entity 
Recognition"},{local:"summarization",title:"Summarization"},{local:"translation",title:"Translation"}],title:"Summary of the tasks"};function Wg(Ma,T,G){let{fw:E}=T;return Ma.$$set=b=>{"fw"in b&&G(0,E=b.fw)},[E]}class ad extends Dg{constructor(T){super();Og(this,T,Wg,Qg,Bg,{fw:0})}}export{ad as default,Yg as metadata};
259
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages/perplexity.mdx-36aeb100.js
import{S as at,i as et,s as tt,e as p,k as r,w as R,t as n,W as P,M as nt,c as o,d as a,m as h,a as m,x as J,h as l,X as z,b as c,N as he,F as e,g as i,y as K,L as lt,q as Q,o as U,B as V}from"../chunks/vendor-4833417e.js";import{I as ce}from"../chunks/IconCopyLink-4b81c553.js";import{C as de}from"../chunks/CodeBlock-6a3d1b46.js";import{D as it}from"../chunks/DocNotebookDropdown-ecff2a90.js";import"../chunks/CopyButton-dacfbfaf.js";function pt(ya){let b,Y,u,y,ms,A,xa,rs,ba,Ps,S,zs,$,_a,Z,ka,Pa,Ts,x,za,qs,He='<span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mi>X</mi><mo>=</mo><mo stretchy="false">(</mo><msub><mi>x</mi><mn>0</mn></msub><mo separator="true">,</mo><msub><mi>x</mi><mn>1</mn></msub><mo separator="true">,</mo><mo>\u2026</mo><mo separator="true">,</mo><msub><mi>x</mi><mi>t</mi></msub><mo stretchy="false">)</mo></mrow><annotation encoding="application/x-tex">X = (x_0, x_1, \\dots, x_t)</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:0.6833em;"></span><span class="mord mathnormal" style="margin-right:0.07847em;">X</span><span class="mspace" style="margin-right:0.2778em;"></span><span class="mrel">=</span><span class="mspace" style="margin-right:0.2778em;"></span></span><span class="base"><span class="strut" style="height:1em;vertical-align:-0.25em;"></span><span class="mopen">(</span><span class="mord"><span class="mord mathnormal">x</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.3011em;"><span style="top:-2.55em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight">0</span></span></span></span><span class="vlist-s">\u200B</span></span><span class="vlist-r"><span class="vlist" style="height:0.15em;"><span></span></span></span></span></span></span><span class="mpunct">,</span><span class="mspace" style="margin-right:0.1667em;"></span><span class="mord"><span class="mord mathnormal">x</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.3011em;"><span style="top:-2.55em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight">1</span></span></span></span><span class="vlist-s">\u200B</span></span><span class="vlist-r"><span class="vlist" style="height:0.15em;"><span></span></span></span></span></span></span><span class="mpunct">,</span><span class="mspace" style="margin-right:0.1667em;"></span><span class="minner">\u2026</span><span class="mspace" style="margin-right:0.1667em;"></span><span class="mpunct">,</span><span class="mspace" style="margin-right:0.1667em;"></span><span class="mord"><span class="mord mathnormal">x</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.2806em;"><span style="top:-2.55em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mathnormal mtight">t</span></span></span></span><span class="vlist-s">\u200B</span></span><span class="vlist-r"><span class="vlist" style="height:0.15em;"><span></span></span></span></span></span></span><span class="mclose">)</span></span></span></span>',$s,Es,Re='<span 
class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mi>X</mi></mrow><annotation encoding="application/x-tex">X</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:0.6833em;"></span><span class="mord mathnormal" style="margin-right:0.07847em;">X</span></span></span></span>',Ms,js,Je='<span class="katex-display"><span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML" display="block"><semantics><mrow><mtext>PPL</mtext><mo stretchy="false">(</mo><mi>X</mi><mo stretchy="false">)</mo><mo>=</mo><mi>exp</mi><mo>\u2061</mo><mrow><mo fence="true">{</mo><mrow><mo>\u2212</mo><mfrac><mn>1</mn><mi>t</mi></mfrac><munderover><mo>\u2211</mo><mi>i</mi><mi>t</mi></munderover><mi>log</mi><mo>\u2061</mo><msub><mi>p</mi><mi>\u03B8</mi></msub><mo stretchy="false">(</mo><msub><mi>x</mi><mi>i</mi></msub><mi mathvariant="normal">\u2223</mi><msub><mi>x</mi><mrow><mo>&lt;</mo><mi>i</mi></mrow></msub><mo stretchy="false">)</mo></mrow><mo fence="true">}</mo></mrow></mrow><annotation encoding="application/x-tex">\\text{PPL}(X) = \\exp \\left\\{ {-\\frac{1}{t}\\sum_i^t \\log p_\\theta (x_i|x_{&lt;i}) } \\right\\}</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:1em;vertical-align:-0.25em;"></span><span class="mord text"><span class="mord">PPL</span></span><span class="mopen">(</span><span class="mord mathnormal" style="margin-right:0.07847em;">X</span><span class="mclose">)</span><span class="mspace" style="margin-right:0.2778em;"></span><span class="mrel">=</span><span class="mspace" style="margin-right:0.2778em;"></span></span><span class="base"><span class="strut" style="height:3.0582em;vertical-align:-1.2777em;"></span><span class="mop">exp</span><span class="mspace" style="margin-right:0.1667em;"></span><span class="minner"><span class="mopen delimcenter" style="top:0em;"><span class="delimsizing size4">{</span></span><span class="mord"><span class="mord">\u2212</span><span class="mord"><span class="mopen nulldelimiter"></span><span class="mfrac"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:1.3214em;"><span style="top:-2.314em;"><span class="pstrut" style="height:3em;"></span><span class="mord"><span class="mord mathnormal">t</span></span></span><span style="top:-3.23em;"><span class="pstrut" style="height:3em;"></span><span class="frac-line" style="border-bottom-width:0.04em;"></span></span><span style="top:-3.677em;"><span class="pstrut" style="height:3em;"></span><span class="mord"><span class="mord">1</span></span></span></span><span class="vlist-s">\u200B</span></span><span class="vlist-r"><span class="vlist" style="height:0.686em;"><span></span></span></span></span></span><span class="mclose nulldelimiter"></span></span><span class="mspace" style="margin-right:0.1667em;"></span><span class="mop op-limits"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:1.7806em;"><span style="top:-1.8723em;margin-left:0em;"><span class="pstrut" style="height:3.05em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mathnormal mtight">i</span></span></span><span style="top:-3.05em;"><span class="pstrut" style="height:3.05em;"></span><span><span class="mop op-symbol large-op">\u2211</span></span></span><span style="top:-4.3em;margin-left:0em;"><span class="pstrut" 
style="height:3.05em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mathnormal mtight">t</span></span></span></span><span class="vlist-s">\u200B</span></span><span class="vlist-r"><span class="vlist" style="height:1.2777em;"><span></span></span></span></span></span><span class="mspace" style="margin-right:0.1667em;"></span><span class="mop">lo<span style="margin-right:0.01389em;">g</span></span><span class="mspace" style="margin-right:0.1667em;"></span><span class="mord"><span class="mord mathnormal">p</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.3361em;"><span style="top:-2.55em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mathnormal mtight" style="margin-right:0.02778em;">\u03B8</span></span></span></span><span class="vlist-s">\u200B</span></span><span class="vlist-r"><span class="vlist" style="height:0.15em;"><span></span></span></span></span></span></span><span class="mopen">(</span><span class="mord"><span class="mord mathnormal">x</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.3117em;"><span style="top:-2.55em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mathnormal mtight">i</span></span></span></span><span class="vlist-s">\u200B</span></span><span class="vlist-r"><span class="vlist" style="height:0.15em;"><span></span></span></span></span></span></span><span class="mord">\u2223</span><span class="mord"><span class="mord mathnormal">x</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.3117em;"><span style="top:-2.55em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight"><span class="mrel mtight">&lt;</span><span class="mord mathnormal mtight">i</span></span></span></span></span><span class="vlist-s">\u200B</span></span><span class="vlist-r"><span class="vlist" style="height:0.1774em;"><span></span></span></span></span></span></span><span class="mclose">)</span></span><span class="mclose delimcenter" style="top:0em;"><span class="delimsizing size4">}</span></span></span></span></span></span></span>',Ls,_,Ta,Cs,Ke='<span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mi>log</mi><mo>\u2061</mo><msub><mi>p</mi><mi>\u03B8</mi></msub><mo stretchy="false">(</mo><msub><mi>x</mi><mi>i</mi></msub><mi mathvariant="normal">\u2223</mi><msub><mi>x</mi><mrow><mo>&lt;</mo><mi>i</mi></mrow></msub><mo stretchy="false">)</mo></mrow><annotation encoding="application/x-tex">\\log p_\\theta (x_i|x_{&lt;i})</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:1em;vertical-align:-0.25em;"></span><span class="mop">lo<span style="margin-right:0.01389em;">g</span></span><span class="mspace" style="margin-right:0.1667em;"></span><span class="mord"><span class="mord mathnormal">p</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.3361em;"><span style="top:-2.55em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span 
class="sizing reset-size6 size3 mtight"><span class="mord mathnormal mtight" style="margin-right:0.02778em;">\u03B8</span></span></span></span><span class="vlist-s">\u200B</span></span><span class="vlist-r"><span class="vlist" style="height:0.15em;"><span></span></span></span></span></span></span><span class="mopen">(</span><span class="mord"><span class="mord mathnormal">x</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.3117em;"><span style="top:-2.55em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mathnormal mtight">i</span></span></span></span><span class="vlist-s">\u200B</span></span><span class="vlist-r"><span class="vlist" style="height:0.15em;"><span></span></span></span></span></span></span><span class="mord">\u2223</span><span class="mord"><span class="mord mathnormal">x</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.3117em;"><span style="top:-2.55em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight"><span class="mrel mtight">&lt;</span><span class="mord mathnormal mtight">i</span></span></span></span></span><span class="vlist-s">\u200B</span></span><span class="vlist-r"><span class="vlist" style="height:0.1774em;"><span></span></span></span></span></span></span><span class="mclose">)</span></span></span></span>',Gs,Is,Qe='<span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><msub><mi>x</mi><mrow><mo>&lt;</mo><mi>i</mi></mrow></msub></mrow><annotation encoding="application/x-tex">x_{&lt;i}</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:0.6079em;vertical-align:-0.1774em;"></span><span class="mord"><span class="mord mathnormal">x</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.3117em;"><span style="top:-2.55em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight"><span class="mrel mtight">&lt;</span><span class="mord mathnormal mtight">i</span></span></span></span></span><span class="vlist-s">\u200B</span></span><span class="vlist-r"><span class="vlist" style="height:0.1774em;"><span></span></span></span></span></span></span></span></span></span>',Ws,As,E,qa,B,$a,Ea,Ss,T,M,hs,D,Ma,cs,ja,Bs,ss,La,Ds,j,ge,Fs,f,Ca,as,Ga,Ia,Ns,Ue='<span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><msub><mi>p</mi><mi>\u03B8</mi></msub><mo stretchy="false">(</mo><msub><mi>x</mi><mi>t</mi></msub><mi mathvariant="normal">\u2223</mi><msub><mi>x</mi><mrow><mo>&lt;</mo><mi>t</mi></mrow></msub><mo stretchy="false">)</mo></mrow><annotation encoding="application/x-tex">p_\\theta(x_t|x_{&lt;t})</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:1em;vertical-align:-0.25em;"></span><span class="mord"><span class="mord mathnormal">p</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.3361em;"><span 
style="top:-2.55em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mathnormal mtight" style="margin-right:0.02778em;">\u03B8</span></span></span></span><span class="vlist-s">\u200B</span></span><span class="vlist-r"><span class="vlist" style="height:0.15em;"><span></span></span></span></span></span></span><span class="mopen">(</span><span class="mord"><span class="mord mathnormal">x</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.2806em;"><span style="top:-2.55em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mathnormal mtight">t</span></span></span></span><span class="vlist-s">\u200B</span></span><span class="vlist-r"><span class="vlist" style="height:0.15em;"><span></span></span></span></span></span></span><span class="mord">\u2223</span><span class="mord"><span class="mord mathnormal">x</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.2806em;"><span style="top:-2.55em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight"><span class="mrel mtight">&lt;</span><span class="mord mathnormal mtight">t</span></span></span></span></span><span class="vlist-s">\u200B</span></span><span class="vlist-r"><span class="vlist" style="height:0.1774em;"><span></span></span></span></span></span></span><span class="mclose">)</span></span></span></span>',Os,Xs,Ve='<span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mi>t</mi></mrow><annotation encoding="application/x-tex">t</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:0.6151em;"></span><span class="mord mathnormal">t</span></span></span></span>',Hs,Rs,w,Wa,Js,Ye='<span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mi>k</mi></mrow><annotation encoding="application/x-tex">k</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:0.6944em;"></span><span class="mord mathnormal" style="margin-right:0.03148em;">k</span></span></span></span>',Ks,Qs,Ze='<span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><msub><mi>x</mi><mi>t</mi></msub></mrow><annotation encoding="application/x-tex">x_t</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:0.5806em;vertical-align:-0.15em;"></span><span class="mord"><span class="mord mathnormal">x</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.2806em;"><span style="top:-2.55em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mathnormal mtight">t</span></span></span></span><span class="vlist-s">\u200B</span></span><span class="vlist-r"><span class="vlist" style="height:0.15em;"><span></span></span></span></span></span></span></span></span></span>',Us,Vs,st='<span class="katex"><span 
class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mi>k</mi><mo>\u2212</mo><mn>1</mn></mrow><annotation encoding="application/x-tex">k-1</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:0.7778em;vertical-align:-0.0833em;"></span><span class="mord mathnormal" style="margin-right:0.03148em;">k</span><span class="mspace" style="margin-right:0.2222em;"></span><span class="mbin">\u2212</span><span class="mspace" style="margin-right:0.2222em;"></span></span><span class="base"><span class="strut" style="height:0.6444em;"></span><span class="mord">1</span></span></span></span>',Ys,Zs,L,ue,sa,es,Aa,aa,ts,Sa,ea,C,fe,ta,ns,Ba,na,q,G,ds,F,Da,gs,Fa,la,ls,Na,ia,N,pa,is,Oa,oa,O,ma,g,Xa,us,Ha,Ra,fs,Ja,Ka,ws,Qa,Ua,vs,Va,Ya,ra,X,ha,ps,Za,ca,d,se,ys,ae,ee,xs,te,ne,bs,le,ie,_s,pe,oe,ks,me,re,da;return A=new ce({}),S=new it({props:{classNames:"absolute z-10 right-0 top-0",options:[{label:"Mixed",value:"https://colab.research.google.com/github/huggingface/notebooks/blob/master/transformers_doc/perplexity.ipynb"},{label:"PyTorch",value:"https://colab.research.google.com/github/huggingface/notebooks/blob/master/transformers_doc/pytorch/perplexity.ipynb"},{label:"TensorFlow",value:"https://colab.research.google.com/github/huggingface/notebooks/blob/master/transformers_doc/tensorflow/perplexity.ipynb"},{label:"Mixed",value:"https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/transformers_doc/perplexity.ipynb"},{label:"PyTorch",value:"https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/transformers_doc/pytorch/perplexity.ipynb"},{label:"TensorFlow",value:"https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/transformers_doc/tensorflow/perplexity.ipynb"}]}}),D=new ce({}),F=new ce({}),N=new de({props:{code:`from transformers import GPT2LMHeadModel, GPT2TokenizerFast device = "cuda" model_id = "gpt2-large" model = GPT2LMHeadModel.from_pretrained(model_id).to(device) tokenizer = GPT2TokenizerFast.from_pretrained(model_id)`,highlighted:`<span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> GPT2LMHeadModel, GPT2TokenizerFast device = <span class="hljs-string">&quot;cuda&quot;</span> model_id = <span class="hljs-string">&quot;gpt2-large&quot;</span> model = GPT2LMHeadModel.from_pretrained(model_id).to(device) tokenizer = GPT2TokenizerFast.from_pretrained(model_id)`}}),O=new de({props:{code:`from datasets import load_dataset test = load_dataset("wikitext", "wikitext-2-raw-v1", split="test") encodings = tokenizer("\\n\\n".join(test["text"]), return_tensors="pt")`,highlighted:`<span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset test = load_dataset(<span class="hljs-string">&quot;wikitext&quot;</span>, <span class="hljs-string">&quot;wikitext-2-raw-v1&quot;</span>, split=<span class="hljs-string">&quot;test&quot;</span>) encodings = tokenizer(<span class="hljs-string">&quot;\\n\\n&quot;</span>.join(test[<span class="hljs-string">&quot;text&quot;</span>]), return_tensors=<span class="hljs-string">&quot;pt&quot;</span>)`}}),X=new de({props:{code:`import torch from tqdm import tqdm max_length = model.config.n_positions stride = 512 nlls = [] for i in tqdm(range(0, encodings.input_ids.size(1), stride)): begin_loc = max(i + stride - max_length, 0) end_loc = min(i + stride, encodings.input_ids.size(1)) trg_len = end_loc - i # may be different 
from stride on last loop input_ids = encodings.input_ids[:, begin_loc:end_loc].to(device) target_ids = input_ids.clone() target_ids[:, :-trg_len] = -100 with torch.no_grad(): outputs = model(input_ids, labels=target_ids) neg_log_likelihood = outputs[0] * trg_len nlls.append(neg_log_likelihood) ppl = torch.exp(torch.stack(nlls).sum() / end_loc)`,highlighted:`<span class="hljs-keyword">import</span> torch <span class="hljs-keyword">from</span> tqdm <span class="hljs-keyword">import</span> tqdm max_length = model.config.n_positions stride = <span class="hljs-number">512</span> nlls = [] <span class="hljs-keyword">for</span> i <span class="hljs-keyword">in</span> tqdm(<span class="hljs-built_in">range</span>(<span class="hljs-number">0</span>, encodings.input_ids.size(<span class="hljs-number">1</span>), stride)): begin_loc = <span class="hljs-built_in">max</span>(i + stride - max_length, <span class="hljs-number">0</span>) end_loc = <span class="hljs-built_in">min</span>(i + stride, encodings.input_ids.size(<span class="hljs-number">1</span>)) trg_len = end_loc - i <span class="hljs-comment"># may be different from stride on last loop</span> input_ids = encodings.input_ids[:, begin_loc:end_loc].to(device) target_ids = input_ids.clone() target_ids[:, :-trg_len] = -<span class="hljs-number">100</span> <span class="hljs-keyword">with</span> torch.no_grad(): outputs = model(input_ids, labels=target_ids) neg_log_likelihood = outputs[<span class="hljs-number">0</span>] * trg_len nlls.append(neg_log_likelihood) ppl = torch.exp(torch.stack(nlls).<span class="hljs-built_in">sum</span>() / end_loc)`}}),{c(){b=p("meta"),Y=r(),u=p("h1"),y=p("a"),ms=p("span"),R(A.$$.fragment),xa=r(),rs=p("span"),ba=n("Perplexity of fixed-length models"),Ps=r(),R(S.$$.fragment),zs=r(),$=p("p"),_a=n(`Perplexity (PPL) is one of the most common metrics for evaluating language models. Before diving in, we should note that the metric applies specifically to classical language models (sometimes called autoregressive or causal language models) and is not well defined for masked language models like BERT (see `),Z=p("a"),ka=n("summary of the models"),Pa=n(")."),Ts=r(),x=p("p"),za=n(`Perplexity is defined as the exponentiated average negative log-likelihood of a sequence. If we have a tokenized sequence `),qs=new P,$s=n(", then the perplexity of "),Es=new P,Ms=n(` is, `),js=new P,Ls=r(),_=p("p"),Ta=n("where "),Cs=new P,Gs=n(" is the log-likelihood of the ith token conditioned on the preceding tokens "),Is=new P,Ws=n(" according to our model. Intuitively, it can be thought of as an evaluation of the model\u2019s ability to predict uniformly among the set of specified tokens in a corpus. Importantly, this means that the tokenization procedure has a direct impact on a model\u2019s perplexity which should always be taken into consideration when comparing different models."),As=r(),E=p("p"),qa=n(`This is also equivalent to the exponentiation of the cross-entropy between the data and model predictions. 
For more intuition about perplexity and its relationship to Bits Per Character (BPC) and data compression, check out this `),B=p("a"),$a=n("fantastic blog post on The Gradient"),Ea=n("."),Ss=r(),T=p("h2"),M=p("a"),hs=p("span"),R(D.$$.fragment),Ma=r(),cs=p("span"),ja=n("Calculating PPL with fixed-length models"),Bs=r(),ss=p("p"),La=n(`If we weren\u2019t limited by a model\u2019s context size, we would evaluate the model\u2019s perplexity by autoregressively factorizing a sequence and conditioning on the entire preceding subsequence at each step, as shown below.`),Ds=r(),j=p("img"),Fs=r(),f=p("p"),Ca=n(`When working with approximate models, however, we typically have a constraint on the number of tokens the model can process. The largest version of `),as=p("a"),Ga=n("GPT-2"),Ia=n(`, for example, has a fixed length of 1024 tokens, so we cannot calculate `),Ns=new P,Os=n(" directly when "),Xs=new P,Hs=n(" is greater than 1024."),Rs=r(),w=p("p"),Wa=n(`Instead, the sequence is typically broken into subsequences equal to the model\u2019s maximum input size. If a model\u2019s max input size is `),Js=new P,Ks=n(", we then approximate the likelihood of a token "),Qs=new P,Us=n(` by conditioning only on the `),Vs=new P,Ys=n(` tokens that precede it rather than the entire context. When evaluating the model\u2019s perplexity of a sequence, a tempting but suboptimal approach is to break the sequence into disjoint chunks and add up the decomposed log-likelihoods of each segment independently.`),Zs=r(),L=p("img"),sa=r(),es=p("p"),Aa=n(`This is quick to compute since the perplexity of each segment can be computed in one forward pass, but serves as a poor approximation of the fully-factorized perplexity and will typically yield a higher (worse) PPL because the model will have less context at most of the prediction steps.`),aa=r(),ts=p("p"),Sa=n(`Instead, the PPL of fixed-length models should be evaluated with a sliding-window strategy. This involves repeatedly sliding the context window so that the model has more context when making each prediction.`),ea=r(),C=p("img"),ta=r(),ns=p("p"),Ba=n(`This is a closer approximation to the true decomposition of the sequence probability and will typically yield a more favorable score. The downside is that it requires a separate forward pass for each token in the corpus. A good practical compromise is to employ a strided sliding window, moving the context by larger strides rather than sliding by 1 token a time. This allows computation to proceed much faster while still giving the model a large context to make predictions at each step.`),na=r(),q=p("h2"),G=p("a"),ds=p("span"),R(F.$$.fragment),Da=r(),gs=p("span"),Fa=n("Example: Calculating perplexity with GPT-2 in \u{1F917} Transformers"),la=r(),ls=p("p"),Na=n("Let\u2019s demonstrate this process with GPT-2."),ia=r(),R(N.$$.fragment),pa=r(),is=p("p"),Oa=n(`We\u2019ll load in the WikiText-2 dataset and evaluate the perplexity using a few different sliding-window strategies. Since this dataset is small and we\u2019re just doing one forward pass over the set, we can just load and encode the entire dataset in memory.`),oa=r(),R(O.$$.fragment),ma=r(),g=p("p"),Xa=n("With \u{1F917} Transformers, we can simply pass the "),us=p("code"),Ha=n("input_ids"),Ra=n(" as the "),fs=p("code"),Ja=n("labels"),Ka=n(` to our model, and the average negative log-likelihood for each token is returned as the loss. With our sliding window approach, however, there is overlap in the tokens we pass to the model at each iteration. 
We don\u2019t want the log-likelihood for the tokens we\u2019re just treating as context to be included in our loss, so we can set these targets to `),ws=p("code"),Qa=n("-100"),Ua=n(` so that they are ignored. The following is an example of how we could do this with a stride of `),vs=p("code"),Va=n("512"),Ya=n(`. This means that the model will have at least 512 tokens for context when calculating the conditional likelihood of any one token (provided there are 512 preceding tokens available to condition on).`),ra=r(),R(X.$$.fragment),ha=r(),ps=p("p"),Za=n(`Running this with the stride length equal to the max input length is equivalent to the suboptimal, non-sliding-window strategy we discussed above. The smaller the stride, the more context the model will have in making each prediction, and the better the reported perplexity will typically be.`),ca=r(),d=p("p"),se=n("When we run the above with "),ys=p("code"),ae=n("stride = 1024"),ee=n(", i.e. no overlap, the resulting PPL is "),xs=p("code"),te=n("19.64"),ne=n(`, which is about the same as the `),bs=p("code"),le=n("19.93"),ie=n(" reported in the GPT-2 paper. By using "),_s=p("code"),pe=n("stride = 512"),oe=n(` and thereby employing our striding window strategy, this jumps down to `),ks=p("code"),me=n("16.53"),re=n(`. This is not only a more favorable score, but is calculated in a way that is closer to the true autoregressive decomposition of a sequence likelihood.`),this.h()},l(s){const t=nt('[data-svelte="svelte-1phssyn"]',document.head);b=o(t,"META",{name:!0,content:!0}),t.forEach(a),Y=h(s),u=o(s,"H1",{class:!0});var ga=m(u);y=o(ga,"A",{id:!0,class:!0,href:!0});var we=m(y);ms=o(we,"SPAN",{});var ve=m(ms);J(A.$$.fragment,ve),ve.forEach(a),we.forEach(a),xa=h(ga),rs=o(ga,"SPAN",{});var ye=m(rs);ba=l(ye,"Perplexity of fixed-length models"),ye.forEach(a),ga.forEach(a),Ps=h(s),J(S.$$.fragment,s),zs=h(s),$=o(s,"P",{});var ua=m($);_a=l(ua,`Perplexity (PPL) is one of the most common metrics for evaluating language models. Before diving in, we should note that the metric applies specifically to classical language models (sometimes called autoregressive or causal language models) and is not well defined for masked language models like BERT (see `),Z=o(ua,"A",{href:!0});var xe=m(Z);ka=l(xe,"summary of the models"),xe.forEach(a),Pa=l(ua,")."),ua.forEach(a),Ts=h(s),x=o(s,"P",{});var H=m(x);za=l(H,`Perplexity is defined as the exponentiated average negative log-likelihood of a sequence. If we have a tokenized sequence `),qs=z(H),$s=l(H,", then the perplexity of "),Es=z(H),Ms=l(H,` is, `),js=z(H),H.forEach(a),Ls=h(s),_=o(s,"P",{});var os=m(_);Ta=l(os,"where "),Cs=z(os),Gs=l(os," is the log-likelihood of the ith token conditioned on the preceding tokens "),Is=z(os),Ws=l(os," according to our model. Intuitively, it can be thought of as an evaluation of the model\u2019s ability to predict uniformly among the set of specified tokens in a corpus. Importantly, this means that the tokenization procedure has a direct impact on a model\u2019s perplexity which should always be taken into consideration when comparing different models."),os.forEach(a),As=h(s),E=o(s,"P",{});var fa=m(E);qa=l(fa,`This is also equivalent to the exponentiation of the cross-entropy between the data and model predictions. 
For more intuition about perplexity and its relationship to Bits Per Character (BPC) and data compression, check out this `),B=o(fa,"A",{href:!0,rel:!0});var be=m(B);$a=l(be,"fantastic blog post on The Gradient"),be.forEach(a),Ea=l(fa,"."),fa.forEach(a),Ss=h(s),T=o(s,"H2",{class:!0});var wa=m(T);M=o(wa,"A",{id:!0,class:!0,href:!0});var _e=m(M);hs=o(_e,"SPAN",{});var ke=m(hs);J(D.$$.fragment,ke),ke.forEach(a),_e.forEach(a),Ma=h(wa),cs=o(wa,"SPAN",{});var Pe=m(cs);ja=l(Pe,"Calculating PPL with fixed-length models"),Pe.forEach(a),wa.forEach(a),Bs=h(s),ss=o(s,"P",{});var ze=m(ss);La=l(ze,`If we weren\u2019t limited by a model\u2019s context size, we would evaluate the model\u2019s perplexity by autoregressively factorizing a sequence and conditioning on the entire preceding subsequence at each step, as shown below.`),ze.forEach(a),Ds=h(s),j=o(s,"IMG",{width:!0,alt:!0,src:!0}),Fs=h(s),f=o(s,"P",{});var I=m(f);Ca=l(I,`When working with approximate models, however, we typically have a constraint on the number of tokens the model can process. The largest version of `),as=o(I,"A",{href:!0});var Te=m(as);Ga=l(Te,"GPT-2"),Te.forEach(a),Ia=l(I,`, for example, has a fixed length of 1024 tokens, so we cannot calculate `),Ns=z(I),Os=l(I," directly when "),Xs=z(I),Hs=l(I," is greater than 1024."),I.forEach(a),Rs=h(s),w=o(s,"P",{});var W=m(w);Wa=l(W,`Instead, the sequence is typically broken into subsequences equal to the model\u2019s maximum input size. If a model\u2019s max input size is `),Js=z(W),Ks=l(W,", we then approximate the likelihood of a token "),Qs=z(W),Us=l(W,` by conditioning only on the `),Vs=z(W),Ys=l(W,` tokens that precede it rather than the entire context. When evaluating the model\u2019s perplexity of a sequence, a tempting but suboptimal approach is to break the sequence into disjoint chunks and add up the decomposed log-likelihoods of each segment independently.`),W.forEach(a),Zs=h(s),L=o(s,"IMG",{width:!0,alt:!0,src:!0}),sa=h(s),es=o(s,"P",{});var qe=m(es);Aa=l(qe,`This is quick to compute since the perplexity of each segment can be computed in one forward pass, but serves as a poor approximation of the fully-factorized perplexity and will typically yield a higher (worse) PPL because the model will have less context at most of the prediction steps.`),qe.forEach(a),aa=h(s),ts=o(s,"P",{});var $e=m(ts);Sa=l($e,`Instead, the PPL of fixed-length models should be evaluated with a sliding-window strategy. This involves repeatedly sliding the context window so that the model has more context when making each prediction.`),$e.forEach(a),ea=h(s),C=o(s,"IMG",{width:!0,alt:!0,src:!0}),ta=h(s),ns=o(s,"P",{});var Ee=m(ns);Ba=l(Ee,`This is a closer approximation to the true decomposition of the sequence probability and will typically yield a more favorable score. The downside is that it requires a separate forward pass for each token in the corpus. A good practical compromise is to employ a strided sliding window, moving the context by larger strides rather than sliding by 1 token a time. 
This allows computation to proceed much faster while still giving the model a large context to make predictions at each step.`),Ee.forEach(a),na=h(s),q=o(s,"H2",{class:!0});var va=m(q);G=o(va,"A",{id:!0,class:!0,href:!0});var Me=m(G);ds=o(Me,"SPAN",{});var je=m(ds);J(F.$$.fragment,je),je.forEach(a),Me.forEach(a),Da=h(va),gs=o(va,"SPAN",{});var Le=m(gs);Fa=l(Le,"Example: Calculating perplexity with GPT-2 in \u{1F917} Transformers"),Le.forEach(a),va.forEach(a),la=h(s),ls=o(s,"P",{});var Ce=m(ls);Na=l(Ce,"Let\u2019s demonstrate this process with GPT-2."),Ce.forEach(a),ia=h(s),J(N.$$.fragment,s),pa=h(s),is=o(s,"P",{});var Ge=m(is);Oa=l(Ge,`We\u2019ll load in the WikiText-2 dataset and evaluate the perplexity using a few different sliding-window strategies. Since this dataset is small and we\u2019re just doing one forward pass over the set, we can just load and encode the entire dataset in memory.`),Ge.forEach(a),oa=h(s),J(O.$$.fragment,s),ma=h(s),g=o(s,"P",{});var k=m(g);Xa=l(k,"With \u{1F917} Transformers, we can simply pass the "),us=o(k,"CODE",{});var Ie=m(us);Ha=l(Ie,"input_ids"),Ie.forEach(a),Ra=l(k," as the "),fs=o(k,"CODE",{});var We=m(fs);Ja=l(We,"labels"),We.forEach(a),Ka=l(k,` to our model, and the average negative log-likelihood for each token is returned as the loss. With our sliding window approach, however, there is overlap in the tokens we pass to the model at each iteration. We don\u2019t want the log-likelihood for the tokens we\u2019re just treating as context to be included in our loss, so we can set these targets to `),ws=o(k,"CODE",{});var Ae=m(ws);Qa=l(Ae,"-100"),Ae.forEach(a),Ua=l(k,` so that they are ignored. The following is an example of how we could do this with a stride of `),vs=o(k,"CODE",{});var Se=m(vs);Va=l(Se,"512"),Se.forEach(a),Ya=l(k,`. This means that the model will have at least 512 tokens for context when calculating the conditional likelihood of any one token (provided there are 512 preceding tokens available to condition on).`),k.forEach(a),ra=h(s),J(X.$$.fragment,s),ha=h(s),ps=o(s,"P",{});var Be=m(ps);Za=l(Be,`Running this with the stride length equal to the max input length is equivalent to the suboptimal, non-sliding-window strategy we discussed above. The smaller the stride, the more context the model will have in making each prediction, and the better the reported perplexity will typically be.`),Be.forEach(a),ca=h(s),d=o(s,"P",{});var v=m(d);se=l(v,"When we run the above with "),ys=o(v,"CODE",{});var De=m(ys);ae=l(De,"stride = 1024"),De.forEach(a),ee=l(v,", i.e. no overlap, the resulting PPL is "),xs=o(v,"CODE",{});var Fe=m(xs);te=l(Fe,"19.64"),Fe.forEach(a),ne=l(v,`, which is about the same as the `),bs=o(v,"CODE",{});var Ne=m(bs);le=l(Ne,"19.93"),Ne.forEach(a),ie=l(v," reported in the GPT-2 paper. By using "),_s=o(v,"CODE",{});var Oe=m(_s);pe=l(Oe,"stride = 512"),Oe.forEach(a),oe=l(v,` and thereby employing our striding window strategy, this jumps down to `),ks=o(v,"CODE",{});var Xe=m(ks);me=l(Xe,"16.53"),Xe.forEach(a),re=l(v,`. 
This is not only a more favorable score, but is calculated in a way that is closer to the true autoregressive decomposition of a sequence likelihood.`),v.forEach(a),this.h()},h(){c(b,"name","hf:doc:metadata"),c(b,"content",JSON.stringify(ot)),c(y,"id","perplexity-of-fixedlength-models"),c(y,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(y,"href","#perplexity-of-fixedlength-models"),c(u,"class","relative group"),c(Z,"href","model_summary"),qs.a=$s,Es.a=Ms,js.a=null,Cs.a=Gs,Is.a=Ws,c(B,"href","https://thegradient.pub/understanding-evaluation-metrics-for-language-models/"),c(B,"rel","nofollow"),c(M,"id","calculating-ppl-with-fixedlength-models"),c(M,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(M,"href","#calculating-ppl-with-fixedlength-models"),c(T,"class","relative group"),c(j,"width","600"),c(j,"alt","Full decomposition of a sequence with unlimited context length"),he(j.src,ge="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/ppl_full.gif")||c(j,"src",ge),c(as,"href","model_doc/gpt2"),Ns.a=Os,Xs.a=Hs,Js.a=Ks,Qs.a=Us,Vs.a=Ys,c(L,"width","600"),c(L,"alt","Suboptimal PPL not taking advantage of full available context"),he(L.src,ue="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/ppl_chunked.gif")||c(L,"src",ue),c(C,"width","600"),c(C,"alt","Sliding window PPL taking advantage of all available context"),he(C.src,fe="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/ppl_sliding.gif")||c(C,"src",fe),c(G,"id","example-calculating-perplexity-with-gpt2-in-transformers"),c(G,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(G,"href","#example-calculating-perplexity-with-gpt2-in-transformers"),c(q,"class","relative 
group")},m(s,t){e(document.head,b),i(s,Y,t),i(s,u,t),e(u,y),e(y,ms),K(A,ms,null),e(u,xa),e(u,rs),e(rs,ba),i(s,Ps,t),K(S,s,t),i(s,zs,t),i(s,$,t),e($,_a),e($,Z),e(Z,ka),e($,Pa),i(s,Ts,t),i(s,x,t),e(x,za),qs.m(He,x),e(x,$s),Es.m(Re,x),e(x,Ms),js.m(Je,x),i(s,Ls,t),i(s,_,t),e(_,Ta),Cs.m(Ke,_),e(_,Gs),Is.m(Qe,_),e(_,Ws),i(s,As,t),i(s,E,t),e(E,qa),e(E,B),e(B,$a),e(E,Ea),i(s,Ss,t),i(s,T,t),e(T,M),e(M,hs),K(D,hs,null),e(T,Ma),e(T,cs),e(cs,ja),i(s,Bs,t),i(s,ss,t),e(ss,La),i(s,Ds,t),i(s,j,t),i(s,Fs,t),i(s,f,t),e(f,Ca),e(f,as),e(as,Ga),e(f,Ia),Ns.m(Ue,f),e(f,Os),Xs.m(Ve,f),e(f,Hs),i(s,Rs,t),i(s,w,t),e(w,Wa),Js.m(Ye,w),e(w,Ks),Qs.m(Ze,w),e(w,Us),Vs.m(st,w),e(w,Ys),i(s,Zs,t),i(s,L,t),i(s,sa,t),i(s,es,t),e(es,Aa),i(s,aa,t),i(s,ts,t),e(ts,Sa),i(s,ea,t),i(s,C,t),i(s,ta,t),i(s,ns,t),e(ns,Ba),i(s,na,t),i(s,q,t),e(q,G),e(G,ds),K(F,ds,null),e(q,Da),e(q,gs),e(gs,Fa),i(s,la,t),i(s,ls,t),e(ls,Na),i(s,ia,t),K(N,s,t),i(s,pa,t),i(s,is,t),e(is,Oa),i(s,oa,t),K(O,s,t),i(s,ma,t),i(s,g,t),e(g,Xa),e(g,us),e(us,Ha),e(g,Ra),e(g,fs),e(fs,Ja),e(g,Ka),e(g,ws),e(ws,Qa),e(g,Ua),e(g,vs),e(vs,Va),e(g,Ya),i(s,ra,t),K(X,s,t),i(s,ha,t),i(s,ps,t),e(ps,Za),i(s,ca,t),i(s,d,t),e(d,se),e(d,ys),e(ys,ae),e(d,ee),e(d,xs),e(xs,te),e(d,ne),e(d,bs),e(bs,le),e(d,ie),e(d,_s),e(_s,pe),e(d,oe),e(d,ks),e(ks,me),e(d,re),da=!0},p:lt,i(s){da||(Q(A.$$.fragment,s),Q(S.$$.fragment,s),Q(D.$$.fragment,s),Q(F.$$.fragment,s),Q(N.$$.fragment,s),Q(O.$$.fragment,s),Q(X.$$.fragment,s),da=!0)},o(s){U(A.$$.fragment,s),U(S.$$.fragment,s),U(D.$$.fragment,s),U(F.$$.fragment,s),U(N.$$.fragment,s),U(O.$$.fragment,s),U(X.$$.fragment,s),da=!1},d(s){a(b),s&&a(Y),s&&a(u),V(A),s&&a(Ps),V(S,s),s&&a(zs),s&&a($),s&&a(Ts),s&&a(x),s&&a(Ls),s&&a(_),s&&a(As),s&&a(E),s&&a(Ss),s&&a(T),V(D),s&&a(Bs),s&&a(ss),s&&a(Ds),s&&a(j),s&&a(Fs),s&&a(f),s&&a(Rs),s&&a(w),s&&a(Zs),s&&a(L),s&&a(sa),s&&a(es),s&&a(aa),s&&a(ts),s&&a(ea),s&&a(C),s&&a(ta),s&&a(ns),s&&a(na),s&&a(q),V(F),s&&a(la),s&&a(ls),s&&a(ia),V(N,s),s&&a(pa),s&&a(is),s&&a(oa),V(O,s),s&&a(ma),s&&a(g),s&&a(ra),V(X,s),s&&a(ha),s&&a(ps),s&&a(ca),s&&a(d)}}}const ot={local:"perplexity-of-fixedlength-models",sections:[{local:"calculating-ppl-with-fixedlength-models",title:"Calculating PPL with fixed-length models"},{local:"example-calculating-perplexity-with-gpt2-in-transformers",title:"Example: Calculating perplexity with GPT-2 in \u{1F917} Transformers"}],title:"Perplexity of fixed-length models"};function mt(ya,b,Y){let{fw:u}=b;return ya.$$set=y=>{"fw"in y&&Y(0,u=y.fw)},[u]}class ut extends at{constructor(b){super();et(this,b,mt,pt,tt,{fw:0})}}export{ut as default,ot as metadata};
260
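For reference, a minimal Python sketch of the disjoint-chunk perplexity baseline that the perplexity guide compiled into the page above describes only in prose and a figure (its own code sample covers the strided sliding window). This assumes the same GPT-2 / WikiText-2 setup as that sample; the variable names and the token-count weighting are choices of this sketch, not taken from the guide.

import torch
from datasets import load_dataset
from transformers import GPT2LMHeadModel, GPT2TokenizerFast

device = "cuda" if torch.cuda.is_available() else "cpu"
model = GPT2LMHeadModel.from_pretrained("gpt2-large").to(device)
tokenizer = GPT2TokenizerFast.from_pretrained("gpt2-large")

test = load_dataset("wikitext", "wikitext-2-raw-v1", split="test")
encodings = tokenizer("\n\n".join(test["text"]), return_tensors="pt")

max_length = model.config.n_positions  # 1024 for GPT-2
nlls = []
n_predicted = 0
# Disjoint, non-overlapping chunks: every chunk after the first loses all of
# the context that precedes it, which is why this baseline tends to report a
# higher (worse) perplexity than the strided sliding window.
for begin_loc in range(0, encodings.input_ids.size(1), max_length):
    end_loc = min(begin_loc + max_length, encodings.input_ids.size(1))
    input_ids = encodings.input_ids[:, begin_loc:end_loc].to(device)
    if input_ids.size(1) < 2:  # nothing to predict in a length-1 tail chunk
        continue
    with torch.no_grad():
        # The returned loss is the mean negative log-likelihood over the
        # chunk's predicted positions; rescale by that count so chunks of
        # different lengths are weighted correctly before averaging.
        loss = model(input_ids, labels=input_ids).loss
    n_targets = input_ids.size(1) - 1
    nlls.append(loss * n_targets)
    n_predicted += n_targets

ppl = torch.exp(torch.stack(nlls).sum() / n_predicted)
print(float(ppl))

Because each chunk starts with no preceding context, this number is expected to come out higher than the strided result produced by the guide's own example.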
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages/converting_tensorflow_models.mdx-9367eeca.js
import{S as Un,i as jn,s as Kn,e as r,k as c,w as u,t as s,M as Mn,c as n,d as t,m as h,a,x as d,h as l,b as f,F as o,g as p,y as v,q as T,o as E,B as $}from"../chunks/vendor-4833417e.js";import{T as Yn}from"../chunks/Tip-fffd6df1.js";import{I as M}from"../chunks/IconCopyLink-4b81c553.js";import{C as he}from"../chunks/CodeBlock-6a3d1b46.js";import"../chunks/CopyButton-dacfbfaf.js";function qn(qe){let _,I,m,A,L,w,k,x,S,Y,O;return{c(){_=r("p"),I=s("Since 2.3.0 the conversion script is now part of the transformers CLI ("),m=r("strong"),A=s("transformers-cli"),L=s(`) available in any transformers >= 2.3.0 installation.`),w=c(),k=r("p"),x=s("The documentation below reflects the "),S=r("strong"),Y=s("transformers-cli convert"),O=s(" command format.")},l(b){_=n(b,"P",{});var y=a(_);I=l(y,"Since 2.3.0 the conversion script is now part of the transformers CLI ("),m=n(y,"STRONG",{});var Ge=a(m);A=l(Ge,"transformers-cli"),Ge.forEach(t),L=l(y,`) available in any transformers >= 2.3.0 installation.`),y.forEach(t),w=h(b),k=n(b,"P",{});var q=a(k);x=l(q,"The documentation below reflects the "),S=n(q,"STRONG",{});var _e=a(S);Y=l(_e,"transformers-cli convert"),_e.forEach(t),O=l(q," command format."),q.forEach(t)},m(b,y){p(b,_,y),o(_,I),o(_,m),o(m,A),o(_,L),p(b,w,y),p(b,k,y),o(k,x),o(k,S),o(S,Y),o(k,O)},d(b){b&&t(_),b&&t(w),b&&t(k)}}}function Jn(qe){let _,I,m,A,L,w,k,x,S,Y,O,b,y,Ge,q,_e,J,At,F,z,Je,me,$o,ze,Po,bt,C,Ao,ue,bo,yo,de,go,wo,yt,P,ko,Qe,Oo,No,Ve,Io,Co,We,Ho,Ro,Xe,Lo,xo,ve,So,Fo,gt,g,Do,Ze,Bo,Go,et,Xo,Uo,tt,jo,Ko,ot,Mo,Yo,wt,Q,qo,rt,Jo,zo,kt,V,Qo,nt,Vo,Wo,Ot,Te,Nt,W,Zo,Ee,er,tr,It,D,Z,at,$e,or,st,rr,Ct,ee,nr,Pe,ar,sr,Ht,H,lr,lt,ir,pr,it,fr,cr,Rt,te,hr,pt,_r,mr,Lt,Ae,xt,oe,ur,be,dr,vr,St,B,re,ft,ye,Tr,ct,Er,Ft,ne,$r,ge,Pr,Ar,Dt,we,Bt,G,ae,ht,ke,br,_t,yr,Gt,se,gr,Oe,wr,kr,Xt,Ne,Ut,X,le,mt,Ie,Or,ut,Nr,jt,ie,Ir,Ce,Cr,Hr,Kt,He,Mt,U,pe,dt,Re,Rr,vt,Lr,Yt,Ue,xr,qt,Le,Jt,j,fe,Tt,xe,Sr,Et,Fr,zt,je,Dr,Qt,Se,Vt,K,ce,$t,Fe,Br,Pt,Gr,Wt,Ke,Xr,Zt,De,eo;return w=new M({}),J=new Yn({props:{$$slots:{default:[qn]},$$scope:{ctx:qe}}}),me=new M({}),Te=new he({props:{code:`export BERT_BASE_DIR=/path/to/bert/uncased_L-12_H-768_A-12 transformers-cli convert --model_type bert \\ --tf_checkpoint $BERT_BASE_DIR/bert_model.ckpt \\ --config $BERT_BASE_DIR/bert_config.json \\ --pytorch_dump_output $BERT_BASE_DIR/pytorch_model.bin`,highlighted:`<span class="hljs-built_in">export</span> BERT_BASE_DIR=/path/to/bert/uncased_L-12_H-768_A-12 transformers-cli convert --model_type bert \\ --tf_checkpoint <span class="hljs-variable">$BERT_BASE_DIR</span>/bert_model.ckpt \\ --config <span class="hljs-variable">$BERT_BASE_DIR</span>/bert_config.json \\ --pytorch_dump_output <span class="hljs-variable">$BERT_BASE_DIR</span>/pytorch_model.bin`}}),$e=new M({}),Ae=new he({props:{code:`export ALBERT_BASE_DIR=/path/to/albert/albert_base transformers-cli convert --model_type albert \\ --tf_checkpoint $ALBERT_BASE_DIR/model.ckpt-best \\ --config $ALBERT_BASE_DIR/albert_config.json \\ --pytorch_dump_output $ALBERT_BASE_DIR/pytorch_model.bin`,highlighted:`<span class="hljs-built_in">export</span> ALBERT_BASE_DIR=/path/to/albert/albert_base transformers-cli convert --model_type albert \\ --tf_checkpoint <span class="hljs-variable">$ALBERT_BASE_DIR</span>/model.ckpt-best \\ --config <span class="hljs-variable">$ALBERT_BASE_DIR</span>/albert_config.json \\ --pytorch_dump_output <span class="hljs-variable">$ALBERT_BASE_DIR</span>/pytorch_model.bin`}}),ye=new M({}),we=new he({props:{code:`export 
OPENAI_GPT_CHECKPOINT_FOLDER_PATH=/path/to/openai/pretrained/numpy/weights transformers-cli convert --model_type gpt \\ --tf_checkpoint $OPENAI_GPT_CHECKPOINT_FOLDER_PATH \\ --pytorch_dump_output $PYTORCH_DUMP_OUTPUT \\ [--config OPENAI_GPT_CONFIG] \\ [--finetuning_task_name OPENAI_GPT_FINETUNED_TASK] \\`,highlighted:`<span class="hljs-built_in">export</span> OPENAI_GPT_CHECKPOINT_FOLDER_PATH=/path/to/openai/pretrained/numpy/weights transformers-cli convert --model_type gpt \\ --tf_checkpoint <span class="hljs-variable">$OPENAI_GPT_CHECKPOINT_FOLDER_PATH</span> \\ --pytorch_dump_output <span class="hljs-variable">$PYTORCH_DUMP_OUTPUT</span> \\ [--config OPENAI_GPT_CONFIG] \\ [--finetuning_task_name OPENAI_GPT_FINETUNED_TASK] \\`}}),ke=new M({}),Ne=new he({props:{code:`export OPENAI_GPT2_CHECKPOINT_PATH=/path/to/gpt2/pretrained/weights transformers-cli convert --model_type gpt2 \\ --tf_checkpoint $OPENAI_GPT2_CHECKPOINT_PATH \\ --pytorch_dump_output $PYTORCH_DUMP_OUTPUT \\ [--config OPENAI_GPT2_CONFIG] \\ [--finetuning_task_name OPENAI_GPT2_FINETUNED_TASK]`,highlighted:`<span class="hljs-built_in">export</span> OPENAI_GPT2_CHECKPOINT_PATH=/path/to/gpt2/pretrained/weights transformers-cli convert --model_type gpt2 \\ --tf_checkpoint <span class="hljs-variable">$OPENAI_GPT2_CHECKPOINT_PATH</span> \\ --pytorch_dump_output <span class="hljs-variable">$PYTORCH_DUMP_OUTPUT</span> \\ [--config OPENAI_GPT2_CONFIG] \\ [--finetuning_task_name OPENAI_GPT2_FINETUNED_TASK]`}}),Ie=new M({}),He=new he({props:{code:`export TRANSFO_XL_CHECKPOINT_FOLDER_PATH=/path/to/transfo/xl/checkpoint transformers-cli convert --model_type transfo_xl \\ --tf_checkpoint $TRANSFO_XL_CHECKPOINT_FOLDER_PATH \\ --pytorch_dump_output $PYTORCH_DUMP_OUTPUT \\ [--config TRANSFO_XL_CONFIG] \\ [--finetuning_task_name TRANSFO_XL_FINETUNED_TASK]`,highlighted:`<span class="hljs-built_in">export</span> TRANSFO_XL_CHECKPOINT_FOLDER_PATH=/path/to/transfo/xl/checkpoint transformers-cli convert --model_type transfo_xl \\ --tf_checkpoint <span class="hljs-variable">$TRANSFO_XL_CHECKPOINT_FOLDER_PATH</span> \\ --pytorch_dump_output <span class="hljs-variable">$PYTORCH_DUMP_OUTPUT</span> \\ [--config TRANSFO_XL_CONFIG] \\ [--finetuning_task_name TRANSFO_XL_FINETUNED_TASK]`}}),Re=new M({}),Le=new he({props:{code:`export TRANSFO_XL_CHECKPOINT_PATH=/path/to/xlnet/checkpoint export TRANSFO_XL_CONFIG_PATH=/path/to/xlnet/config transformers-cli convert --model_type xlnet \\ --tf_checkpoint $TRANSFO_XL_CHECKPOINT_PATH \\ --config $TRANSFO_XL_CONFIG_PATH \\ --pytorch_dump_output $PYTORCH_DUMP_OUTPUT \\ [--finetuning_task_name XLNET_FINETUNED_TASK] \\`,highlighted:`<span class="hljs-built_in">export</span> TRANSFO_XL_CHECKPOINT_PATH=/path/to/xlnet/checkpoint <span class="hljs-built_in">export</span> TRANSFO_XL_CONFIG_PATH=/path/to/xlnet/config transformers-cli convert --model_type xlnet \\ --tf_checkpoint <span class="hljs-variable">$TRANSFO_XL_CHECKPOINT_PATH</span> \\ --config <span class="hljs-variable">$TRANSFO_XL_CONFIG_PATH</span> \\ --pytorch_dump_output <span class="hljs-variable">$PYTORCH_DUMP_OUTPUT</span> \\ [--finetuning_task_name XLNET_FINETUNED_TASK] \\`}}),xe=new M({}),Se=new he({props:{code:`export XLM_CHECKPOINT_PATH=/path/to/xlm/checkpoint transformers-cli convert --model_type xlm \\ --tf_checkpoint $XLM_CHECKPOINT_PATH \\ --pytorch_dump_output $PYTORCH_DUMP_OUTPUT [--config XML_CONFIG] \\ [--finetuning_task_name XML_FINETUNED_TASK]`,highlighted:`<span class="hljs-built_in">export</span> XLM_CHECKPOINT_PATH=/path/to/xlm/checkpoint 
transformers-cli convert --model_type xlm \\ --tf_checkpoint <span class="hljs-variable">$XLM_CHECKPOINT_PATH</span> \\ --pytorch_dump_output <span class="hljs-variable">$PYTORCH_DUMP_OUTPUT</span> [--config XML_CONFIG] \\ [--finetuning_task_name XML_FINETUNED_TASK]`}}),Fe=new M({}),De=new he({props:{code:`export T5=/path/to/t5/uncased_L-12_H-768_A-12 transformers-cli convert --model_type t5 \\ --tf_checkpoint $T5/t5_model.ckpt \\ --config $T5/t5_config.json \\ --pytorch_dump_output $T5/pytorch_model.bin`,highlighted:`<span class="hljs-built_in">export</span> T5=/path/to/t5/uncased_L-12_H-768_A-12 transformers-cli convert --model_type t5 \\ --tf_checkpoint <span class="hljs-variable">$T5</span>/t5_model.ckpt \\ --config <span class="hljs-variable">$T5</span>/t5_config.json \\ --pytorch_dump_output <span class="hljs-variable">$T5</span>/pytorch_model.bin`}}),{c(){_=r("meta"),I=c(),m=r("h1"),A=r("a"),L=r("span"),u(w.$$.fragment),k=c(),x=r("span"),S=s("Converting Tensorflow Checkpoints"),Y=c(),O=r("p"),b=s(`A command-line interface is provided to convert original Bert/GPT/GPT-2/Transformer-XL/XLNet/XLM checkpoints to models that can be loaded using the `),y=r("code"),Ge=s("from_pretrained"),q=s(" methods of the library."),_e=c(),u(J.$$.fragment),At=c(),F=r("h2"),z=r("a"),Je=r("span"),u(me.$$.fragment),$o=c(),ze=r("span"),Po=s("BERT"),bt=c(),C=r("p"),Ao=s("You can convert any TensorFlow checkpoint for BERT (in particular "),ue=r("a"),bo=s("the pre-trained models released by Google"),yo=s(`) in a PyTorch save file by using the `),de=r("a"),go=s("convert_bert_original_tf_checkpoint_to_pytorch.py"),wo=s(" script."),yt=c(),P=r("p"),ko=s("This CLI takes as input a TensorFlow checkpoint (three files starting with "),Qe=r("code"),Oo=s("bert_model.ckpt"),No=s(`) and the associated configuration file (`),Ve=r("code"),Io=s("bert_config.json"),Co=s(`), and creates a PyTorch model for this configuration, loads the weights from the TensorFlow checkpoint in the PyTorch model and saves the resulting model in a standard PyTorch save file that can be imported using `),We=r("code"),Ho=s("from_pretrained()"),Ro=s(" (see example in "),Xe=r("a"),Lo=s("quicktour"),xo=s(" , "),ve=r("a"),So=s("run_glue.py"),Fo=s(" )."),gt=c(),g=r("p"),Do=s("You only need to run this conversion script "),Ze=r("strong"),Bo=s("once"),Go=s(` to get a PyTorch model. You can then disregard the TensorFlow checkpoint (the three files starting with `),et=r("code"),Xo=s("bert_model.ckpt"),Uo=s(`) but be sure to keep the configuration file (\\ `),tt=r("code"),jo=s("bert_config.json"),Ko=s(") and the vocabulary file ("),ot=r("code"),Mo=s("vocab.txt"),Yo=s(") as these are needed for the PyTorch model too."),wt=c(),Q=r("p"),qo=s("To run this specific conversion script you will need to have TensorFlow and PyTorch installed ("),rt=r("code"),Jo=s("pip install tensorflow"),zo=s("). 
The rest of the repository only requires PyTorch."),kt=c(),V=r("p"),Qo=s("Here is an example of the conversion process for a pre-trained "),nt=r("code"),Vo=s("BERT-Base Uncased"),Wo=s(" model:"),Ot=c(),u(Te.$$.fragment),Nt=c(),W=r("p"),Zo=s("You can download Google\u2019s pre-trained models for the conversion "),Ee=r("a"),er=s("here"),tr=s("."),It=c(),D=r("h2"),Z=r("a"),at=r("span"),u($e.$$.fragment),or=c(),st=r("span"),rr=s("ALBERT"),Ct=c(),ee=r("p"),nr=s(`Convert TensorFlow model checkpoints of ALBERT to PyTorch using the `),Pe=r("a"),ar=s("convert_albert_original_tf_checkpoint_to_pytorch.py"),sr=s(" script."),Ht=c(),H=r("p"),lr=s("The CLI takes as input a TensorFlow checkpoint (three files starting with "),lt=r("code"),ir=s("model.ckpt-best"),pr=s(`) and the accompanying configuration file (`),it=r("code"),fr=s("albert_config.json"),cr=s(`), then creates and saves a PyTorch model. To run this conversion you will need to have TensorFlow and PyTorch installed.`),Rt=c(),te=r("p"),hr=s("Here is an example of the conversion process for the pre-trained "),pt=r("code"),_r=s("ALBERT Base"),mr=s(" model:"),Lt=c(),u(Ae.$$.fragment),xt=c(),oe=r("p"),ur=s("You can download Google\u2019s pre-trained models for the conversion "),be=r("a"),dr=s("here"),vr=s("."),St=c(),B=r("h2"),re=r("a"),ft=r("span"),u(ye.$$.fragment),Tr=c(),ct=r("span"),Er=s("OpenAI GPT"),Ft=c(),ne=r("p"),$r=s(`Here is an example of the conversion process for a pre-trained OpenAI GPT model, assuming that your NumPy checkpoint save as the same format than OpenAI pretrained model (see `),ge=r("a"),Pr=s("here"),Ar=s(`\\ )`),Dt=c(),u(we.$$.fragment),Bt=c(),G=r("h2"),ae=r("a"),ht=r("span"),u(ke.$$.fragment),br=c(),_t=r("span"),yr=s("OpenAI GPT-2"),Gt=c(),se=r("p"),gr=s("Here is an example of the conversion process for a pre-trained OpenAI GPT-2 model (see "),Oe=r("a"),wr=s("here"),kr=s(")"),Xt=c(),u(Ne.$$.fragment),Ut=c(),X=r("h2"),le=r("a"),mt=r("span"),u(Ie.$$.fragment),Or=c(),ut=r("span"),Nr=s("Transformer-XL"),jt=c(),ie=r("p"),Ir=s("Here is an example of the conversion process for a pre-trained Transformer-XL model (see "),Ce=r("a"),Cr=s("here"),Hr=s(")"),Kt=c(),u(He.$$.fragment),Mt=c(),U=r("h2"),pe=r("a"),dt=r("span"),u(Re.$$.fragment),Rr=c(),vt=r("span"),Lr=s("XLNet"),Yt=c(),Ue=r("p"),xr=s("Here is an example of the conversion process for a pre-trained XLNet model:"),qt=c(),u(Le.$$.fragment),Jt=c(),j=r("h2"),fe=r("a"),Tt=r("span"),u(xe.$$.fragment),Sr=c(),Et=r("span"),Fr=s("XLM"),zt=c(),je=r("p"),Dr=s("Here is an example of the conversion process for a pre-trained XLM model:"),Qt=c(),u(Se.$$.fragment),Vt=c(),K=r("h2"),ce=r("a"),$t=r("span"),u(Fe.$$.fragment),Br=c(),Pt=r("span"),Gr=s("T5"),Wt=c(),Ke=r("p"),Xr=s("Here is an example of the conversion process for a pre-trained T5 model:"),Zt=c(),u(De.$$.fragment),this.h()},l(e){const i=Mn('[data-svelte="svelte-1phssyn"]',document.head);_=n(i,"META",{name:!0,content:!0}),i.forEach(t),I=h(e),m=n(e,"H1",{class:!0});var Be=a(m);A=n(Be,"A",{id:!0,class:!0,href:!0});var Ur=a(A);L=n(Ur,"SPAN",{});var jr=a(L);d(w.$$.fragment,jr),jr.forEach(t),Ur.forEach(t),k=h(Be),x=n(Be,"SPAN",{});var Kr=a(x);S=l(Kr,"Converting Tensorflow Checkpoints"),Kr.forEach(t),Be.forEach(t),Y=h(e),O=n(e,"P",{});var to=a(O);b=l(to,`A command-line interface is provided to convert original Bert/GPT/GPT-2/Transformer-XL/XLNet/XLM checkpoints to models that can be loaded using the `),y=n(to,"CODE",{});var Mr=a(y);Ge=l(Mr,"from_pretrained"),Mr.forEach(t),q=l(to," methods of the 
library."),to.forEach(t),_e=h(e),d(J.$$.fragment,e),At=h(e),F=n(e,"H2",{class:!0});var oo=a(F);z=n(oo,"A",{id:!0,class:!0,href:!0});var Yr=a(z);Je=n(Yr,"SPAN",{});var qr=a(Je);d(me.$$.fragment,qr),qr.forEach(t),Yr.forEach(t),$o=h(oo),ze=n(oo,"SPAN",{});var Jr=a(ze);Po=l(Jr,"BERT"),Jr.forEach(t),oo.forEach(t),bt=h(e),C=n(e,"P",{});var Me=a(C);Ao=l(Me,"You can convert any TensorFlow checkpoint for BERT (in particular "),ue=n(Me,"A",{href:!0,rel:!0});var zr=a(ue);bo=l(zr,"the pre-trained models released by Google"),zr.forEach(t),yo=l(Me,`) in a PyTorch save file by using the `),de=n(Me,"A",{href:!0,rel:!0});var Qr=a(de);go=l(Qr,"convert_bert_original_tf_checkpoint_to_pytorch.py"),Qr.forEach(t),wo=l(Me," script."),Me.forEach(t),yt=h(e),P=n(e,"P",{});var N=a(P);ko=l(N,"This CLI takes as input a TensorFlow checkpoint (three files starting with "),Qe=n(N,"CODE",{});var Vr=a(Qe);Oo=l(Vr,"bert_model.ckpt"),Vr.forEach(t),No=l(N,`) and the associated configuration file (`),Ve=n(N,"CODE",{});var Wr=a(Ve);Io=l(Wr,"bert_config.json"),Wr.forEach(t),Co=l(N,`), and creates a PyTorch model for this configuration, loads the weights from the TensorFlow checkpoint in the PyTorch model and saves the resulting model in a standard PyTorch save file that can be imported using `),We=n(N,"CODE",{});var Zr=a(We);Ho=l(Zr,"from_pretrained()"),Zr.forEach(t),Ro=l(N," (see example in "),Xe=n(N,"A",{href:!0});var en=a(Xe);Lo=l(en,"quicktour"),en.forEach(t),xo=l(N," , "),ve=n(N,"A",{href:!0,rel:!0});var tn=a(ve);So=l(tn,"run_glue.py"),tn.forEach(t),Fo=l(N," )."),N.forEach(t),gt=h(e),g=n(e,"P",{});var R=a(g);Do=l(R,"You only need to run this conversion script "),Ze=n(R,"STRONG",{});var on=a(Ze);Bo=l(on,"once"),on.forEach(t),Go=l(R,` to get a PyTorch model. You can then disregard the TensorFlow checkpoint (the three files starting with `),et=n(R,"CODE",{});var rn=a(et);Xo=l(rn,"bert_model.ckpt"),rn.forEach(t),Uo=l(R,`) but be sure to keep the configuration file (\\ `),tt=n(R,"CODE",{});var nn=a(tt);jo=l(nn,"bert_config.json"),nn.forEach(t),Ko=l(R,") and the vocabulary file ("),ot=n(R,"CODE",{});var an=a(ot);Mo=l(an,"vocab.txt"),an.forEach(t),Yo=l(R,") as these are needed for the PyTorch model too."),R.forEach(t),wt=h(e),Q=n(e,"P",{});var ro=a(Q);qo=l(ro,"To run this specific conversion script you will need to have TensorFlow and PyTorch installed ("),rt=n(ro,"CODE",{});var sn=a(rt);Jo=l(sn,"pip install tensorflow"),sn.forEach(t),zo=l(ro,"). 
The rest of the repository only requires PyTorch."),ro.forEach(t),kt=h(e),V=n(e,"P",{});var no=a(V);Qo=l(no,"Here is an example of the conversion process for a pre-trained "),nt=n(no,"CODE",{});var ln=a(nt);Vo=l(ln,"BERT-Base Uncased"),ln.forEach(t),Wo=l(no," model:"),no.forEach(t),Ot=h(e),d(Te.$$.fragment,e),Nt=h(e),W=n(e,"P",{});var ao=a(W);Zo=l(ao,"You can download Google\u2019s pre-trained models for the conversion "),Ee=n(ao,"A",{href:!0,rel:!0});var pn=a(Ee);er=l(pn,"here"),pn.forEach(t),tr=l(ao,"."),ao.forEach(t),It=h(e),D=n(e,"H2",{class:!0});var so=a(D);Z=n(so,"A",{id:!0,class:!0,href:!0});var fn=a(Z);at=n(fn,"SPAN",{});var cn=a(at);d($e.$$.fragment,cn),cn.forEach(t),fn.forEach(t),or=h(so),st=n(so,"SPAN",{});var hn=a(st);rr=l(hn,"ALBERT"),hn.forEach(t),so.forEach(t),Ct=h(e),ee=n(e,"P",{});var lo=a(ee);nr=l(lo,`Convert TensorFlow model checkpoints of ALBERT to PyTorch using the `),Pe=n(lo,"A",{href:!0,rel:!0});var _n=a(Pe);ar=l(_n,"convert_albert_original_tf_checkpoint_to_pytorch.py"),_n.forEach(t),sr=l(lo," script."),lo.forEach(t),Ht=h(e),H=n(e,"P",{});var Ye=a(H);lr=l(Ye,"The CLI takes as input a TensorFlow checkpoint (three files starting with "),lt=n(Ye,"CODE",{});var mn=a(lt);ir=l(mn,"model.ckpt-best"),mn.forEach(t),pr=l(Ye,`) and the accompanying configuration file (`),it=n(Ye,"CODE",{});var un=a(it);fr=l(un,"albert_config.json"),un.forEach(t),cr=l(Ye,`), then creates and saves a PyTorch model. To run this conversion you will need to have TensorFlow and PyTorch installed.`),Ye.forEach(t),Rt=h(e),te=n(e,"P",{});var io=a(te);hr=l(io,"Here is an example of the conversion process for the pre-trained "),pt=n(io,"CODE",{});var dn=a(pt);_r=l(dn,"ALBERT Base"),dn.forEach(t),mr=l(io," model:"),io.forEach(t),Lt=h(e),d(Ae.$$.fragment,e),xt=h(e),oe=n(e,"P",{});var po=a(oe);ur=l(po,"You can download Google\u2019s pre-trained models for the conversion "),be=n(po,"A",{href:!0,rel:!0});var vn=a(be);dr=l(vn,"here"),vn.forEach(t),vr=l(po,"."),po.forEach(t),St=h(e),B=n(e,"H2",{class:!0});var fo=a(B);re=n(fo,"A",{id:!0,class:!0,href:!0});var Tn=a(re);ft=n(Tn,"SPAN",{});var En=a(ft);d(ye.$$.fragment,En),En.forEach(t),Tn.forEach(t),Tr=h(fo),ct=n(fo,"SPAN",{});var $n=a(ct);Er=l($n,"OpenAI GPT"),$n.forEach(t),fo.forEach(t),Ft=h(e),ne=n(e,"P",{});var co=a(ne);$r=l(co,`Here is an example of the conversion process for a pre-trained OpenAI GPT model, assuming that your NumPy checkpoint save as the same format than OpenAI pretrained model (see `),ge=n(co,"A",{href:!0,rel:!0});var Pn=a(ge);Pr=l(Pn,"here"),Pn.forEach(t),Ar=l(co,`\\ )`),co.forEach(t),Dt=h(e),d(we.$$.fragment,e),Bt=h(e),G=n(e,"H2",{class:!0});var ho=a(G);ae=n(ho,"A",{id:!0,class:!0,href:!0});var An=a(ae);ht=n(An,"SPAN",{});var bn=a(ht);d(ke.$$.fragment,bn),bn.forEach(t),An.forEach(t),br=h(ho),_t=n(ho,"SPAN",{});var yn=a(_t);yr=l(yn,"OpenAI GPT-2"),yn.forEach(t),ho.forEach(t),Gt=h(e),se=n(e,"P",{});var _o=a(se);gr=l(_o,"Here is an example of the conversion process for a pre-trained OpenAI GPT-2 model (see "),Oe=n(_o,"A",{href:!0,rel:!0});var gn=a(Oe);wr=l(gn,"here"),gn.forEach(t),kr=l(_o,")"),_o.forEach(t),Xt=h(e),d(Ne.$$.fragment,e),Ut=h(e),X=n(e,"H2",{class:!0});var mo=a(X);le=n(mo,"A",{id:!0,class:!0,href:!0});var wn=a(le);mt=n(wn,"SPAN",{});var kn=a(mt);d(Ie.$$.fragment,kn),kn.forEach(t),wn.forEach(t),Or=h(mo),ut=n(mo,"SPAN",{});var On=a(ut);Nr=l(On,"Transformer-XL"),On.forEach(t),mo.forEach(t),jt=h(e),ie=n(e,"P",{});var uo=a(ie);Ir=l(uo,"Here is an example of the conversion process for a pre-trained Transformer-XL model (see 
"),Ce=n(uo,"A",{href:!0,rel:!0});var Nn=a(Ce);Cr=l(Nn,"here"),Nn.forEach(t),Hr=l(uo,")"),uo.forEach(t),Kt=h(e),d(He.$$.fragment,e),Mt=h(e),U=n(e,"H2",{class:!0});var vo=a(U);pe=n(vo,"A",{id:!0,class:!0,href:!0});var In=a(pe);dt=n(In,"SPAN",{});var Cn=a(dt);d(Re.$$.fragment,Cn),Cn.forEach(t),In.forEach(t),Rr=h(vo),vt=n(vo,"SPAN",{});var Hn=a(vt);Lr=l(Hn,"XLNet"),Hn.forEach(t),vo.forEach(t),Yt=h(e),Ue=n(e,"P",{});var Rn=a(Ue);xr=l(Rn,"Here is an example of the conversion process for a pre-trained XLNet model:"),Rn.forEach(t),qt=h(e),d(Le.$$.fragment,e),Jt=h(e),j=n(e,"H2",{class:!0});var To=a(j);fe=n(To,"A",{id:!0,class:!0,href:!0});var Ln=a(fe);Tt=n(Ln,"SPAN",{});var xn=a(Tt);d(xe.$$.fragment,xn),xn.forEach(t),Ln.forEach(t),Sr=h(To),Et=n(To,"SPAN",{});var Sn=a(Et);Fr=l(Sn,"XLM"),Sn.forEach(t),To.forEach(t),zt=h(e),je=n(e,"P",{});var Fn=a(je);Dr=l(Fn,"Here is an example of the conversion process for a pre-trained XLM model:"),Fn.forEach(t),Qt=h(e),d(Se.$$.fragment,e),Vt=h(e),K=n(e,"H2",{class:!0});var Eo=a(K);ce=n(Eo,"A",{id:!0,class:!0,href:!0});var Dn=a(ce);$t=n(Dn,"SPAN",{});var Bn=a($t);d(Fe.$$.fragment,Bn),Bn.forEach(t),Dn.forEach(t),Br=h(Eo),Pt=n(Eo,"SPAN",{});var Gn=a(Pt);Gr=l(Gn,"T5"),Gn.forEach(t),Eo.forEach(t),Wt=h(e),Ke=n(e,"P",{});var Xn=a(Ke);Xr=l(Xn,"Here is an example of the conversion process for a pre-trained T5 model:"),Xn.forEach(t),Zt=h(e),d(De.$$.fragment,e),this.h()},h(){f(_,"name","hf:doc:metadata"),f(_,"content",JSON.stringify(zn)),f(A,"id","converting-tensorflow-checkpoints"),f(A,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(A,"href","#converting-tensorflow-checkpoints"),f(m,"class","relative group"),f(z,"id","bert"),f(z,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(z,"href","#bert"),f(F,"class","relative group"),f(ue,"href","https://github.com/google-research/bert#pre-trained-models"),f(ue,"rel","nofollow"),f(de,"href","https://github.com/huggingface/transformers/tree/master/src/transformers/models/bert/convert_bert_original_tf_checkpoint_to_pytorch.py"),f(de,"rel","nofollow"),f(Xe,"href","quicktour"),f(ve,"href","https://github.com/huggingface/transformers/tree/master/examples/pytorch/text-classification/run_glue.py"),f(ve,"rel","nofollow"),f(Ee,"href","https://github.com/google-research/bert#pre-trained-models"),f(Ee,"rel","nofollow"),f(Z,"id","albert"),f(Z,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(Z,"href","#albert"),f(D,"class","relative group"),f(Pe,"href","https://github.com/huggingface/transformers/tree/master/src/transformers/models/albert/convert_albert_original_tf_checkpoint_to_pytorch.py"),f(Pe,"rel","nofollow"),f(be,"href","https://github.com/google-research/albert#pre-trained-models"),f(be,"rel","nofollow"),f(re,"id","openai-gpt"),f(re,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(re,"href","#openai-gpt"),f(B,"class","relative group"),f(ge,"href","https://github.com/openai/finetune-transformer-lm"),f(ge,"rel","nofollow"),f(ae,"id","openai-gpt2"),f(ae,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute 
with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(ae,"href","#openai-gpt2"),f(G,"class","relative group"),f(Oe,"href","https://github.com/openai/gpt-2"),f(Oe,"rel","nofollow"),f(le,"id","transformerxl"),f(le,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(le,"href","#transformerxl"),f(X,"class","relative group"),f(Ce,"href","https://github.com/kimiyoung/transformer-xl/tree/master/tf#obtain-and-evaluate-pretrained-sota-models"),f(Ce,"rel","nofollow"),f(pe,"id","xlnet"),f(pe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(pe,"href","#xlnet"),f(U,"class","relative group"),f(fe,"id","xlm"),f(fe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(fe,"href","#xlm"),f(j,"class","relative group"),f(ce,"id","t5"),f(ce,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(ce,"href","#t5"),f(K,"class","relative group")},m(e,i){o(document.head,_),p(e,I,i),p(e,m,i),o(m,A),o(A,L),v(w,L,null),o(m,k),o(m,x),o(x,S),p(e,Y,i),p(e,O,i),o(O,b),o(O,y),o(y,Ge),o(O,q),p(e,_e,i),v(J,e,i),p(e,At,i),p(e,F,i),o(F,z),o(z,Je),v(me,Je,null),o(F,$o),o(F,ze),o(ze,Po),p(e,bt,i),p(e,C,i),o(C,Ao),o(C,ue),o(ue,bo),o(C,yo),o(C,de),o(de,go),o(C,wo),p(e,yt,i),p(e,P,i),o(P,ko),o(P,Qe),o(Qe,Oo),o(P,No),o(P,Ve),o(Ve,Io),o(P,Co),o(P,We),o(We,Ho),o(P,Ro),o(P,Xe),o(Xe,Lo),o(P,xo),o(P,ve),o(ve,So),o(P,Fo),p(e,gt,i),p(e,g,i),o(g,Do),o(g,Ze),o(Ze,Bo),o(g,Go),o(g,et),o(et,Xo),o(g,Uo),o(g,tt),o(tt,jo),o(g,Ko),o(g,ot),o(ot,Mo),o(g,Yo),p(e,wt,i),p(e,Q,i),o(Q,qo),o(Q,rt),o(rt,Jo),o(Q,zo),p(e,kt,i),p(e,V,i),o(V,Qo),o(V,nt),o(nt,Vo),o(V,Wo),p(e,Ot,i),v(Te,e,i),p(e,Nt,i),p(e,W,i),o(W,Zo),o(W,Ee),o(Ee,er),o(W,tr),p(e,It,i),p(e,D,i),o(D,Z),o(Z,at),v($e,at,null),o(D,or),o(D,st),o(st,rr),p(e,Ct,i),p(e,ee,i),o(ee,nr),o(ee,Pe),o(Pe,ar),o(ee,sr),p(e,Ht,i),p(e,H,i),o(H,lr),o(H,lt),o(lt,ir),o(H,pr),o(H,it),o(it,fr),o(H,cr),p(e,Rt,i),p(e,te,i),o(te,hr),o(te,pt),o(pt,_r),o(te,mr),p(e,Lt,i),v(Ae,e,i),p(e,xt,i),p(e,oe,i),o(oe,ur),o(oe,be),o(be,dr),o(oe,vr),p(e,St,i),p(e,B,i),o(B,re),o(re,ft),v(ye,ft,null),o(B,Tr),o(B,ct),o(ct,Er),p(e,Ft,i),p(e,ne,i),o(ne,$r),o(ne,ge),o(ge,Pr),o(ne,Ar),p(e,Dt,i),v(we,e,i),p(e,Bt,i),p(e,G,i),o(G,ae),o(ae,ht),v(ke,ht,null),o(G,br),o(G,_t),o(_t,yr),p(e,Gt,i),p(e,se,i),o(se,gr),o(se,Oe),o(Oe,wr),o(se,kr),p(e,Xt,i),v(Ne,e,i),p(e,Ut,i),p(e,X,i),o(X,le),o(le,mt),v(Ie,mt,null),o(X,Or),o(X,ut),o(ut,Nr),p(e,jt,i),p(e,ie,i),o(ie,Ir),o(ie,Ce),o(Ce,Cr),o(ie,Hr),p(e,Kt,i),v(He,e,i),p(e,Mt,i),p(e,U,i),o(U,pe),o(pe,dt),v(Re,dt,null),o(U,Rr),o(U,vt),o(vt,Lr),p(e,Yt,i),p(e,Ue,i),o(Ue,xr),p(e,qt,i),v(Le,e,i),p(e,Jt,i),p(e,j,i),o(j,fe),o(fe,Tt),v(xe,Tt,null),o(j,Sr),o(j,Et),o(Et,Fr),p(e,zt,i),p(e,je,i),o(je,Dr),p(e,Qt,i),v(Se,e,i),p(e,Vt,i),p(e,K,i),o(K,ce),o(ce,$t),v(Fe,$t,null),o(K,Br),o(K,Pt),o(Pt,Gr),p(e,Wt,i),p(e,Ke,i),o(Ke,Xr),p(e,Zt,i),v(De,e,i),eo=!0},p(e,[i]){const 
Be={};i&2&&(Be.$$scope={dirty:i,ctx:e}),J.$set(Be)},i(e){eo||(T(w.$$.fragment,e),T(J.$$.fragment,e),T(me.$$.fragment,e),T(Te.$$.fragment,e),T($e.$$.fragment,e),T(Ae.$$.fragment,e),T(ye.$$.fragment,e),T(we.$$.fragment,e),T(ke.$$.fragment,e),T(Ne.$$.fragment,e),T(Ie.$$.fragment,e),T(He.$$.fragment,e),T(Re.$$.fragment,e),T(Le.$$.fragment,e),T(xe.$$.fragment,e),T(Se.$$.fragment,e),T(Fe.$$.fragment,e),T(De.$$.fragment,e),eo=!0)},o(e){E(w.$$.fragment,e),E(J.$$.fragment,e),E(me.$$.fragment,e),E(Te.$$.fragment,e),E($e.$$.fragment,e),E(Ae.$$.fragment,e),E(ye.$$.fragment,e),E(we.$$.fragment,e),E(ke.$$.fragment,e),E(Ne.$$.fragment,e),E(Ie.$$.fragment,e),E(He.$$.fragment,e),E(Re.$$.fragment,e),E(Le.$$.fragment,e),E(xe.$$.fragment,e),E(Se.$$.fragment,e),E(Fe.$$.fragment,e),E(De.$$.fragment,e),eo=!1},d(e){t(_),e&&t(I),e&&t(m),$(w),e&&t(Y),e&&t(O),e&&t(_e),$(J,e),e&&t(At),e&&t(F),$(me),e&&t(bt),e&&t(C),e&&t(yt),e&&t(P),e&&t(gt),e&&t(g),e&&t(wt),e&&t(Q),e&&t(kt),e&&t(V),e&&t(Ot),$(Te,e),e&&t(Nt),e&&t(W),e&&t(It),e&&t(D),$($e),e&&t(Ct),e&&t(ee),e&&t(Ht),e&&t(H),e&&t(Rt),e&&t(te),e&&t(Lt),$(Ae,e),e&&t(xt),e&&t(oe),e&&t(St),e&&t(B),$(ye),e&&t(Ft),e&&t(ne),e&&t(Dt),$(we,e),e&&t(Bt),e&&t(G),$(ke),e&&t(Gt),e&&t(se),e&&t(Xt),$(Ne,e),e&&t(Ut),e&&t(X),$(Ie),e&&t(jt),e&&t(ie),e&&t(Kt),$(He,e),e&&t(Mt),e&&t(U),$(Re),e&&t(Yt),e&&t(Ue),e&&t(qt),$(Le,e),e&&t(Jt),e&&t(j),$(xe),e&&t(zt),e&&t(je),e&&t(Qt),$(Se,e),e&&t(Vt),e&&t(K),$(Fe),e&&t(Wt),e&&t(Ke),e&&t(Zt),$(De,e)}}}const zn={local:"converting-tensorflow-checkpoints",sections:[{local:"bert",title:"BERT"},{local:"albert",title:"ALBERT"},{local:"openai-gpt",title:"OpenAI GPT"},{local:"openai-gpt2",title:"OpenAI GPT-2"},{local:"transformerxl",title:"Transformer-XL"},{local:"xlnet",title:"XLNet"},{local:"xlm",title:"XLM"},{local:"t5",title:"T5"}],title:"Converting Tensorflow Checkpoints"};function Qn(qe,_,I){let{fw:m}=_;return qe.$$set=A=>{"fw"in A&&I(0,m=A.fw)},[m]}class oa extends Un{constructor(_){super();jn(this,_,Qn,Jn,Kn,{fw:0})}}export{oa as default,zn as metadata};
261
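For reference, a minimal Python sketch of loading a checkpoint produced by transformers-cli convert, which the conversion guide compiled into the page above says can be imported with from_pretrained(). It assumes the converted pytorch_model.bin sits in a directory that also contains the kept vocab.txt and the original BERT config saved (or copied) under the name config.json; the path is a placeholder.

from transformers import BertModel, BertTokenizer

# Placeholder path: wherever --pytorch_dump_output wrote pytorch_model.bin.
# from_pretrained() looks for the config under the name config.json, so the
# original bert_config.json is assumed to have been copied/renamed to that.
dump_dir = "/path/to/bert/uncased_L-12_H-768_A-12"

model = BertModel.from_pretrained(dump_dir)
tokenizer = BertTokenizer.from_pretrained(dump_dir)  # reads the kept vocab.txt

inputs = tokenizer("Hello, world!", return_tensors="pt")
outputs = model(**inputs)
print(outputs.last_hidden_state.shape)

Loading the converted pre-training checkpoint into a plain BertModel may log a warning about unused pre-training head weights; that is expected and harmless when the model is used for feature extraction.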
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages/add_new_pipeline.mdx-86ba54ca.js
import{S as sl,i as tl,s as al,e as l,k as d,w as re,t as a,M as ol,c as r,d as t,m as c,a as n,x as ne,h as o,b as f,F as e,g as p,y as ie,L as ll,q as pe,o as de,B as ce}from"../chunks/vendor-4833417e.js";import{I as Qa}from"../chunks/IconCopyLink-4b81c553.js";import{C as Va}from"../chunks/CodeBlock-6a3d1b46.js";import"../chunks/CopyButton-dacfbfaf.js";function rl(Js){let j,ee,m,g,he,B,Bs,ue,Gs,ms,q,Rs,fe,Ks,Qs,me,Vs,Ws,_s,_,Zs,_e,et,st,we,tt,at,ye,ot,lt,ws,h,rt,ke,nt,it,ve,pt,dt,ge,ct,ht,Ee,ut,ft,be,mt,_t,ys,G,ks,se,wt,vs,O,je,yt,kt,qe,vt,gt,gs,E,Ce,Et,bt,De,jt,qt,Te,Ct,Dt,Es,P,Oe,Tt,Ot,Pe,Pt,$t,bs,b,$e,At,xt,Ae,St,It,xe,zt,Nt,js,u,Lt,Se,Ut,Ht,Ie,Mt,Yt,ze,Xt,Ft,Ne,Jt,Bt,qs,x,Gt,Le,Rt,Kt,Cs,R,Ds,w,Qt,Ue,Vt,Wt,He,Zt,ea,Me,sa,ta,Ts,K,Os,te,aa,Ps,$,S,Ye,Q,oa,Xe,la,$s,C,ra,Fe,na,ia,Je,pa,da,As,A,I,Be,V,ca,Ge,ha,xs,z,ua,Re,fa,ma,Ss,y,_a,Ke,wa,ya,Qe,ka,va,Ve,ga,Ea,Is,D,ba,We,ja,qa,Ze,Ca,Da,zs,N,Ta,es,Oa,Pa,Ns,k,L,ss,$a,Aa,ts,xa,Sa,Ia,U,as,za,Na,os,La,Ua,Ha,H,ls,Ma,Ya,rs,Xa,Fa,Ja,M,ns,Ba,Ga,is,Ra,Ka,Ls;return B=new Qa({}),G=new Va({props:{code:`from transformers import Pipeline class MyPipeline(Pipeline): def _sanitize_parameters(self, **kwargs): preprocess_kwargs = {} if "maybe_arg" in kwargs: preprocess_kwargs["maybe_arg"] = kwargs["maybe_arg"] return preprocess_kwargs, {}, {} def preprocess(self, inputs, maybe_arg=2): model_input = Tensor(inputs["input_ids"]) return {"model_input": model_input} def _forward(self, model_inputs): # model_inputs == {"model_input": model_input} outputs = self.model(**model_inputs) # Maybe {"logits": Tensor(...)} return outputs def postprocess(self, model_outputs): best_class = model_outputs["logits"].softmax(-1) return best_class`,highlighted:`<span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Pipeline <span class="hljs-keyword">class</span> <span class="hljs-title class_">MyPipeline</span>(<span class="hljs-title class_ inherited__">Pipeline</span>): <span class="hljs-keyword">def</span> <span class="hljs-title function_">_sanitize_parameters</span>(<span class="hljs-params">self, **kwargs</span>): preprocess_kwargs = {} <span class="hljs-keyword">if</span> <span class="hljs-string">&quot;maybe_arg&quot;</span> <span class="hljs-keyword">in</span> kwargs: preprocess_kwargs[<span class="hljs-string">&quot;maybe_arg&quot;</span>] = kwargs[<span class="hljs-string">&quot;maybe_arg&quot;</span>] <span class="hljs-keyword">return</span> preprocess_kwargs, {}, {} <span class="hljs-keyword">def</span> <span class="hljs-title function_">preprocess</span>(<span class="hljs-params">self, inputs, maybe_arg=<span class="hljs-number">2</span></span>): model_input = Tensor(inputs[<span class="hljs-string">&quot;input_ids&quot;</span>]) <span class="hljs-keyword">return</span> {<span class="hljs-string">&quot;model_input&quot;</span>: model_input} <span class="hljs-keyword">def</span> <span class="hljs-title function_">_forward</span>(<span class="hljs-params">self, model_inputs</span>): <span class="hljs-comment"># model_inputs == {&quot;model_input&quot;: model_input}</span> outputs = self.model(**model_inputs) <span class="hljs-comment"># Maybe {&quot;logits&quot;: Tensor(...)}</span> <span class="hljs-keyword">return</span> outputs <span class="hljs-keyword">def</span> <span class="hljs-title function_">postprocess</span>(<span class="hljs-params">self, model_outputs</span>): best_class = model_outputs[<span class="hljs-string">&quot;logits&quot;</span>].softmax(-<span class="hljs-number">1</span>) <span 
class="hljs-keyword">return</span> best_class`}}),R=new Va({props:{code:`pipe = pipeline("my-new-task") pipe("This is a test") pipe("This is a test", top_k=2)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>pipe = pipeline(<span class="hljs-string">&quot;my-new-task&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>pipe(<span class="hljs-string">&quot;This is a test&quot;</span>) [{<span class="hljs-string">&quot;label&quot;</span>: <span class="hljs-string">&quot;1-star&quot;</span>, <span class="hljs-string">&quot;score&quot;</span>: <span class="hljs-number">0.8</span>}, {<span class="hljs-string">&quot;label&quot;</span>: <span class="hljs-string">&quot;2-star&quot;</span>, <span class="hljs-string">&quot;score&quot;</span>: <span class="hljs-number">0.1</span>}, {<span class="hljs-string">&quot;label&quot;</span>: <span class="hljs-string">&quot;3-star&quot;</span>, <span class="hljs-string">&quot;score&quot;</span>: <span class="hljs-number">0.05</span>} {<span class="hljs-string">&quot;label&quot;</span>: <span class="hljs-string">&quot;4-star&quot;</span>, <span class="hljs-string">&quot;score&quot;</span>: <span class="hljs-number">0.025</span>}, {<span class="hljs-string">&quot;label&quot;</span>: <span class="hljs-string">&quot;5-star&quot;</span>, <span class="hljs-string">&quot;score&quot;</span>: <span class="hljs-number">0.025</span>}] <span class="hljs-meta">&gt;&gt;&gt; </span>pipe(<span class="hljs-string">&quot;This is a test&quot;</span>, top_k=<span class="hljs-number">2</span>) [{<span class="hljs-string">&quot;label&quot;</span>: <span class="hljs-string">&quot;1-star&quot;</span>, <span class="hljs-string">&quot;score&quot;</span>: <span class="hljs-number">0.8</span>}, {<span class="hljs-string">&quot;label&quot;</span>: <span class="hljs-string">&quot;2-star&quot;</span>, <span class="hljs-string">&quot;score&quot;</span>: <span class="hljs-number">0.1</span>}]`}}),K=new Va({props:{code:`def postprocess(self, model_outputs, top_k=5): best_class = model_outputs["logits"].softmax(-1) # Add logic to handle top_k return best_class def _sanitize_parameters(self, **kwargs): preprocess_kwargs = {} if "maybe_arg" in kwargs: preprocess_kwargs["maybe_arg"] = kwargs["maybe_arg"] postprocess_kwargs = {} if "top_k" in kwargs: preprocess_kwargs["top_k"] = kwargs["top_k"] return preprocess_kwargs, {}, postprocess_kwargs`,highlighted:`<span class="hljs-keyword">def</span> <span class="hljs-title function_">postprocess</span>(<span class="hljs-params">self, model_outputs, top_k=<span class="hljs-number">5</span></span>): best_class = model_outputs[<span class="hljs-string">&quot;logits&quot;</span>].softmax(-<span class="hljs-number">1</span>) <span class="hljs-comment"># Add logic to handle top_k</span> <span class="hljs-keyword">return</span> best_class <span class="hljs-keyword">def</span> <span class="hljs-title function_">_sanitize_parameters</span>(<span class="hljs-params">self, **kwargs</span>): preprocess_kwargs = {} <span class="hljs-keyword">if</span> <span class="hljs-string">&quot;maybe_arg&quot;</span> <span class="hljs-keyword">in</span> kwargs: preprocess_kwargs[<span class="hljs-string">&quot;maybe_arg&quot;</span>] = kwargs[<span class="hljs-string">&quot;maybe_arg&quot;</span>] postprocess_kwargs = {} <span class="hljs-keyword">if</span> <span class="hljs-string">&quot;top_k&quot;</span> <span class="hljs-keyword">in</span> kwargs: preprocess_kwargs[<span class="hljs-string">&quot;top_k&quot;</span>] = kwargs[<span 
class="hljs-string">&quot;top_k&quot;</span>] <span class="hljs-keyword">return</span> preprocess_kwargs, {}, postprocess_kwargs`}}),Q=new Qa({}),V=new Qa({}),{c(){j=l("meta"),ee=d(),m=l("h1"),g=l("a"),he=l("span"),re(B.$$.fragment),Bs=d(),ue=l("span"),Gs=a("How to add a pipeline to \u{1F917} Transformers?"),ms=d(),q=l("p"),Rs=a(`First and foremost, you need to decide the raw entries the pipeline will be able to take. It can be strings, raw bytes, dictionaries or whatever seems to be the most likely desired input. Try to keep these inputs as pure Python as possible as it makes compatibility easier (even through other languages via JSON). Those will be the `),fe=l("code"),Ks=a("inputs"),Qs=a(` of the pipeline (`),me=l("code"),Vs=a("preprocess"),Ws=a(")."),_s=d(),_=l("p"),Zs=a("Then define the "),_e=l("code"),et=a("outputs"),st=a(". Same policy as the "),we=l("code"),tt=a("inputs"),at=a(`. The simpler, the better. Those will be the outputs of `),ye=l("code"),ot=a("postprocess"),lt=a(" method."),ws=d(),h=l("p"),rt=a("Start by inheriting the base class "),ke=l("code"),nt=a("Pipeline"),it=a(". with the 4 methods needed to implement "),ve=l("code"),pt=a("preprocess"),dt=a(`, `),ge=l("code"),ct=a("_forward"),ht=a(", "),Ee=l("code"),ut=a("postprocess"),ft=a(" and "),be=l("code"),mt=a("_sanitize_parameters"),_t=a("."),ys=d(),re(G.$$.fragment),ks=d(),se=l("p"),wt=a(`The structure of this breakdown is to support relatively seamless support for CPU/GPU, while supporting doing pre/postprocessing on the CPU on different threads`),vs=d(),O=l("p"),je=l("code"),yt=a("preprocess"),kt=a(` will take the originally defined inputs, and turn them into something feedable to the model. It might contain more information and is usually a `),qe=l("code"),vt=a("Dict"),gt=a("."),gs=d(),E=l("p"),Ce=l("code"),Et=a("_forward"),bt=a(" is the implementation detail and is not meant to be called directly. "),De=l("code"),jt=a("forward"),qt=a(` is the preferred called method as it contains safeguards to make sure everything is working on the expected device. If anything is linked to a real model it belongs in the `),Te=l("code"),Ct=a("_forward"),Dt=a(" method, anything else is in the preprocess/postprocess."),Es=d(),P=l("p"),Oe=l("code"),Tt=a("postprocess"),Ot=a(" methods will take the output of "),Pe=l("code"),Pt=a("_forward"),$t=a(` and turn it into the final output that were decided earlier.`),bs=d(),b=l("p"),$e=l("code"),At=a("_sanitize_parameters"),xt=a(` exists to allow users to pass any parameters whenever they wish, be it at initialization time `),Ae=l("code"),St=a("pipeline(...., maybe_arg=4)"),It=a(" or at call time "),xe=l("code"),zt=a("pipe = pipeline(...); output = pipe(...., maybe_arg=4)"),Nt=a("."),js=d(),u=l("p"),Lt=a("The returns of "),Se=l("code"),Ut=a("_sanitize_parameters"),Ht=a(" are the 3 dicts of kwargs that will be passed directly to "),Ie=l("code"),Mt=a("preprocess"),Yt=a(`, `),ze=l("code"),Xt=a("_forward"),Ft=a(" and "),Ne=l("code"),Jt=a("postprocess"),Bt=a(`. Don\u2019t fill anything if the caller didn\u2019t call with any extra parameter. That allows to keep the default arguments in the function definition which is always more \u201Cnatural\u201D.`),qs=d(),x=l("p"),Gt=a("A classic example would be a "),Le=l("code"),Rt=a("top_k"),Kt=a(" argument in the post processing in classification tasks."),Cs=d(),re(R.$$.fragment),Ds=d(),w=l("p"),Qt=a("In order to achieve that, we\u2019ll update our "),Ue=l("code"),Vt=a("postprocess"),Wt=a(" method with a default parameter to "),He=l("code"),Zt=a("5"),ea=a(`. 
and edit `),Me=l("code"),sa=a("_sanitize_parameters"),ta=a(" to allow this new parameter."),Ts=d(),re(K.$$.fragment),Os=d(),te=l("p"),aa=a(`Try to keep the inputs/outputs very simple and ideally JSON-serializable as it makes the pipeline usage very easy without requiring users to understand new kind of objects. It\u2019s also relatively common to support many different types of arguments for ease of use (audio files, can be filenames, URLs or pure bytes)`),Ps=d(),$=l("h2"),S=l("a"),Ye=l("span"),re(Q.$$.fragment),oa=d(),Xe=l("span"),la=a("Adding it to the list of supported tasks"),$s=d(),C=l("p"),ra=a("Go to "),Fe=l("code"),na=a("src/transformers/pipelines/__init__.py"),ia=a(" and fill in "),Je=l("code"),pa=a("SUPPORTED_TASKS"),da=a(` with your newly created pipeline. If possible it should provide a default model.`),As=d(),A=l("h2"),I=l("a"),Be=l("span"),re(V.$$.fragment),ca=d(),Ge=l("span"),ha=a("Adding tests"),xs=d(),z=l("p"),ua=a("Create a new file "),Re=l("code"),fa=a("tests/test_pipelines_MY_PIPELINE.py"),ma=a(" with example with the other tests."),Ss=d(),y=l("p"),_a=a("The "),Ke=l("code"),wa=a("run_pipeline_test"),ya=a(` function will be very generic and run on small random models on every possible architecture as defined by `),Qe=l("code"),ka=a("model_mapping"),va=a(" and "),Ve=l("code"),ga=a("tf_model_mapping"),Ea=a("."),Is=d(),D=l("p"),ba=a(`This is very important to test future compatibility, meaning if someone adds a new model for `),We=l("code"),ja=a("XXXForQuestionAnswering"),qa=a(` then the pipeline test will attempt to run on it. Because the models are random it\u2019s impossible to check for actual values, that\u2019s why There is a helper `),Ze=l("code"),Ca=a("ANY"),Da=a(` that will simply attempt to match the output of the pipeline TYPE.`),zs=d(),N=l("p"),Ta=a("You also "),es=l("em"),Oa=a("need"),Pa=a(" to implement 2 (ideally 4) tests."),Ns=d(),k=l("ul"),L=l("li"),ss=l("code"),$a=a("test_small_model_pt"),Aa=a(` : Define 1 small model for this pipeline (doesn\u2019t matter if the results don\u2019t make sense) and test the pipeline outputs. The results should be the same as `),ts=l("code"),xa=a("test_small_model_tf"),Sa=a("."),Ia=d(),U=l("li"),as=l("code"),za=a("test_small_model_tf"),Na=a(` : Define 1 small model for this pipeline (doesn\u2019t matter if the results don\u2019t make sense) and test the pipeline outputs. The results should be the same as `),os=l("code"),La=a("test_small_model_pt"),Ua=a("."),Ha=d(),H=l("li"),ls=l("code"),Ma=a("test_large_model_pt"),Ya=a(" ("),rs=l("code"),Xa=a("optional"),Fa=a(`): Tests the pipeline on a real pipeline where the results are supposed to make sense. These tests are slow and should be marked as such. Here the goal is to showcase the pipeline and to make sure there is no drift in future releases`),Ja=d(),M=l("li"),ns=l("code"),Ba=a("test_large_model_tf"),Ga=a(" ("),is=l("code"),Ra=a("optional"),Ka=a(`): Tests the pipeline on a real pipeline where the results are supposed to make sense. These tests are slow and should be marked as such. 
Here the goal is to showcase the pipeline and to make sure there is no drift in future releases`),this.h()},l(s){const i=ol('[data-svelte="svelte-1phssyn"]',document.head);j=r(i,"META",{name:!0,content:!0}),i.forEach(t),ee=c(s),m=r(s,"H1",{class:!0});var Us=n(m);g=r(Us,"A",{id:!0,class:!0,href:!0});var Wa=n(g);he=r(Wa,"SPAN",{});var Za=n(he);ne(B.$$.fragment,Za),Za.forEach(t),Wa.forEach(t),Bs=c(Us),ue=r(Us,"SPAN",{});var eo=n(ue);Gs=o(eo,"How to add a pipeline to \u{1F917} Transformers?"),eo.forEach(t),Us.forEach(t),ms=c(s),q=r(s,"P",{});var ae=n(q);Rs=o(ae,`First and foremost, you need to decide the raw entries the pipeline will be able to take. It can be strings, raw bytes, dictionaries or whatever seems to be the most likely desired input. Try to keep these inputs as pure Python as possible as it makes compatibility easier (even through other languages via JSON). Those will be the `),fe=r(ae,"CODE",{});var so=n(fe);Ks=o(so,"inputs"),so.forEach(t),Qs=o(ae,` of the pipeline (`),me=r(ae,"CODE",{});var to=n(me);Vs=o(to,"preprocess"),to.forEach(t),Ws=o(ae,")."),ae.forEach(t),_s=c(s),_=r(s,"P",{});var Y=n(_);Zs=o(Y,"Then define the "),_e=r(Y,"CODE",{});var ao=n(_e);et=o(ao,"outputs"),ao.forEach(t),st=o(Y,". Same policy as the "),we=r(Y,"CODE",{});var oo=n(we);tt=o(oo,"inputs"),oo.forEach(t),at=o(Y,`. The simpler, the better. Those will be the outputs of `),ye=r(Y,"CODE",{});var lo=n(ye);ot=o(lo,"postprocess"),lo.forEach(t),lt=o(Y," method."),Y.forEach(t),ws=c(s),h=r(s,"P",{});var v=n(h);rt=o(v,"Start by inheriting the base class "),ke=r(v,"CODE",{});var ro=n(ke);nt=o(ro,"Pipeline"),ro.forEach(t),it=o(v,". with the 4 methods needed to implement "),ve=r(v,"CODE",{});var no=n(ve);pt=o(no,"preprocess"),no.forEach(t),dt=o(v,`, `),ge=r(v,"CODE",{});var io=n(ge);ct=o(io,"_forward"),io.forEach(t),ht=o(v,", "),Ee=r(v,"CODE",{});var po=n(Ee);ut=o(po,"postprocess"),po.forEach(t),ft=o(v," and "),be=r(v,"CODE",{});var co=n(be);mt=o(co,"_sanitize_parameters"),co.forEach(t),_t=o(v,"."),v.forEach(t),ys=c(s),ne(G.$$.fragment,s),ks=c(s),se=r(s,"P",{});var ho=n(se);wt=o(ho,`The structure of this breakdown is to support relatively seamless support for CPU/GPU, while supporting doing pre/postprocessing on the CPU on different threads`),ho.forEach(t),vs=c(s),O=r(s,"P",{});var ps=n(O);je=r(ps,"CODE",{});var uo=n(je);yt=o(uo,"preprocess"),uo.forEach(t),kt=o(ps,` will take the originally defined inputs, and turn them into something feedable to the model. It might contain more information and is usually a `),qe=r(ps,"CODE",{});var fo=n(qe);vt=o(fo,"Dict"),fo.forEach(t),gt=o(ps,"."),ps.forEach(t),gs=c(s),E=r(s,"P",{});var W=n(E);Ce=r(W,"CODE",{});var mo=n(Ce);Et=o(mo,"_forward"),mo.forEach(t),bt=o(W," is the implementation detail and is not meant to be called directly. "),De=r(W,"CODE",{});var _o=n(De);jt=o(_o,"forward"),_o.forEach(t),qt=o(W,` is the preferred called method as it contains safeguards to make sure everything is working on the expected device. 
If anything is linked to a real model it belongs in the `),Te=r(W,"CODE",{});var wo=n(Te);Ct=o(wo,"_forward"),wo.forEach(t),Dt=o(W," method, anything else is in the preprocess/postprocess."),W.forEach(t),Es=c(s),P=r(s,"P",{});var ds=n(P);Oe=r(ds,"CODE",{});var yo=n(Oe);Tt=o(yo,"postprocess"),yo.forEach(t),Ot=o(ds," methods will take the output of "),Pe=r(ds,"CODE",{});var ko=n(Pe);Pt=o(ko,"_forward"),ko.forEach(t),$t=o(ds,` and turn it into the final output that were decided earlier.`),ds.forEach(t),bs=c(s),b=r(s,"P",{});var Z=n(b);$e=r(Z,"CODE",{});var vo=n($e);At=o(vo,"_sanitize_parameters"),vo.forEach(t),xt=o(Z,` exists to allow users to pass any parameters whenever they wish, be it at initialization time `),Ae=r(Z,"CODE",{});var go=n(Ae);St=o(go,"pipeline(...., maybe_arg=4)"),go.forEach(t),It=o(Z," or at call time "),xe=r(Z,"CODE",{});var Eo=n(xe);zt=o(Eo,"pipe = pipeline(...); output = pipe(...., maybe_arg=4)"),Eo.forEach(t),Nt=o(Z,"."),Z.forEach(t),js=c(s),u=r(s,"P",{});var T=n(u);Lt=o(T,"The returns of "),Se=r(T,"CODE",{});var bo=n(Se);Ut=o(bo,"_sanitize_parameters"),bo.forEach(t),Ht=o(T," are the 3 dicts of kwargs that will be passed directly to "),Ie=r(T,"CODE",{});var jo=n(Ie);Mt=o(jo,"preprocess"),jo.forEach(t),Yt=o(T,`, `),ze=r(T,"CODE",{});var qo=n(ze);Xt=o(qo,"_forward"),qo.forEach(t),Ft=o(T," and "),Ne=r(T,"CODE",{});var Co=n(Ne);Jt=o(Co,"postprocess"),Co.forEach(t),Bt=o(T,`. Don\u2019t fill anything if the caller didn\u2019t call with any extra parameter. That allows to keep the default arguments in the function definition which is always more \u201Cnatural\u201D.`),T.forEach(t),qs=c(s),x=r(s,"P",{});var Hs=n(x);Gt=o(Hs,"A classic example would be a "),Le=r(Hs,"CODE",{});var Do=n(Le);Rt=o(Do,"top_k"),Do.forEach(t),Kt=o(Hs," argument in the post processing in classification tasks."),Hs.forEach(t),Cs=c(s),ne(R.$$.fragment,s),Ds=c(s),w=r(s,"P",{});var X=n(w);Qt=o(X,"In order to achieve that, we\u2019ll update our "),Ue=r(X,"CODE",{});var To=n(Ue);Vt=o(To,"postprocess"),To.forEach(t),Wt=o(X," method with a default parameter to "),He=r(X,"CODE",{});var Oo=n(He);Zt=o(Oo,"5"),Oo.forEach(t),ea=o(X,`. and edit `),Me=r(X,"CODE",{});var Po=n(Me);sa=o(Po,"_sanitize_parameters"),Po.forEach(t),ta=o(X," to allow this new parameter."),X.forEach(t),Ts=c(s),ne(K.$$.fragment,s),Os=c(s),te=r(s,"P",{});var $o=n(te);aa=o($o,`Try to keep the inputs/outputs very simple and ideally JSON-serializable as it makes the pipeline usage very easy without requiring users to understand new kind of objects. It\u2019s also relatively common to support many different types of arguments for ease of use (audio files, can be filenames, URLs or pure bytes)`),$o.forEach(t),Ps=c(s),$=r(s,"H2",{class:!0});var Ms=n($);S=r(Ms,"A",{id:!0,class:!0,href:!0});var Ao=n(S);Ye=r(Ao,"SPAN",{});var xo=n(Ye);ne(Q.$$.fragment,xo),xo.forEach(t),Ao.forEach(t),oa=c(Ms),Xe=r(Ms,"SPAN",{});var So=n(Xe);la=o(So,"Adding it to the list of supported tasks"),So.forEach(t),Ms.forEach(t),$s=c(s),C=r(s,"P",{});var oe=n(C);ra=o(oe,"Go to "),Fe=r(oe,"CODE",{});var Io=n(Fe);na=o(Io,"src/transformers/pipelines/__init__.py"),Io.forEach(t),ia=o(oe," and fill in "),Je=r(oe,"CODE",{});var zo=n(Je);pa=o(zo,"SUPPORTED_TASKS"),zo.forEach(t),da=o(oe,` with your newly created pipeline. 
If possible it should provide a default model.`),oe.forEach(t),As=c(s),A=r(s,"H2",{class:!0});var Ys=n(A);I=r(Ys,"A",{id:!0,class:!0,href:!0});var No=n(I);Be=r(No,"SPAN",{});var Lo=n(Be);ne(V.$$.fragment,Lo),Lo.forEach(t),No.forEach(t),ca=c(Ys),Ge=r(Ys,"SPAN",{});var Uo=n(Ge);ha=o(Uo,"Adding tests"),Uo.forEach(t),Ys.forEach(t),xs=c(s),z=r(s,"P",{});var Xs=n(z);ua=o(Xs,"Create a new file "),Re=r(Xs,"CODE",{});var Ho=n(Re);fa=o(Ho,"tests/test_pipelines_MY_PIPELINE.py"),Ho.forEach(t),ma=o(Xs," with example with the other tests."),Xs.forEach(t),Ss=c(s),y=r(s,"P",{});var F=n(y);_a=o(F,"The "),Ke=r(F,"CODE",{});var Mo=n(Ke);wa=o(Mo,"run_pipeline_test"),Mo.forEach(t),ya=o(F,` function will be very generic and run on small random models on every possible architecture as defined by `),Qe=r(F,"CODE",{});var Yo=n(Qe);ka=o(Yo,"model_mapping"),Yo.forEach(t),va=o(F," and "),Ve=r(F,"CODE",{});var Xo=n(Ve);ga=o(Xo,"tf_model_mapping"),Xo.forEach(t),Ea=o(F,"."),F.forEach(t),Is=c(s),D=r(s,"P",{});var le=n(D);ba=o(le,`This is very important to test future compatibility, meaning if someone adds a new model for `),We=r(le,"CODE",{});var Fo=n(We);ja=o(Fo,"XXXForQuestionAnswering"),Fo.forEach(t),qa=o(le,` then the pipeline test will attempt to run on it. Because the models are random it\u2019s impossible to check for actual values, that\u2019s why There is a helper `),Ze=r(le,"CODE",{});var Jo=n(Ze);Ca=o(Jo,"ANY"),Jo.forEach(t),Da=o(le,` that will simply attempt to match the output of the pipeline TYPE.`),le.forEach(t),zs=c(s),N=r(s,"P",{});var Fs=n(N);Ta=o(Fs,"You also "),es=r(Fs,"EM",{});var Bo=n(es);Oa=o(Bo,"need"),Bo.forEach(t),Pa=o(Fs," to implement 2 (ideally 4) tests."),Fs.forEach(t),Ns=c(s),k=r(s,"UL",{});var J=n(k);L=r(J,"LI",{});var cs=n(L);ss=r(cs,"CODE",{});var Go=n(ss);$a=o(Go,"test_small_model_pt"),Go.forEach(t),Aa=o(cs,` : Define 1 small model for this pipeline (doesn\u2019t matter if the results don\u2019t make sense) and test the pipeline outputs. The results should be the same as `),ts=r(cs,"CODE",{});var Ro=n(ts);xa=o(Ro,"test_small_model_tf"),Ro.forEach(t),Sa=o(cs,"."),cs.forEach(t),Ia=c(J),U=r(J,"LI",{});var hs=n(U);as=r(hs,"CODE",{});var Ko=n(as);za=o(Ko,"test_small_model_tf"),Ko.forEach(t),Na=o(hs,` : Define 1 small model for this pipeline (doesn\u2019t matter if the results don\u2019t make sense) and test the pipeline outputs. The results should be the same as `),os=r(hs,"CODE",{});var Qo=n(os);La=o(Qo,"test_small_model_pt"),Qo.forEach(t),Ua=o(hs,"."),hs.forEach(t),Ha=c(J),H=r(J,"LI",{});var us=n(H);ls=r(us,"CODE",{});var Vo=n(ls);Ma=o(Vo,"test_large_model_pt"),Vo.forEach(t),Ya=o(us," ("),rs=r(us,"CODE",{});var Wo=n(rs);Xa=o(Wo,"optional"),Wo.forEach(t),Fa=o(us,`): Tests the pipeline on a real pipeline where the results are supposed to make sense. These tests are slow and should be marked as such. Here the goal is to showcase the pipeline and to make sure there is no drift in future releases`),us.forEach(t),Ja=c(J),M=r(J,"LI",{});var fs=n(M);ns=r(fs,"CODE",{});var Zo=n(ns);Ba=o(Zo,"test_large_model_tf"),Zo.forEach(t),Ga=o(fs," ("),is=r(fs,"CODE",{});var el=n(is);Ra=o(el,"optional"),el.forEach(t),Ka=o(fs,`): Tests the pipeline on a real pipeline where the results are supposed to make sense. These tests are slow and should be marked as such. 
Here the goal is to showcase the pipeline and to make sure there is no drift in future releases`),fs.forEach(t),J.forEach(t),this.h()},h(){f(j,"name","hf:doc:metadata"),f(j,"content",JSON.stringify(nl)),f(g,"id","how-to-add-a-pipeline-to-transformers"),f(g,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(g,"href","#how-to-add-a-pipeline-to-transformers"),f(m,"class","relative group"),f(S,"id","adding-it-to-the-list-of-supported-tasks"),f(S,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(S,"href","#adding-it-to-the-list-of-supported-tasks"),f($,"class","relative group"),f(I,"id","adding-tests"),f(I,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(I,"href","#adding-tests"),f(A,"class","relative group")},m(s,i){e(document.head,j),p(s,ee,i),p(s,m,i),e(m,g),e(g,he),ie(B,he,null),e(m,Bs),e(m,ue),e(ue,Gs),p(s,ms,i),p(s,q,i),e(q,Rs),e(q,fe),e(fe,Ks),e(q,Qs),e(q,me),e(me,Vs),e(q,Ws),p(s,_s,i),p(s,_,i),e(_,Zs),e(_,_e),e(_e,et),e(_,st),e(_,we),e(we,tt),e(_,at),e(_,ye),e(ye,ot),e(_,lt),p(s,ws,i),p(s,h,i),e(h,rt),e(h,ke),e(ke,nt),e(h,it),e(h,ve),e(ve,pt),e(h,dt),e(h,ge),e(ge,ct),e(h,ht),e(h,Ee),e(Ee,ut),e(h,ft),e(h,be),e(be,mt),e(h,_t),p(s,ys,i),ie(G,s,i),p(s,ks,i),p(s,se,i),e(se,wt),p(s,vs,i),p(s,O,i),e(O,je),e(je,yt),e(O,kt),e(O,qe),e(qe,vt),e(O,gt),p(s,gs,i),p(s,E,i),e(E,Ce),e(Ce,Et),e(E,bt),e(E,De),e(De,jt),e(E,qt),e(E,Te),e(Te,Ct),e(E,Dt),p(s,Es,i),p(s,P,i),e(P,Oe),e(Oe,Tt),e(P,Ot),e(P,Pe),e(Pe,Pt),e(P,$t),p(s,bs,i),p(s,b,i),e(b,$e),e($e,At),e(b,xt),e(b,Ae),e(Ae,St),e(b,It),e(b,xe),e(xe,zt),e(b,Nt),p(s,js,i),p(s,u,i),e(u,Lt),e(u,Se),e(Se,Ut),e(u,Ht),e(u,Ie),e(Ie,Mt),e(u,Yt),e(u,ze),e(ze,Xt),e(u,Ft),e(u,Ne),e(Ne,Jt),e(u,Bt),p(s,qs,i),p(s,x,i),e(x,Gt),e(x,Le),e(Le,Rt),e(x,Kt),p(s,Cs,i),ie(R,s,i),p(s,Ds,i),p(s,w,i),e(w,Qt),e(w,Ue),e(Ue,Vt),e(w,Wt),e(w,He),e(He,Zt),e(w,ea),e(w,Me),e(Me,sa),e(w,ta),p(s,Ts,i),ie(K,s,i),p(s,Os,i),p(s,te,i),e(te,aa),p(s,Ps,i),p(s,$,i),e($,S),e(S,Ye),ie(Q,Ye,null),e($,oa),e($,Xe),e(Xe,la),p(s,$s,i),p(s,C,i),e(C,ra),e(C,Fe),e(Fe,na),e(C,ia),e(C,Je),e(Je,pa),e(C,da),p(s,As,i),p(s,A,i),e(A,I),e(I,Be),ie(V,Be,null),e(A,ca),e(A,Ge),e(Ge,ha),p(s,xs,i),p(s,z,i),e(z,ua),e(z,Re),e(Re,fa),e(z,ma),p(s,Ss,i),p(s,y,i),e(y,_a),e(y,Ke),e(Ke,wa),e(y,ya),e(y,Qe),e(Qe,ka),e(y,va),e(y,Ve),e(Ve,ga),e(y,Ea),p(s,Is,i),p(s,D,i),e(D,ba),e(D,We),e(We,ja),e(D,qa),e(D,Ze),e(Ze,Ca),e(D,Da),p(s,zs,i),p(s,N,i),e(N,Ta),e(N,es),e(es,Oa),e(N,Pa),p(s,Ns,i),p(s,k,i),e(k,L),e(L,ss),e(ss,$a),e(L,Aa),e(L,ts),e(ts,xa),e(L,Sa),e(k,Ia),e(k,U),e(U,as),e(as,za),e(U,Na),e(U,os),e(os,La),e(U,Ua),e(k,Ha),e(k,H),e(H,ls),e(ls,Ma),e(H,Ya),e(H,rs),e(rs,Xa),e(H,Fa),e(k,Ja),e(k,M),e(M,ns),e(ns,Ba),e(M,Ga),e(M,is),e(is,Ra),e(M,Ka),Ls=!0},p:ll,i(s){Ls||(pe(B.$$.fragment,s),pe(G.$$.fragment,s),pe(R.$$.fragment,s),pe(K.$$.fragment,s),pe(Q.$$.fragment,s),pe(V.$$.fragment,s),Ls=!0)},o(s){de(B.$$.fragment,s),de(G.$$.fragment,s),de(R.$$.fragment,s),de(K.$$.fragment,s),de(Q.$$.fragment,s),de(V.$$.fragment,s),Ls=!1},d(s){t(j),s&&t(ee),s&&t(m),ce(B),s&&t(ms),s&&t(q),s&&t(_s),s&&t(_),s&&t(ws),s&&t(h),s&&t(ys),ce(G,s),s&&t(ks),s&&t(se),s&&t(vs),s&&t(O),s&&t(gs),s&&t(E),s&&t(Es),s&&t(P),s&&t(bs),s&&t(b),s&&t(js),s&&t(u),s&&t(qs),s&&t(x),s&&t(Cs),ce(R,s),s&&t(Ds
),s&&t(w),s&&t(Ts),ce(K,s),s&&t(Os),s&&t(te),s&&t(Ps),s&&t($),ce(Q),s&&t($s),s&&t(C),s&&t(As),s&&t(A),ce(V),s&&t(xs),s&&t(z),s&&t(Ss),s&&t(y),s&&t(Is),s&&t(D),s&&t(zs),s&&t(N),s&&t(Ns),s&&t(k)}}}const nl={local:"how-to-add-a-pipeline-to-transformers",sections:[{local:"adding-it-to-the-list-of-supported-tasks",title:"Adding it to the list of supported tasks"},{local:"adding-tests",title:"Adding tests"}],title:"How to add a pipeline to \u{1F917} Transformers?"};function il(Js,j,ee){let{fw:m}=j;return Js.$$set=g=>{"fw"in g&&ee(0,m=g.fw)},[m]}class ul extends sl{constructor(j){super();tl(this,j,il,rl,al,{fw:0})}}export{ul as default,nl as metadata};
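/*
 * A minimal sketch of the custom-pipeline pattern this page describes (the four
 * methods `_sanitize_parameters`, `preprocess`, `_forward` and `postprocess`),
 * kept inside a comment so the compiled bundle is unaffected. The class name
 * `MyPipeline`, the `top_k` handling and the classification-style postprocessing
 * shown here are illustrative assumptions, not library-defined behaviour.
 *
 *     from transformers import Pipeline
 *
 *     class MyPipeline(Pipeline):
 *         def _sanitize_parameters(self, **kwargs):
 *             # Return the three kwarg dicts routed to preprocess, _forward and
 *             # postprocess; only forward parameters the caller actually passed.
 *             postprocess_kwargs = {}
 *             if "top_k" in kwargs:
 *                 postprocess_kwargs["top_k"] = kwargs["top_k"]
 *             return {}, {}, postprocess_kwargs
 *
 *         def preprocess(self, inputs):
 *             # Turn the raw user input into something the model can consume.
 *             return self.tokenizer(inputs, return_tensors=self.framework)
 *
 *         def _forward(self, model_inputs):
 *             # Anything that touches the actual model belongs here.
 *             return self.model(**model_inputs)
 *
 *         def postprocess(self, model_outputs, top_k=5):
 *             # A default of 5 keeps `top_k` optional at call time.
 *             probs = model_outputs.logits.softmax(-1)[0]
 *             scores, ids = probs.topk(top_k)
 *             return [
 *                 {"label": self.model.config.id2label[i.item()], "score": s.item()}
 *                 for s, i in zip(scores, ids)
 *             ]
 */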
262
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages/tokenizer_summary.mdx-9d9de8d8.js
import{S as _m,i as zm,s as qm,e as o,k as c,w as f,t as a,W as ko,M as jm,c as l,d as s,m as u,a as r,x as g,h as n,X as Eo,b as h,F as t,g as p,y as b,L as $m,q as v,o as y,B as w}from"../chunks/vendor-4833417e.js";import{Y as _o}from"../chunks/Youtube-27813aed.js";import{I as De}from"../chunks/IconCopyLink-4b81c553.js";import{C as re}from"../chunks/CodeBlock-6a3d1b46.js";import{D as xm}from"../chunks/DocNotebookDropdown-ecff2a90.js";import"../chunks/CopyButton-dacfbfaf.js";function Pm(zo){let V,ht,U,K,ts,Se,qo,ss,jo,Ha,Ce,Ra,ct,$o,Xa,Be,Fa,D,xo,ut,Po,To,mt,Ao,Do,dt,So,Co,ft,Bo,Oo,Ga,J,Mo,gt,No,Wo,bt,Io,Lo,Ka,ie,ve,as,Oe,Uo,ns,Ho,Va,Me,Ro,os,Xo,Ja,Ne,Ya,vt,Fo,Qa,We,Za,S,Go,ls,Ko,Vo,rs,Jo,Yo,is,Qo,Zo,ps,el,tl,en,Ie,tn,C,sl,hs,al,nl,cs,ol,ll,us,rl,il,ms,pl,hl,sn,W,Le,cl,ul,Ue,ml,dl,ds,fl,gl,fs,bl,vl,an,He,nn,Y,yl,gs,wl,kl,yt,El,_l,on,wt,zl,ln,kt,ql,rn,Re,pn,B,jl,bs,$l,xl,vs,Pl,Tl,ys,Al,Dl,ws,Sl,Cl,hn,pe,ye,ks,Xe,Bl,Es,Ol,cn,Fe,un,k,Ml,_s,Nl,Wl,zs,Il,Ll,qs,Ul,Hl,js,Rl,Xl,$s,Fl,Gl,xs,Kl,Vl,Ps,Jl,Yl,Ts,Ql,Zl,mn,Q,er,Et,tr,sr,As,ar,nr,dn,Ge,fn,P,or,Ds,lr,rr,Ss,ir,pr,Cs,hr,cr,Bs,ur,mr,Os,dr,fr,gn,we,gr,_t,br,vr,bn,Ke,vn,T,yr,Ms,wr,kr,zt,Er,_r,Ns,zr,qr,Ws,jr,$r,Is,xr,Pr,yn,qt,Tr,wn,jt,kn,he,ke,Ls,Ve,Ar,Us,Dr,En,q,Sr,Je,Cr,Br,$t,Or,Mr,xt,Nr,Wr,Pt,Ir,Lr,Tt,Ur,Hr,At,Rr,Xr,_n,Dt,Fr,zn,St,Gr,qn,Ye,jn,Ee,Kr,Hs,Vr,Jr,$n,Qe,xn,m,Yr,Rs,Qr,Zr,Xs,ei,ti,Fs,si,ai,Gs,ni,oi,Ks,li,ri,Vs,ii,pi,Js,hi,ci,Ys,ui,mi,Qs,di,fi,Zs,gi,bi,ea,vi,yi,Pn,Ze,Tn,E,wi,ta,ki,Ei,sa,_i,zi,aa,qi,ji,na,$i,xi,oa,Pi,Ti,la,Ai,Di,ra,Si,Ci,ia,Bi,Oi,An,_e,Mi,pa,Ni,Wi,Dn,et,Sn,z,Ii,ha,Li,Ui,ca,Hi,Ri,ua,Xi,Fi,ma,Gi,Ki,da,Vi,Ji,fa,Yi,Qi,ga,Zi,ep,Cn,Z,tp,ba,sp,ap,Ct,np,op,Bn,ce,ze,va,tt,lp,ya,rp,On,H,ip,wa,pp,hp,st,cp,up,Bt,mp,dp,Mn,Ot,Nn,ue,qe,ka,at,fp,Ea,gp,Wn,O,bp,Mt,vp,yp,Nt,wp,kp,Wt,Ep,_p,nt,zp,qp,In,_,jp,_a,$p,xp,za,Pp,Tp,qa,Ap,Dp,ja,Sp,Cp,$a,Bp,Op,xa,Mp,Np,Pa,Wp,Ip,Ta,Lp,Up,Ln,It,Un,me,je,Aa,ot,Hp,Da,Rp,Hn,ee,Xp,lt,Fp,Gp,Lt,Kp,Vp,Rn,$e,Jp,Sa,Yp,Qp,Xn,Ut,Zp,Fn,rt,Gn,I,Ca,eh,th,Ba,sh,ah,Oa,nh,oh,Ma,lh,rh,Kn,L,ih,Vn,ym='<span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><msub><mi>x</mi><mn>1</mn></msub><mo separator="true">,</mo><mo>\u2026</mo><mo separator="true">,</mo><msub><mi>x</mi><mi>N</mi></msub></mrow><annotation encoding="application/x-tex">x_{1}, \\dots, x_{N}</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:0.625em;vertical-align:-0.1944em;"></span><span class="mord"><span class="mord mathnormal">x</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.3011em;"><span style="top:-2.55em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight"><span class="mord mtight">1</span></span></span></span></span><span class="vlist-s">\u200B</span></span><span class="vlist-r"><span class="vlist" style="height:0.15em;"><span></span></span></span></span></span></span><span class="mpunct">,</span><span class="mspace" style="margin-right:0.1667em;"></span><span class="minner">\u2026</span><span class="mspace" style="margin-right:0.1667em;"></span><span class="mpunct">,</span><span class="mspace" style="margin-right:0.1667em;"></span><span class="mord"><span class="mord mathnormal">x</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.3283em;"><span 
style="top:-2.55em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight"><span class="mord mathnormal mtight" style="margin-right:0.10903em;">N</span></span></span></span></span><span class="vlist-s">\u200B</span></span><span class="vlist-r"><span class="vlist" style="height:0.15em;"><span></span></span></span></span></span></span></span></span></span>',Jn,Yn,wm='<span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><msub><mi>x</mi><mi>i</mi></msub></mrow><annotation encoding="application/x-tex">x_{i}</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:0.5806em;vertical-align:-0.15em;"></span><span class="mord"><span class="mord mathnormal">x</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.3117em;"><span style="top:-2.55em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight"><span class="mord mathnormal mtight">i</span></span></span></span></span><span class="vlist-s">\u200B</span></span><span class="vlist-r"><span class="vlist" style="height:0.15em;"><span></span></span></span></span></span></span></span></span></span>',Qn,Zn,km='<span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mi>S</mi><mo stretchy="false">(</mo><msub><mi>x</mi><mi>i</mi></msub><mo stretchy="false">)</mo></mrow><annotation encoding="application/x-tex">S(x_{i})</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:1em;vertical-align:-0.25em;"></span><span class="mord mathnormal" style="margin-right:0.05764em;">S</span><span class="mopen">(</span><span class="mord"><span class="mord mathnormal">x</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.3117em;"><span style="top:-2.55em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight"><span class="mord mathnormal mtight">i</span></span></span></span></span><span class="vlist-s">\u200B</span></span><span class="vlist-r"><span class="vlist" style="height:0.15em;"><span></span></span></span></span></span></span><span class="mclose">)</span></span></span></span>',eo,to,Em=`<span class="katex-display"><span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML" display="block"><semantics><mrow><mi mathvariant="script">L</mi><mo>=</mo><mo>\u2212</mo><munderover><mo>\u2211</mo><mrow><mi>i</mi><mo>=</mo><mn>1</mn></mrow><mi>N</mi></munderover><mi>log</mi><mo>\u2061</mo><mrow><mo fence="true">(</mo><munder><mo>\u2211</mo><mrow><mi>x</mi><mo>\u2208</mo><mi>S</mi><mo stretchy="false">(</mo><msub><mi>x</mi><mi>i</mi></msub><mo stretchy="false">)</mo></mrow></munder><mi>p</mi><mo stretchy="false">(</mo><mi>x</mi><mo stretchy="false">)</mo><mo fence="true">)</mo></mrow></mrow><annotation encoding="application/x-tex">\\mathcal{L} = -\\sum_{i=1}^{N} \\log \\left ( \\sum_{x \\in S(x_{i})} p(x) \\right )</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" 
style="height:0.6833em;"></span><span class="mord mathcal">L</span><span class="mspace" style="margin-right:0.2778em;"></span><span class="mrel">=</span><span class="mspace" style="margin-right:0.2778em;"></span></span><span class="base"><span class="strut" style="height:3.6em;vertical-align:-1.55em;"></span><span class="mord">\u2212</span><span class="mspace" style="margin-right:0.1667em;"></span><span class="mop op-limits"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:1.8283em;"><span style="top:-1.8723em;margin-left:0em;"><span class="pstrut" style="height:3.05em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight"><span class="mord mathnormal mtight">i</span><span class="mrel mtight">=</span><span class="mord mtight">1</span></span></span></span><span style="top:-3.05em;"><span class="pstrut" style="height:3.05em;"></span><span><span class="mop op-symbol large-op">\u2211</span></span></span><span style="top:-4.3em;margin-left:0em;"><span class="pstrut" style="height:3.05em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight"><span class="mord mathnormal mtight" style="margin-right:0.10903em;">N</span></span></span></span></span><span class="vlist-s">\u200B</span></span><span class="vlist-r"><span class="vlist" style="height:1.2777em;"><span></span></span></span></span></span><span class="mspace" style="margin-right:0.1667em;"></span><span class="mop">lo<span style="margin-right:0.01389em;">g</span></span><span class="mspace" style="margin-right:0.1667em;"></span><span class="minner"><span class="mopen"><span class="delimsizing mult"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:2.05em;"><span style="top:-2.25em;"><span class="pstrut" style="height:3.155em;"></span><span class="delimsizinginner delim-size4"><span>\u239D</span></span></span><span style="top:-3.397em;"><span class="pstrut" style="height:3.155em;"></span><span style="height:0.016em;width:0.875em;"><svg xmlns="http://www.w3.org/2000/svg" width='0.875em' height='0.016em' style='width:0.875em' viewBox='0 0 875 16' preserveAspectRatio='xMinYMin'><path d='M291 0 H417 V16 H291z M291 0 H417 V16 H291z'/></svg></span></span><span style="top:-4.05em;"><span class="pstrut" style="height:3.155em;"></span><span class="delimsizinginner delim-size4"><span>\u239B</span></span></span></span><span class="vlist-s">\u200B</span></span><span class="vlist-r"><span class="vlist" style="height:1.55em;"><span></span></span></span></span></span></span><span class="mop op-limits"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:1.05em;"><span style="top:-1.809em;margin-left:0em;"><span class="pstrut" style="height:3.05em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight"><span class="mord mathnormal mtight">x</span><span class="mrel mtight">\u2208</span><span class="mord mathnormal mtight" style="margin-right:0.05764em;">S</span><span class="mopen mtight">(</span><span class="mord mtight"><span class="mord mathnormal mtight">x</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.3281em;"><span style="top:-2.357em;margin-left:0em;margin-right:0.0714em;"><span class="pstrut" style="height:2.5em;"></span><span class="sizing reset-size3 size1 mtight"><span class="mord mtight"><span class="mord mathnormal mtight">i</span></span></span></span></span><span 
class="vlist-s">\u200B</span></span><span class="vlist-r"><span class="vlist" style="height:0.143em;"><span></span></span></span></span></span></span><span class="mclose mtight">)</span></span></span></span><span style="top:-3.05em;"><span class="pstrut" style="height:3.05em;"></span><span><span class="mop op-symbol large-op">\u2211</span></span></span></span><span class="vlist-s">\u200B</span></span><span class="vlist-r"><span class="vlist" style="height:1.516em;"><span></span></span></span></span></span><span class="mspace" style="margin-right:0.1667em;"></span><span class="mord mathnormal">p</span><span class="mopen">(</span><span class="mord mathnormal">x</span><span class="mclose">)</span><span class="mclose"><span class="delimsizing mult"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:2.05em;"><span style="top:-2.25em;"><span class="pstrut" style="height:3.155em;"></span><span class="delimsizinginner delim-size4"><span>\u23A0</span></span></span><span style="top:-3.397em;"><span class="pstrut" style="height:3.155em;"></span><span style="height:0.016em;width:0.875em;"><svg xmlns="http://www.w3.org/2000/svg" width='0.875em' height='0.016em' style='width:0.875em' viewBox='0 0 875 16' preserveAspectRatio='xMinYMin'><path d='M457 0 H583 V16 H457z M457 0 H583 V16 H457z'/></svg></span></span><span style="top:-4.05em;"><span class="pstrut" style="height:3.155em;"></span><span class="delimsizinginner delim-size4"><span>\u239E</span></span></span></span><span class="vlist-s">\u200B</span></span><span class="vlist-r"><span class="vlist" style="height:1.55em;"><span></span></span></span></span></span></span></span></span></span></span></span>`,so,Ht,ao,de,xe,Na,it,ph,Wa,hh,no,R,ch,Ia,uh,mh,Rt,dh,fh,pt,gh,bh,oo,X,vh,Xt,yh,wh,La,kh,Eh,Ua,_h,zh,lo,M,qh,Ft,jh,$h,Gt,xh,Ph,Kt,Th,Ah,Vt,Dh,Sh,ro;return Se=new De({}),Ce=new xm({props:{classNames:"absolute z-10 right-0 top-0",options:[{label:"Mixed",value:"https://colab.research.google.com/github/huggingface/notebooks/blob/master/transformers_doc/tokenizer_summary.ipynb"},{label:"PyTorch",value:"https://colab.research.google.com/github/huggingface/notebooks/blob/master/transformers_doc/pytorch/tokenizer_summary.ipynb"},{label:"TensorFlow",value:"https://colab.research.google.com/github/huggingface/notebooks/blob/master/transformers_doc/tensorflow/tokenizer_summary.ipynb"},{label:"Mixed",value:"https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/transformers_doc/tokenizer_summary.ipynb"},{label:"PyTorch",value:"https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/transformers_doc/pytorch/tokenizer_summary.ipynb"},{label:"TensorFlow",value:"https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/transformers_doc/tensorflow/tokenizer_summary.ipynb"}]}}),Be=new _o({props:{id:"VFp38yj8h3A"}}),Oe=new De({}),Ne=new _o({props:{id:"nhJxYji1aho"}}),We=new re({props:{code:`["Don't", "you", "love", "\u{1F917}", "Transformers?", "We", "sure", "do."]`,highlighted:'<span class="hljs-selector-attr">[<span class="hljs-string">&quot;Don&#x27;t&quot;</span>, <span class="hljs-string">&quot;you&quot;</span>, <span class="hljs-string">&quot;love&quot;</span>, <span class="hljs-string">&quot;\u{1F917}&quot;</span>, <span class="hljs-string">&quot;Transformers?&quot;</span>, <span class="hljs-string">&quot;We&quot;</span>, <span class="hljs-string">&quot;sure&quot;</span>, <span class="hljs-string">&quot;do.&quot;</span>]</span>'}}),Ie=new 
re({props:{code:`["Don", "'", "t", "you", "love", "\u{1F917}", "Transformers", "?", "We", "sure", "do", "."]`,highlighted:'<span class="hljs-selector-attr">[<span class="hljs-string">&quot;Don&quot;</span>, <span class="hljs-string">&quot;&#x27;&quot;</span>, <span class="hljs-string">&quot;t&quot;</span>, <span class="hljs-string">&quot;you&quot;</span>, <span class="hljs-string">&quot;love&quot;</span>, <span class="hljs-string">&quot;\u{1F917}&quot;</span>, <span class="hljs-string">&quot;Transformers&quot;</span>, <span class="hljs-string">&quot;?&quot;</span>, <span class="hljs-string">&quot;We&quot;</span>, <span class="hljs-string">&quot;sure&quot;</span>, <span class="hljs-string">&quot;do&quot;</span>, <span class="hljs-string">&quot;.&quot;</span>]</span>'}}),He=new re({props:{code:`["Do", "n't", "you", "love", "\u{1F917}", "Transformers", "?", "We", "sure", "do", "."]`,highlighted:'<span class="hljs-selector-attr">[<span class="hljs-string">&quot;Do&quot;</span>, <span class="hljs-string">&quot;n&#x27;t&quot;</span>, <span class="hljs-string">&quot;you&quot;</span>, <span class="hljs-string">&quot;love&quot;</span>, <span class="hljs-string">&quot;\u{1F917}&quot;</span>, <span class="hljs-string">&quot;Transformers&quot;</span>, <span class="hljs-string">&quot;?&quot;</span>, <span class="hljs-string">&quot;We&quot;</span>, <span class="hljs-string">&quot;sure&quot;</span>, <span class="hljs-string">&quot;do&quot;</span>, <span class="hljs-string">&quot;.&quot;</span>]</span>'}}),Re=new _o({props:{id:"ssLq_EK2jLE"}}),Xe=new De({}),Fe=new _o({props:{id:"zHvTiHr506c"}}),Ge=new re({props:{code:`from transformers import BertTokenizer tokenizer = BertTokenizer.from_pretrained("bert-base-uncased") tokenizer.tokenize("I have a new GPU!")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BertTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BertTokenizer.from_pretrained(<span class="hljs-string">&quot;bert-base-uncased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.tokenize(<span class="hljs-string">&quot;I have a new GPU!&quot;</span>) [<span class="hljs-string">&quot;i&quot;</span>, <span class="hljs-string">&quot;have&quot;</span>, <span class="hljs-string">&quot;a&quot;</span>, <span class="hljs-string">&quot;new&quot;</span>, <span class="hljs-string">&quot;gp&quot;</span>, <span class="hljs-string">&quot;##u&quot;</span>, <span class="hljs-string">&quot;!&quot;</span>]`}}),Ke=new re({props:{code:`from transformers import XLNetTokenizer tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased") tokenizer.tokenize("Don't you love \u{1F917} Transformers? We sure do.")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> XLNetTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = XLNetTokenizer.from_pretrained(<span class="hljs-string">&quot;xlnet-base-cased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.tokenize(<span class="hljs-string">&quot;Don&#x27;t you love \u{1F917} Transformers? 
We sure do.&quot;</span>) [<span class="hljs-string">&quot;\u2581Don&quot;</span>, <span class="hljs-string">&quot;&#x27;&quot;</span>, <span class="hljs-string">&quot;t&quot;</span>, <span class="hljs-string">&quot;\u2581you&quot;</span>, <span class="hljs-string">&quot;\u2581love&quot;</span>, <span class="hljs-string">&quot;\u2581&quot;</span>, <span class="hljs-string">&quot;\u{1F917}&quot;</span>, <span class="hljs-string">&quot;\u2581&quot;</span>, <span class="hljs-string">&quot;Transform&quot;</span>, <span class="hljs-string">&quot;ers&quot;</span>, <span class="hljs-string">&quot;?&quot;</span>, <span class="hljs-string">&quot;\u2581We&quot;</span>, <span class="hljs-string">&quot;\u2581sure&quot;</span>, <span class="hljs-string">&quot;\u2581do&quot;</span>, <span class="hljs-string">&quot;.&quot;</span>]`}}),Ve=new De({}),Ye=new re({props:{code:'("hug", 10), ("pug", 5), ("pun", 12), ("bun", 4), ("hugs", 5)',highlighted:'(<span class="hljs-string">&quot;hug&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-number">10</span>)<span class="hljs-punctuation">,</span> (<span class="hljs-string">&quot;pug&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-number">5</span>)<span class="hljs-punctuation">,</span> (<span class="hljs-string">&quot;pun&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-number">12</span>)<span class="hljs-punctuation">,</span> (<span class="hljs-string">&quot;bun&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-number">4</span>)<span class="hljs-punctuation">,</span> (<span class="hljs-string">&quot;hugs&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-number">5</span>)'}}),Qe=new re({props:{code:'("h" "u" "g", 10), ("p" "u" "g", 5), ("p" "u" "n", 12), ("b" "u" "n", 4), ("h" "u" "g" "s", 5)',highlighted:'(<span class="hljs-string">&quot;h&quot;</span> <span class="hljs-string">&quot;u&quot;</span> <span class="hljs-string">&quot;g&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-number">10</span>)<span class="hljs-punctuation">,</span> (<span class="hljs-string">&quot;p&quot;</span> <span class="hljs-string">&quot;u&quot;</span> <span class="hljs-string">&quot;g&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-number">5</span>)<span class="hljs-punctuation">,</span> (<span class="hljs-string">&quot;p&quot;</span> <span class="hljs-string">&quot;u&quot;</span> <span class="hljs-string">&quot;n&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-number">12</span>)<span class="hljs-punctuation">,</span> (<span class="hljs-string">&quot;b&quot;</span> <span class="hljs-string">&quot;u&quot;</span> <span class="hljs-string">&quot;n&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-number">4</span>)<span class="hljs-punctuation">,</span> (<span class="hljs-string">&quot;h&quot;</span> <span class="hljs-string">&quot;u&quot;</span> <span class="hljs-string">&quot;g&quot;</span> <span class="hljs-string">&quot;s&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-number">5</span>)'}}),Ze=new re({props:{code:'("h" "ug", 10), ("p" "ug", 5), ("p" "u" "n", 12), ("b" "u" "n", 4), ("h" "ug" "s", 5)',highlighted:'(<span class="hljs-string">&quot;h&quot;</span> <span class="hljs-string">&quot;ug&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-number">10</span>)<span class="hljs-punctuation">,</span> (<span class="hljs-string">&quot;p&quot;</span> <span 
class="hljs-string">&quot;ug&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-number">5</span>)<span class="hljs-punctuation">,</span> (<span class="hljs-string">&quot;p&quot;</span> <span class="hljs-string">&quot;u&quot;</span> <span class="hljs-string">&quot;n&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-number">12</span>)<span class="hljs-punctuation">,</span> (<span class="hljs-string">&quot;b&quot;</span> <span class="hljs-string">&quot;u&quot;</span> <span class="hljs-string">&quot;n&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-number">4</span>)<span class="hljs-punctuation">,</span> (<span class="hljs-string">&quot;h&quot;</span> <span class="hljs-string">&quot;ug&quot;</span> <span class="hljs-string">&quot;s&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-number">5</span>)'}}),et=new re({props:{code:'("hug", 10), ("p" "ug", 5), ("p" "un", 12), ("b" "un", 4), ("hug" "s", 5)',highlighted:'(<span class="hljs-string">&quot;hug&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-number">10</span>)<span class="hljs-punctuation">,</span> (<span class="hljs-string">&quot;p&quot;</span> <span class="hljs-string">&quot;ug&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-number">5</span>)<span class="hljs-punctuation">,</span> (<span class="hljs-string">&quot;p&quot;</span> <span class="hljs-string">&quot;un&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-number">12</span>)<span class="hljs-punctuation">,</span> (<span class="hljs-string">&quot;b&quot;</span> <span class="hljs-string">&quot;un&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-number">4</span>)<span class="hljs-punctuation">,</span> (<span class="hljs-string">&quot;hug&quot;</span> <span class="hljs-string">&quot;s&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-number">5</span>)'}}),tt=new De({}),at=new De({}),ot=new De({}),rt=new re({props:{code:'["b", "g", "h", "n", "p", "s", "u", "ug", "un", "hug"],',highlighted:'<span class="hljs-selector-attr">[<span class="hljs-string">&quot;b&quot;</span>, <span class="hljs-string">&quot;g&quot;</span>, <span class="hljs-string">&quot;h&quot;</span>, <span class="hljs-string">&quot;n&quot;</span>, <span class="hljs-string">&quot;p&quot;</span>, <span class="hljs-string">&quot;s&quot;</span>, <span class="hljs-string">&quot;u&quot;</span>, <span class="hljs-string">&quot;ug&quot;</span>, <span class="hljs-string">&quot;un&quot;</span>, <span class="hljs-string">&quot;hug&quot;</span>]</span>,'}}),it=new De({}),{c(){V=o("meta"),ht=c(),U=o("h1"),K=o("a"),ts=o("span"),f(Se.$$.fragment),qo=c(),ss=o("span"),jo=a("Summary of the tokenizers"),Ha=c(),f(Ce.$$.fragment),Ra=c(),ct=o("p"),$o=a("On this page, we will have a closer look at tokenization."),Xa=c(),f(Be.$$.fragment),Fa=c(),D=o("p"),xo=a("As we saw in "),ut=o("a"),Po=a("the preprocessing tutorial"),To=a(`, tokenizing a text is splitting it into words or subwords, which then are converted to ids through a look-up table. Converting words or subwords to ids is straightforward, so in this summary, we will focus on splitting a text into words or subwords (i.e. tokenizing a text). 
More specifically, we will look at the three main types of tokenizers used in \u{1F917} Transformers: `),mt=o("a"),Ao=a(`Byte-Pair Encoding (BPE)`),Do=a(", "),dt=o("a"),So=a("WordPiece"),Co=a(", and "),ft=o("a"),Bo=a("SentencePiece"),Oo=a(`, and show examples of which tokenizer type is used by which model.`),Ga=c(),J=o("p"),Mo=a(`Note that on each model page, you can look at the documentation of the associated tokenizer to know which tokenizer type was used by the pretrained model. For instance, if we look at `),gt=o("a"),No=a("BertTokenizer"),Wo=a(`, we can see that the model uses `),bt=o("a"),Io=a("WordPiece"),Lo=a("."),Ka=c(),ie=o("h2"),ve=o("a"),as=o("span"),f(Oe.$$.fragment),Uo=c(),ns=o("span"),Ho=a("Introduction"),Va=c(),Me=o("p"),Ro=a(`Splitting a text into smaller chunks is a task that is harder than it looks, and there are multiple ways of doing so. For instance, let\u2019s look at the sentence `),os=o("code"),Xo=a(`"Don't you love \u{1F917} Transformers? We sure do."`),Ja=c(),f(Ne.$$.fragment),Ya=c(),vt=o("p"),Fo=a("A simple way of tokenizing this text is to split it by spaces, which would give:"),Qa=c(),f(We.$$.fragment),Za=c(),S=o("p"),Go=a("This is a sensible first step, but if we look at the tokens "),ls=o("code"),Ko=a('"Transformers?"'),Vo=a(" and "),rs=o("code"),Jo=a('"do."'),Yo=a(`, we notice that the punctuation is attached to the words `),is=o("code"),Qo=a('"Transformer"'),Zo=a(" and "),ps=o("code"),el=a('"do"'),tl=a(`, which is suboptimal. We should take the punctuation into account so that a model does not have to learn a different representation of a word and every possible punctuation symbol that could follow it, which would explode the number of representations the model has to learn. Taking punctuation into account, tokenizing our exemplary text would give:`),en=c(),f(Ie.$$.fragment),tn=c(),C=o("p"),sl=a("Better. However, it is disadvantageous, how the tokenization dealt with the word "),hs=o("code"),al=a(`"Don't"`),nl=a(". "),cs=o("code"),ol=a(`"Don't"`),ll=a(` stands for `),us=o("code"),rl=a('"do not"'),il=a(", so it would be better tokenized as "),ms=o("code"),pl=a(`["Do", "n't"]`),hl=a(`. This is where things start getting complicated, and part of the reason each model has its own tokenizer type. Depending on the rules we apply for tokenizing a text, a different tokenized output is generated for the same text. A pretrained model only performs properly if you feed it an input that was tokenized with the same rules that were used to tokenize its training data.`),sn=c(),W=o("p"),Le=o("a"),cl=a("spaCy"),ul=a(" and "),Ue=o("a"),ml=a("Moses"),dl=a(` are two popular rule-based tokenizers. Applying them on our example, `),ds=o("em"),fl=a("spaCy"),gl=a(" and "),fs=o("em"),bl=a("Moses"),vl=a(" would output something like:"),an=c(),f(He.$$.fragment),nn=c(),Y=o("p"),yl=a(`As can be seen space and punctuation tokenization, as well as rule-based tokenization, is used here. Space and punctuation tokenization and rule-based tokenization are both examples of word tokenization, which is loosely defined as splitting sentences into words. While it\u2019s the most intuitive way to split texts into smaller chunks, this tokenization method can lead to problems for massive text corpora. In this case, space and punctuation tokenization usually generates a very big vocabulary (the set of all unique words and tokens used). 
`),gs=o("em"),wl=a("E.g."),kl=a(", "),yt=o("a"),El=a("Transformer XL"),_l=a(" uses space and punctuation tokenization, resulting in a vocabulary size of 267,735!"),on=c(),wt=o("p"),zl=a(`Such a big vocabulary size forces the model to have an enormous embedding matrix as the input and output layer, which causes both an increased memory and time complexity. In general, transformers models rarely have a vocabulary size greater than 50,000, especially if they are pretrained only on a single language.`),ln=c(),kt=o("p"),ql=a("So if simple space and punctuation tokenization is unsatisfactory, why not simply tokenize on characters?"),rn=c(),f(Re.$$.fragment),pn=c(),B=o("p"),jl=a(`While character tokenization is very simple and would greatly reduce memory and time complexity it makes it much harder for the model to learn meaningful input representations. `),bs=o("em"),$l=a("E.g."),xl=a(` learning a meaningful context-independent representation for the letter `),vs=o("code"),Pl=a('"t"'),Tl=a(` is much harder than learning a context-independent representation for the word `),ys=o("code"),Al=a('"today"'),Dl=a(`. Therefore, character tokenization is often accompanied by a loss of performance. So to get the best of both worlds, transformers models use a hybrid between word-level and character-level tokenization called `),ws=o("strong"),Sl=a("subword"),Cl=a(` tokenization.`),hn=c(),pe=o("h3"),ye=o("a"),ks=o("span"),f(Xe.$$.fragment),Bl=c(),Es=o("span"),Ol=a("Subword tokenization"),cn=c(),f(Fe.$$.fragment),un=c(),k=o("p"),Ml=a(`Subword tokenization algorithms rely on the principle that frequently used words should not be split into smaller subwords, but rare words should be decomposed into meaningful subwords. For instance `),_s=o("code"),Nl=a('"annoyingly"'),Wl=a(` might be considered a rare word and could be decomposed into `),zs=o("code"),Il=a('"annoying"'),Ll=a(" and "),qs=o("code"),Ul=a('"ly"'),Hl=a(". Both "),js=o("code"),Rl=a('"annoying"'),Xl=a(" and "),$s=o("code"),Fl=a('"ly"'),Gl=a(` as stand-alone subwords would appear more frequently while at the same time the meaning of `),xs=o("code"),Kl=a('"annoyingly"'),Vl=a(` is kept by the composite meaning of `),Ps=o("code"),Jl=a('"annoying"'),Yl=a(" and "),Ts=o("code"),Ql=a('"ly"'),Zl=a(`. This is especially useful in agglutinative languages such as Turkish, where you can form (almost) arbitrarily long complex words by stringing together subwords.`),mn=c(),Q=o("p"),er=a(`Subword tokenization allows the model to have a reasonable vocabulary size while being able to learn meaningful context-independent representations. In addition, subword tokenization enables the model to process words it has never seen before, by decomposing them into known subwords. For instance, the `),Et=o("a"),tr=a("BertTokenizer"),sr=a(` tokenizes `),As=o("code"),ar=a('"I have a new GPU!"'),nr=a(" as follows:"),dn=c(),f(Ge.$$.fragment),fn=c(),P=o("p"),or=a("Because we are considering the uncased model, the sentence was lowercased first. We can see that the words "),Ds=o("code"),lr=a('["i", "have", "a", "new"]'),rr=a(" are present in the tokenizer\u2019s vocabulary, but the word "),Ss=o("code"),ir=a('"gpu"'),pr=a(` is not. Consequently, the tokenizer splits `),Cs=o("code"),hr=a('"gpu"'),cr=a(" into known subwords: "),Bs=o("code"),ur=a('["gp" and "##u"]'),mr=a(". 
"),Os=o("code"),dr=a('"##"'),fr=a(` means that the rest of the token should be attached to the previous one, without space (for decoding or reversal of the tokenization).`),gn=c(),we=o("p"),gr=a("As another example, "),_t=o("a"),br=a("XLNetTokenizer"),vr=a(" tokenizes our previously exemplary text as follows:"),bn=c(),f(Ke.$$.fragment),vn=c(),T=o("p"),yr=a("We\u2019ll get back to the meaning of those "),Ms=o("code"),wr=a('"\u2581"'),kr=a(" when we look at "),zt=o("a"),Er=a("SentencePiece"),_r=a(`. As one can see, the rare word `),Ns=o("code"),zr=a('"Transformers"'),qr=a(" has been split into the more frequent subwords "),Ws=o("code"),jr=a('"Transform"'),$r=a(" and "),Is=o("code"),xr=a('"ers"'),Pr=a("."),yn=c(),qt=o("p"),Tr=a(`Let\u2019s now look at how the different subword tokenization algorithms work. Note that all of those tokenization algorithms rely on some form of training which is usually done on the corpus the corresponding model will be trained on.`),wn=c(),jt=o("a"),kn=c(),he=o("h2"),ke=o("a"),Ls=o("span"),f(Ve.$$.fragment),Ar=c(),Us=o("span"),Dr=a("Byte-Pair Encoding (BPE)"),En=c(),q=o("p"),Sr=a("Byte-Pair Encoding (BPE) was introduced in "),Je=o("a"),Cr=a(`Neural Machine Translation of Rare Words with Subword Units (Sennrich et al., 2015)`),Br=a(`. BPE relies on a pre-tokenizer that splits the training data into words. Pretokenization can be as simple as space tokenization, e.g. `),$t=o("a"),Or=a("GPT-2"),Mr=a(", "),xt=o("a"),Nr=a("Roberta"),Wr=a(". More advanced pre-tokenization include rule-based tokenization, e.g. "),Pt=o("a"),Ir=a("XLM"),Lr=a(`, `),Tt=o("a"),Ur=a("FlauBERT"),Hr=a(" which uses Moses for most languages, or "),At=o("a"),Rr=a("GPT"),Xr=a(` which uses Spacy and ftfy, to count the frequency of each word in the training corpus.`),_n=c(),Dt=o("p"),Fr=a(`After pre-tokenization, a set of unique words has been created and the frequency of each word it occurred in the training data has been determined. Next, BPE creates a base vocabulary consisting of all symbols that occur in the set of unique words and learns merge rules to form a new symbol from two symbols of the base vocabulary. It does so until the vocabulary has attained the desired vocabulary size. Note that the desired vocabulary size is a hyperparameter to define before training the tokenizer.`),zn=c(),St=o("p"),Gr=a(`As an example, let\u2019s assume that after pre-tokenization, the following set of words including their frequency has been determined:`),qn=c(),f(Ye.$$.fragment),jn=c(),Ee=o("p"),Kr=a("Consequently, the base vocabulary is "),Hs=o("code"),Vr=a('["b", "g", "h", "n", "p", "s", "u"]'),Jr=a(`. Splitting all words into symbols of the base vocabulary, we obtain:`),$n=c(),f(Qe.$$.fragment),xn=c(),m=o("p"),Yr=a(`BPE then counts the frequency of each possible symbol pair and picks the symbol pair that occurs most frequently. In the example above `),Rs=o("code"),Qr=a('"h"'),Zr=a(" followed by "),Xs=o("code"),ei=a('"u"'),ti=a(" is present "),Fs=o("em"),si=a("10 + 5 = 15"),ai=a(` times (10 times in the 10 occurrences of `),Gs=o("code"),ni=a('"hug"'),oi=a(", 5 times in the 5 occurrences of "),Ks=o("code"),li=a('"hugs"'),ri=a("). However, the most frequent symbol pair is "),Vs=o("code"),ii=a('"u"'),pi=a(` followed by `),Js=o("code"),hi=a('"g"'),ci=a(", occurring "),Ys=o("em"),ui=a("10 + 5 + 5 = 20"),mi=a(` times in total. Thus, the first merge rule the tokenizer learns is to group all `),Qs=o("code"),di=a('"u"'),fi=a(" symbols followed by a "),Zs=o("code"),gi=a('"g"'),bi=a(" symbol together. 
Next, "),ea=o("code"),vi=a('"ug"'),yi=a(` is added to the vocabulary. The set of words then becomes`),Pn=c(),f(Ze.$$.fragment),Tn=c(),E=o("p"),wi=a("BPE then identifies the next most common symbol pair. It\u2019s "),ta=o("code"),ki=a('"u"'),Ei=a(" followed by "),sa=o("code"),_i=a('"n"'),zi=a(", which occurs 16 times. "),aa=o("code"),qi=a('"u"'),ji=a(`, `),na=o("code"),$i=a('"n"'),xi=a(" is merged to "),oa=o("code"),Pi=a('"un"'),Ti=a(" and added to the vocabulary. The next most frequent symbol pair is "),la=o("code"),Ai=a('"h"'),Di=a(` followed by `),ra=o("code"),Si=a('"ug"'),Ci=a(", occurring 15 times. Again the pair is merged and "),ia=o("code"),Bi=a('"hug"'),Oi=a(" can be added to the vocabulary."),An=c(),_e=o("p"),Mi=a("At this stage, the vocabulary is "),pa=o("code"),Ni=a('["b", "g", "h", "n", "p", "s", "u", "ug", "un", "hug"]'),Wi=a(` and our set of unique words is represented as`),Dn=c(),f(et.$$.fragment),Sn=c(),z=o("p"),Ii=a(`Assuming, that the Byte-Pair Encoding training would stop at this point, the learned merge rules would then be applied to new words (as long as those new words do not include symbols that were not in the base vocabulary). For instance, the word `),ha=o("code"),Li=a('"bug"'),Ui=a(" would be tokenized to "),ca=o("code"),Hi=a('["b", "ug"]'),Ri=a(" but "),ua=o("code"),Xi=a('"mug"'),Fi=a(" would be tokenized as "),ma=o("code"),Gi=a('["<unk>", "ug"]'),Ki=a(` since the symbol `),da=o("code"),Vi=a('"m"'),Ji=a(" is not in the base vocabulary. In general, single letters such as "),fa=o("code"),Yi=a('"m"'),Qi=a(` are not replaced by the `),ga=o("code"),Zi=a('"<unk>"'),ep=a(` symbol because the training data usually includes at least one occurrence of each letter, but it is likely to happen for very special characters like emojis.`),Cn=c(),Z=o("p"),tp=a("As mentioned earlier, the vocabulary size, "),ba=o("em"),sp=a("i.e."),ap=a(` the base vocabulary size + the number of merges, is a hyperparameter to choose. For instance `),Ct=o("a"),np=a("GPT"),op=a(` has a vocabulary size of 40,478 since they have 478 base characters and chose to stop training after 40,000 merges.`),Bn=c(),ce=o("h3"),ze=o("a"),va=o("span"),f(tt.$$.fragment),lp=c(),ya=o("span"),rp=a("Byte-level BPE"),On=c(),H=o("p"),ip=a("A base vocabulary that includes all possible base characters can be quite large if "),wa=o("em"),pp=a("e.g."),hp=a(` all unicode characters are considered as base characters. To have a better base vocabulary, `),st=o("a"),cp=a("GPT-2"),up=a(` uses bytes as the base vocabulary, which is a clever trick to force the base vocabulary to be of size 256 while ensuring that every base character is included in the vocabulary. With some additional rules to deal with punctuation, the GPT2\u2019s tokenizer can tokenize every text without the need for the <unk> symbol. `),Bt=o("a"),mp=a("GPT-2"),dp=a(` has a vocabulary size of 50,257, which corresponds to the 256 bytes base tokens, a special end-of-text token and the symbols learned with 50,000 merges.`),Mn=c(),Ot=o("a"),Nn=c(),ue=o("h4"),qe=o("a"),ka=o("span"),f(at.$$.fragment),fp=c(),Ea=o("span"),gp=a("WordPiece"),Wn=c(),O=o("p"),bp=a("WordPiece is the subword tokenization algorithm used for "),Mt=o("a"),vp=a("BERT"),yp=a(", "),Nt=o("a"),wp=a("DistilBERT"),kp=a(", and "),Wt=o("a"),Ep=a("Electra"),_p=a(". The algorithm was outlined in "),nt=o("a"),zp=a(`Japanese and Korean Voice Search (Schuster et al., 2012)`),qp=a(` and is very similar to BPE. 
WordPiece first initializes the vocabulary to include every character present in the training data and progressively learns a given number of merge rules. In contrast to BPE, WordPiece does not choose the most frequent symbol pair, but the one that maximizes the likelihood of the training data once added to the vocabulary.`),In=c(),_=o("p"),jp=a(`So what does this mean exactly? Referring to the previous example, maximizing the likelihood of the training data is equivalent to finding the symbol pair, whose probability divided by the probabilities of its first symbol followed by its second symbol is the greatest among all symbol pairs. `),_a=o("em"),$p=a("E.g."),xp=c(),za=o("code"),Pp=a('"u"'),Tp=a(", followed by "),qa=o("code"),Ap=a('"g"'),Dp=a(` would have only been merged if the probability of `),ja=o("code"),Sp=a('"ug"'),Cp=a(" divided by "),$a=o("code"),Bp=a('"u"'),Op=a(", "),xa=o("code"),Mp=a('"g"'),Np=a(` would have been greater than for any other symbol pair. Intuitively, WordPiece is slightly different to BPE in that it evaluates what it `),Pa=o("em"),Wp=a("loses"),Ip=a(` by merging two symbols to ensure it\u2019s `),Ta=o("em"),Lp=a("worth it"),Up=a("."),Ln=c(),It=o("a"),Un=c(),me=o("h4"),je=o("a"),Aa=o("span"),f(ot.$$.fragment),Hp=c(),Da=o("span"),Rp=a("Unigram"),Hn=c(),ee=o("p"),Xp=a("Unigram is a subword tokenization algorithm introduced in "),lt=o("a"),Fp=a(`Subword Regularization: Improving Neural Network Translation Models with Multiple Subword Candidates (Kudo, 2018)`),Gp=a(`. In contrast to BPE or WordPiece, Unigram initializes its base vocabulary to a large number of symbols and progressively trims down each symbol to obtain a smaller vocabulary. The base vocabulary could for instance correspond to all pre-tokenized words and the most common substrings. Unigram is not used directly for any of the models in the transformers, but it\u2019s used in conjunction with `),Lt=o("a"),Kp=a("SentencePiece"),Vp=a("."),Rn=c(),$e=o("p"),Jp=a(`At each training step, the Unigram algorithm defines a loss (often defined as the log-likelihood) over the training data given the current vocabulary and a unigram language model. Then, for each symbol in the vocabulary, the algorithm computes how much the overall loss would increase if the symbol was to be removed from the vocabulary. Unigram then removes p (with p usually being 10% or 20%) percent of the symbols whose loss increase is the lowest, `),Sa=o("em"),Yp=a("i.e."),Qp=a(` those symbols that least affect the overall loss over the training data. This process is repeated until the vocabulary has reached the desired size. The Unigram algorithm always keeps the base characters so that any word can be tokenized.`),Xn=c(),Ut=o("p"),Zp=a(`Because Unigram is not based on merge rules (in contrast to BPE and WordPiece), the algorithm has several ways of tokenizing new text after training. As an example, if a trained Unigram tokenizer exhibits the vocabulary:`),Fn=c(),f(rt.$$.fragment),Gn=c(),I=o("p"),Ca=o("code"),eh=a('"hugs"'),th=a(" could be tokenized both as "),Ba=o("code"),sh=a('["hug", "s"]'),ah=a(", "),Oa=o("code"),nh=a('["h", "ug", "s"]'),oh=a(" or "),Ma=o("code"),lh=a('["h", "u", "g", "s"]'),rh=a(`. So which one to choose? Unigram saves the probability of each token in the training corpus on top of saving the vocabulary so that the probability of each possible tokenization can be computed after training. 
The algorithm simply picks the most likely tokenization in practice, but also offers the possibility to sample a possible tokenization according to their probabilities.`),Kn=c(),L=o("p"),ih=a(`Those probabilities are defined by the loss the tokenizer is trained on. Assuming that the training data consists of the words `),Vn=new ko,Jn=a(" and that the set of all possible tokenizations for a word "),Yn=new ko,Qn=a(` is defined as `),Zn=new ko,eo=a(`, then the overall loss is defined as `),to=new ko,so=c(),Ht=o("a"),ao=c(),de=o("h4"),xe=o("a"),Na=o("span"),f(it.$$.fragment),ph=c(),Wa=o("span"),hh=a("SentencePiece"),no=c(),R=o("p"),ch=a(`All tokenization algorithms described so far have the same problem: It is assumed that the input text uses spaces to separate words. However, not all languages use spaces to separate words. One possible solution is to use language specific pre-tokenizers, `),Ia=o("em"),uh=a("e.g."),mh=c(),Rt=o("a"),dh=a("XLM"),fh=a(` uses a specific Chinese, Japanese, and Thai pre-tokenizer). To solve this problem more generally, `),pt=o("a"),gh=a(`SentencePiece: A simple and language independent subword tokenizer and detokenizer for Neural Text Processing (Kudo et al., 2018)`),bh=a(` treats the input as a raw input stream, thus including the space in the set of characters to use. It then uses the BPE or unigram algorithm to construct the appropriate vocabulary.`),oo=c(),X=o("p"),vh=a("The "),Xt=o("a"),yh=a("XLNetTokenizer"),wh=a(` uses SentencePiece for example, which is also why in the example earlier the `),La=o("code"),kh=a('"\u2581"'),Eh=a(` character was included in the vocabulary. Decoding with SentencePiece is very easy since all tokens can just be concatenated and `),Ua=o("code"),_h=a('"\u2581"'),zh=a(" is replaced by a space."),lo=c(),M=o("p"),qh=a(`All transformers models in the library that use SentencePiece use it in combination with unigram. Examples of models using SentencePiece are `),Ft=o("a"),jh=a("ALBERT"),$h=a(", "),Gt=o("a"),xh=a("XLNet"),Ph=a(", "),Kt=o("a"),Th=a("Marian"),Ah=a(", and "),Vt=o("a"),Dh=a("T5"),Sh=a("."),this.h()},l(e){const i=jm('[data-svelte="svelte-1phssyn"]',document.head);V=l(i,"META",{name:!0,content:!0}),i.forEach(s),ht=u(e),U=l(e,"H1",{class:!0});var io=r(U);K=l(io,"A",{id:!0,class:!0,href:!0});var Bh=r(K);ts=l(Bh,"SPAN",{});var Oh=r(ts);g(Se.$$.fragment,Oh),Oh.forEach(s),Bh.forEach(s),qo=u(io),ss=l(io,"SPAN",{});var Mh=r(ss);jo=n(Mh,"Summary of the tokenizers"),Mh.forEach(s),io.forEach(s),Ha=u(e),g(Ce.$$.fragment,e),Ra=u(e),ct=l(e,"P",{});var Nh=r(ct);$o=n(Nh,"On this page, we will have a closer look at tokenization."),Nh.forEach(s),Xa=u(e),g(Be.$$.fragment,e),Fa=u(e),D=l(e,"P",{});var te=r(D);xo=n(te,"As we saw in "),ut=l(te,"A",{href:!0});var Wh=r(ut);Po=n(Wh,"the preprocessing tutorial"),Wh.forEach(s),To=n(te,`, tokenizing a text is splitting it into words or subwords, which then are converted to ids through a look-up table. Converting words or subwords to ids is straightforward, so in this summary, we will focus on splitting a text into words or subwords (i.e. tokenizing a text). 
More specifically, we will look at the three main types of tokenizers used in \u{1F917} Transformers: `),mt=l(te,"A",{href:!0});var Ih=r(mt);Ao=n(Ih,`Byte-Pair Encoding (BPE)`),Ih.forEach(s),Do=n(te,", "),dt=l(te,"A",{href:!0});var Lh=r(dt);So=n(Lh,"WordPiece"),Lh.forEach(s),Co=n(te,", and "),ft=l(te,"A",{href:!0});var Uh=r(ft);Bo=n(Uh,"SentencePiece"),Uh.forEach(s),Oo=n(te,`, and show examples of which tokenizer type is used by which model.`),te.forEach(s),Ga=u(e),J=l(e,"P",{});var Jt=r(J);Mo=n(Jt,`Note that on each model page, you can look at the documentation of the associated tokenizer to know which tokenizer type was used by the pretrained model. For instance, if we look at `),gt=l(Jt,"A",{href:!0});var Hh=r(gt);No=n(Hh,"BertTokenizer"),Hh.forEach(s),Wo=n(Jt,`, we can see that the model uses `),bt=l(Jt,"A",{href:!0});var Rh=r(bt);Io=n(Rh,"WordPiece"),Rh.forEach(s),Lo=n(Jt,"."),Jt.forEach(s),Ka=u(e),ie=l(e,"H2",{class:!0});var po=r(ie);ve=l(po,"A",{id:!0,class:!0,href:!0});var Xh=r(ve);as=l(Xh,"SPAN",{});var Fh=r(as);g(Oe.$$.fragment,Fh),Fh.forEach(s),Xh.forEach(s),Uo=u(po),ns=l(po,"SPAN",{});var Gh=r(ns);Ho=n(Gh,"Introduction"),Gh.forEach(s),po.forEach(s),Va=u(e),Me=l(e,"P",{});var Ch=r(Me);Ro=n(Ch,`Splitting a text into smaller chunks is a task that is harder than it looks, and there are multiple ways of doing so. For instance, let\u2019s look at the sentence `),os=l(Ch,"CODE",{});var Kh=r(os);Xo=n(Kh,`"Don't you love \u{1F917} Transformers? We sure do."`),Kh.forEach(s),Ch.forEach(s),Ja=u(e),g(Ne.$$.fragment,e),Ya=u(e),vt=l(e,"P",{});var Vh=r(vt);Fo=n(Vh,"A simple way of tokenizing this text is to split it by spaces, which would give:"),Vh.forEach(s),Qa=u(e),g(We.$$.fragment,e),Za=u(e),S=l(e,"P",{});var se=r(S);Go=n(se,"This is a sensible first step, but if we look at the tokens "),ls=l(se,"CODE",{});var Jh=r(ls);Ko=n(Jh,'"Transformers?"'),Jh.forEach(s),Vo=n(se," and "),rs=l(se,"CODE",{});var Yh=r(rs);Jo=n(Yh,'"do."'),Yh.forEach(s),Yo=n(se,`, we notice that the punctuation is attached to the words `),is=l(se,"CODE",{});var Qh=r(is);Qo=n(Qh,'"Transformer"'),Qh.forEach(s),Zo=n(se," and "),ps=l(se,"CODE",{});var Zh=r(ps);el=n(Zh,'"do"'),Zh.forEach(s),tl=n(se,`, which is suboptimal. We should take the punctuation into account so that a model does not have to learn a different representation of a word and every possible punctuation symbol that could follow it, which would explode the number of representations the model has to learn. Taking punctuation into account, tokenizing our exemplary text would give:`),se.forEach(s),en=u(e),g(Ie.$$.fragment,e),tn=u(e),C=l(e,"P",{});var ae=r(C);sl=n(ae,"Better. However, it is disadvantageous, how the tokenization dealt with the word "),hs=l(ae,"CODE",{});var ec=r(hs);al=n(ec,`"Don't"`),ec.forEach(s),nl=n(ae,". "),cs=l(ae,"CODE",{});var tc=r(cs);ol=n(tc,`"Don't"`),tc.forEach(s),ll=n(ae,` stands for `),us=l(ae,"CODE",{});var sc=r(us);rl=n(sc,'"do not"'),sc.forEach(s),il=n(ae,", so it would be better tokenized as "),ms=l(ae,"CODE",{});var ac=r(ms);pl=n(ac,`["Do", "n't"]`),ac.forEach(s),hl=n(ae,`. This is where things start getting complicated, and part of the reason each model has its own tokenizer type. Depending on the rules we apply for tokenizing a text, a different tokenized output is generated for the same text. 
A pretrained model only performs properly if you feed it an input that was tokenized with the same rules that were used to tokenize its training data.`),ae.forEach(s),sn=u(e),W=l(e,"P",{});var fe=r(W);Le=l(fe,"A",{href:!0,rel:!0});var nc=r(Le);cl=n(nc,"spaCy"),nc.forEach(s),ul=n(fe," and "),Ue=l(fe,"A",{href:!0,rel:!0});var oc=r(Ue);ml=n(oc,"Moses"),oc.forEach(s),dl=n(fe,` are two popular rule-based tokenizers. Applying them on our example, `),ds=l(fe,"EM",{});var lc=r(ds);fl=n(lc,"spaCy"),lc.forEach(s),gl=n(fe," and "),fs=l(fe,"EM",{});var rc=r(fs);bl=n(rc,"Moses"),rc.forEach(s),vl=n(fe," would output something like:"),fe.forEach(s),an=u(e),g(He.$$.fragment,e),nn=u(e),Y=l(e,"P",{});var Yt=r(Y);yl=n(Yt,`As can be seen space and punctuation tokenization, as well as rule-based tokenization, is used here. Space and punctuation tokenization and rule-based tokenization are both examples of word tokenization, which is loosely defined as splitting sentences into words. While it\u2019s the most intuitive way to split texts into smaller chunks, this tokenization method can lead to problems for massive text corpora. In this case, space and punctuation tokenization usually generates a very big vocabulary (the set of all unique words and tokens used). `),gs=l(Yt,"EM",{});var ic=r(gs);wl=n(ic,"E.g."),ic.forEach(s),kl=n(Yt,", "),yt=l(Yt,"A",{href:!0});var pc=r(yt);El=n(pc,"Transformer XL"),pc.forEach(s),_l=n(Yt," uses space and punctuation tokenization, resulting in a vocabulary size of 267,735!"),Yt.forEach(s),on=u(e),wt=l(e,"P",{});var hc=r(wt);zl=n(hc,`Such a big vocabulary size forces the model to have an enormous embedding matrix as the input and output layer, which causes both an increased memory and time complexity. In general, transformers models rarely have a vocabulary size greater than 50,000, especially if they are pretrained only on a single language.`),hc.forEach(s),ln=u(e),kt=l(e,"P",{});var cc=r(kt);ql=n(cc,"So if simple space and punctuation tokenization is unsatisfactory, why not simply tokenize on characters?"),cc.forEach(s),rn=u(e),g(Re.$$.fragment,e),pn=u(e),B=l(e,"P",{});var ne=r(B);jl=n(ne,`While character tokenization is very simple and would greatly reduce memory and time complexity it makes it much harder for the model to learn meaningful input representations. `),bs=l(ne,"EM",{});var uc=r(bs);$l=n(uc,"E.g."),uc.forEach(s),xl=n(ne,` learning a meaningful context-independent representation for the letter `),vs=l(ne,"CODE",{});var mc=r(vs);Pl=n(mc,'"t"'),mc.forEach(s),Tl=n(ne,` is much harder than learning a context-independent representation for the word `),ys=l(ne,"CODE",{});var dc=r(ys);Al=n(dc,'"today"'),dc.forEach(s),Dl=n(ne,`. Therefore, character tokenization is often accompanied by a loss of performance. So to get the best of both worlds, transformers models use a hybrid between word-level and character-level tokenization called `),ws=l(ne,"STRONG",{});var fc=r(ws);Sl=n(fc,"subword"),fc.forEach(s),Cl=n(ne,` tokenization.`),ne.forEach(s),hn=u(e),pe=l(e,"H3",{class:!0});var ho=r(pe);ye=l(ho,"A",{id:!0,class:!0,href:!0});var gc=r(ye);ks=l(gc,"SPAN",{});var bc=r(ks);g(Xe.$$.fragment,bc),bc.forEach(s),gc.forEach(s),Bl=u(ho),Es=l(ho,"SPAN",{});var vc=r(Es);Ol=n(vc,"Subword tokenization"),vc.forEach(s),ho.forEach(s),cn=u(e),g(Fe.$$.fragment,e),un=u(e),k=l(e,"P",{});var j=r(k);Ml=n(j,`Subword tokenization algorithms rely on the principle that frequently used words should not be split into smaller subwords, but rare words should be decomposed into meaningful subwords. 
For instance `),_s=l(j,"CODE",{});var yc=r(_s);Nl=n(yc,'"annoyingly"'),yc.forEach(s),Wl=n(j,` might be considered a rare word and could be decomposed into `),zs=l(j,"CODE",{});var wc=r(zs);Il=n(wc,'"annoying"'),wc.forEach(s),Ll=n(j," and "),qs=l(j,"CODE",{});var kc=r(qs);Ul=n(kc,'"ly"'),kc.forEach(s),Hl=n(j,". Both "),js=l(j,"CODE",{});var Ec=r(js);Rl=n(Ec,'"annoying"'),Ec.forEach(s),Xl=n(j," and "),$s=l(j,"CODE",{});var _c=r($s);Fl=n(_c,'"ly"'),_c.forEach(s),Gl=n(j,` as stand-alone subwords would appear more frequently while at the same time the meaning of `),xs=l(j,"CODE",{});var zc=r(xs);Kl=n(zc,'"annoyingly"'),zc.forEach(s),Vl=n(j,` is kept by the composite meaning of `),Ps=l(j,"CODE",{});var qc=r(Ps);Jl=n(qc,'"annoying"'),qc.forEach(s),Yl=n(j," and "),Ts=l(j,"CODE",{});var jc=r(Ts);Ql=n(jc,'"ly"'),jc.forEach(s),Zl=n(j,`. This is especially useful in agglutinative languages such as Turkish, where you can form (almost) arbitrarily long complex words by stringing together subwords.`),j.forEach(s),mn=u(e),Q=l(e,"P",{});var Qt=r(Q);er=n(Qt,`Subword tokenization allows the model to have a reasonable vocabulary size while being able to learn meaningful context-independent representations. In addition, subword tokenization enables the model to process words it has never seen before, by decomposing them into known subwords. For instance, the `),Et=l(Qt,"A",{href:!0});var $c=r(Et);tr=n($c,"BertTokenizer"),$c.forEach(s),sr=n(Qt,` tokenizes `),As=l(Qt,"CODE",{});var xc=r(As);ar=n(xc,'"I have a new GPU!"'),xc.forEach(s),nr=n(Qt," as follows:"),Qt.forEach(s),dn=u(e),g(Ge.$$.fragment,e),fn=u(e),P=l(e,"P",{});var F=r(P);or=n(F,"Because we are considering the uncased model, the sentence was lowercased first. We can see that the words "),Ds=l(F,"CODE",{});var Pc=r(Ds);lr=n(Pc,'["i", "have", "a", "new"]'),Pc.forEach(s),rr=n(F," are present in the tokenizer\u2019s vocabulary, but the word "),Ss=l(F,"CODE",{});var Tc=r(Ss);ir=n(Tc,'"gpu"'),Tc.forEach(s),pr=n(F,` is not. Consequently, the tokenizer splits `),Cs=l(F,"CODE",{});var Ac=r(Cs);hr=n(Ac,'"gpu"'),Ac.forEach(s),cr=n(F," into known subwords: "),Bs=l(F,"CODE",{});var Dc=r(Bs);ur=n(Dc,'["gp" and "##u"]'),Dc.forEach(s),mr=n(F,". "),Os=l(F,"CODE",{});var Sc=r(Os);dr=n(Sc,'"##"'),Sc.forEach(s),fr=n(F,` means that the rest of the token should be attached to the previous one, without space (for decoding or reversal of the tokenization).`),F.forEach(s),gn=u(e),we=l(e,"P",{});var co=r(we);gr=n(co,"As another example, "),_t=l(co,"A",{href:!0});var Cc=r(_t);br=n(Cc,"XLNetTokenizer"),Cc.forEach(s),vr=n(co," tokenizes our previously exemplary text as follows:"),co.forEach(s),bn=u(e),g(Ke.$$.fragment,e),vn=u(e),T=l(e,"P",{});var G=r(T);yr=n(G,"We\u2019ll get back to the meaning of those "),Ms=l(G,"CODE",{});var Bc=r(Ms);wr=n(Bc,'"\u2581"'),Bc.forEach(s),kr=n(G," when we look at "),zt=l(G,"A",{href:!0});var Oc=r(zt);Er=n(Oc,"SentencePiece"),Oc.forEach(s),_r=n(G,`. As one can see, the rare word `),Ns=l(G,"CODE",{});var Mc=r(Ns);zr=n(Mc,'"Transformers"'),Mc.forEach(s),qr=n(G," has been split into the more frequent subwords "),Ws=l(G,"CODE",{});var Nc=r(Ws);jr=n(Nc,'"Transform"'),Nc.forEach(s),$r=n(G," and "),Is=l(G,"CODE",{});var Wc=r(Is);xr=n(Wc,'"ers"'),Wc.forEach(s),Pr=n(G,"."),G.forEach(s),yn=u(e),qt=l(e,"P",{});var Ic=r(qt);Tr=n(Ic,`Let\u2019s now look at how the different subword tokenization algorithms work. 
Note that all of those tokenization algorithms rely on some form of training which is usually done on the corpus the corresponding model will be trained on.`),Ic.forEach(s),wn=u(e),jt=l(e,"A",{id:!0}),r(jt).forEach(s),kn=u(e),he=l(e,"H2",{class:!0});var uo=r(he);ke=l(uo,"A",{id:!0,class:!0,href:!0});var Lc=r(ke);Ls=l(Lc,"SPAN",{});var Uc=r(Ls);g(Ve.$$.fragment,Uc),Uc.forEach(s),Lc.forEach(s),Ar=u(uo),Us=l(uo,"SPAN",{});var Hc=r(Us);Dr=n(Hc,"Byte-Pair Encoding (BPE)"),Hc.forEach(s),uo.forEach(s),En=u(e),q=l(e,"P",{});var N=r(q);Sr=n(N,"Byte-Pair Encoding (BPE) was introduced in "),Je=l(N,"A",{href:!0,rel:!0});var Rc=r(Je);Cr=n(Rc,`Neural Machine Translation of Rare Words with Subword Units (Sennrich et al., 2015)`),Rc.forEach(s),Br=n(N,`. BPE relies on a pre-tokenizer that splits the training data into words. Pretokenization can be as simple as space tokenization, e.g. `),$t=l(N,"A",{href:!0});var Xc=r($t);Or=n(Xc,"GPT-2"),Xc.forEach(s),Mr=n(N,", "),xt=l(N,"A",{href:!0});var Fc=r(xt);Nr=n(Fc,"Roberta"),Fc.forEach(s),Wr=n(N,". More advanced pre-tokenization include rule-based tokenization, e.g. "),Pt=l(N,"A",{href:!0});var Gc=r(Pt);Ir=n(Gc,"XLM"),Gc.forEach(s),Lr=n(N,`, `),Tt=l(N,"A",{href:!0});var Kc=r(Tt);Ur=n(Kc,"FlauBERT"),Kc.forEach(s),Hr=n(N," which uses Moses for most languages, or "),At=l(N,"A",{href:!0});var Vc=r(At);Rr=n(Vc,"GPT"),Vc.forEach(s),Xr=n(N,` which uses Spacy and ftfy, to count the frequency of each word in the training corpus.`),N.forEach(s),_n=u(e),Dt=l(e,"P",{});var Jc=r(Dt);Fr=n(Jc,`After pre-tokenization, a set of unique words has been created and the frequency of each word it occurred in the training data has been determined. Next, BPE creates a base vocabulary consisting of all symbols that occur in the set of unique words and learns merge rules to form a new symbol from two symbols of the base vocabulary. It does so until the vocabulary has attained the desired vocabulary size. Note that the desired vocabulary size is a hyperparameter to define before training the tokenizer.`),Jc.forEach(s),zn=u(e),St=l(e,"P",{});var Yc=r(St);Gr=n(Yc,`As an example, let\u2019s assume that after pre-tokenization, the following set of words including their frequency has been determined:`),Yc.forEach(s),qn=u(e),g(Ye.$$.fragment,e),jn=u(e),Ee=l(e,"P",{});var mo=r(Ee);Kr=n(mo,"Consequently, the base vocabulary is "),Hs=l(mo,"CODE",{});var Qc=r(Hs);Vr=n(Qc,'["b", "g", "h", "n", "p", "s", "u"]'),Qc.forEach(s),Jr=n(mo,`. Splitting all words into symbols of the base vocabulary, we obtain:`),mo.forEach(s),$n=u(e),g(Qe.$$.fragment,e),xn=u(e),m=l(e,"P",{});var d=r(m);Yr=n(d,`BPE then counts the frequency of each possible symbol pair and picks the symbol pair that occurs most frequently. In the example above `),Rs=l(d,"CODE",{});var Zc=r(Rs);Qr=n(Zc,'"h"'),Zc.forEach(s),Zr=n(d," followed by "),Xs=l(d,"CODE",{});var eu=r(Xs);ei=n(eu,'"u"'),eu.forEach(s),ti=n(d," is present "),Fs=l(d,"EM",{});var tu=r(Fs);si=n(tu,"10 + 5 = 15"),tu.forEach(s),ai=n(d,` times (10 times in the 10 occurrences of `),Gs=l(d,"CODE",{});var su=r(Gs);ni=n(su,'"hug"'),su.forEach(s),oi=n(d,", 5 times in the 5 occurrences of "),Ks=l(d,"CODE",{});var au=r(Ks);li=n(au,'"hugs"'),au.forEach(s),ri=n(d,"). However, the most frequent symbol pair is "),Vs=l(d,"CODE",{});var nu=r(Vs);ii=n(nu,'"u"'),nu.forEach(s),pi=n(d,` followed by `),Js=l(d,"CODE",{});var ou=r(Js);hi=n(ou,'"g"'),ou.forEach(s),ci=n(d,", occurring "),Ys=l(d,"EM",{});var lu=r(Ys);ui=n(lu,"10 + 5 + 5 = 20"),lu.forEach(s),mi=n(d,` times in total. 
Thus, the first merge rule the tokenizer learns is to group all `),Qs=l(d,"CODE",{});var ru=r(Qs);di=n(ru,'"u"'),ru.forEach(s),fi=n(d," symbols followed by a "),Zs=l(d,"CODE",{});var iu=r(Zs);gi=n(iu,'"g"'),iu.forEach(s),bi=n(d," symbol together. Next, "),ea=l(d,"CODE",{});var pu=r(ea);vi=n(pu,'"ug"'),pu.forEach(s),yi=n(d,` is added to the vocabulary. The set of words then becomes`),d.forEach(s),Pn=u(e),g(Ze.$$.fragment,e),Tn=u(e),E=l(e,"P",{});var $=r(E);wi=n($,"BPE then identifies the next most common symbol pair. It\u2019s "),ta=l($,"CODE",{});var hu=r(ta);ki=n(hu,'"u"'),hu.forEach(s),Ei=n($," followed by "),sa=l($,"CODE",{});var cu=r(sa);_i=n(cu,'"n"'),cu.forEach(s),zi=n($,", which occurs 16 times. "),aa=l($,"CODE",{});var uu=r(aa);qi=n(uu,'"u"'),uu.forEach(s),ji=n($,`, `),na=l($,"CODE",{});var mu=r(na);$i=n(mu,'"n"'),mu.forEach(s),xi=n($," is merged to "),oa=l($,"CODE",{});var du=r(oa);Pi=n(du,'"un"'),du.forEach(s),Ti=n($," and added to the vocabulary. The next most frequent symbol pair is "),la=l($,"CODE",{});var fu=r(la);Ai=n(fu,'"h"'),fu.forEach(s),Di=n($,` followed by `),ra=l($,"CODE",{});var gu=r(ra);Si=n(gu,'"ug"'),gu.forEach(s),Ci=n($,", occurring 15 times. Again the pair is merged and "),ia=l($,"CODE",{});var bu=r(ia);Bi=n(bu,'"hug"'),bu.forEach(s),Oi=n($," can be added to the vocabulary."),$.forEach(s),An=u(e),_e=l(e,"P",{});var fo=r(_e);Mi=n(fo,"At this stage, the vocabulary is "),pa=l(fo,"CODE",{});var vu=r(pa);Ni=n(vu,'["b", "g", "h", "n", "p", "s", "u", "ug", "un", "hug"]'),vu.forEach(s),Wi=n(fo,` and our set of unique words is represented as`),fo.forEach(s),Dn=u(e),g(et.$$.fragment,e),Sn=u(e),z=l(e,"P",{});var A=r(z);Ii=n(A,`Assuming, that the Byte-Pair Encoding training would stop at this point, the learned merge rules would then be applied to new words (as long as those new words do not include symbols that were not in the base vocabulary). For instance, the word `),ha=l(A,"CODE",{});var yu=r(ha);Li=n(yu,'"bug"'),yu.forEach(s),Ui=n(A," would be tokenized to "),ca=l(A,"CODE",{});var wu=r(ca);Hi=n(wu,'["b", "ug"]'),wu.forEach(s),Ri=n(A," but "),ua=l(A,"CODE",{});var ku=r(ua);Xi=n(ku,'"mug"'),ku.forEach(s),Fi=n(A," would be tokenized as "),ma=l(A,"CODE",{});var Eu=r(ma);Gi=n(Eu,'["<unk>", "ug"]'),Eu.forEach(s),Ki=n(A,` since the symbol `),da=l(A,"CODE",{});var _u=r(da);Vi=n(_u,'"m"'),_u.forEach(s),Ji=n(A," is not in the base vocabulary. In general, single letters such as "),fa=l(A,"CODE",{});var zu=r(fa);Yi=n(zu,'"m"'),zu.forEach(s),Qi=n(A,` are not replaced by the `),ga=l(A,"CODE",{});var qu=r(ga);Zi=n(qu,'"<unk>"'),qu.forEach(s),ep=n(A,` symbol because the training data usually includes at least one occurrence of each letter, but it is likely to happen for very special characters like emojis.`),A.forEach(s),Cn=u(e),Z=l(e,"P",{});var Zt=r(Z);tp=n(Zt,"As mentioned earlier, the vocabulary size, "),ba=l(Zt,"EM",{});var ju=r(ba);sp=n(ju,"i.e."),ju.forEach(s),ap=n(Zt,` the base vocabulary size + the number of merges, is a hyperparameter to choose. 
For instance `),Ct=l(Zt,"A",{href:!0});var $u=r(Ct);np=n($u,"GPT"),$u.forEach(s),op=n(Zt,` has a vocabulary size of 40,478 since they have 478 base characters and chose to stop training after 40,000 merges.`),Zt.forEach(s),Bn=u(e),ce=l(e,"H3",{class:!0});var go=r(ce);ze=l(go,"A",{id:!0,class:!0,href:!0});var xu=r(ze);va=l(xu,"SPAN",{});var Pu=r(va);g(tt.$$.fragment,Pu),Pu.forEach(s),xu.forEach(s),lp=u(go),ya=l(go,"SPAN",{});var Tu=r(ya);rp=n(Tu,"Byte-level BPE"),Tu.forEach(s),go.forEach(s),On=u(e),H=l(e,"P",{});var Pe=r(H);ip=n(Pe,"A base vocabulary that includes all possible base characters can be quite large if "),wa=l(Pe,"EM",{});var Au=r(wa);pp=n(Au,"e.g."),Au.forEach(s),hp=n(Pe,` all unicode characters are considered as base characters. To have a better base vocabulary, `),st=l(Pe,"A",{href:!0,rel:!0});var Du=r(st);cp=n(Du,"GPT-2"),Du.forEach(s),up=n(Pe,` uses bytes as the base vocabulary, which is a clever trick to force the base vocabulary to be of size 256 while ensuring that every base character is included in the vocabulary. With some additional rules to deal with punctuation, the GPT2\u2019s tokenizer can tokenize every text without the need for the <unk> symbol. `),Bt=l(Pe,"A",{href:!0});var Su=r(Bt);mp=n(Su,"GPT-2"),Su.forEach(s),dp=n(Pe,` has a vocabulary size of 50,257, which corresponds to the 256 bytes base tokens, a special end-of-text token and the symbols learned with 50,000 merges.`),Pe.forEach(s),Mn=u(e),Ot=l(e,"A",{id:!0}),r(Ot).forEach(s),Nn=u(e),ue=l(e,"H4",{class:!0});var bo=r(ue);qe=l(bo,"A",{id:!0,class:!0,href:!0});var Cu=r(qe);ka=l(Cu,"SPAN",{});var Bu=r(ka);g(at.$$.fragment,Bu),Bu.forEach(s),Cu.forEach(s),fp=u(bo),Ea=l(bo,"SPAN",{});var Ou=r(Ea);gp=n(Ou,"WordPiece"),Ou.forEach(s),bo.forEach(s),Wn=u(e),O=l(e,"P",{});var oe=r(O);bp=n(oe,"WordPiece is the subword tokenization algorithm used for "),Mt=l(oe,"A",{href:!0});var Mu=r(Mt);vp=n(Mu,"BERT"),Mu.forEach(s),yp=n(oe,", "),Nt=l(oe,"A",{href:!0});var Nu=r(Nt);wp=n(Nu,"DistilBERT"),Nu.forEach(s),kp=n(oe,", and "),Wt=l(oe,"A",{href:!0});var Wu=r(Wt);Ep=n(Wu,"Electra"),Wu.forEach(s),_p=n(oe,". The algorithm was outlined in "),nt=l(oe,"A",{href:!0,rel:!0});var Iu=r(nt);zp=n(Iu,`Japanese and Korean Voice Search (Schuster et al., 2012)`),Iu.forEach(s),qp=n(oe,` and is very similar to BPE. WordPiece first initializes the vocabulary to include every character present in the training data and progressively learns a given number of merge rules. In contrast to BPE, WordPiece does not choose the most frequent symbol pair, but the one that maximizes the likelihood of the training data once added to the vocabulary.`),oe.forEach(s),In=u(e),_=l(e,"P",{});var x=r(_);jp=n(x,`So what does this mean exactly? Referring to the previous example, maximizing the likelihood of the training data is equivalent to finding the symbol pair, whose probability divided by the probabilities of its first symbol followed by its second symbol is the greatest among all symbol pairs. `),_a=l(x,"EM",{});var Lu=r(_a);$p=n(Lu,"E.g."),Lu.forEach(s),xp=u(x),za=l(x,"CODE",{});var Uu=r(za);Pp=n(Uu,'"u"'),Uu.forEach(s),Tp=n(x,", followed by "),qa=l(x,"CODE",{});var Hu=r(qa);Ap=n(Hu,'"g"'),Hu.forEach(s),Dp=n(x,` would have only been merged if the probability of `),ja=l(x,"CODE",{});var Ru=r(ja);Sp=n(Ru,'"ug"'),Ru.forEach(s),Cp=n(x," divided by "),$a=l(x,"CODE",{});var Xu=r($a);Bp=n(Xu,'"u"'),Xu.forEach(s),Op=n(x,", "),xa=l(x,"CODE",{});var Fu=r(xa);Mp=n(Fu,'"g"'),Fu.forEach(s),Np=n(x,` would have been greater than for any other symbol pair. 
Intuitively, WordPiece is slightly different to BPE in that it evaluates what it `),Pa=l(x,"EM",{});var Gu=r(Pa);Wp=n(Gu,"loses"),Gu.forEach(s),Ip=n(x,` by merging two symbols to ensure it\u2019s `),Ta=l(x,"EM",{});var Ku=r(Ta);Lp=n(Ku,"worth it"),Ku.forEach(s),Up=n(x,"."),x.forEach(s),Ln=u(e),It=l(e,"A",{id:!0}),r(It).forEach(s),Un=u(e),me=l(e,"H4",{class:!0});var vo=r(me);je=l(vo,"A",{id:!0,class:!0,href:!0});var Vu=r(je);Aa=l(Vu,"SPAN",{});var Ju=r(Aa);g(ot.$$.fragment,Ju),Ju.forEach(s),Vu.forEach(s),Hp=u(vo),Da=l(vo,"SPAN",{});var Yu=r(Da);Rp=n(Yu,"Unigram"),Yu.forEach(s),vo.forEach(s),Hn=u(e),ee=l(e,"P",{});var es=r(ee);Xp=n(es,"Unigram is a subword tokenization algorithm introduced in "),lt=l(es,"A",{href:!0,rel:!0});var Qu=r(lt);Fp=n(Qu,`Subword Regularization: Improving Neural Network Translation Models with Multiple Subword Candidates (Kudo, 2018)`),Qu.forEach(s),Gp=n(es,`. In contrast to BPE or WordPiece, Unigram initializes its base vocabulary to a large number of symbols and progressively trims down each symbol to obtain a smaller vocabulary. The base vocabulary could for instance correspond to all pre-tokenized words and the most common substrings. Unigram is not used directly for any of the models in the transformers, but it\u2019s used in conjunction with `),Lt=l(es,"A",{href:!0});var Zu=r(Lt);Kp=n(Zu,"SentencePiece"),Zu.forEach(s),Vp=n(es,"."),es.forEach(s),Rn=u(e),$e=l(e,"P",{});var yo=r($e);Jp=n(yo,`At each training step, the Unigram algorithm defines a loss (often defined as the log-likelihood) over the training data given the current vocabulary and a unigram language model. Then, for each symbol in the vocabulary, the algorithm computes how much the overall loss would increase if the symbol was to be removed from the vocabulary. Unigram then removes p (with p usually being 10% or 20%) percent of the symbols whose loss increase is the lowest, `),Sa=l(yo,"EM",{});var em=r(Sa);Yp=n(em,"i.e."),em.forEach(s),Qp=n(yo,` those symbols that least affect the overall loss over the training data. This process is repeated until the vocabulary has reached the desired size. The Unigram algorithm always keeps the base characters so that any word can be tokenized.`),yo.forEach(s),Xn=u(e),Ut=l(e,"P",{});var tm=r(Ut);Zp=n(tm,`Because Unigram is not based on merge rules (in contrast to BPE and WordPiece), the algorithm has several ways of tokenizing new text after training. As an example, if a trained Unigram tokenizer exhibits the vocabulary:`),tm.forEach(s),Fn=u(e),g(rt.$$.fragment,e),Gn=u(e),I=l(e,"P",{});var ge=r(I);Ca=l(ge,"CODE",{});var sm=r(Ca);eh=n(sm,'"hugs"'),sm.forEach(s),th=n(ge," could be tokenized both as "),Ba=l(ge,"CODE",{});var am=r(Ba);sh=n(am,'["hug", "s"]'),am.forEach(s),ah=n(ge,", "),Oa=l(ge,"CODE",{});var nm=r(Oa);nh=n(nm,'["h", "ug", "s"]'),nm.forEach(s),oh=n(ge," or "),Ma=l(ge,"CODE",{});var om=r(Ma);lh=n(om,'["h", "u", "g", "s"]'),om.forEach(s),rh=n(ge,`. So which one to choose? Unigram saves the probability of each token in the training corpus on top of saving the vocabulary so that the probability of each possible tokenization can be computed after training. The algorithm simply picks the most likely tokenization in practice, but also offers the possibility to sample a possible tokenization according to their probabilities.`),ge.forEach(s),Kn=u(e),L=l(e,"P",{});var be=r(L);ih=n(be,`Those probabilities are defined by the loss the tokenizer is trained on. 
Assuming that the training data consists of the words `),Vn=Eo(be),Jn=n(be," and that the set of all possible tokenizations for a word "),Yn=Eo(be),Qn=n(be,` is defined as `),Zn=Eo(be),eo=n(be,`, then the overall loss is defined as `),to=Eo(be),be.forEach(s),so=u(e),Ht=l(e,"A",{id:!0}),r(Ht).forEach(s),ao=u(e),de=l(e,"H4",{class:!0});var wo=r(de);xe=l(wo,"A",{id:!0,class:!0,href:!0});var lm=r(xe);Na=l(lm,"SPAN",{});var rm=r(Na);g(it.$$.fragment,rm),rm.forEach(s),lm.forEach(s),ph=u(wo),Wa=l(wo,"SPAN",{});var im=r(Wa);hh=n(im,"SentencePiece"),im.forEach(s),wo.forEach(s),no=u(e),R=l(e,"P",{});var Te=r(R);ch=n(Te,`All tokenization algorithms described so far have the same problem: It is assumed that the input text uses spaces to separate words. However, not all languages use spaces to separate words. One possible solution is to use language specific pre-tokenizers, `),Ia=l(Te,"EM",{});var pm=r(Ia);uh=n(pm,"e.g."),pm.forEach(s),mh=u(Te),Rt=l(Te,"A",{href:!0});var hm=r(Rt);dh=n(hm,"XLM"),hm.forEach(s),fh=n(Te,` uses a specific Chinese, Japanese, and Thai pre-tokenizer). To solve this problem more generally, `),pt=l(Te,"A",{href:!0,rel:!0});var cm=r(pt);gh=n(cm,`SentencePiece: A simple and language independent subword tokenizer and detokenizer for Neural Text Processing (Kudo et al., 2018)`),cm.forEach(s),bh=n(Te,` treats the input as a raw input stream, thus including the space in the set of characters to use. It then uses the BPE or unigram algorithm to construct the appropriate vocabulary.`),Te.forEach(s),oo=u(e),X=l(e,"P",{});var Ae=r(X);vh=n(Ae,"The "),Xt=l(Ae,"A",{href:!0});var um=r(Xt);yh=n(um,"XLNetTokenizer"),um.forEach(s),wh=n(Ae,` uses SentencePiece for example, which is also why in the example earlier the `),La=l(Ae,"CODE",{});var mm=r(La);kh=n(mm,'"\u2581"'),mm.forEach(s),Eh=n(Ae,` character was included in the vocabulary. Decoding with SentencePiece is very easy since all tokens can just be concatenated and `),Ua=l(Ae,"CODE",{});var dm=r(Ua);_h=n(dm,'"\u2581"'),dm.forEach(s),zh=n(Ae," is replaced by a space."),Ae.forEach(s),lo=u(e),M=l(e,"P",{});var le=r(M);qh=n(le,`All transformers models in the library that use SentencePiece use it in combination with unigram. 
Examples of models using SentencePiece are `),Ft=l(le,"A",{href:!0});var fm=r(Ft);jh=n(fm,"ALBERT"),fm.forEach(s),$h=n(le,", "),Gt=l(le,"A",{href:!0});var gm=r(Gt);xh=n(gm,"XLNet"),gm.forEach(s),Ph=n(le,", "),Kt=l(le,"A",{href:!0});var bm=r(Kt);Th=n(bm,"Marian"),bm.forEach(s),Ah=n(le,", and "),Vt=l(le,"A",{href:!0});var vm=r(Vt);Dh=n(vm,"T5"),vm.forEach(s),Sh=n(le,"."),le.forEach(s),this.h()},h(){h(V,"name","hf:doc:metadata"),h(V,"content",JSON.stringify(Tm)),h(K,"id","summary-of-the-tokenizers"),h(K,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(K,"href","#summary-of-the-tokenizers"),h(U,"class","relative group"),h(ut,"href","preprocessing"),h(mt,"href","#byte-pair-encoding"),h(dt,"href","#wordpiece"),h(ft,"href","#sentencepiece"),h(gt,"href","/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertTokenizer"),h(bt,"href","#wordpiece"),h(ve,"id","introduction"),h(ve,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(ve,"href","#introduction"),h(ie,"class","relative group"),h(Le,"href","https://spacy.io/"),h(Le,"rel","nofollow"),h(Ue,"href","http://www.statmt.org/moses/?n=Development.GetStarted"),h(Ue,"rel","nofollow"),h(yt,"href","model_doc/transformerxl"),h(ye,"id","subword-tokenization"),h(ye,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(ye,"href","#subword-tokenization"),h(pe,"class","relative group"),h(Et,"href","/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertTokenizer"),h(_t,"href","/docs/transformers/pr_16143/en/model_doc/xlnet#transformers.XLNetTokenizer"),h(zt,"href","#sentencepiece"),h(jt,"id","byte-pair-encoding"),h(ke,"id","bytepair-encoding-bpe"),h(ke,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(ke,"href","#bytepair-encoding-bpe"),h(he,"class","relative group"),h(Je,"href","https://arxiv.org/abs/1508.07909"),h(Je,"rel","nofollow"),h($t,"href","model_doc/gpt2"),h(xt,"href","model_doc/roberta"),h(Pt,"href","model_doc/xlm"),h(Tt,"href","model_doc/flaubert"),h(At,"href","model_doc/gpt"),h(Ct,"href","model_doc/gpt"),h(ze,"id","bytelevel-bpe"),h(ze,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(ze,"href","#bytelevel-bpe"),h(ce,"class","relative group"),h(st,"href","https://cdn.openai.com/better-language-models/language_models_are_unsupervised_multitask_learners.pdf"),h(st,"rel","nofollow"),h(Bt,"href","model_doc/gpt"),h(Ot,"id","wordpiece"),h(qe,"id","wordpiece"),h(qe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(qe,"href","#wordpiece"),h(ue,"class","relative group"),h(Mt,"href","model_doc/bert"),h(Nt,"href","model_doc/distilbert"),h(Wt,"href","model_doc/electra"),h(nt,"href","https://static.googleusercontent.com/media/research.google.com/ja//pubs/archive/37842.pdf"),h(nt,"rel","nofollow"),h(It,"id","unigram"),h(je,"id","unigram"),h(je,"class","header-link block pr-1.5 text-lg no-hover:hidden 
with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(je,"href","#unigram"),h(me,"class","relative group"),h(lt,"href","https://arxiv.org/pdf/1804.10959.pdf"),h(lt,"rel","nofollow"),h(Lt,"href","#sentencepiece"),Vn.a=Jn,Yn.a=Qn,Zn.a=eo,to.a=null,h(Ht,"id","sentencepiece"),h(xe,"id","sentencepiece"),h(xe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(xe,"href","#sentencepiece"),h(de,"class","relative group"),h(Rt,"href","model_doc/xlm"),h(pt,"href","https://arxiv.org/pdf/1808.06226.pdf"),h(pt,"rel","nofollow"),h(Xt,"href","/docs/transformers/pr_16143/en/model_doc/xlnet#transformers.XLNetTokenizer"),h(Ft,"href","model_doc/albert"),h(Gt,"href","model_doc/xlnet"),h(Kt,"href","model_doc/marian"),h(Vt,"href","model_doc/t5")},m(e,i){t(document.head,V),p(e,ht,i),p(e,U,i),t(U,K),t(K,ts),b(Se,ts,null),t(U,qo),t(U,ss),t(ss,jo),p(e,Ha,i),b(Ce,e,i),p(e,Ra,i),p(e,ct,i),t(ct,$o),p(e,Xa,i),b(Be,e,i),p(e,Fa,i),p(e,D,i),t(D,xo),t(D,ut),t(ut,Po),t(D,To),t(D,mt),t(mt,Ao),t(D,Do),t(D,dt),t(dt,So),t(D,Co),t(D,ft),t(ft,Bo),t(D,Oo),p(e,Ga,i),p(e,J,i),t(J,Mo),t(J,gt),t(gt,No),t(J,Wo),t(J,bt),t(bt,Io),t(J,Lo),p(e,Ka,i),p(e,ie,i),t(ie,ve),t(ve,as),b(Oe,as,null),t(ie,Uo),t(ie,ns),t(ns,Ho),p(e,Va,i),p(e,Me,i),t(Me,Ro),t(Me,os),t(os,Xo),p(e,Ja,i),b(Ne,e,i),p(e,Ya,i),p(e,vt,i),t(vt,Fo),p(e,Qa,i),b(We,e,i),p(e,Za,i),p(e,S,i),t(S,Go),t(S,ls),t(ls,Ko),t(S,Vo),t(S,rs),t(rs,Jo),t(S,Yo),t(S,is),t(is,Qo),t(S,Zo),t(S,ps),t(ps,el),t(S,tl),p(e,en,i),b(Ie,e,i),p(e,tn,i),p(e,C,i),t(C,sl),t(C,hs),t(hs,al),t(C,nl),t(C,cs),t(cs,ol),t(C,ll),t(C,us),t(us,rl),t(C,il),t(C,ms),t(ms,pl),t(C,hl),p(e,sn,i),p(e,W,i),t(W,Le),t(Le,cl),t(W,ul),t(W,Ue),t(Ue,ml),t(W,dl),t(W,ds),t(ds,fl),t(W,gl),t(W,fs),t(fs,bl),t(W,vl),p(e,an,i),b(He,e,i),p(e,nn,i),p(e,Y,i),t(Y,yl),t(Y,gs),t(gs,wl),t(Y,kl),t(Y,yt),t(yt,El),t(Y,_l),p(e,on,i),p(e,wt,i),t(wt,zl),p(e,ln,i),p(e,kt,i),t(kt,ql),p(e,rn,i),b(Re,e,i),p(e,pn,i),p(e,B,i),t(B,jl),t(B,bs),t(bs,$l),t(B,xl),t(B,vs),t(vs,Pl),t(B,Tl),t(B,ys),t(ys,Al),t(B,Dl),t(B,ws),t(ws,Sl),t(B,Cl),p(e,hn,i),p(e,pe,i),t(pe,ye),t(ye,ks),b(Xe,ks,null),t(pe,Bl),t(pe,Es),t(Es,Ol),p(e,cn,i),b(Fe,e,i),p(e,un,i),p(e,k,i),t(k,Ml),t(k,_s),t(_s,Nl),t(k,Wl),t(k,zs),t(zs,Il),t(k,Ll),t(k,qs),t(qs,Ul),t(k,Hl),t(k,js),t(js,Rl),t(k,Xl),t(k,$s),t($s,Fl),t(k,Gl),t(k,xs),t(xs,Kl),t(k,Vl),t(k,Ps),t(Ps,Jl),t(k,Yl),t(k,Ts),t(Ts,Ql),t(k,Zl),p(e,mn,i),p(e,Q,i),t(Q,er),t(Q,Et),t(Et,tr),t(Q,sr),t(Q,As),t(As,ar),t(Q,nr),p(e,dn,i),b(Ge,e,i),p(e,fn,i),p(e,P,i),t(P,or),t(P,Ds),t(Ds,lr),t(P,rr),t(P,Ss),t(Ss,ir),t(P,pr),t(P,Cs),t(Cs,hr),t(P,cr),t(P,Bs),t(Bs,ur),t(P,mr),t(P,Os),t(Os,dr),t(P,fr),p(e,gn,i),p(e,we,i),t(we,gr),t(we,_t),t(_t,br),t(we,vr),p(e,bn,i),b(Ke,e,i),p(e,vn,i),p(e,T,i),t(T,yr),t(T,Ms),t(Ms,wr),t(T,kr),t(T,zt),t(zt,Er),t(T,_r),t(T,Ns),t(Ns,zr),t(T,qr),t(T,Ws),t(Ws,jr),t(T,$r),t(T,Is),t(Is,xr),t(T,Pr),p(e,yn,i),p(e,qt,i),t(qt,Tr),p(e,wn,i),p(e,jt,i),p(e,kn,i),p(e,he,i),t(he,ke),t(ke,Ls),b(Ve,Ls,null),t(he,Ar),t(he,Us),t(Us,Dr),p(e,En,i),p(e,q,i),t(q,Sr),t(q,Je),t(Je,Cr),t(q,Br),t(q,$t),t($t,Or),t(q,Mr),t(q,xt),t(xt,Nr),t(q,Wr),t(q,Pt),t(Pt,Ir),t(q,Lr),t(q,Tt),t(Tt,Ur),t(q,Hr),t(q,At),t(At,Rr),t(q,Xr),p(e,_n,i),p(e,Dt,i),t(Dt,Fr),p(e,zn,i),p(e,St,i),t(St,Gr),p(e,qn,i),b(Ye,e,i),p(e,jn,i),p(e,Ee,i),t(Ee,Kr),t(Ee,Hs),t(Hs,Vr),t(Ee,Jr),p(e,$n,i),b(Qe,e,i),p(e,xn,i),p(e,m,i),t(m,Yr),t(m,Rs),t(Rs,Qr),t(m,Zr),t(m,Xs),t(Xs,ei),t(m,ti),t(m,Fs),t(Fs,si),t(m,ai),t(m,G
s),t(Gs,ni),t(m,oi),t(m,Ks),t(Ks,li),t(m,ri),t(m,Vs),t(Vs,ii),t(m,pi),t(m,Js),t(Js,hi),t(m,ci),t(m,Ys),t(Ys,ui),t(m,mi),t(m,Qs),t(Qs,di),t(m,fi),t(m,Zs),t(Zs,gi),t(m,bi),t(m,ea),t(ea,vi),t(m,yi),p(e,Pn,i),b(Ze,e,i),p(e,Tn,i),p(e,E,i),t(E,wi),t(E,ta),t(ta,ki),t(E,Ei),t(E,sa),t(sa,_i),t(E,zi),t(E,aa),t(aa,qi),t(E,ji),t(E,na),t(na,$i),t(E,xi),t(E,oa),t(oa,Pi),t(E,Ti),t(E,la),t(la,Ai),t(E,Di),t(E,ra),t(ra,Si),t(E,Ci),t(E,ia),t(ia,Bi),t(E,Oi),p(e,An,i),p(e,_e,i),t(_e,Mi),t(_e,pa),t(pa,Ni),t(_e,Wi),p(e,Dn,i),b(et,e,i),p(e,Sn,i),p(e,z,i),t(z,Ii),t(z,ha),t(ha,Li),t(z,Ui),t(z,ca),t(ca,Hi),t(z,Ri),t(z,ua),t(ua,Xi),t(z,Fi),t(z,ma),t(ma,Gi),t(z,Ki),t(z,da),t(da,Vi),t(z,Ji),t(z,fa),t(fa,Yi),t(z,Qi),t(z,ga),t(ga,Zi),t(z,ep),p(e,Cn,i),p(e,Z,i),t(Z,tp),t(Z,ba),t(ba,sp),t(Z,ap),t(Z,Ct),t(Ct,np),t(Z,op),p(e,Bn,i),p(e,ce,i),t(ce,ze),t(ze,va),b(tt,va,null),t(ce,lp),t(ce,ya),t(ya,rp),p(e,On,i),p(e,H,i),t(H,ip),t(H,wa),t(wa,pp),t(H,hp),t(H,st),t(st,cp),t(H,up),t(H,Bt),t(Bt,mp),t(H,dp),p(e,Mn,i),p(e,Ot,i),p(e,Nn,i),p(e,ue,i),t(ue,qe),t(qe,ka),b(at,ka,null),t(ue,fp),t(ue,Ea),t(Ea,gp),p(e,Wn,i),p(e,O,i),t(O,bp),t(O,Mt),t(Mt,vp),t(O,yp),t(O,Nt),t(Nt,wp),t(O,kp),t(O,Wt),t(Wt,Ep),t(O,_p),t(O,nt),t(nt,zp),t(O,qp),p(e,In,i),p(e,_,i),t(_,jp),t(_,_a),t(_a,$p),t(_,xp),t(_,za),t(za,Pp),t(_,Tp),t(_,qa),t(qa,Ap),t(_,Dp),t(_,ja),t(ja,Sp),t(_,Cp),t(_,$a),t($a,Bp),t(_,Op),t(_,xa),t(xa,Mp),t(_,Np),t(_,Pa),t(Pa,Wp),t(_,Ip),t(_,Ta),t(Ta,Lp),t(_,Up),p(e,Ln,i),p(e,It,i),p(e,Un,i),p(e,me,i),t(me,je),t(je,Aa),b(ot,Aa,null),t(me,Hp),t(me,Da),t(Da,Rp),p(e,Hn,i),p(e,ee,i),t(ee,Xp),t(ee,lt),t(lt,Fp),t(ee,Gp),t(ee,Lt),t(Lt,Kp),t(ee,Vp),p(e,Rn,i),p(e,$e,i),t($e,Jp),t($e,Sa),t(Sa,Yp),t($e,Qp),p(e,Xn,i),p(e,Ut,i),t(Ut,Zp),p(e,Fn,i),b(rt,e,i),p(e,Gn,i),p(e,I,i),t(I,Ca),t(Ca,eh),t(I,th),t(I,Ba),t(Ba,sh),t(I,ah),t(I,Oa),t(Oa,nh),t(I,oh),t(I,Ma),t(Ma,lh),t(I,rh),p(e,Kn,i),p(e,L,i),t(L,ih),Vn.m(ym,L),t(L,Jn),Yn.m(wm,L),t(L,Qn),Zn.m(km,L),t(L,eo),to.m(Em,L),p(e,so,i),p(e,Ht,i),p(e,ao,i),p(e,de,i),t(de,xe),t(xe,Na),b(it,Na,null),t(de,ph),t(de,Wa),t(Wa,hh),p(e,no,i),p(e,R,i),t(R,ch),t(R,Ia),t(Ia,uh),t(R,mh),t(R,Rt),t(Rt,dh),t(R,fh),t(R,pt),t(pt,gh),t(R,bh),p(e,oo,i),p(e,X,i),t(X,vh),t(X,Xt),t(Xt,yh),t(X,wh),t(X,La),t(La,kh),t(X,Eh),t(X,Ua),t(Ua,_h),t(X,zh),p(e,lo,i),p(e,M,i),t(M,qh),t(M,Ft),t(Ft,jh),t(M,$h),t(M,Gt),t(Gt,xh),t(M,Ph),t(M,Kt),t(Kt,Th),t(M,Ah),t(M,Vt),t(Vt,Dh),t(M,Sh),ro=!0},p:$m,i(e){ro||(v(Se.$$.fragment,e),v(Ce.$$.fragment,e),v(Be.$$.fragment,e),v(Oe.$$.fragment,e),v(Ne.$$.fragment,e),v(We.$$.fragment,e),v(Ie.$$.fragment,e),v(He.$$.fragment,e),v(Re.$$.fragment,e),v(Xe.$$.fragment,e),v(Fe.$$.fragment,e),v(Ge.$$.fragment,e),v(Ke.$$.fragment,e),v(Ve.$$.fragment,e),v(Ye.$$.fragment,e),v(Qe.$$.fragment,e),v(Ze.$$.fragment,e),v(et.$$.fragment,e),v(tt.$$.fragment,e),v(at.$$.fragment,e),v(ot.$$.fragment,e),v(rt.$$.fragment,e),v(it.$$.fragment,e),ro=!0)},o(e){y(Se.$$.fragment,e),y(Ce.$$.fragment,e),y(Be.$$.fragment,e),y(Oe.$$.fragment,e),y(Ne.$$.fragment,e),y(We.$$.fragment,e),y(Ie.$$.fragment,e),y(He.$$.fragment,e),y(Re.$$.fragment,e),y(Xe.$$.fragment,e),y(Fe.$$.fragment,e),y(Ge.$$.fragment,e),y(Ke.$$.fragment,e),y(Ve.$$.fragment,e),y(Ye.$$.fragment,e),y(Qe.$$.fragment,e),y(Ze.$$.fragment,e),y(et.$$.fragment,e),y(tt.$$.fragment,e),y(at.$$.fragment,e),y(ot.$$.fragment,e),y(rt.$$.fragment,e),y(it.$$.fragment,e),ro=!1},d(e){s(V),e&&s(ht),e&&s(U),w(Se),e&&s(Ha),w(Ce,e),e&&s(Ra),e&&s(ct),e&&s(Xa),w(Be,e),e&&s(Fa),e&&s(D),e&&s(Ga),e&&s(J),e&&s(Ka),e&&s(ie),w(Oe),e&&s(Va),e&&s(Me),e&&s(Ja),w(Ne,e),e&&s(Ya),e&&s(vt),e&&s(Qa),w(We,e),e&&s(Za),e&&s(S),e
&&s(en),w(Ie,e),e&&s(tn),e&&s(C),e&&s(sn),e&&s(W),e&&s(an),w(He,e),e&&s(nn),e&&s(Y),e&&s(on),e&&s(wt),e&&s(ln),e&&s(kt),e&&s(rn),w(Re,e),e&&s(pn),e&&s(B),e&&s(hn),e&&s(pe),w(Xe),e&&s(cn),w(Fe,e),e&&s(un),e&&s(k),e&&s(mn),e&&s(Q),e&&s(dn),w(Ge,e),e&&s(fn),e&&s(P),e&&s(gn),e&&s(we),e&&s(bn),w(Ke,e),e&&s(vn),e&&s(T),e&&s(yn),e&&s(qt),e&&s(wn),e&&s(jt),e&&s(kn),e&&s(he),w(Ve),e&&s(En),e&&s(q),e&&s(_n),e&&s(Dt),e&&s(zn),e&&s(St),e&&s(qn),w(Ye,e),e&&s(jn),e&&s(Ee),e&&s($n),w(Qe,e),e&&s(xn),e&&s(m),e&&s(Pn),w(Ze,e),e&&s(Tn),e&&s(E),e&&s(An),e&&s(_e),e&&s(Dn),w(et,e),e&&s(Sn),e&&s(z),e&&s(Cn),e&&s(Z),e&&s(Bn),e&&s(ce),w(tt),e&&s(On),e&&s(H),e&&s(Mn),e&&s(Ot),e&&s(Nn),e&&s(ue),w(at),e&&s(Wn),e&&s(O),e&&s(In),e&&s(_),e&&s(Ln),e&&s(It),e&&s(Un),e&&s(me),w(ot),e&&s(Hn),e&&s(ee),e&&s(Rn),e&&s($e),e&&s(Xn),e&&s(Ut),e&&s(Fn),w(rt,e),e&&s(Gn),e&&s(I),e&&s(Kn),e&&s(L),e&&s(so),e&&s(Ht),e&&s(ao),e&&s(de),w(it),e&&s(no),e&&s(R),e&&s(oo),e&&s(X),e&&s(lo),e&&s(M)}}}const Tm={local:"summary-of-the-tokenizers",sections:[{local:"introduction",sections:[{local:"subword-tokenization",title:"Subword tokenization"}],title:"Introduction"},{local:"bytepair-encoding-bpe",sections:[{local:"bytelevel-bpe",sections:[{local:"wordpiece",title:"WordPiece"},{local:"unigram",title:"Unigram"},{local:"sentencepiece",title:"SentencePiece"}],title:"Byte-level BPE"}],title:"Byte-Pair Encoding (BPE)"}],title:"Summary of the tokenizers"};function Am(zo,V,ht){let{fw:U}=V;return zo.$$set=K=>{"fw"in K&&ht(0,U=K.fw)},[U]}class Nm extends _m{constructor(V){super();zm(this,V,Am,Pm,qm,{fw:0})}}export{Nm as default,Tm as metadata};
263
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages/troubleshooting.mdx-e0bea728.js
import{S as $s,i as ys,s as bs,e as s,k as m,w as c,t as n,M as Es,c as a,d as t,m as p,a as l,x as v,h as i,b as h,F as r,g as f,y as g,q as w,o as _,B as $}from"../chunks/vendor-4833417e.js";import{T as ks}from"../chunks/Tip-fffd6df1.js";import{Y as _s}from"../chunks/Youtube-27813aed.js";import{I as Re}from"../chunks/IconCopyLink-4b81c553.js";import{C as M}from"../chunks/CodeBlock-6a3d1b46.js";import"../chunks/CopyButton-dacfbfaf.js";function Ts(Ye){let u,T,d,y,j;return{c(){u=s("p"),T=n("Refer to the Performance "),d=s("a"),y=n("guide"),j=n(" for more details about memory-saving techniques."),this.h()},l(b){u=a(b,"P",{});var A=l(u);T=i(A,"Refer to the Performance "),d=a(A,"A",{href:!0});var G=l(d);y=i(G,"guide"),G.forEach(t),j=i(A," for more details about memory-saving techniques."),A.forEach(t),this.h()},h(){h(d,"href","performance")},m(b,A){f(b,u,A),r(u,T),r(u,d),r(d,y),r(u,j)},d(b){b&&t(u)}}}function As(Ye){let u,T,d,y,j,b,A,G,lr,pt,ke,nr,ut,X,dt,Te,E,ir,Z,fr,hr,ee,mr,pr,te,ur,dr,ct,re,vt,P,Je,oe,cr,se,vr,gr,wr,Ke,ae,_r,Ae,$r,yr,gt,L,br,le,Er,kr,wt,C,B,We,ne,Tr,Qe,Ar,_t,je,jr,$t,ie,yt,N,Pr,Pe,Cr,Ur,bt,U,H,Xe,fe,Fr,Ze,Ir,Et,Ce,Sr,kt,he,Tt,Ue,xr,At,O,F,qr,Fe,et,Dr,Mr,Ie,Gr,Lr,Br,I,Nr,Se,tt,Hr,Or,xe,zr,Vr,jt,z,Pt,S,V,rt,me,Rr,ot,Yr,Ct,R,Jr,pe,Kr,Wr,Ut,qe,k,Qr,st,Xr,Zr,ue,at,eo,to,De,ro,oo,Ft,de,It,Me,x,so,lt,ao,lo,Ge,no,io,St,ce,xt,q,Y,nt,ve,fo,it,ho,qt,J,mo,ft,po,uo,Dt,ge,Mt,Le,co,Gt,we,Lt,D,K,ht,_e,vo,mt,go,Bt,Be,wo,Nt,$e,Ht,Ne,_o,Ot,ye,zt,He,$o,Vt,be,Rt;return b=new Re({}),X=new _s({props:{id:"S2EEG3JIt2A"}}),re=new _s({props:{id:"_PAli-V4wj0"}}),ne=new Re({}),ie=new M({props:{code:`ValueError: Connection error, and we cannot find the requested files in the cached path. Please try again or make sure your Internet connection is on.`,highlighted:`ValueError: Connection error, <span class="hljs-built_in">and</span> we cannot <span class="hljs-keyword">find</span> the requested <span class="hljs-keyword">files</span> in the cached path. Please <span class="hljs-keyword">try</span> again <span class="hljs-built_in">or</span> <span class="hljs-keyword">make</span> sure your Internet connection <span class="hljs-keyword">is</span> <span class="hljs-keyword">on</span>.`}}),fe=new Re({}),he=new M({props:{code:"CUDA out of memory. Tried to allocate 256.00 MiB (GPU 0; 11.17 GiB total capacity; 9.70 GiB already allocated; 179.81 MiB free; 9.85 GiB reserved in total by PyTorch)",highlighted:'<span class="hljs-attribute">CUDA</span> out of memory. 
Tried to allocate <span class="hljs-number">256</span>.<span class="hljs-number">00</span> MiB (GPU <span class="hljs-number">0</span>; <span class="hljs-number">11</span>.<span class="hljs-number">17</span> GiB total capacity; <span class="hljs-number">9</span>.<span class="hljs-number">70</span> GiB already allocated; <span class="hljs-number">179</span>.<span class="hljs-number">81</span> MiB free; <span class="hljs-number">9</span>.<span class="hljs-number">85</span> GiB reserved in total by PyTorch)'}}),z=new ks({props:{$$slots:{default:[Ts]},$$scope:{ctx:Ye}}}),me=new Re({}),de=new M({props:{code:`from transformers import TFPreTrainedModel from tensorflow import keras model.save_weights("some_folder/tf_model.h5") model = TFPreTrainedModel.from_pretrained("some_folder")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TFPreTrainedModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> tensorflow <span class="hljs-keyword">import</span> keras <span class="hljs-meta">&gt;&gt;&gt; </span>model.save_weights(<span class="hljs-string">&quot;some_folder/tf_model.h5&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFPreTrainedModel.from_pretrained(<span class="hljs-string">&quot;some_folder&quot;</span>)`}}),ce=new M({props:{code:`from transformers import TFPreTrainedModel model.save_pretrained("path_to/model") model = TFPreTrainedModel.from_pretrained("path_to/model")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TFPreTrainedModel <span class="hljs-meta">&gt;&gt;&gt; </span>model.save_pretrained(<span class="hljs-string">&quot;path_to/model&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFPreTrainedModel.from_pretrained(<span class="hljs-string">&quot;path_to/model&quot;</span>)`}}),ve=new Re({}),ge=new M({props:{code:"ImportError: cannot import name 'ImageGPTFeatureExtractor' from 'transformers' (unknown location)",highlighted:'ImportError: cannot <span class="hljs-keyword">import</span> <span class="hljs-type">name</span> <span class="hljs-string">&#x27;ImageGPTFeatureExtractor&#x27;</span> <span class="hljs-keyword">from</span> <span class="hljs-string">&#x27;transformers&#x27;</span> (<span class="hljs-type">unknown</span> <span class="hljs-keyword">location</span>)'}}),we=new M({props:{code:"pip install transformers --upgrade",highlighted:"pip install transformers --upgrade"}}),_e=new Re({}),$e=new M({props:{code:"RuntimeError: CUDA error: device-side assert triggered",highlighted:'RuntimeError: CUDA <span class="hljs-literal">error</span>: device-<span class="hljs-literal">side</span> <span class="hljs-keyword">assert</span> triggered'}}),ye=new M({props:{code:`import os os.environ["CUDA_VISIBLE_DEVICES"] = ""`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> os <span class="hljs-meta">&gt;&gt;&gt; </span>os.environ[<span class="hljs-string">&quot;CUDA_VISIBLE_DEVICES&quot;</span>] = <span class="hljs-string">&quot;&quot;</span>`}}),be=new M({props:{code:`import os os.environ["CUDA_LAUNCH_BLOCKING"] = "1"`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> os <span class="hljs-meta">&gt;&gt;&gt; </span>os.environ[<span class="hljs-string">&quot;CUDA_LAUNCH_BLOCKING&quot;</span>] = <span 
class="hljs-string">&quot;1&quot;</span>`}}),{c(){u=s("meta"),T=m(),d=s("h1"),y=s("a"),j=s("span"),c(b.$$.fragment),A=m(),G=s("span"),lr=n("Troubleshoot"),pt=m(),ke=s("p"),nr=n("Sometimes errors occur, but we are here to help! This guide covers some of the most common issues we\u2019ve seen and how you can resolve them. However, this guide isn\u2019t meant to be a comprehensive collection of every \u{1F917} Transformers issue. For more help with troubleshooting your issue, try:"),ut=m(),c(X.$$.fragment),dt=m(),Te=s("ol"),E=s("li"),ir=n("Asking for help on the "),Z=s("a"),fr=n("forums"),hr=n(". There are specific categories you can post your question to, like "),ee=s("a"),mr=n("Beginners"),pr=n(" or "),te=s("a"),ur=n("\u{1F917} Transformers"),dr=n(". Make sure you write a good descriptive forum post with some reproducible code to maximize the likelihood that your problem is solved!"),ct=m(),c(re.$$.fragment),vt=m(),P=s("ol"),Je=s("li"),oe=s("p"),cr=n("Create an "),se=s("a"),vr=n("Issue"),gr=n(" on the \u{1F917} Transformers repository if it is a bug related to the library. Try to include as much information describing the bug as possible to help us better figure out what\u2019s wrong and how we can fix it."),wr=m(),Ke=s("li"),ae=s("p"),_r=n("Check the "),Ae=s("a"),$r=n("Migration"),yr=n(" guide if you use an older version of \u{1F917} Transformers since some important changes have been introduced between versions."),gt=m(),L=s("p"),br=n("For more details about troubleshooting and getting help, take a look at "),le=s("a"),Er=n("Chapter 8"),kr=n(" of the Hugging Face course."),wt=m(),C=s("h2"),B=s("a"),We=s("span"),c(ne.$$.fragment),Tr=m(),Qe=s("span"),Ar=n("Firewalled environments"),_t=m(),je=s("p"),jr=n("Some GPU instances on cloud and intranet setups are firewalled to external connections, resulting in a connection error. When your script attempts to download model weights or datasets, the download will hang and then timeout with the following message:"),$t=m(),c(ie.$$.fragment),yt=m(),N=s("p"),Pr=n("In this case, you should try to run \u{1F917} Transformers on "),Pe=s("a"),Cr=n("offline mode"),Ur=n(" to avoid the connection error."),bt=m(),U=s("h2"),H=s("a"),Xe=s("span"),c(fe.$$.fragment),Fr=m(),Ze=s("span"),Ir=n("CUDA out of memory"),Et=m(),Ce=s("p"),Sr=n("Training large models with millions of parameters can be challenging without the appropriate hardware. A common error you may encounter when the GPU runs out of memory is:"),kt=m(),c(he.$$.fragment),Tt=m(),Ue=s("p"),xr=n("Here are some potential solutions you can try to lessen memory use:"),At=m(),O=s("ul"),F=s("li"),qr=n("Reduce the "),Fe=s("a"),et=s("code"),Dr=n("per_device_train_batch_size"),Mr=n(" value in "),Ie=s("a"),Gr=n("TrainingArguments"),Lr=n("."),Br=m(),I=s("li"),Nr=n("Try using "),Se=s("a"),tt=s("code"),Hr=n("gradient_accumulation_steps"),Or=n(" in "),xe=s("a"),zr=n("TrainingArguments"),Vr=n(" to effectively increase overall batch size."),jt=m(),c(z.$$.fragment),Pt=m(),S=s("h2"),V=s("a"),rt=s("span"),c(me.$$.fragment),Rr=m(),ot=s("span"),Yr=n("Unable to load a saved TensorFlow model"),Ct=m(),R=s("p"),Jr=n("TensorFlow\u2019s "),pe=s("a"),Kr=n("model.save"),Wr=n(" method will save the entire model - architecture, weights, training configuration - in a single file. However, when you load the model file again, you may run into an error because \u{1F917} Transformers may not load all the TensorFlow-related objects in the model file. 
To avoid issues with saving and loading TensorFlow models, we recommend you:"),Ut=m(),qe=s("ul"),k=s("li"),Qr=n("Save the model weights as a "),st=s("code"),Xr=n("h5"),Zr=n(" file extension with "),ue=s("a"),at=s("code"),eo=n("model.save_weights"),to=n(" and then reload the model with "),De=s("a"),ro=n("from_pretrained()"),oo=n(":"),Ft=m(),c(de.$$.fragment),It=m(),Me=s("ul"),x=s("li"),so=n("Save the model with "),lt=s("code"),ao=n("save_pretrained"),lo=n(" and load it again with "),Ge=s("a"),no=n("from_pretrained()"),io=n(":"),St=m(),c(ce.$$.fragment),xt=m(),q=s("h2"),Y=s("a"),nt=s("span"),c(ve.$$.fragment),fo=m(),it=s("span"),ho=n("ImportError"),qt=m(),J=s("p"),mo=n("Another common error you may encounter, especially if it is a newly released model, is "),ft=s("code"),po=n("ImportError"),uo=n(":"),Dt=m(),c(ge.$$.fragment),Mt=m(),Le=s("p"),co=n("For these error types, check to make sure you have the latest version of \u{1F917} Transformers installed to access the most recent models:"),Gt=m(),c(we.$$.fragment),Lt=m(),D=s("h2"),K=s("a"),ht=s("span"),c(_e.$$.fragment),vo=m(),mt=s("span"),go=n("CUDA error: device-side assert triggered"),Bt=m(),Be=s("p"),wo=n("Sometimes you may run into a generic CUDA error about an error in the device code."),Nt=m(),c($e.$$.fragment),Ht=m(),Ne=s("p"),_o=n("You should try to run the code on a CPU first to get a more descriptive error message. Add the following environment variable to the beginning of your code to switch to a CPU:"),Ot=m(),c(ye.$$.fragment),zt=m(),He=s("p"),$o=n("Another option is to get a better traceback from the GPU. Add the following environment variable to the beginning of your code to get the traceback to point to the source of the error:"),Vt=m(),c(be.$$.fragment),this.h()},l(e){const o=Es('[data-svelte="svelte-1phssyn"]',document.head);u=a(o,"META",{name:!0,content:!0}),o.forEach(t),T=p(e),d=a(e,"H1",{class:!0});var Ee=l(d);y=a(Ee,"A",{id:!0,class:!0,href:!0});var yo=l(y);j=a(yo,"SPAN",{});var bo=l(j);v(b.$$.fragment,bo),bo.forEach(t),yo.forEach(t),A=p(Ee),G=a(Ee,"SPAN",{});var Eo=l(G);lr=i(Eo,"Troubleshoot"),Eo.forEach(t),Ee.forEach(t),pt=p(e),ke=a(e,"P",{});var ko=l(ke);nr=i(ko,"Sometimes errors occur, but we are here to help! This guide covers some of the most common issues we\u2019ve seen and how you can resolve them. However, this guide isn\u2019t meant to be a comprehensive collection of every \u{1F917} Transformers issue. For more help with troubleshooting your issue, try:"),ko.forEach(t),ut=p(e),v(X.$$.fragment,e),dt=p(e),Te=a(e,"OL",{});var To=l(Te);E=a(To,"LI",{});var W=l(E);ir=i(W,"Asking for help on the "),Z=a(W,"A",{href:!0,rel:!0});var Ao=l(Z);fr=i(Ao,"forums"),Ao.forEach(t),hr=i(W,". There are specific categories you can post your question to, like "),ee=a(W,"A",{href:!0,rel:!0});var jo=l(ee);mr=i(jo,"Beginners"),jo.forEach(t),pr=i(W," or "),te=a(W,"A",{href:!0,rel:!0});var Po=l(te);ur=i(Po,"\u{1F917} Transformers"),Po.forEach(t),dr=i(W,". Make sure you write a good descriptive forum post with some reproducible code to maximize the likelihood that your problem is solved!"),W.forEach(t),To.forEach(t),ct=p(e),v(re.$$.fragment,e),vt=p(e),P=a(e,"OL",{start:!0});var Yt=l(P);Je=a(Yt,"LI",{});var Co=l(Je);oe=a(Co,"P",{});var Jt=l(oe);cr=i(Jt,"Create an "),se=a(Jt,"A",{href:!0,rel:!0});var Uo=l(se);vr=i(Uo,"Issue"),Uo.forEach(t),gr=i(Jt," on the \u{1F917} Transformers repository if it is a bug related to the library. 
Try to include as much information describing the bug as possible to help us better figure out what\u2019s wrong and how we can fix it."),Jt.forEach(t),Co.forEach(t),wr=p(Yt),Ke=a(Yt,"LI",{});var Fo=l(Ke);ae=a(Fo,"P",{});var Kt=l(ae);_r=i(Kt,"Check the "),Ae=a(Kt,"A",{href:!0});var Io=l(Ae);$r=i(Io,"Migration"),Io.forEach(t),yr=i(Kt," guide if you use an older version of \u{1F917} Transformers since some important changes have been introduced between versions."),Kt.forEach(t),Fo.forEach(t),Yt.forEach(t),gt=p(e),L=a(e,"P",{});var Wt=l(L);br=i(Wt,"For more details about troubleshooting and getting help, take a look at "),le=a(Wt,"A",{href:!0,rel:!0});var So=l(le);Er=i(So,"Chapter 8"),So.forEach(t),kr=i(Wt," of the Hugging Face course."),Wt.forEach(t),wt=p(e),C=a(e,"H2",{class:!0});var Qt=l(C);B=a(Qt,"A",{id:!0,class:!0,href:!0});var xo=l(B);We=a(xo,"SPAN",{});var qo=l(We);v(ne.$$.fragment,qo),qo.forEach(t),xo.forEach(t),Tr=p(Qt),Qe=a(Qt,"SPAN",{});var Do=l(Qe);Ar=i(Do,"Firewalled environments"),Do.forEach(t),Qt.forEach(t),_t=p(e),je=a(e,"P",{});var Mo=l(je);jr=i(Mo,"Some GPU instances on cloud and intranet setups are firewalled to external connections, resulting in a connection error. When your script attempts to download model weights or datasets, the download will hang and then timeout with the following message:"),Mo.forEach(t),$t=p(e),v(ie.$$.fragment,e),yt=p(e),N=a(e,"P",{});var Xt=l(N);Pr=i(Xt,"In this case, you should try to run \u{1F917} Transformers on "),Pe=a(Xt,"A",{href:!0});var Go=l(Pe);Cr=i(Go,"offline mode"),Go.forEach(t),Ur=i(Xt," to avoid the connection error."),Xt.forEach(t),bt=p(e),U=a(e,"H2",{class:!0});var Zt=l(U);H=a(Zt,"A",{id:!0,class:!0,href:!0});var Lo=l(H);Xe=a(Lo,"SPAN",{});var Bo=l(Xe);v(fe.$$.fragment,Bo),Bo.forEach(t),Lo.forEach(t),Fr=p(Zt),Ze=a(Zt,"SPAN",{});var No=l(Ze);Ir=i(No,"CUDA out of memory"),No.forEach(t),Zt.forEach(t),Et=p(e),Ce=a(e,"P",{});var Ho=l(Ce);Sr=i(Ho,"Training large models with millions of parameters can be challenging without the appropriate hardware. 
A common error you may encounter when the GPU runs out of memory is:"),Ho.forEach(t),kt=p(e),v(he.$$.fragment,e),Tt=p(e),Ue=a(e,"P",{});var Oo=l(Ue);xr=i(Oo,"Here are some potential solutions you can try to lessen memory use:"),Oo.forEach(t),At=p(e),O=a(e,"UL",{});var er=l(O);F=a(er,"LI",{});var Oe=l(F);qr=i(Oe,"Reduce the "),Fe=a(Oe,"A",{href:!0});var zo=l(Fe);et=a(zo,"CODE",{});var Vo=l(et);Dr=i(Vo,"per_device_train_batch_size"),Vo.forEach(t),zo.forEach(t),Mr=i(Oe," value in "),Ie=a(Oe,"A",{href:!0});var Ro=l(Ie);Gr=i(Ro,"TrainingArguments"),Ro.forEach(t),Lr=i(Oe,"."),Oe.forEach(t),Br=p(er),I=a(er,"LI",{});var ze=l(I);Nr=i(ze,"Try using "),Se=a(ze,"A",{href:!0});var Yo=l(Se);tt=a(Yo,"CODE",{});var Jo=l(tt);Hr=i(Jo,"gradient_accumulation_steps"),Jo.forEach(t),Yo.forEach(t),Or=i(ze," in "),xe=a(ze,"A",{href:!0});var Ko=l(xe);zr=i(Ko,"TrainingArguments"),Ko.forEach(t),Vr=i(ze," to effectively increase overall batch size."),ze.forEach(t),er.forEach(t),jt=p(e),v(z.$$.fragment,e),Pt=p(e),S=a(e,"H2",{class:!0});var tr=l(S);V=a(tr,"A",{id:!0,class:!0,href:!0});var Wo=l(V);rt=a(Wo,"SPAN",{});var Qo=l(rt);v(me.$$.fragment,Qo),Qo.forEach(t),Wo.forEach(t),Rr=p(tr),ot=a(tr,"SPAN",{});var Xo=l(ot);Yr=i(Xo,"Unable to load a saved TensorFlow model"),Xo.forEach(t),tr.forEach(t),Ct=p(e),R=a(e,"P",{});var rr=l(R);Jr=i(rr,"TensorFlow\u2019s "),pe=a(rr,"A",{href:!0,rel:!0});var Zo=l(pe);Kr=i(Zo,"model.save"),Zo.forEach(t),Wr=i(rr," method will save the entire model - architecture, weights, training configuration - in a single file. However, when you load the model file again, you may run into an error because \u{1F917} Transformers may not load all the TensorFlow-related objects in the model file. To avoid issues with saving and loading TensorFlow models, we recommend you:"),rr.forEach(t),Ut=p(e),qe=a(e,"UL",{});var es=l(qe);k=a(es,"LI",{});var Q=l(k);Qr=i(Q,"Save the model weights as a "),st=a(Q,"CODE",{});var ts=l(st);Xr=i(ts,"h5"),ts.forEach(t),Zr=i(Q," file extension with "),ue=a(Q,"A",{href:!0,rel:!0});var rs=l(ue);at=a(rs,"CODE",{});var os=l(at);eo=i(os,"model.save_weights"),os.forEach(t),rs.forEach(t),to=i(Q," and then reload the model with "),De=a(Q,"A",{href:!0});var ss=l(De);ro=i(ss,"from_pretrained()"),ss.forEach(t),oo=i(Q,":"),Q.forEach(t),es.forEach(t),Ft=p(e),v(de.$$.fragment,e),It=p(e),Me=a(e,"UL",{});var as=l(Me);x=a(as,"LI",{});var Ve=l(x);so=i(Ve,"Save the model with "),lt=a(Ve,"CODE",{});var ls=l(lt);ao=i(ls,"save_pretrained"),ls.forEach(t),lo=i(Ve," and load it again with "),Ge=a(Ve,"A",{href:!0});var ns=l(Ge);no=i(ns,"from_pretrained()"),ns.forEach(t),io=i(Ve,":"),Ve.forEach(t),as.forEach(t),St=p(e),v(ce.$$.fragment,e),xt=p(e),q=a(e,"H2",{class:!0});var or=l(q);Y=a(or,"A",{id:!0,class:!0,href:!0});var is=l(Y);nt=a(is,"SPAN",{});var fs=l(nt);v(ve.$$.fragment,fs),fs.forEach(t),is.forEach(t),fo=p(or),it=a(or,"SPAN",{});var hs=l(it);ho=i(hs,"ImportError"),hs.forEach(t),or.forEach(t),qt=p(e),J=a(e,"P",{});var sr=l(J);mo=i(sr,"Another common error you may encounter, especially if it is a newly released model, is "),ft=a(sr,"CODE",{});var ms=l(ft);po=i(ms,"ImportError"),ms.forEach(t),uo=i(sr,":"),sr.forEach(t),Dt=p(e),v(ge.$$.fragment,e),Mt=p(e),Le=a(e,"P",{});var ps=l(Le);co=i(ps,"For these error types, check to make sure you have the latest version of \u{1F917} Transformers installed to access the most recent models:"),ps.forEach(t),Gt=p(e),v(we.$$.fragment,e),Lt=p(e),D=a(e,"H2",{class:!0});var ar=l(D);K=a(ar,"A",{id:!0,class:!0,href:!0});var us=l(K);ht=a(us,"SPAN",{});var 
ds=l(ht);v(_e.$$.fragment,ds),ds.forEach(t),us.forEach(t),vo=p(ar),mt=a(ar,"SPAN",{});var cs=l(mt);go=i(cs,"CUDA error: device-side assert triggered"),cs.forEach(t),ar.forEach(t),Bt=p(e),Be=a(e,"P",{});var vs=l(Be);wo=i(vs,"Sometimes you may run into a generic CUDA error about an error in the device code."),vs.forEach(t),Nt=p(e),v($e.$$.fragment,e),Ht=p(e),Ne=a(e,"P",{});var gs=l(Ne);_o=i(gs,"You should try to run the code on a CPU first to get a more descriptive error message. Add the following environment variable to the beginning of your code to switch to a CPU:"),gs.forEach(t),Ot=p(e),v(ye.$$.fragment,e),zt=p(e),He=a(e,"P",{});var ws=l(He);$o=i(ws,"Another option is to get a better traceback from the GPU. Add the following environment variable to the beginning of your code to get the traceback to point to the source of the error:"),ws.forEach(t),Vt=p(e),v(be.$$.fragment,e),this.h()},h(){h(u,"name","hf:doc:metadata"),h(u,"content",JSON.stringify(js)),h(y,"id","troubleshoot"),h(y,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(y,"href","#troubleshoot"),h(d,"class","relative group"),h(Z,"href","https://discuss.huggingface.co/"),h(Z,"rel","nofollow"),h(ee,"href","https://discuss.huggingface.co/c/beginners/5"),h(ee,"rel","nofollow"),h(te,"href","https://discuss.huggingface.co/c/transformers/9"),h(te,"rel","nofollow"),h(se,"href","https://github.com/huggingface/transformers/issues/new/choose"),h(se,"rel","nofollow"),h(Ae,"href","migration"),h(P,"start","2"),h(le,"href","https://huggingface.co/course/chapter8/1?fw=pt"),h(le,"rel","nofollow"),h(B,"id","firewalled-environments"),h(B,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(B,"href","#firewalled-environments"),h(C,"class","relative group"),h(Pe,"href","installation#offline-mode"),h(H,"id","cuda-out-of-memory"),h(H,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(H,"href","#cuda-out-of-memory"),h(U,"class","relative group"),h(Fe,"href","main_classes/trainer#transformers.TrainingArguments.per_device_train_batch_size"),h(Ie,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.TrainingArguments"),h(Se,"href","main_classes/trainer#transformers.TrainingArguments.gradient_accumulation_steps"),h(xe,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.TrainingArguments"),h(V,"id","unable-to-load-a-saved-tensorflow-model"),h(V,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(V,"href","#unable-to-load-a-saved-tensorflow-model"),h(S,"class","relative group"),h(pe,"href","https://www.tensorflow.org/tutorials/keras/save_and_load#save_the_entire_model"),h(pe,"rel","nofollow"),h(ue,"href","https://www.tensorflow.org/tutorials/keras/save_and_load#save_the_entire_model"),h(ue,"rel","nofollow"),h(De,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel.from_pretrained"),h(Ge,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel.from_pretrained"),h(Y,"id","importerror"),h(Y,"class","header-link block pr-1.5 text-lg no-hover:hidden 
with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(Y,"href","#importerror"),h(q,"class","relative group"),h(K,"id","cuda-error-deviceside-assert-triggered"),h(K,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(K,"href","#cuda-error-deviceside-assert-triggered"),h(D,"class","relative group")},m(e,o){r(document.head,u),f(e,T,o),f(e,d,o),r(d,y),r(y,j),g(b,j,null),r(d,A),r(d,G),r(G,lr),f(e,pt,o),f(e,ke,o),r(ke,nr),f(e,ut,o),g(X,e,o),f(e,dt,o),f(e,Te,o),r(Te,E),r(E,ir),r(E,Z),r(Z,fr),r(E,hr),r(E,ee),r(ee,mr),r(E,pr),r(E,te),r(te,ur),r(E,dr),f(e,ct,o),g(re,e,o),f(e,vt,o),f(e,P,o),r(P,Je),r(Je,oe),r(oe,cr),r(oe,se),r(se,vr),r(oe,gr),r(P,wr),r(P,Ke),r(Ke,ae),r(ae,_r),r(ae,Ae),r(Ae,$r),r(ae,yr),f(e,gt,o),f(e,L,o),r(L,br),r(L,le),r(le,Er),r(L,kr),f(e,wt,o),f(e,C,o),r(C,B),r(B,We),g(ne,We,null),r(C,Tr),r(C,Qe),r(Qe,Ar),f(e,_t,o),f(e,je,o),r(je,jr),f(e,$t,o),g(ie,e,o),f(e,yt,o),f(e,N,o),r(N,Pr),r(N,Pe),r(Pe,Cr),r(N,Ur),f(e,bt,o),f(e,U,o),r(U,H),r(H,Xe),g(fe,Xe,null),r(U,Fr),r(U,Ze),r(Ze,Ir),f(e,Et,o),f(e,Ce,o),r(Ce,Sr),f(e,kt,o),g(he,e,o),f(e,Tt,o),f(e,Ue,o),r(Ue,xr),f(e,At,o),f(e,O,o),r(O,F),r(F,qr),r(F,Fe),r(Fe,et),r(et,Dr),r(F,Mr),r(F,Ie),r(Ie,Gr),r(F,Lr),r(O,Br),r(O,I),r(I,Nr),r(I,Se),r(Se,tt),r(tt,Hr),r(I,Or),r(I,xe),r(xe,zr),r(I,Vr),f(e,jt,o),g(z,e,o),f(e,Pt,o),f(e,S,o),r(S,V),r(V,rt),g(me,rt,null),r(S,Rr),r(S,ot),r(ot,Yr),f(e,Ct,o),f(e,R,o),r(R,Jr),r(R,pe),r(pe,Kr),r(R,Wr),f(e,Ut,o),f(e,qe,o),r(qe,k),r(k,Qr),r(k,st),r(st,Xr),r(k,Zr),r(k,ue),r(ue,at),r(at,eo),r(k,to),r(k,De),r(De,ro),r(k,oo),f(e,Ft,o),g(de,e,o),f(e,It,o),f(e,Me,o),r(Me,x),r(x,so),r(x,lt),r(lt,ao),r(x,lo),r(x,Ge),r(Ge,no),r(x,io),f(e,St,o),g(ce,e,o),f(e,xt,o),f(e,q,o),r(q,Y),r(Y,nt),g(ve,nt,null),r(q,fo),r(q,it),r(it,ho),f(e,qt,o),f(e,J,o),r(J,mo),r(J,ft),r(ft,po),r(J,uo),f(e,Dt,o),g(ge,e,o),f(e,Mt,o),f(e,Le,o),r(Le,co),f(e,Gt,o),g(we,e,o),f(e,Lt,o),f(e,D,o),r(D,K),r(K,ht),g(_e,ht,null),r(D,vo),r(D,mt),r(mt,go),f(e,Bt,o),f(e,Be,o),r(Be,wo),f(e,Nt,o),g($e,e,o),f(e,Ht,o),f(e,Ne,o),r(Ne,_o),f(e,Ot,o),g(ye,e,o),f(e,zt,o),f(e,He,o),r(He,$o),f(e,Vt,o),g(be,e,o),Rt=!0},p(e,[o]){const 
Ee={};o&2&&(Ee.$$scope={dirty:o,ctx:e}),z.$set(Ee)},i(e){Rt||(w(b.$$.fragment,e),w(X.$$.fragment,e),w(re.$$.fragment,e),w(ne.$$.fragment,e),w(ie.$$.fragment,e),w(fe.$$.fragment,e),w(he.$$.fragment,e),w(z.$$.fragment,e),w(me.$$.fragment,e),w(de.$$.fragment,e),w(ce.$$.fragment,e),w(ve.$$.fragment,e),w(ge.$$.fragment,e),w(we.$$.fragment,e),w(_e.$$.fragment,e),w($e.$$.fragment,e),w(ye.$$.fragment,e),w(be.$$.fragment,e),Rt=!0)},o(e){_(b.$$.fragment,e),_(X.$$.fragment,e),_(re.$$.fragment,e),_(ne.$$.fragment,e),_(ie.$$.fragment,e),_(fe.$$.fragment,e),_(he.$$.fragment,e),_(z.$$.fragment,e),_(me.$$.fragment,e),_(de.$$.fragment,e),_(ce.$$.fragment,e),_(ve.$$.fragment,e),_(ge.$$.fragment,e),_(we.$$.fragment,e),_(_e.$$.fragment,e),_($e.$$.fragment,e),_(ye.$$.fragment,e),_(be.$$.fragment,e),Rt=!1},d(e){t(u),e&&t(T),e&&t(d),$(b),e&&t(pt),e&&t(ke),e&&t(ut),$(X,e),e&&t(dt),e&&t(Te),e&&t(ct),$(re,e),e&&t(vt),e&&t(P),e&&t(gt),e&&t(L),e&&t(wt),e&&t(C),$(ne),e&&t(_t),e&&t(je),e&&t($t),$(ie,e),e&&t(yt),e&&t(N),e&&t(bt),e&&t(U),$(fe),e&&t(Et),e&&t(Ce),e&&t(kt),$(he,e),e&&t(Tt),e&&t(Ue),e&&t(At),e&&t(O),e&&t(jt),$(z,e),e&&t(Pt),e&&t(S),$(me),e&&t(Ct),e&&t(R),e&&t(Ut),e&&t(qe),e&&t(Ft),$(de,e),e&&t(It),e&&t(Me),e&&t(St),$(ce,e),e&&t(xt),e&&t(q),$(ve),e&&t(qt),e&&t(J),e&&t(Dt),$(ge,e),e&&t(Mt),e&&t(Le),e&&t(Gt),$(we,e),e&&t(Lt),e&&t(D),$(_e),e&&t(Bt),e&&t(Be),e&&t(Nt),$($e,e),e&&t(Ht),e&&t(Ne),e&&t(Ot),$(ye,e),e&&t(zt),e&&t(He),e&&t(Vt),$(be,e)}}}const js={local:"troubleshoot",sections:[{local:"firewalled-environments",title:"Firewalled environments"},{local:"cuda-out-of-memory",title:"CUDA out of memory"},{local:"unable-to-load-a-saved-tensorflow-model",title:"Unable to load a saved TensorFlow model"},{local:"importerror",title:"ImportError"},{local:"cuda-error-deviceside-assert-triggered",title:"CUDA error: device-side assert triggered"}],title:"Troubleshoot"};function Ps(Ye,u,T){let{fw:d}=u;return Ye.$$set=y=>{"fw"in y&&T(0,d=y.fw)},[d]}class qs extends $s{constructor(u){super();ys(this,u,Ps,As,bs,{fw:0})}}export{qs as default,js as metadata};
264
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages/model_sharing.mdx-3b3df35f.js
import{S as pf,i as mf,s as df,e as s,k as u,w as m,t as r,M as cf,c as i,d as t,m as h,a as n,x as d,h as l,b as p,N as Dr,F as o,g as f,y as c,q as _,o as y,B as w}from"../chunks/vendor-4833417e.js";import{T as _f}from"../chunks/Tip-fffd6df1.js";import{Y as yf}from"../chunks/Youtube-27813aed.js";import{I as V}from"../chunks/IconCopyLink-4b81c553.js";import{C as b}from"../chunks/CodeBlock-6a3d1b46.js";import"../chunks/CopyButton-dacfbfaf.js";function wf(Gt){let g,j,v,$,M;return{c(){g=s("p"),j=r("To share a model with the community, you need an account on "),v=s("a"),$=r("huggingface.co"),M=r(". You can also join an existing organization or create a new one."),this.h()},l(E){g=i(E,"P",{});var H=n(g);j=l(H,"To share a model with the community, you need an account on "),v=i(H,"A",{href:!0,rel:!0});var W=n(v);$=l(W,"huggingface.co"),W.forEach(t),M=l(H,". You can also join an existing organization or create a new one."),H.forEach(t),this.h()},h(){p(v,"href","https://huggingface.co/join"),p(v,"rel","nofollow")},m(E,H){f(E,g,H),o(g,j),o(g,v),o(v,$),o(g,M)},d(E){E&&t(g)}}}function vf(Gt){let g,j,v,$,M,E,H,W,xr,zo,mt,Or,Lo,J,Ir,Te,Mr,Nr,Ro,K,Yt,zr,Lr,Vt,Rr,Bo,T,Ai,Uo,X,Go,N,Z,Wt,Pe,Br,Jt,Ur,Yo,dt,Gr,Vo,F,Yr,Ae,Vr,Wr,Kt,Jr,Kr,Wo,Q,Xr,Xt,Zr,Qr,Jo,je,Ko,ct,el,Xo,_t,yt,ji,Zo,z,ee,Zt,He,tl,Qt,ol,Qo,te,al,eo,rl,ll,ea,Fe,ta,oe,sl,Ce,to,il,nl,oa,Se,aa,C,fl,oo,ul,hl,qe,pl,ml,ra,De,la,L,ae,ao,xe,dl,ro,cl,sa,wt,_l,ia,re,yl,vt,wl,vl,na,le,gl,lo,bl,$l,fa,Oe,ua,gt,kl,ha,Ie,pa,se,El,so,Tl,Pl,ma,Me,da,bt,Al,ca,Ne,_a,R,ie,io,ze,jl,$t,Hl,no,Fl,ya,Le,wa,k,Cl,kt,Sl,ql,Et,Dl,xl,fo,Ol,Il,Tt,Ml,Nl,va,Re,ga,ne,zl,Pt,Ll,Rl,ba,Be,$a,S,Bl,At,Ul,Gl,jt,Yl,Vl,ka,Ue,Ea,B,fe,uo,Ge,Wl,Ht,Jl,ho,Kl,Ta,q,Xl,Ft,Zl,Ql,Ct,es,ts,Pa,D,po,os,as,mo,rs,ls,Ye,ss,co,is,ns,Aa,Ve,ja,ue,fs,We,_o,us,hs,Ha,Je,Fa,U,he,yo,Ke,ps,Xe,ms,wo,ds,cs,Ca,pe,_s,vo,ys,ws,Sa,me,vs,go,gs,bs,qa,Ze,Da,x,$s,bo,ks,Es,$o,Ts,Ps,xa,Qe,Oa,de,As,ko,js,Hs,Ia,et,Ma,ce,Fs,Eo,Cs,Ss,Na,tt,za,St,qs,La,ot,Ra,_e,Ds,To,xs,Os,Ba,ye,Is,at,Ms,Ns,Ua,G,we,Po,rt,zs,Ao,Ls,Ga,ve,Rs,lt,Bs,Us,Ya,qt,Dt,Hi,Va,xt,Gs,Wa,P,st,Ys,jo,Vs,Ws,Js,Ho,Ks,Xs,Fo,Zs,Qs,Co,ei,Ja,O,ti,So,oi,ai,qo,ri,li,Ka,Ot,It,Fi,Xa,Y,ge,Do,it,si,xo,ii,Za,be,ni,Oo,fi,ui,Qa,$e,nt,hi,Io,pi,mi,di,ft,ci,Mo,_i,yi,er,A,wi,ut,vi,gi,No,bi,$i,ht,ki,Ei,tr;return E=new V({}),X=new _f({props:{$$slots:{default:[wf]},$$scope:{ctx:Gt}}}),Pe=new V({}),je=new b({props:{code:`model = AutoModel.from_pretrained( "julien-c/EsperBERTo-small", revision="v2.0.1" # tag name, or branch name, or commit hash )`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModel.from_pretrained( <span class="hljs-meta">... </span> <span class="hljs-string">&quot;julien-c/EsperBERTo-small&quot;</span>, revision=<span class="hljs-string">&quot;v2.0.1&quot;</span> <span class="hljs-comment"># tag name, or branch name, or commit hash</span> <span class="hljs-meta">... 
</span>)`}}),He=new V({}),Fe=new b({props:{code:"huggingface-cli login",highlighted:"huggingface-cli login"}}),Se=new b({props:{code:"pip install huggingface_hub",highlighted:"pip install huggingface_hub"}}),De=new b({props:{code:`from huggingface_hub import notebook_login notebook_login()`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> huggingface_hub <span class="hljs-keyword">import</span> notebook_login <span class="hljs-meta">&gt;&gt;&gt; </span>notebook_login()`}}),xe=new V({}),Oe=new b({props:{code:'tf_model = TFDistilBertForSequenceClassification.from_pretrained("path/to/awesome-name-you-picked", from_pt=True)',highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>tf_model = TFDistilBertForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;path/to/awesome-name-you-picked&quot;</span>, from_pt=<span class="hljs-literal">True</span>)'}}),Ie=new b({props:{code:'tf_model.save_pretrained("path/to/awesome-name-you-picked")',highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>tf_model.save_pretrained(<span class="hljs-string">&quot;path/to/awesome-name-you-picked&quot;</span>)'}}),Me=new b({props:{code:`pt_model = DistilBertForSequenceClassification.from_pretrained("path/to/awesome-name-you-picked", from_tf=True) pt_model.save_pretrained("path/to/awesome-name-you-picked")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>pt_model = DistilBertForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;path/to/awesome-name-you-picked&quot;</span>, from_tf=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>pt_model.save_pretrained(<span class="hljs-string">&quot;path/to/awesome-name-you-picked&quot;</span>)`}}),Ne=new b({props:{code:`flax_model = FlaxDistilBertForSequenceClassification.from_pretrained( "path/to/awesome-name-you-picked", from_pt=True )`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>flax_model = FlaxDistilBertForSequenceClassification.from_pretrained( <span class="hljs-meta">... </span> <span class="hljs-string">&quot;path/to/awesome-name-you-picked&quot;</span>, from_pt=<span class="hljs-literal">True</span> <span class="hljs-meta">... </span>)`}}),ze=new V({}),Le=new yf({props:{id:"Z1-XMy-GNLQ"}}),Re=new b({props:{code:'training_args = TrainingArguments(output_dir="my-awesome-model", push_to_hub=True)',highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>training_args = TrainingArguments(output_dir=<span class="hljs-string">&quot;my-awesome-model&quot;</span>, push_to_hub=<span class="hljs-literal">True</span>)'}}),Be=new b({props:{code:`trainer = Trainer( model=model, args=training_args, train_dataset=small_train_dataset, eval_dataset=small_eval_dataset, compute_metrics=compute_metrics, )`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>trainer = Trainer( <span class="hljs-meta">... </span> model=model, <span class="hljs-meta">... </span> args=training_args, <span class="hljs-meta">... </span> train_dataset=small_train_dataset, <span class="hljs-meta">... </span> eval_dataset=small_eval_dataset, <span class="hljs-meta">... </span> compute_metrics=compute_metrics, <span class="hljs-meta">... 
</span>)`}}),Ue=new b({props:{code:"trainer.push_to_hub()",highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>trainer.push_to_hub()'}}),Ge=new V({}),Ve=new b({props:{code:`from transformers.keras.callbacks import PushToHubCallback push_to_hub_callback = PushToHubCallback( output_dir="./your_model_save_path", tokenizer=tokenizer, hub_model_id="your-username/my-awesome-model" )`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers.keras.callbacks <span class="hljs-keyword">import</span> PushToHubCallback <span class="hljs-meta">&gt;&gt;&gt; </span>push_to_hub_callback = PushToHubCallback( <span class="hljs-meta">... </span> output_dir=<span class="hljs-string">&quot;./your_model_save_path&quot;</span>, tokenizer=tokenizer, hub_model_id=<span class="hljs-string">&quot;your-username/my-awesome-model&quot;</span> <span class="hljs-meta">... </span>)`}}),Je=new b({props:{code:"model.fit(tf_train_dataset, validation_data=tf_validation_dataset, epochs=3, callbacks=push_to_hub_callback)",highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>model.fit(tf_train_dataset, validation_data=tf_validation_dataset, epochs=<span class="hljs-number">3</span>, callbacks=push_to_hub_callback)'}}),Ke=new V({}),Ze=new b({props:{code:'pt_model.push_to_hub("my-awesome-model")',highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>pt_model.push_to_hub(<span class="hljs-string">&quot;my-awesome-model&quot;</span>)'}}),Qe=new b({props:{code:`from transformers import AutoModel model = AutoModel.from_pretrained("your_username/my-awesome-model")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModel <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModel.from_pretrained(<span class="hljs-string">&quot;your_username/my-awesome-model&quot;</span>)`}}),et=new b({props:{code:'pt_model.push_to_hub("my-awesome-model", organization="my-awesome-org")',highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>pt_model.push_to_hub(<span class="hljs-string">&quot;my-awesome-model&quot;</span>, organization=<span class="hljs-string">&quot;my-awesome-org&quot;</span>)'}}),tt=new b({props:{code:'tokenizer.push_to_hub("my-awesome-model")',highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.push_to_hub(<span class="hljs-string">&quot;my-awesome-model&quot;</span>)'}}),ot=new b({props:{code:'tf_model.push_to_hub("my-awesome-model")',highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>tf_model.push_to_hub(<span class="hljs-string">&quot;my-awesome-model&quot;</span>)'}}),rt=new V({}),it=new V({}),{c(){g=s("meta"),j=u(),v=s("h1"),$=s("a"),M=s("span"),m(E.$$.fragment),H=u(),W=s("span"),xr=r("Share a model"),zo=u(),mt=s("p"),Or=r("The last two tutorials showed how you can fine-tune a model with PyTorch, Keras, and \u{1F917} Accelerate for distributed setups. The next step is to share your model with the community! At Hugging Face, we believe in openly sharing knowledge and resources to democratize artificial intelligence for everyone. 
We encourage you to consider sharing your model with the community to help others save time and resources."),Lo=u(),J=s("p"),Ir=r("In this tutorial, you will learn two methods for sharing a trained or fine-tuned model on the "),Te=s("a"),Mr=r("Model Hub"),Nr=r(":"),Ro=u(),K=s("ul"),Yt=s("li"),zr=r("Programmatically push your files to the Hub."),Lr=u(),Vt=s("li"),Rr=r("Drag-and-drop your files to the Hub with the web interface."),Bo=u(),T=s("iframe"),Uo=u(),m(X.$$.fragment),Go=u(),N=s("h2"),Z=s("a"),Wt=s("span"),m(Pe.$$.fragment),Br=u(),Jt=s("span"),Ur=r("Repository features"),Yo=u(),dt=s("p"),Gr=r("Each repository on the Model Hub behaves like a typical GitHub repository. Our repositories offer versioning, commit history, and the ability to visualize differences."),Vo=u(),F=s("p"),Yr=r("The Model Hub\u2019s built-in versioning is based on git and "),Ae=s("a"),Vr=r("git-lfs"),Wr=r(". In other words, you can treat one model as one repository, enabling greater access control and scalability. Version control allows "),Kt=s("em"),Jr=r("revisions"),Kr=r(", a method for pinning a specific version of a model with a commit hash, tag or branch."),Wo=u(),Q=s("p"),Xr=r("As a result, you can load a specific model version with the "),Xt=s("code"),Zr=r("revision"),Qr=r(" parameter:"),Jo=u(),m(je.$$.fragment),Ko=u(),ct=s("p"),el=r("Files are also easily edited in a repository, and you can view the commit history as well as the difference:"),Xo=u(),_t=s("p"),yt=s("img"),Zo=u(),z=s("h2"),ee=s("a"),Zt=s("span"),m(He.$$.fragment),tl=u(),Qt=s("span"),ol=r("Setup"),Qo=u(),te=s("p"),al=r("Before sharing a model to the Hub, you will need your Hugging Face credentials. If you have access to a terminal, run the following command in the virtual environment where \u{1F917} Transformers is installed. This will store your access token in your Hugging Face cache folder ("),eo=s("code"),rl=r("~/.cache/"),ll=r(" by default):"),ea=u(),m(Fe.$$.fragment),ta=u(),oe=s("p"),sl=r("If you are using a notebook like Jupyter or Colaboratory, make sure you have the "),Ce=s("a"),to=s("code"),il=r("huggingface_hub"),nl=r(" library installed. This library allows you to programmatically interact with the Hub."),oa=u(),m(Se.$$.fragment),aa=u(),C=s("p"),fl=r("Then use "),oo=s("code"),ul=r("notebook_login"),hl=r(" to sign-in to the Hub, and follow the link "),qe=s("a"),pl=r("here"),ml=r(" to generate a token to login with:"),ra=u(),m(De.$$.fragment),la=u(),L=s("h2"),ae=s("a"),ao=s("span"),m(xe.$$.fragment),dl=u(),ro=s("span"),cl=r("Convert a model for all frameworks"),sa=u(),wt=s("p"),_l=r("To ensure your model can be used by someone working with a different framework, we recommend you convert and upload your model with both PyTorch and TensorFlow checkpoints. While users are still able to load your model from a different framework if you skip this step, it will be slower because \u{1F917} Transformers will need to convert the checkpoint on-the-fly."),ia=u(),re=s("p"),yl=r("Converting a checkpoint for another framework is easy. Make sure you have PyTorch and TensorFlow installed (see "),vt=s("a"),wl=r("here"),vl=r(" for installation instructions), and then find the specific model for your task in the other framework."),na=u(),le=s("p"),gl=r("For example, suppose you trained DistilBert for sequence classification in PyTorch and want to convert it to it\u2019s TensorFlow equivalent. 
Load the TensorFlow equivalent of your model for your task, and specify "),lo=s("code"),bl=r("from_pt=True"),$l=r(" so \u{1F917} Transformers will convert the PyTorch checkpoint to a TensorFlow checkpoint:"),fa=u(),m(Oe.$$.fragment),ua=u(),gt=s("p"),kl=r("Then save your new TensorFlow model with it\u2019s new checkpoint:"),ha=u(),m(Ie.$$.fragment),pa=u(),se=s("p"),El=r("Similarly, specify "),so=s("code"),Tl=r("from_tf=True"),Pl=r(" to convert a checkpoint from TensorFlow to PyTorch:"),ma=u(),m(Me.$$.fragment),da=u(),bt=s("p"),Al=r("If a model is available in Flax, you can also convert a checkpoint from PyTorch to Flax:"),ca=u(),m(Ne.$$.fragment),_a=u(),R=s("h2"),ie=s("a"),io=s("span"),m(ze.$$.fragment),jl=u(),$t=s("span"),Hl=r("Push a model with "),no=s("code"),Fl=r("Trainer"),ya=u(),m(Le.$$.fragment),wa=u(),k=s("p"),Cl=r("Sharing a model to the Hub is as simple as adding an extra parameter or callback. Remember from the "),kt=s("a"),Sl=r("fine-tuning tutorial"),ql=r(", the "),Et=s("a"),Dl=r("TrainingArguments"),xl=r(" class is where you specify hyperparameters and additional training options. One of these training options includes the ability to push a model directly to the Hub. Set "),fo=s("code"),Ol=r("push_to_hub=True"),Il=r(" in your "),Tt=s("a"),Ml=r("TrainingArguments"),Nl=r(":"),va=u(),m(Re.$$.fragment),ga=u(),ne=s("p"),zl=r("Pass your training arguments as usual to "),Pt=s("a"),Ll=r("Trainer"),Rl=r(":"),ba=u(),m(Be.$$.fragment),$a=u(),S=s("p"),Bl=r("After you fine-tune your model, call "),At=s("a"),Ul=r("push_to_hub()"),Gl=r(" on "),jt=s("a"),Yl=r("Trainer"),Vl=r(" to push the trained model to the Hub. \u{1F917} Transformers will even automatically add training hyperparameters, training results and framework versions to your model card!"),ka=u(),m(Ue.$$.fragment),Ea=u(),B=s("h2"),fe=s("a"),uo=s("span"),m(Ge.$$.fragment),Wl=u(),Ht=s("span"),Jl=r("Push a model with "),ho=s("code"),Kl=r("PushToHubCallback"),Ta=u(),q=s("p"),Xl=r("TensorFlow users can enable the same functionality with "),Ft=s("a"),Zl=r("PushToHubCallback"),Ql=r(". In the "),Ct=s("a"),es=r("PushToHubCallback"),ts=r(" function, add:"),Pa=u(),D=s("ul"),po=s("li"),os=r("An output directory for your model."),as=u(),mo=s("li"),rs=r("A tokenizer."),ls=u(),Ye=s("li"),ss=r("The "),co=s("code"),is=r("hub_model_id"),ns=r(", which is your Hub username and model name."),Aa=u(),m(Ve.$$.fragment),ja=u(),ue=s("p"),fs=r("Add the callback to "),We=s("a"),_o=s("code"),us=r("fit"),hs=r(", and \u{1F917} Transformers will push the trained model to the Hub:"),Ha=u(),m(Je.$$.fragment),Fa=u(),U=s("h2"),he=s("a"),yo=s("span"),m(Ke.$$.fragment),ps=u(),Xe=s("span"),ms=r("Use the "),wo=s("code"),ds=r("push_to_hub"),cs=r(" function"),Ca=u(),pe=s("p"),_s=r("You can also call "),vo=s("code"),ys=r("push_to_hub"),ws=r(" directly on your model to upload it to the Hub."),Sa=u(),me=s("p"),vs=r("Specify your model name in "),go=s("code"),gs=r("push_to_hub"),bs=r(":"),qa=u(),m(Ze.$$.fragment),Da=u(),x=s("p"),$s=r("This creates a repository under your username with the model name "),bo=s("code"),ks=r("my-awesome-model"),Es=r(". 
Users can now load your model with the "),$o=s("code"),Ts=r("from_pretrained"),Ps=r(" function:"),xa=u(),m(Qe.$$.fragment),Oa=u(),de=s("p"),As=r("If you belong to an organization and want to push your model under the organization name instead, add the "),ko=s("code"),js=r("organization"),Hs=r(" parameter:"),Ia=u(),m(et.$$.fragment),Ma=u(),ce=s("p"),Fs=r("The "),Eo=s("code"),Cs=r("push_to_hub"),Ss=r(" function can also be used to add other files to a model repository. For example, add a tokenizer to a model repository:"),Na=u(),m(tt.$$.fragment),za=u(),St=s("p"),qs=r("Or perhaps you\u2019d like to add the TensorFlow version of your fine-tuned PyTorch model:"),La=u(),m(ot.$$.fragment),Ra=u(),_e=s("p"),Ds=r("Now when you navigate to the your Hugging Face profile, you should see your newly created model repository. Clicking on the "),To=s("strong"),xs=r("Files"),Os=r(" tab will display all the files you\u2019ve uploaded to the repository."),Ba=u(),ye=s("p"),Is=r("For more details on how to create and upload files to a repository, refer to the Hub documentation "),at=s("a"),Ms=r("here"),Ns=r("."),Ua=u(),G=s("h2"),we=s("a"),Po=s("span"),m(rt.$$.fragment),zs=u(),Ao=s("span"),Ls=r("Upload with the web interface"),Ga=u(),ve=s("p"),Rs=r("Users who prefer a no-code approach are able to upload a model through the Hub\u2019s web interface. Visit "),lt=s("a"),Bs=r("huggingface.co/new"),Us=r(" to create a new repository:"),Ya=u(),qt=s("p"),Dt=s("img"),Va=u(),xt=s("p"),Gs=r("From here, add some information about your model:"),Wa=u(),P=s("ul"),st=s("li"),Ys=r("Select the "),jo=s("strong"),Vs=r("owner"),Ws=r(" of the repository. This can be yourself or any of the organizations you belong to."),Js=u(),Ho=s("li"),Ks=r("Pick a name for your model, which will also be the repository name."),Xs=u(),Fo=s("li"),Zs=r("Choose whether your model is public or private."),Qs=u(),Co=s("li"),ei=r("Specify the license usage for your model."),Ja=u(),O=s("p"),ti=r("Now click on the "),So=s("strong"),oi=r("Files"),ai=r(" tab and click on the "),qo=s("strong"),ri=r("Add file"),li=r(" button to upload a new file to your repository. Then drag-and-drop a file to upload and add a commit message."),Ka=u(),Ot=s("p"),It=s("img"),Xa=u(),Y=s("h2"),ge=s("a"),Do=s("span"),m(it.$$.fragment),si=u(),xo=s("span"),ii=r("Add a model card"),Za=u(),be=s("p"),ni=r("To make sure users understand your model\u2019s capabilities, limitations, potential biases and ethical considerations, please add a model card to your repository. The model card is defined in the "),Oo=s("code"),fi=r("README.md"),ui=r(" file. You can add a model card by:"),Qa=u(),$e=s("ul"),nt=s("li"),hi=r("Manually creating and uploading a "),Io=s("code"),pi=r("README.md"),mi=r(" file."),di=u(),ft=s("li"),ci=r("Clicking on the "),Mo=s("strong"),_i=r("Edit model card"),yi=r(" button in your model repository."),er=u(),A=s("p"),wi=r("Take a look at the DistilBert "),ut=s("a"),vi=r("model card"),gi=r(" for a good example of the type of information a model card should include. 
For more details about other options you can control in the "),No=s("code"),bi=r("README.md"),$i=r(" file such as a model\u2019s carbon footprint or widget examples, refer to the documentation "),ht=s("a"),ki=r("here"),Ei=r("."),this.h()},l(e){const a=cf('[data-svelte="svelte-1phssyn"]',document.head);g=i(a,"META",{name:!0,content:!0}),a.forEach(t),j=h(e),v=i(e,"H1",{class:!0});var pt=n(v);$=i(pt,"A",{id:!0,class:!0,href:!0});var Ci=n($);M=i(Ci,"SPAN",{});var Si=n(M);d(E.$$.fragment,Si),Si.forEach(t),Ci.forEach(t),H=h(pt),W=i(pt,"SPAN",{});var qi=n(W);xr=l(qi,"Share a model"),qi.forEach(t),pt.forEach(t),zo=h(e),mt=i(e,"P",{});var Di=n(mt);Or=l(Di,"The last two tutorials showed how you can fine-tune a model with PyTorch, Keras, and \u{1F917} Accelerate for distributed setups. The next step is to share your model with the community! At Hugging Face, we believe in openly sharing knowledge and resources to democratize artificial intelligence for everyone. We encourage you to consider sharing your model with the community to help others save time and resources."),Di.forEach(t),Lo=h(e),J=i(e,"P",{});var or=n(J);Ir=l(or,"In this tutorial, you will learn two methods for sharing a trained or fine-tuned model on the "),Te=i(or,"A",{href:!0,rel:!0});var xi=n(Te);Mr=l(xi,"Model Hub"),xi.forEach(t),Nr=l(or,":"),or.forEach(t),Ro=h(e),K=i(e,"UL",{});var ar=n(K);Yt=i(ar,"LI",{});var Oi=n(Yt);zr=l(Oi,"Programmatically push your files to the Hub."),Oi.forEach(t),Lr=h(ar),Vt=i(ar,"LI",{});var Ii=n(Vt);Rr=l(Ii,"Drag-and-drop your files to the Hub with the web interface."),Ii.forEach(t),ar.forEach(t),Bo=h(e),T=i(e,"IFRAME",{width:!0,height:!0,src:!0,title:!0,frameborder:!0,allow:!0}),n(T).forEach(t),Uo=h(e),d(X.$$.fragment,e),Go=h(e),N=i(e,"H2",{class:!0});var rr=n(N);Z=i(rr,"A",{id:!0,class:!0,href:!0});var Mi=n(Z);Wt=i(Mi,"SPAN",{});var Ni=n(Wt);d(Pe.$$.fragment,Ni),Ni.forEach(t),Mi.forEach(t),Br=h(rr),Jt=i(rr,"SPAN",{});var zi=n(Jt);Ur=l(zi,"Repository features"),zi.forEach(t),rr.forEach(t),Yo=h(e),dt=i(e,"P",{});var Li=n(dt);Gr=l(Li,"Each repository on the Model Hub behaves like a typical GitHub repository. Our repositories offer versioning, commit history, and the ability to visualize differences."),Li.forEach(t),Vo=h(e),F=i(e,"P",{});var Mt=n(F);Yr=l(Mt,"The Model Hub\u2019s built-in versioning is based on git and "),Ae=i(Mt,"A",{href:!0,rel:!0});var Ri=n(Ae);Vr=l(Ri,"git-lfs"),Ri.forEach(t),Wr=l(Mt,". In other words, you can treat one model as one repository, enabling greater access control and scalability. 
Version control allows "),Kt=i(Mt,"EM",{});var Bi=n(Kt);Jr=l(Bi,"revisions"),Bi.forEach(t),Kr=l(Mt,", a method for pinning a specific version of a model with a commit hash, tag or branch."),Mt.forEach(t),Wo=h(e),Q=i(e,"P",{});var lr=n(Q);Xr=l(lr,"As a result, you can load a specific model version with the "),Xt=i(lr,"CODE",{});var Ui=n(Xt);Zr=l(Ui,"revision"),Ui.forEach(t),Qr=l(lr," parameter:"),lr.forEach(t),Jo=h(e),d(je.$$.fragment,e),Ko=h(e),ct=i(e,"P",{});var Gi=n(ct);el=l(Gi,"Files are also easily edited in a repository, and you can view the commit history as well as the difference:"),Gi.forEach(t),Xo=h(e),_t=i(e,"P",{});var Yi=n(_t);yt=i(Yi,"IMG",{src:!0,alt:!0}),Yi.forEach(t),Zo=h(e),z=i(e,"H2",{class:!0});var sr=n(z);ee=i(sr,"A",{id:!0,class:!0,href:!0});var Vi=n(ee);Zt=i(Vi,"SPAN",{});var Wi=n(Zt);d(He.$$.fragment,Wi),Wi.forEach(t),Vi.forEach(t),tl=h(sr),Qt=i(sr,"SPAN",{});var Ji=n(Qt);ol=l(Ji,"Setup"),Ji.forEach(t),sr.forEach(t),Qo=h(e),te=i(e,"P",{});var ir=n(te);al=l(ir,"Before sharing a model to the Hub, you will need your Hugging Face credentials. If you have access to a terminal, run the following command in the virtual environment where \u{1F917} Transformers is installed. This will store your access token in your Hugging Face cache folder ("),eo=i(ir,"CODE",{});var Ki=n(eo);rl=l(Ki,"~/.cache/"),Ki.forEach(t),ll=l(ir," by default):"),ir.forEach(t),ea=h(e),d(Fe.$$.fragment,e),ta=h(e),oe=i(e,"P",{});var nr=n(oe);sl=l(nr,"If you are using a notebook like Jupyter or Colaboratory, make sure you have the "),Ce=i(nr,"A",{href:!0,rel:!0});var Xi=n(Ce);to=i(Xi,"CODE",{});var Zi=n(to);il=l(Zi,"huggingface_hub"),Zi.forEach(t),Xi.forEach(t),nl=l(nr," library installed. This library allows you to programmatically interact with the Hub."),nr.forEach(t),oa=h(e),d(Se.$$.fragment,e),aa=h(e),C=i(e,"P",{});var Nt=n(C);fl=l(Nt,"Then use "),oo=i(Nt,"CODE",{});var Qi=n(oo);ul=l(Qi,"notebook_login"),Qi.forEach(t),hl=l(Nt," to sign-in to the Hub, and follow the link "),qe=i(Nt,"A",{href:!0,rel:!0});var en=n(qe);pl=l(en,"here"),en.forEach(t),ml=l(Nt," to generate a token to login with:"),Nt.forEach(t),ra=h(e),d(De.$$.fragment,e),la=h(e),L=i(e,"H2",{class:!0});var fr=n(L);ae=i(fr,"A",{id:!0,class:!0,href:!0});var tn=n(ae);ao=i(tn,"SPAN",{});var on=n(ao);d(xe.$$.fragment,on),on.forEach(t),tn.forEach(t),dl=h(fr),ro=i(fr,"SPAN",{});var an=n(ro);cl=l(an,"Convert a model for all frameworks"),an.forEach(t),fr.forEach(t),sa=h(e),wt=i(e,"P",{});var rn=n(wt);_l=l(rn,"To ensure your model can be used by someone working with a different framework, we recommend you convert and upload your model with both PyTorch and TensorFlow checkpoints. While users are still able to load your model from a different framework if you skip this step, it will be slower because \u{1F917} Transformers will need to convert the checkpoint on-the-fly."),rn.forEach(t),ia=h(e),re=i(e,"P",{});var ur=n(re);yl=l(ur,"Converting a checkpoint for another framework is easy. Make sure you have PyTorch and TensorFlow installed (see "),vt=i(ur,"A",{href:!0});var ln=n(vt);wl=l(ln,"here"),ln.forEach(t),vl=l(ur," for installation instructions), and then find the specific model for your task in the other framework."),ur.forEach(t),na=h(e),le=i(e,"P",{});var hr=n(le);gl=l(hr,"For example, suppose you trained DistilBert for sequence classification in PyTorch and want to convert it to it\u2019s TensorFlow equivalent. 
Load the TensorFlow equivalent of your model for your task, and specify "),lo=i(hr,"CODE",{});var sn=n(lo);bl=l(sn,"from_pt=True"),sn.forEach(t),$l=l(hr," so \u{1F917} Transformers will convert the PyTorch checkpoint to a TensorFlow checkpoint:"),hr.forEach(t),fa=h(e),d(Oe.$$.fragment,e),ua=h(e),gt=i(e,"P",{});var nn=n(gt);kl=l(nn,"Then save your new TensorFlow model with it\u2019s new checkpoint:"),nn.forEach(t),ha=h(e),d(Ie.$$.fragment,e),pa=h(e),se=i(e,"P",{});var pr=n(se);El=l(pr,"Similarly, specify "),so=i(pr,"CODE",{});var fn=n(so);Tl=l(fn,"from_tf=True"),fn.forEach(t),Pl=l(pr," to convert a checkpoint from TensorFlow to PyTorch:"),pr.forEach(t),ma=h(e),d(Me.$$.fragment,e),da=h(e),bt=i(e,"P",{});var un=n(bt);Al=l(un,"If a model is available in Flax, you can also convert a checkpoint from PyTorch to Flax:"),un.forEach(t),ca=h(e),d(Ne.$$.fragment,e),_a=h(e),R=i(e,"H2",{class:!0});var mr=n(R);ie=i(mr,"A",{id:!0,class:!0,href:!0});var hn=n(ie);io=i(hn,"SPAN",{});var pn=n(io);d(ze.$$.fragment,pn),pn.forEach(t),hn.forEach(t),jl=h(mr),$t=i(mr,"SPAN",{});var Ti=n($t);Hl=l(Ti,"Push a model with "),no=i(Ti,"CODE",{});var mn=n(no);Fl=l(mn,"Trainer"),mn.forEach(t),Ti.forEach(t),mr.forEach(t),ya=h(e),d(Le.$$.fragment,e),wa=h(e),k=i(e,"P",{});var I=n(k);Cl=l(I,"Sharing a model to the Hub is as simple as adding an extra parameter or callback. Remember from the "),kt=i(I,"A",{href:!0});var dn=n(kt);Sl=l(dn,"fine-tuning tutorial"),dn.forEach(t),ql=l(I,", the "),Et=i(I,"A",{href:!0});var cn=n(Et);Dl=l(cn,"TrainingArguments"),cn.forEach(t),xl=l(I," class is where you specify hyperparameters and additional training options. One of these training options includes the ability to push a model directly to the Hub. Set "),fo=i(I,"CODE",{});var _n=n(fo);Ol=l(_n,"push_to_hub=True"),_n.forEach(t),Il=l(I," in your "),Tt=i(I,"A",{href:!0});var yn=n(Tt);Ml=l(yn,"TrainingArguments"),yn.forEach(t),Nl=l(I,":"),I.forEach(t),va=h(e),d(Re.$$.fragment,e),ga=h(e),ne=i(e,"P",{});var dr=n(ne);zl=l(dr,"Pass your training arguments as usual to "),Pt=i(dr,"A",{href:!0});var wn=n(Pt);Ll=l(wn,"Trainer"),wn.forEach(t),Rl=l(dr,":"),dr.forEach(t),ba=h(e),d(Be.$$.fragment,e),$a=h(e),S=i(e,"P",{});var zt=n(S);Bl=l(zt,"After you fine-tune your model, call "),At=i(zt,"A",{href:!0});var vn=n(At);Ul=l(vn,"push_to_hub()"),vn.forEach(t),Gl=l(zt," on "),jt=i(zt,"A",{href:!0});var gn=n(jt);Yl=l(gn,"Trainer"),gn.forEach(t),Vl=l(zt," to push the trained model to the Hub. \u{1F917} Transformers will even automatically add training hyperparameters, training results and framework versions to your model card!"),zt.forEach(t),ka=h(e),d(Ue.$$.fragment,e),Ea=h(e),B=i(e,"H2",{class:!0});var cr=n(B);fe=i(cr,"A",{id:!0,class:!0,href:!0});var bn=n(fe);uo=i(bn,"SPAN",{});var $n=n(uo);d(Ge.$$.fragment,$n),$n.forEach(t),bn.forEach(t),Wl=h(cr),Ht=i(cr,"SPAN",{});var Pi=n(Ht);Jl=l(Pi,"Push a model with "),ho=i(Pi,"CODE",{});var kn=n(ho);Kl=l(kn,"PushToHubCallback"),kn.forEach(t),Pi.forEach(t),cr.forEach(t),Ta=h(e),q=i(e,"P",{});var Lt=n(q);Xl=l(Lt,"TensorFlow users can enable the same functionality with "),Ft=i(Lt,"A",{href:!0});var En=n(Ft);Zl=l(En,"PushToHubCallback"),En.forEach(t),Ql=l(Lt,". 
In the "),Ct=i(Lt,"A",{href:!0});var Tn=n(Ct);es=l(Tn,"PushToHubCallback"),Tn.forEach(t),ts=l(Lt," function, add:"),Lt.forEach(t),Pa=h(e),D=i(e,"UL",{});var Rt=n(D);po=i(Rt,"LI",{});var Pn=n(po);os=l(Pn,"An output directory for your model."),Pn.forEach(t),as=h(Rt),mo=i(Rt,"LI",{});var An=n(mo);rs=l(An,"A tokenizer."),An.forEach(t),ls=h(Rt),Ye=i(Rt,"LI",{});var _r=n(Ye);ss=l(_r,"The "),co=i(_r,"CODE",{});var jn=n(co);is=l(jn,"hub_model_id"),jn.forEach(t),ns=l(_r,", which is your Hub username and model name."),_r.forEach(t),Rt.forEach(t),Aa=h(e),d(Ve.$$.fragment,e),ja=h(e),ue=i(e,"P",{});var yr=n(ue);fs=l(yr,"Add the callback to "),We=i(yr,"A",{href:!0,rel:!0});var Hn=n(We);_o=i(Hn,"CODE",{});var Fn=n(_o);us=l(Fn,"fit"),Fn.forEach(t),Hn.forEach(t),hs=l(yr,", and \u{1F917} Transformers will push the trained model to the Hub:"),yr.forEach(t),Ha=h(e),d(Je.$$.fragment,e),Fa=h(e),U=i(e,"H2",{class:!0});var wr=n(U);he=i(wr,"A",{id:!0,class:!0,href:!0});var Cn=n(he);yo=i(Cn,"SPAN",{});var Sn=n(yo);d(Ke.$$.fragment,Sn),Sn.forEach(t),Cn.forEach(t),ps=h(wr),Xe=i(wr,"SPAN",{});var vr=n(Xe);ms=l(vr,"Use the "),wo=i(vr,"CODE",{});var qn=n(wo);ds=l(qn,"push_to_hub"),qn.forEach(t),cs=l(vr," function"),vr.forEach(t),wr.forEach(t),Ca=h(e),pe=i(e,"P",{});var gr=n(pe);_s=l(gr,"You can also call "),vo=i(gr,"CODE",{});var Dn=n(vo);ys=l(Dn,"push_to_hub"),Dn.forEach(t),ws=l(gr," directly on your model to upload it to the Hub."),gr.forEach(t),Sa=h(e),me=i(e,"P",{});var br=n(me);vs=l(br,"Specify your model name in "),go=i(br,"CODE",{});var xn=n(go);gs=l(xn,"push_to_hub"),xn.forEach(t),bs=l(br,":"),br.forEach(t),qa=h(e),d(Ze.$$.fragment,e),Da=h(e),x=i(e,"P",{});var Bt=n(x);$s=l(Bt,"This creates a repository under your username with the model name "),bo=i(Bt,"CODE",{});var On=n(bo);ks=l(On,"my-awesome-model"),On.forEach(t),Es=l(Bt,". Users can now load your model with the "),$o=i(Bt,"CODE",{});var In=n($o);Ts=l(In,"from_pretrained"),In.forEach(t),Ps=l(Bt," function:"),Bt.forEach(t),xa=h(e),d(Qe.$$.fragment,e),Oa=h(e),de=i(e,"P",{});var $r=n(de);As=l($r,"If you belong to an organization and want to push your model under the organization name instead, add the "),ko=i($r,"CODE",{});var Mn=n(ko);js=l(Mn,"organization"),Mn.forEach(t),Hs=l($r," parameter:"),$r.forEach(t),Ia=h(e),d(et.$$.fragment,e),Ma=h(e),ce=i(e,"P",{});var kr=n(ce);Fs=l(kr,"The "),Eo=i(kr,"CODE",{});var Nn=n(Eo);Cs=l(Nn,"push_to_hub"),Nn.forEach(t),Ss=l(kr," function can also be used to add other files to a model repository. For example, add a tokenizer to a model repository:"),kr.forEach(t),Na=h(e),d(tt.$$.fragment,e),za=h(e),St=i(e,"P",{});var zn=n(St);qs=l(zn,"Or perhaps you\u2019d like to add the TensorFlow version of your fine-tuned PyTorch model:"),zn.forEach(t),La=h(e),d(ot.$$.fragment,e),Ra=h(e),_e=i(e,"P",{});var Er=n(_e);Ds=l(Er,"Now when you navigate to the your Hugging Face profile, you should see your newly created model repository. 
Clicking on the "),To=i(Er,"STRONG",{});var Ln=n(To);xs=l(Ln,"Files"),Ln.forEach(t),Os=l(Er," tab will display all the files you\u2019ve uploaded to the repository."),Er.forEach(t),Ba=h(e),ye=i(e,"P",{});var Tr=n(ye);Is=l(Tr,"For more details on how to create and upload files to a repository, refer to the Hub documentation "),at=i(Tr,"A",{href:!0,rel:!0});var Rn=n(at);Ms=l(Rn,"here"),Rn.forEach(t),Ns=l(Tr,"."),Tr.forEach(t),Ua=h(e),G=i(e,"H2",{class:!0});var Pr=n(G);we=i(Pr,"A",{id:!0,class:!0,href:!0});var Bn=n(we);Po=i(Bn,"SPAN",{});var Un=n(Po);d(rt.$$.fragment,Un),Un.forEach(t),Bn.forEach(t),zs=h(Pr),Ao=i(Pr,"SPAN",{});var Gn=n(Ao);Ls=l(Gn,"Upload with the web interface"),Gn.forEach(t),Pr.forEach(t),Ga=h(e),ve=i(e,"P",{});var Ar=n(ve);Rs=l(Ar,"Users who prefer a no-code approach are able to upload a model through the Hub\u2019s web interface. Visit "),lt=i(Ar,"A",{href:!0,rel:!0});var Yn=n(lt);Bs=l(Yn,"huggingface.co/new"),Yn.forEach(t),Us=l(Ar," to create a new repository:"),Ar.forEach(t),Ya=h(e),qt=i(e,"P",{});var Vn=n(qt);Dt=i(Vn,"IMG",{src:!0,alt:!0}),Vn.forEach(t),Va=h(e),xt=i(e,"P",{});var Wn=n(xt);Gs=l(Wn,"From here, add some information about your model:"),Wn.forEach(t),Wa=h(e),P=i(e,"UL",{});var ke=n(P);st=i(ke,"LI",{});var jr=n(st);Ys=l(jr,"Select the "),jo=i(jr,"STRONG",{});var Jn=n(jo);Vs=l(Jn,"owner"),Jn.forEach(t),Ws=l(jr," of the repository. This can be yourself or any of the organizations you belong to."),jr.forEach(t),Js=h(ke),Ho=i(ke,"LI",{});var Kn=n(Ho);Ks=l(Kn,"Pick a name for your model, which will also be the repository name."),Kn.forEach(t),Xs=h(ke),Fo=i(ke,"LI",{});var Xn=n(Fo);Zs=l(Xn,"Choose whether your model is public or private."),Xn.forEach(t),Qs=h(ke),Co=i(ke,"LI",{});var Zn=n(Co);ei=l(Zn,"Specify the license usage for your model."),Zn.forEach(t),ke.forEach(t),Ja=h(e),O=i(e,"P",{});var Ut=n(O);ti=l(Ut,"Now click on the "),So=i(Ut,"STRONG",{});var Qn=n(So);oi=l(Qn,"Files"),Qn.forEach(t),ai=l(Ut," tab and click on the "),qo=i(Ut,"STRONG",{});var ef=n(qo);ri=l(ef,"Add file"),ef.forEach(t),li=l(Ut," button to upload a new file to your repository. Then drag-and-drop a file to upload and add a commit message."),Ut.forEach(t),Ka=h(e),Ot=i(e,"P",{});var tf=n(Ot);It=i(tf,"IMG",{src:!0,alt:!0}),tf.forEach(t),Xa=h(e),Y=i(e,"H2",{class:!0});var Hr=n(Y);ge=i(Hr,"A",{id:!0,class:!0,href:!0});var of=n(ge);Do=i(of,"SPAN",{});var af=n(Do);d(it.$$.fragment,af),af.forEach(t),of.forEach(t),si=h(Hr),xo=i(Hr,"SPAN",{});var rf=n(xo);ii=l(rf,"Add a model card"),rf.forEach(t),Hr.forEach(t),Za=h(e),be=i(e,"P",{});var Fr=n(be);ni=l(Fr,"To make sure users understand your model\u2019s capabilities, limitations, potential biases and ethical considerations, please add a model card to your repository. The model card is defined in the "),Oo=i(Fr,"CODE",{});var lf=n(Oo);fi=l(lf,"README.md"),lf.forEach(t),ui=l(Fr," file. 
You can add a model card by:"),Fr.forEach(t),Qa=h(e),$e=i(e,"UL",{});var Cr=n($e);nt=i(Cr,"LI",{});var Sr=n(nt);hi=l(Sr,"Manually creating and uploading a "),Io=i(Sr,"CODE",{});var sf=n(Io);pi=l(sf,"README.md"),sf.forEach(t),mi=l(Sr," file."),Sr.forEach(t),di=h(Cr),ft=i(Cr,"LI",{});var qr=n(ft);ci=l(qr,"Clicking on the "),Mo=i(qr,"STRONG",{});var nf=n(Mo);_i=l(nf,"Edit model card"),nf.forEach(t),yi=l(qr," button in your model repository."),qr.forEach(t),Cr.forEach(t),er=h(e),A=i(e,"P",{});var Ee=n(A);wi=l(Ee,"Take a look at the DistilBert "),ut=i(Ee,"A",{href:!0,rel:!0});var ff=n(ut);vi=l(ff,"model card"),ff.forEach(t),gi=l(Ee," for a good example of the type of information a model card should include. For more details about other options you can control in the "),No=i(Ee,"CODE",{});var uf=n(No);bi=l(uf,"README.md"),uf.forEach(t),$i=l(Ee," file such as a model\u2019s carbon footprint or widget examples, refer to the documentation "),ht=i(Ee,"A",{href:!0,rel:!0});var hf=n(ht);ki=l(hf,"here"),hf.forEach(t),Ei=l(Ee,"."),Ee.forEach(t),this.h()},h(){p(g,"name","hf:doc:metadata"),p(g,"content",JSON.stringify(gf)),p($,"id","share-a-model"),p($,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p($,"href","#share-a-model"),p(v,"class","relative group"),p(Te,"href","https://huggingface.co/models"),p(Te,"rel","nofollow"),p(T,"width","560"),p(T,"height","315"),Dr(T.src,Ai="https://www.youtube.com/embed/XvSGPZFEjDY")||p(T,"src",Ai),p(T,"title","YouTube video player"),p(T,"frameborder","0"),p(T,"allow",`accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture`),T.allowFullscreen=!0,p(Z,"id","repository-features"),p(Z,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(Z,"href","#repository-features"),p(N,"class","relative group"),p(Ae,"href","https://git-lfs.github.com/"),p(Ae,"rel","nofollow"),Dr(yt.src,ji="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/vis_diff.png")||p(yt,"src",ji),p(yt,"alt","vis_diff"),p(ee,"id","setup"),p(ee,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(ee,"href","#setup"),p(z,"class","relative group"),p(Ce,"href","https://huggingface.co/docs/hub/adding-a-library"),p(Ce,"rel","nofollow"),p(qe,"href","https://huggingface.co/settings/token"),p(qe,"rel","nofollow"),p(ae,"id","convert-a-model-for-all-frameworks"),p(ae,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(ae,"href","#convert-a-model-for-all-frameworks"),p(L,"class","relative group"),p(vt,"href","installation"),p(ie,"id","push-a-model-with-trainer"),p(ie,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(ie,"href","#push-a-model-with-trainer"),p(R,"class","relative 
group"),p(kt,"href","training"),p(Et,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.TrainingArguments"),p(Tt,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.TrainingArguments"),p(Pt,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer"),p(At,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer.push_to_hub"),p(jt,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer"),p(fe,"id","push-a-model-with-pushtohubcallback"),p(fe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(fe,"href","#push-a-model-with-pushtohubcallback"),p(B,"class","relative group"),p(Ft,"href","/docs/transformers/pr_16143/en/main_classes/keras_callbacks#transformers.PushToHubCallback"),p(Ct,"href","/docs/transformers/pr_16143/en/main_classes/keras_callbacks#transformers.PushToHubCallback"),p(We,"href","https://keras.io/api/models/model_training_apis/"),p(We,"rel","nofollow"),p(he,"id","use-the-pushtohub-function"),p(he,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(he,"href","#use-the-pushtohub-function"),p(U,"class","relative group"),p(at,"href","https://huggingface.co/docs/hub/how-to-upstream"),p(at,"rel","nofollow"),p(we,"id","upload-with-the-web-interface"),p(we,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(we,"href","#upload-with-the-web-interface"),p(G,"class","relative group"),p(lt,"href","https://huggingface.co/new"),p(lt,"rel","nofollow"),Dr(Dt.src,Hi="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/new_model_repo.png")||p(Dt,"src",Hi),p(Dt,"alt","new_model_repo"),Dr(It.src,Fi="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/upload_file.png")||p(It,"src",Fi),p(It,"alt","upload_file"),p(ge,"id","add-a-model-card"),p(ge,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(ge,"href","#add-a-model-card"),p(Y,"class","relative 
group"),p(ut,"href","https://huggingface.co/distilbert-base-uncased"),p(ut,"rel","nofollow"),p(ht,"href","https://huggingface.co/docs/hub/model-repos"),p(ht,"rel","nofollow")},m(e,a){o(document.head,g),f(e,j,a),f(e,v,a),o(v,$),o($,M),c(E,M,null),o(v,H),o(v,W),o(W,xr),f(e,zo,a),f(e,mt,a),o(mt,Or),f(e,Lo,a),f(e,J,a),o(J,Ir),o(J,Te),o(Te,Mr),o(J,Nr),f(e,Ro,a),f(e,K,a),o(K,Yt),o(Yt,zr),o(K,Lr),o(K,Vt),o(Vt,Rr),f(e,Bo,a),f(e,T,a),f(e,Uo,a),c(X,e,a),f(e,Go,a),f(e,N,a),o(N,Z),o(Z,Wt),c(Pe,Wt,null),o(N,Br),o(N,Jt),o(Jt,Ur),f(e,Yo,a),f(e,dt,a),o(dt,Gr),f(e,Vo,a),f(e,F,a),o(F,Yr),o(F,Ae),o(Ae,Vr),o(F,Wr),o(F,Kt),o(Kt,Jr),o(F,Kr),f(e,Wo,a),f(e,Q,a),o(Q,Xr),o(Q,Xt),o(Xt,Zr),o(Q,Qr),f(e,Jo,a),c(je,e,a),f(e,Ko,a),f(e,ct,a),o(ct,el),f(e,Xo,a),f(e,_t,a),o(_t,yt),f(e,Zo,a),f(e,z,a),o(z,ee),o(ee,Zt),c(He,Zt,null),o(z,tl),o(z,Qt),o(Qt,ol),f(e,Qo,a),f(e,te,a),o(te,al),o(te,eo),o(eo,rl),o(te,ll),f(e,ea,a),c(Fe,e,a),f(e,ta,a),f(e,oe,a),o(oe,sl),o(oe,Ce),o(Ce,to),o(to,il),o(oe,nl),f(e,oa,a),c(Se,e,a),f(e,aa,a),f(e,C,a),o(C,fl),o(C,oo),o(oo,ul),o(C,hl),o(C,qe),o(qe,pl),o(C,ml),f(e,ra,a),c(De,e,a),f(e,la,a),f(e,L,a),o(L,ae),o(ae,ao),c(xe,ao,null),o(L,dl),o(L,ro),o(ro,cl),f(e,sa,a),f(e,wt,a),o(wt,_l),f(e,ia,a),f(e,re,a),o(re,yl),o(re,vt),o(vt,wl),o(re,vl),f(e,na,a),f(e,le,a),o(le,gl),o(le,lo),o(lo,bl),o(le,$l),f(e,fa,a),c(Oe,e,a),f(e,ua,a),f(e,gt,a),o(gt,kl),f(e,ha,a),c(Ie,e,a),f(e,pa,a),f(e,se,a),o(se,El),o(se,so),o(so,Tl),o(se,Pl),f(e,ma,a),c(Me,e,a),f(e,da,a),f(e,bt,a),o(bt,Al),f(e,ca,a),c(Ne,e,a),f(e,_a,a),f(e,R,a),o(R,ie),o(ie,io),c(ze,io,null),o(R,jl),o(R,$t),o($t,Hl),o($t,no),o(no,Fl),f(e,ya,a),c(Le,e,a),f(e,wa,a),f(e,k,a),o(k,Cl),o(k,kt),o(kt,Sl),o(k,ql),o(k,Et),o(Et,Dl),o(k,xl),o(k,fo),o(fo,Ol),o(k,Il),o(k,Tt),o(Tt,Ml),o(k,Nl),f(e,va,a),c(Re,e,a),f(e,ga,a),f(e,ne,a),o(ne,zl),o(ne,Pt),o(Pt,Ll),o(ne,Rl),f(e,ba,a),c(Be,e,a),f(e,$a,a),f(e,S,a),o(S,Bl),o(S,At),o(At,Ul),o(S,Gl),o(S,jt),o(jt,Yl),o(S,Vl),f(e,ka,a),c(Ue,e,a),f(e,Ea,a),f(e,B,a),o(B,fe),o(fe,uo),c(Ge,uo,null),o(B,Wl),o(B,Ht),o(Ht,Jl),o(Ht,ho),o(ho,Kl),f(e,Ta,a),f(e,q,a),o(q,Xl),o(q,Ft),o(Ft,Zl),o(q,Ql),o(q,Ct),o(Ct,es),o(q,ts),f(e,Pa,a),f(e,D,a),o(D,po),o(po,os),o(D,as),o(D,mo),o(mo,rs),o(D,ls),o(D,Ye),o(Ye,ss),o(Ye,co),o(co,is),o(Ye,ns),f(e,Aa,a),c(Ve,e,a),f(e,ja,a),f(e,ue,a),o(ue,fs),o(ue,We),o(We,_o),o(_o,us),o(ue,hs),f(e,Ha,a),c(Je,e,a),f(e,Fa,a),f(e,U,a),o(U,he),o(he,yo),c(Ke,yo,null),o(U,ps),o(U,Xe),o(Xe,ms),o(Xe,wo),o(wo,ds),o(Xe,cs),f(e,Ca,a),f(e,pe,a),o(pe,_s),o(pe,vo),o(vo,ys),o(pe,ws),f(e,Sa,a),f(e,me,a),o(me,vs),o(me,go),o(go,gs),o(me,bs),f(e,qa,a),c(Ze,e,a),f(e,Da,a),f(e,x,a),o(x,$s),o(x,bo),o(bo,ks),o(x,Es),o(x,$o),o($o,Ts),o(x,Ps),f(e,xa,a),c(Qe,e,a),f(e,Oa,a),f(e,de,a),o(de,As),o(de,ko),o(ko,js),o(de,Hs),f(e,Ia,a),c(et,e,a),f(e,Ma,a),f(e,ce,a),o(ce,Fs),o(ce,Eo),o(Eo,Cs),o(ce,Ss),f(e,Na,a),c(tt,e,a),f(e,za,a),f(e,St,a),o(St,qs),f(e,La,a),c(ot,e,a),f(e,Ra,a),f(e,_e,a),o(_e,Ds),o(_e,To),o(To,xs),o(_e,Os),f(e,Ba,a),f(e,ye,a),o(ye,Is),o(ye,at),o(at,Ms),o(ye,Ns),f(e,Ua,a),f(e,G,a),o(G,we),o(we,Po),c(rt,Po,null),o(G,zs),o(G,Ao),o(Ao,Ls),f(e,Ga,a),f(e,ve,a),o(ve,Rs),o(ve,lt),o(lt,Bs),o(ve,Us),f(e,Ya,a),f(e,qt,a),o(qt,Dt),f(e,Va,a),f(e,xt,a),o(xt,Gs),f(e,Wa,a),f(e,P,a),o(P,st),o(st,Ys),o(st,jo),o(jo,Vs),o(st,Ws),o(P,Js),o(P,Ho),o(Ho,Ks),o(P,Xs),o(P,Fo),o(Fo,Zs),o(P,Qs),o(P,Co),o(Co,ei),f(e,Ja,a),f(e,O,a),o(O,ti),o(O,So),o(So,oi),o(O,ai),o(O,qo),o(qo,ri),o(O,li),f(e,Ka,a),f(e,Ot,a),o(Ot,It),f(e,Xa,a),f(e,Y,a),o(Y,ge),o(ge,Do),c(it,Do,null),o(Y,si),o(Y,xo),o(xo,ii),f(e,Za,a),f(e,be,a),o(be,ni),o(be,Oo),o(Oo,fi),o(be,ui),f(e,Qa,a),f(e,$e,a),o($e,n
t),o(nt,hi),o(nt,Io),o(Io,pi),o(nt,mi),o($e,di),o($e,ft),o(ft,ci),o(ft,Mo),o(Mo,_i),o(ft,yi),f(e,er,a),f(e,A,a),o(A,wi),o(A,ut),o(ut,vi),o(A,gi),o(A,No),o(No,bi),o(A,$i),o(A,ht),o(ht,ki),o(A,Ei),tr=!0},p(e,[a]){const pt={};a&2&&(pt.$$scope={dirty:a,ctx:e}),X.$set(pt)},i(e){tr||(_(E.$$.fragment,e),_(X.$$.fragment,e),_(Pe.$$.fragment,e),_(je.$$.fragment,e),_(He.$$.fragment,e),_(Fe.$$.fragment,e),_(Se.$$.fragment,e),_(De.$$.fragment,e),_(xe.$$.fragment,e),_(Oe.$$.fragment,e),_(Ie.$$.fragment,e),_(Me.$$.fragment,e),_(Ne.$$.fragment,e),_(ze.$$.fragment,e),_(Le.$$.fragment,e),_(Re.$$.fragment,e),_(Be.$$.fragment,e),_(Ue.$$.fragment,e),_(Ge.$$.fragment,e),_(Ve.$$.fragment,e),_(Je.$$.fragment,e),_(Ke.$$.fragment,e),_(Ze.$$.fragment,e),_(Qe.$$.fragment,e),_(et.$$.fragment,e),_(tt.$$.fragment,e),_(ot.$$.fragment,e),_(rt.$$.fragment,e),_(it.$$.fragment,e),tr=!0)},o(e){y(E.$$.fragment,e),y(X.$$.fragment,e),y(Pe.$$.fragment,e),y(je.$$.fragment,e),y(He.$$.fragment,e),y(Fe.$$.fragment,e),y(Se.$$.fragment,e),y(De.$$.fragment,e),y(xe.$$.fragment,e),y(Oe.$$.fragment,e),y(Ie.$$.fragment,e),y(Me.$$.fragment,e),y(Ne.$$.fragment,e),y(ze.$$.fragment,e),y(Le.$$.fragment,e),y(Re.$$.fragment,e),y(Be.$$.fragment,e),y(Ue.$$.fragment,e),y(Ge.$$.fragment,e),y(Ve.$$.fragment,e),y(Je.$$.fragment,e),y(Ke.$$.fragment,e),y(Ze.$$.fragment,e),y(Qe.$$.fragment,e),y(et.$$.fragment,e),y(tt.$$.fragment,e),y(ot.$$.fragment,e),y(rt.$$.fragment,e),y(it.$$.fragment,e),tr=!1},d(e){t(g),e&&t(j),e&&t(v),w(E),e&&t(zo),e&&t(mt),e&&t(Lo),e&&t(J),e&&t(Ro),e&&t(K),e&&t(Bo),e&&t(T),e&&t(Uo),w(X,e),e&&t(Go),e&&t(N),w(Pe),e&&t(Yo),e&&t(dt),e&&t(Vo),e&&t(F),e&&t(Wo),e&&t(Q),e&&t(Jo),w(je,e),e&&t(Ko),e&&t(ct),e&&t(Xo),e&&t(_t),e&&t(Zo),e&&t(z),w(He),e&&t(Qo),e&&t(te),e&&t(ea),w(Fe,e),e&&t(ta),e&&t(oe),e&&t(oa),w(Se,e),e&&t(aa),e&&t(C),e&&t(ra),w(De,e),e&&t(la),e&&t(L),w(xe),e&&t(sa),e&&t(wt),e&&t(ia),e&&t(re),e&&t(na),e&&t(le),e&&t(fa),w(Oe,e),e&&t(ua),e&&t(gt),e&&t(ha),w(Ie,e),e&&t(pa),e&&t(se),e&&t(ma),w(Me,e),e&&t(da),e&&t(bt),e&&t(ca),w(Ne,e),e&&t(_a),e&&t(R),w(ze),e&&t(ya),w(Le,e),e&&t(wa),e&&t(k),e&&t(va),w(Re,e),e&&t(ga),e&&t(ne),e&&t(ba),w(Be,e),e&&t($a),e&&t(S),e&&t(ka),w(Ue,e),e&&t(Ea),e&&t(B),w(Ge),e&&t(Ta),e&&t(q),e&&t(Pa),e&&t(D),e&&t(Aa),w(Ve,e),e&&t(ja),e&&t(ue),e&&t(Ha),w(Je,e),e&&t(Fa),e&&t(U),w(Ke),e&&t(Ca),e&&t(pe),e&&t(Sa),e&&t(me),e&&t(qa),w(Ze,e),e&&t(Da),e&&t(x),e&&t(xa),w(Qe,e),e&&t(Oa),e&&t(de),e&&t(Ia),w(et,e),e&&t(Ma),e&&t(ce),e&&t(Na),w(tt,e),e&&t(za),e&&t(St),e&&t(La),w(ot,e),e&&t(Ra),e&&t(_e),e&&t(Ba),e&&t(ye),e&&t(Ua),e&&t(G),w(rt),e&&t(Ga),e&&t(ve),e&&t(Ya),e&&t(qt),e&&t(Va),e&&t(xt),e&&t(Wa),e&&t(P),e&&t(Ja),e&&t(O),e&&t(Ka),e&&t(Ot),e&&t(Xa),e&&t(Y),w(it),e&&t(Za),e&&t(be),e&&t(Qa),e&&t($e),e&&t(er),e&&t(A)}}}const gf={local:"share-a-model",sections:[{local:"repository-features",title:"Repository features"},{local:"setup",title:"Setup"},{local:"convert-a-model-for-all-frameworks",title:"Convert a model for all frameworks"},{local:"push-a-model-with-trainer",title:"Push a model with `Trainer`"},{local:"push-a-model-with-pushtohubcallback",title:"Push a model with `PushToHubCallback`"},{local:"use-the-pushtohub-function",title:"Use the `push_to_hub` function"},{local:"upload-with-the-web-interface",title:"Upload with the web interface"},{local:"add-a-model-card",title:"Add a model card"}],title:"Share a model"};function bf(Gt,g,j){let{fw:v}=g;return Gt.$$set=$=>{"fw"in $&&j(0,v=$.fw)},[v]}class jf extends pf{constructor(g){super();mf(this,g,bf,vf,df,{fw:0})}}export{jf as default,gf as metadata};
265
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages/notebooks.mdx-229c9f2b.js
import{S as v1,i as w1,s as E1,e as a,k as s,w as Ao,t as g,M as k1,c as o,d as r,m as n,a as l,x as Do,h,b as t,N as c,F as e,g as m,y as So,L as T1,q as xo,o as Ho,B as Mo}from"../chunks/vendor-4833417e.js";import{I as Io}from"../chunks/IconCopyLink-4b81c553.js";function y1(Bi){let ie,Oo,w,E,qn,mt,Yi,Pn,Qi,di,Go,Vi,mi,$o,Ki,bi,ge,me,Ln,bt,Ji,Un,ji,pi,he,be,Fn,pt,Zi,Xn,eg,_i,No,tg,vi,pe,Bn,k,Ro,ag,og,Co,rg,lg,Yn,sg,Qn,ng,_,T,zo,_t,ig,gg,Wo,hg,cg,qo,vt,Po,Pf,ug,Lo,wt,Uo,Lf,fg,y,Fo,Et,dg,mg,Xo,bg,pg,Bo,kt,Yo,Uf,_g,Qo,Tt,Vo,Ff,vg,A,Ko,yt,wg,Eg,Jo,kg,Tg,jo,At,Zo,Xf,yg,er,Dt,tr,Bf,Ag,D,ar,St,Dg,Sg,or,xg,Hg,rr,xt,lr,Yf,Mg,sr,Ht,nr,Qf,Ig,S,ir,Mt,Og,Gg,gr,$g,Ng,hr,It,cr,Vf,Rg,ur,Ot,fr,Kf,Cg,x,dr,Gt,zg,Wg,mr,qg,Pg,br,$t,pr,Jf,Lg,_r,Nt,vr,jf,Ug,H,wr,Rt,Fg,Xg,Er,Bg,Yg,kr,Ct,Tr,Zf,Qg,yr,zt,Ar,ed,wi,ce,_e,Vn,Wt,Vg,Kn,Kg,Ei,ve,Jn,M,Dr,Jg,jg,Sr,Zg,eh,jn,th,Zn,ah,u,I,xr,qt,oh,rh,Hr,lh,sh,Mr,Pt,Ir,td,nh,Or,Lt,Gr,ad,ih,O,$r,Ut,gh,hh,Nr,ch,uh,Rr,Ft,Cr,od,fh,zr,Xt,Wr,rd,dh,G,qr,Bt,mh,bh,Pr,ph,_h,Lr,Yt,Ur,ld,vh,Fr,Qt,Xr,sd,wh,$,Br,Vt,Eh,kh,Yr,Th,yh,Qr,Kt,Vr,nd,Ah,Kr,Jt,Jr,id,Dh,N,jr,jt,Sh,xh,Zr,Hh,Mh,el,Zt,tl,gd,Ih,al,ea,ol,hd,Oh,R,rl,ta,Gh,$h,ll,Nh,Rh,sl,aa,nl,cd,Ch,il,oa,gl,ud,zh,C,hl,ra,Wh,qh,cl,Ph,Lh,ul,la,fl,fd,Uh,dl,sa,ml,dd,Fh,z,bl,na,Xh,Bh,pl,Yh,Qh,_l,ia,vl,md,Vh,wl,ga,El,bd,Kh,W,kl,ha,Jh,jh,Tl,Zh,ec,yl,ca,Al,pd,tc,Dl,ua,Sl,_d,ac,q,xl,fa,oc,rc,Hl,lc,sc,Ml,da,Il,vd,nc,Ol,ma,Gl,wd,ic,P,$l,ba,gc,hc,Nl,cc,uc,Rl,pa,Cl,Ed,fc,zl,_a,Wl,kd,dc,L,ql,va,mc,bc,Pl,pc,_c,Ll,wa,Ul,Td,vc,Fl,Ea,Xl,yd,wc,U,Bl,ka,Ec,kc,Yl,Tc,yc,Ql,Ta,Vl,Ad,Ac,Kl,ya,Jl,Dd,Dc,F,jl,Aa,Sc,xc,Zl,Hc,Mc,es,Da,ts,Sd,Ic,as,Sa,os,xd,Oc,X,rs,xa,Gc,$c,ls,Nc,Rc,ei,Cc,ti,zc,B,ss,Ha,Wc,qc,ns,Pc,Lc,is,Ma,gs,Hd,Uc,hs,Ia,cs,Md,Fc,Y,us,Oa,Xc,Bc,fs,Yc,Qc,ds,Ga,ms,Id,Vc,bs,$a,ps,Od,Kc,Q,_s,Na,Jc,jc,vs,Zc,eu,ws,Ra,Es,Gd,tu,ks,Ca,Ts,$d,ki,ue,we,ai,za,au,oi,ou,Ti,Ee,ri,V,ys,ru,lu,As,su,nu,li,iu,si,gu,b,K,Ds,Wa,hu,cu,Ss,uu,fu,xs,qa,Hs,Nd,du,Ms,Pa,Is,Rd,mu,J,Os,La,bu,pu,Gs,_u,vu,$s,Ua,Ns,Cd,wu,Rs,Fa,Cs,zd,Eu,j,zs,Xa,ku,Tu,Ws,yu,Au,qs,Ba,Ps,Wd,Du,Ls,Ya,Us,qd,Su,Z,Fs,Qa,xu,Hu,Xs,Mu,Iu,Bs,Va,Ys,Pd,Ou,Qs,Ka,Vs,Ld,Gu,ee,Ks,Ja,$u,Nu,Js,Ru,Cu,js,ja,Zs,Ud,zu,en,Za,tn,Fd,Wu,te,an,eo,qu,Pu,on,Lu,Uu,rn,to,ln,Xd,Fu,sn,ao,nn,Bd,Xu,ae,gn,oo,Bu,Yu,hn,Qu,Vu,cn,ro,un,Yd,Ku,fn,lo,dn,Qd,Ju,oe,mn,so,ju,Zu,bn,ef,tf,pn,no,_n,Vd,af,vn,io,wn,Kd,of,re,En,go,rf,lf,kn,sf,nf,Tn,ho,yn,Jd,gf,An,co,Dn,jd,yi,fe,ke,ni,uo,hf,ii,cf,Ai,Te,uf,fo,ff,df,Di,ye,gi,le,Sn,mf,bf,xn,pf,_f,hi,vf,ci,wf,mo,se,Hn,bo,Ef,kf,Ae,Tf,po,yf,Af,Df,Mn,_o,In,Zd,Sf,On,vo,Gn,em,xf,ne,$n,wo,Hf,Mf,De,If,Eo,Of,Gf,$f,Nn,ko,Rn,tm,Nf,Cn,To,zn,am,Si,de,Se,ui,yo,Rf,fi,Cf,xi,xe,zf,Wn,Wf,qf,Hi;return mt=new Io({}),bt=new Io({}),pt=new Io({}),Wt=new Io({}),za=new Io({}),uo=new Io({}),yo=new Io({}),{c(){ie=a("meta"),Oo=s(),w=a("h1"),E=a("a"),qn=a("span"),Ao(mt.$$.fragment),Yi=s(),Pn=a("span"),Qi=g("\u{1F917} Transformers Notebooks"),di=s(),Go=a("p"),Vi=g("You can find here a list of the official notebooks provided by Hugging Face."),mi=s(),$o=a("p"),Ki=g(`Also, we would like to list here interesting content created by the community. 
If you wrote some notebook(s) leveraging \u{1F917} Transformers and would like be listed here, please open a Pull Request so it can be included under the Community notebooks.`),bi=s(),ge=a("h2"),me=a("a"),Ln=a("span"),Ao(bt.$$.fragment),Ji=s(),Un=a("span"),ji=g("Hugging Face's notebooks \u{1F917}"),pi=s(),he=a("h3"),be=a("a"),Fn=a("span"),Ao(pt.$$.fragment),Zi=s(),Xn=a("span"),eg=g("Documentation notebooks"),_i=s(),No=a("p"),tg=g("You can open any page of the documentation as a notebook in colab (there is a button directly on said pages) but they are also listed here if you need to:"),vi=s(),pe=a("table"),Bn=a("thead"),k=a("tr"),Ro=a("th"),ag=g("Notebook"),og=s(),Co=a("th"),rg=g("Description"),lg=s(),Yn=a("th"),sg=s(),Qn=a("th"),ng=s(),_=a("tbody"),T=a("tr"),zo=a("td"),_t=a("a"),ig=g("Quicktour of the library"),gg=s(),Wo=a("td"),hg=g("A presentation of the various APIs in Transformers"),cg=s(),qo=a("td"),vt=a("a"),Po=a("img"),ug=s(),Lo=a("td"),wt=a("a"),Uo=a("img"),fg=s(),y=a("tr"),Fo=a("td"),Et=a("a"),dg=g("Summary of the tasks"),mg=s(),Xo=a("td"),bg=g("How to run the models of the Transformers library task by task"),pg=s(),Bo=a("td"),kt=a("a"),Yo=a("img"),_g=s(),Qo=a("td"),Tt=a("a"),Vo=a("img"),vg=s(),A=a("tr"),Ko=a("td"),yt=a("a"),wg=g("Preprocessing data"),Eg=s(),Jo=a("td"),kg=g("How to use a tokenizer to preprocess your data"),Tg=s(),jo=a("td"),At=a("a"),Zo=a("img"),yg=s(),er=a("td"),Dt=a("a"),tr=a("img"),Ag=s(),D=a("tr"),ar=a("td"),St=a("a"),Dg=g("Fine-tuning a pretrained model"),Sg=s(),or=a("td"),xg=g("How to use the Trainer to fine-tune a pretrained model"),Hg=s(),rr=a("td"),xt=a("a"),lr=a("img"),Mg=s(),sr=a("td"),Ht=a("a"),nr=a("img"),Ig=s(),S=a("tr"),ir=a("td"),Mt=a("a"),Og=g("Summary of the tokenizers"),Gg=s(),gr=a("td"),$g=g("The differences between the tokenizers algorithm"),Ng=s(),hr=a("td"),It=a("a"),cr=a("img"),Rg=s(),ur=a("td"),Ot=a("a"),fr=a("img"),Cg=s(),x=a("tr"),dr=a("td"),Gt=a("a"),zg=g("Multilingual models"),Wg=s(),mr=a("td"),qg=g("How to use the multilingual models of the library"),Pg=s(),br=a("td"),$t=a("a"),pr=a("img"),Lg=s(),_r=a("td"),Nt=a("a"),vr=a("img"),Ug=s(),H=a("tr"),wr=a("td"),Rt=a("a"),Fg=g("Fine-tuning with custom datasets"),Xg=s(),Er=a("td"),Bg=g("How to fine-tune a pretrained model on various tasks"),Yg=s(),kr=a("td"),Ct=a("a"),Tr=a("img"),Qg=s(),yr=a("td"),zt=a("a"),Ar=a("img"),wi=s(),ce=a("h3"),_e=a("a"),Vn=a("span"),Ao(Wt.$$.fragment),Vg=s(),Kn=a("span"),Kg=g("PyTorch Examples"),Ei=s(),ve=a("table"),Jn=a("thead"),M=a("tr"),Dr=a("th"),Jg=g("Notebook"),jg=s(),Sr=a("th"),Zg=g("Description"),eh=s(),jn=a("th"),th=s(),Zn=a("th"),ah=s(),u=a("tbody"),I=a("tr"),xr=a("td"),qt=a("a"),oh=g("Train your tokenizer"),rh=s(),Hr=a("td"),lh=g("How to train and use your very own tokenizer"),sh=s(),Mr=a("td"),Pt=a("a"),Ir=a("img"),nh=s(),Or=a("td"),Lt=a("a"),Gr=a("img"),ih=s(),O=a("tr"),$r=a("td"),Ut=a("a"),gh=g("Train your language model"),hh=s(),Nr=a("td"),ch=g("How to easily start using transformers"),uh=s(),Rr=a("td"),Ft=a("a"),Cr=a("img"),fh=s(),zr=a("td"),Xt=a("a"),Wr=a("img"),dh=s(),G=a("tr"),qr=a("td"),Bt=a("a"),mh=g("How to fine-tune a model on text classification"),bh=s(),Pr=a("td"),ph=g("Show how to preprocess the data and fine-tune a pretrained model on any GLUE task."),_h=s(),Lr=a("td"),Yt=a("a"),Ur=a("img"),vh=s(),Fr=a("td"),Qt=a("a"),Xr=a("img"),wh=s(),$=a("tr"),Br=a("td"),Vt=a("a"),Eh=g("How to fine-tune a model on language modeling"),kh=s(),Yr=a("td"),Th=g("Show how to preprocess the data and fine-tune a pretrained model on a causal or masked LM 
task."),yh=s(),Qr=a("td"),Kt=a("a"),Vr=a("img"),Ah=s(),Kr=a("td"),Jt=a("a"),Jr=a("img"),Dh=s(),N=a("tr"),jr=a("td"),jt=a("a"),Sh=g("How to fine-tune a model on token classification"),xh=s(),Zr=a("td"),Hh=g("Show how to preprocess the data and fine-tune a pretrained model on a token classification task (NER, PoS)."),Mh=s(),el=a("td"),Zt=a("a"),tl=a("img"),Ih=s(),al=a("td"),ea=a("a"),ol=a("img"),Oh=s(),R=a("tr"),rl=a("td"),ta=a("a"),Gh=g("How to fine-tune a model on question answering"),$h=s(),ll=a("td"),Nh=g("Show how to preprocess the data and fine-tune a pretrained model on SQUAD."),Rh=s(),sl=a("td"),aa=a("a"),nl=a("img"),Ch=s(),il=a("td"),oa=a("a"),gl=a("img"),zh=s(),C=a("tr"),hl=a("td"),ra=a("a"),Wh=g("How to fine-tune a model on multiple choice"),qh=s(),cl=a("td"),Ph=g("Show how to preprocess the data and fine-tune a pretrained model on SWAG."),Lh=s(),ul=a("td"),la=a("a"),fl=a("img"),Uh=s(),dl=a("td"),sa=a("a"),ml=a("img"),Fh=s(),z=a("tr"),bl=a("td"),na=a("a"),Xh=g("How to fine-tune a model on translation"),Bh=s(),pl=a("td"),Yh=g("Show how to preprocess the data and fine-tune a pretrained model on WMT."),Qh=s(),_l=a("td"),ia=a("a"),vl=a("img"),Vh=s(),wl=a("td"),ga=a("a"),El=a("img"),Kh=s(),W=a("tr"),kl=a("td"),ha=a("a"),Jh=g("How to fine-tune a model on summarization"),jh=s(),Tl=a("td"),Zh=g("Show how to preprocess the data and fine-tune a pretrained model on XSUM."),ec=s(),yl=a("td"),ca=a("a"),Al=a("img"),tc=s(),Dl=a("td"),ua=a("a"),Sl=a("img"),ac=s(),q=a("tr"),xl=a("td"),fa=a("a"),oc=g("How to fine-tune a speech recognition model in English"),rc=s(),Hl=a("td"),lc=g("Show how to preprocess the data and fine-tune a pretrained Speech model on TIMIT"),sc=s(),Ml=a("td"),da=a("a"),Il=a("img"),nc=s(),Ol=a("td"),ma=a("a"),Gl=a("img"),ic=s(),P=a("tr"),$l=a("td"),ba=a("a"),gc=g("How to fine-tune a speech recognition model in any language"),hc=s(),Nl=a("td"),cc=g("Show how to preprocess the data and fine-tune a multi-lingually pretrained speech model on Common Voice"),uc=s(),Rl=a("td"),pa=a("a"),Cl=a("img"),fc=s(),zl=a("td"),_a=a("a"),Wl=a("img"),dc=s(),L=a("tr"),ql=a("td"),va=a("a"),mc=g("How to fine-tune a model on audio classification"),bc=s(),Pl=a("td"),pc=g("Show how to preprocess the data and fine-tune a pretrained Speech model on Keyword Spotting"),_c=s(),Ll=a("td"),wa=a("a"),Ul=a("img"),vc=s(),Fl=a("td"),Ea=a("a"),Xl=a("img"),wc=s(),U=a("tr"),Bl=a("td"),ka=a("a"),Ec=g("How to train a language model from scratch"),kc=s(),Yl=a("td"),Tc=g("Highlight all the steps to effectively train Transformer model on custom data"),yc=s(),Ql=a("td"),Ta=a("a"),Vl=a("img"),Ac=s(),Kl=a("td"),ya=a("a"),Jl=a("img"),Dc=s(),F=a("tr"),jl=a("td"),Aa=a("a"),Sc=g("How to generate text"),xc=s(),Zl=a("td"),Hc=g("How to use different decoding methods for language generation with transformers"),Mc=s(),es=a("td"),Da=a("a"),ts=a("img"),Ic=s(),as=a("td"),Sa=a("a"),os=a("img"),Oc=s(),X=a("tr"),rs=a("td"),xa=a("a"),Gc=g("How to export model to ONNX"),$c=s(),ls=a("td"),Nc=g("Highlight how to export and run inference workloads through ONNX"),Rc=s(),ei=a("td"),Cc=s(),ti=a("td"),zc=s(),B=a("tr"),ss=a("td"),Ha=a("a"),Wc=g("How to use Benchmarks"),qc=s(),ns=a("td"),Pc=g("How to benchmark models with transformers"),Lc=s(),is=a("td"),Ma=a("a"),gs=a("img"),Uc=s(),hs=a("td"),Ia=a("a"),cs=a("img"),Fc=s(),Y=a("tr"),us=a("td"),Oa=a("a"),Xc=g("Reformer"),Bc=s(),fs=a("td"),Yc=g("How Reformer pushes the limits of language 
modeling"),Qc=s(),ds=a("td"),Ga=a("a"),ms=a("img"),Vc=s(),bs=a("td"),$a=a("a"),ps=a("img"),Kc=s(),Q=a("tr"),_s=a("td"),Na=a("a"),Jc=g("How to fine-tune a model on image classification"),jc=s(),vs=a("td"),Zc=g("Show how to preprocess the data and fine-tune any pretrained Vision model on Image Classification"),eu=s(),ws=a("td"),Ra=a("a"),Es=a("img"),tu=s(),ks=a("td"),Ca=a("a"),Ts=a("img"),ki=s(),ue=a("h3"),we=a("a"),ai=a("span"),Ao(za.$$.fragment),au=s(),oi=a("span"),ou=g("TensorFlow Examples"),Ti=s(),Ee=a("table"),ri=a("thead"),V=a("tr"),ys=a("th"),ru=g("Notebook"),lu=s(),As=a("th"),su=g("Description"),nu=s(),li=a("th"),iu=s(),si=a("th"),gu=s(),b=a("tbody"),K=a("tr"),Ds=a("td"),Wa=a("a"),hu=g("Train your tokenizer"),cu=s(),Ss=a("td"),uu=g("How to train and use your very own tokenizer"),fu=s(),xs=a("td"),qa=a("a"),Hs=a("img"),du=s(),Ms=a("td"),Pa=a("a"),Is=a("img"),mu=s(),J=a("tr"),Os=a("td"),La=a("a"),bu=g("Train your language model"),pu=s(),Gs=a("td"),_u=g("How to easily start using transformers"),vu=s(),$s=a("td"),Ua=a("a"),Ns=a("img"),wu=s(),Rs=a("td"),Fa=a("a"),Cs=a("img"),Eu=s(),j=a("tr"),zs=a("td"),Xa=a("a"),ku=g("How to fine-tune a model on text classification"),Tu=s(),Ws=a("td"),yu=g("Show how to preprocess the data and fine-tune a pretrained model on any GLUE task."),Au=s(),qs=a("td"),Ba=a("a"),Ps=a("img"),Du=s(),Ls=a("td"),Ya=a("a"),Us=a("img"),Su=s(),Z=a("tr"),Fs=a("td"),Qa=a("a"),xu=g("How to fine-tune a model on language modeling"),Hu=s(),Xs=a("td"),Mu=g("Show how to preprocess the data and fine-tune a pretrained model on a causal or masked LM task."),Iu=s(),Bs=a("td"),Va=a("a"),Ys=a("img"),Ou=s(),Qs=a("td"),Ka=a("a"),Vs=a("img"),Gu=s(),ee=a("tr"),Ks=a("td"),Ja=a("a"),$u=g("How to fine-tune a model on token classification"),Nu=s(),Js=a("td"),Ru=g("Show how to preprocess the data and fine-tune a pretrained model on a token classification task (NER, PoS)."),Cu=s(),js=a("td"),ja=a("a"),Zs=a("img"),zu=s(),en=a("td"),Za=a("a"),tn=a("img"),Wu=s(),te=a("tr"),an=a("td"),eo=a("a"),qu=g("How to fine-tune a model on question answering"),Pu=s(),on=a("td"),Lu=g("Show how to preprocess the data and fine-tune a pretrained model on SQUAD."),Uu=s(),rn=a("td"),to=a("a"),ln=a("img"),Fu=s(),sn=a("td"),ao=a("a"),nn=a("img"),Xu=s(),ae=a("tr"),gn=a("td"),oo=a("a"),Bu=g("How to fine-tune a model on multiple choice"),Yu=s(),hn=a("td"),Qu=g("Show how to preprocess the data and fine-tune a pretrained model on SWAG."),Vu=s(),cn=a("td"),ro=a("a"),un=a("img"),Ku=s(),fn=a("td"),lo=a("a"),dn=a("img"),Ju=s(),oe=a("tr"),mn=a("td"),so=a("a"),ju=g("How to fine-tune a model on translation"),Zu=s(),bn=a("td"),ef=g("Show how to preprocess the data and fine-tune a pretrained model on WMT."),tf=s(),pn=a("td"),no=a("a"),_n=a("img"),af=s(),vn=a("td"),io=a("a"),wn=a("img"),of=s(),re=a("tr"),En=a("td"),go=a("a"),rf=g("How to fine-tune a model on summarization"),lf=s(),kn=a("td"),sf=g("Show how to preprocess the data and fine-tune a pretrained model on XSUM."),nf=s(),Tn=a("td"),ho=a("a"),yn=a("img"),gf=s(),An=a("td"),co=a("a"),Dn=a("img"),yi=s(),fe=a("h3"),ke=a("a"),ni=a("span"),Ao(uo.$$.fragment),hf=s(),ii=a("span"),cf=g("Optimum notebooks"),Ai=s(),Te=a("p"),uf=g("\u{1F917} "),fo=a("a"),ff=g("Optimum"),df=g(" is an extension of \u{1F917} Transformers, providing a set of performance optimization tools enabling maximum efficiency to train and run models on targeted 
hardwares."),Di=s(),ye=a("table"),gi=a("thead"),le=a("tr"),Sn=a("th"),mf=g("Notebook"),bf=s(),xn=a("th"),pf=g("Description"),_f=s(),hi=a("th"),vf=s(),ci=a("th"),wf=s(),mo=a("tbody"),se=a("tr"),Hn=a("td"),bo=a("a"),Ef=g("How to quantize a model with ONNX Runtime for text classification"),kf=s(),Ae=a("td"),Tf=g("Show how to apply static and dynamic quantization on a model using "),po=a("a"),yf=g("ONNX Runtime"),Af=g(" for any GLUE task."),Df=s(),Mn=a("td"),_o=a("a"),In=a("img"),Sf=s(),On=a("td"),vo=a("a"),Gn=a("img"),xf=s(),ne=a("tr"),$n=a("td"),wo=a("a"),Hf=g("How to quantize a model with Intel Neural Compressor for text classification"),Mf=s(),De=a("td"),If=g("Show how to apply static, dynamic and aware training quantization on a model using "),Eo=a("a"),Of=g("Intel Neural Compressor (INC)"),Gf=g(" for any GLUE task."),$f=s(),Nn=a("td"),ko=a("a"),Rn=a("img"),Nf=s(),Cn=a("td"),To=a("a"),zn=a("img"),Si=s(),de=a("h2"),Se=a("a"),ui=a("span"),Ao(yo.$$.fragment),Rf=s(),fi=a("span"),Cf=g("Community notebooks:"),xi=s(),xe=a("p"),zf=g("More notebooks developed by the community are available "),Wn=a("a"),Wf=g("here"),qf=g("."),this.h()},l(i){const d=k1('[data-svelte="svelte-1phssyn"]',document.head);ie=o(d,"META",{name:!0,content:!0}),d.forEach(r),Oo=n(i),w=o(i,"H1",{class:!0});var Mi=l(w);E=o(Mi,"A",{id:!0,class:!0,href:!0});var om=l(E);qn=o(om,"SPAN",{});var rm=l(qn);Do(mt.$$.fragment,rm),rm.forEach(r),om.forEach(r),Yi=n(Mi),Pn=o(Mi,"SPAN",{});var lm=l(Pn);Qi=h(lm,"\u{1F917} Transformers Notebooks"),lm.forEach(r),Mi.forEach(r),di=n(i),Go=o(i,"P",{});var sm=l(Go);Vi=h(sm,"You can find here a list of the official notebooks provided by Hugging Face."),sm.forEach(r),mi=n(i),$o=o(i,"P",{});var nm=l($o);Ki=h(nm,`Also, we would like to list here interesting content created by the community. 
If you wrote some notebook(s) leveraging \u{1F917} Transformers and would like be listed here, please open a Pull Request so it can be included under the Community notebooks.`),nm.forEach(r),bi=n(i),ge=o(i,"H2",{class:!0});var Ii=l(ge);me=o(Ii,"A",{id:!0,class:!0,href:!0});var im=l(me);Ln=o(im,"SPAN",{});var gm=l(Ln);Do(bt.$$.fragment,gm),gm.forEach(r),im.forEach(r),Ji=n(Ii),Un=o(Ii,"SPAN",{});var hm=l(Un);ji=h(hm,"Hugging Face's notebooks \u{1F917}"),hm.forEach(r),Ii.forEach(r),pi=n(i),he=o(i,"H3",{class:!0});var Oi=l(he);be=o(Oi,"A",{id:!0,class:!0,href:!0});var cm=l(be);Fn=o(cm,"SPAN",{});var um=l(Fn);Do(pt.$$.fragment,um),um.forEach(r),cm.forEach(r),Zi=n(Oi),Xn=o(Oi,"SPAN",{});var fm=l(Xn);eg=h(fm,"Documentation notebooks"),fm.forEach(r),Oi.forEach(r),_i=n(i),No=o(i,"P",{});var dm=l(No);tg=h(dm,"You can open any page of the documentation as a notebook in colab (there is a button directly on said pages) but they are also listed here if you need to:"),dm.forEach(r),vi=n(i),pe=o(i,"TABLE",{});var Gi=l(pe);Bn=o(Gi,"THEAD",{});var mm=l(Bn);k=o(mm,"TR",{});var He=l(k);Ro=o(He,"TH",{align:!0});var bm=l(Ro);ag=h(bm,"Notebook"),bm.forEach(r),og=n(He),Co=o(He,"TH",{align:!0});var pm=l(Co);rg=h(pm,"Description"),pm.forEach(r),lg=n(He),Yn=o(He,"TH",{align:!0}),l(Yn).forEach(r),sg=n(He),Qn=o(He,"TH",{align:!0}),l(Qn).forEach(r),He.forEach(r),mm.forEach(r),ng=n(Gi),_=o(Gi,"TBODY",{});var v=l(_);T=o(v,"TR",{});var Me=l(T);zo=o(Me,"TD",{align:!0});var _m=l(zo);_t=o(_m,"A",{href:!0,rel:!0});var vm=l(_t);ig=h(vm,"Quicktour of the library"),vm.forEach(r),_m.forEach(r),gg=n(Me),Wo=o(Me,"TD",{align:!0});var wm=l(Wo);hg=h(wm,"A presentation of the various APIs in Transformers"),wm.forEach(r),cg=n(Me),qo=o(Me,"TD",{align:!0});var Em=l(qo);vt=o(Em,"A",{href:!0,rel:!0});var km=l(vt);Po=o(km,"IMG",{src:!0,alt:!0}),km.forEach(r),Em.forEach(r),ug=n(Me),Lo=o(Me,"TD",{align:!0});var Tm=l(Lo);wt=o(Tm,"A",{href:!0,rel:!0});var ym=l(wt);Uo=o(ym,"IMG",{src:!0,alt:!0}),ym.forEach(r),Tm.forEach(r),Me.forEach(r),fg=n(v),y=o(v,"TR",{});var Ie=l(y);Fo=o(Ie,"TD",{align:!0});var Am=l(Fo);Et=o(Am,"A",{href:!0,rel:!0});var Dm=l(Et);dg=h(Dm,"Summary of the tasks"),Dm.forEach(r),Am.forEach(r),mg=n(Ie),Xo=o(Ie,"TD",{align:!0});var Sm=l(Xo);bg=h(Sm,"How to run the models of the Transformers library task by task"),Sm.forEach(r),pg=n(Ie),Bo=o(Ie,"TD",{align:!0});var xm=l(Bo);kt=o(xm,"A",{href:!0,rel:!0});var Hm=l(kt);Yo=o(Hm,"IMG",{src:!0,alt:!0}),Hm.forEach(r),xm.forEach(r),_g=n(Ie),Qo=o(Ie,"TD",{align:!0});var Mm=l(Qo);Tt=o(Mm,"A",{href:!0,rel:!0});var Im=l(Tt);Vo=o(Im,"IMG",{src:!0,alt:!0}),Im.forEach(r),Mm.forEach(r),Ie.forEach(r),vg=n(v),A=o(v,"TR",{});var Oe=l(A);Ko=o(Oe,"TD",{align:!0});var Om=l(Ko);yt=o(Om,"A",{href:!0,rel:!0});var Gm=l(yt);wg=h(Gm,"Preprocessing data"),Gm.forEach(r),Om.forEach(r),Eg=n(Oe),Jo=o(Oe,"TD",{align:!0});var $m=l(Jo);kg=h($m,"How to use a tokenizer to preprocess your data"),$m.forEach(r),Tg=n(Oe),jo=o(Oe,"TD",{align:!0});var Nm=l(jo);At=o(Nm,"A",{href:!0,rel:!0});var Rm=l(At);Zo=o(Rm,"IMG",{src:!0,alt:!0}),Rm.forEach(r),Nm.forEach(r),yg=n(Oe),er=o(Oe,"TD",{align:!0});var Cm=l(er);Dt=o(Cm,"A",{href:!0,rel:!0});var zm=l(Dt);tr=o(zm,"IMG",{src:!0,alt:!0}),zm.forEach(r),Cm.forEach(r),Oe.forEach(r),Ag=n(v),D=o(v,"TR",{});var Ge=l(D);ar=o(Ge,"TD",{align:!0});var Wm=l(ar);St=o(Wm,"A",{href:!0,rel:!0});var qm=l(St);Dg=h(qm,"Fine-tuning a pretrained model"),qm.forEach(r),Wm.forEach(r),Sg=n(Ge),or=o(Ge,"TD",{align:!0});var Pm=l(or);xg=h(Pm,"How to use the Trainer to fine-tune a pretrained 
model"),Pm.forEach(r),Hg=n(Ge),rr=o(Ge,"TD",{align:!0});var Lm=l(rr);xt=o(Lm,"A",{href:!0,rel:!0});var Um=l(xt);lr=o(Um,"IMG",{src:!0,alt:!0}),Um.forEach(r),Lm.forEach(r),Mg=n(Ge),sr=o(Ge,"TD",{align:!0});var Fm=l(sr);Ht=o(Fm,"A",{href:!0,rel:!0});var Xm=l(Ht);nr=o(Xm,"IMG",{src:!0,alt:!0}),Xm.forEach(r),Fm.forEach(r),Ge.forEach(r),Ig=n(v),S=o(v,"TR",{});var $e=l(S);ir=o($e,"TD",{align:!0});var Bm=l(ir);Mt=o(Bm,"A",{href:!0,rel:!0});var Ym=l(Mt);Og=h(Ym,"Summary of the tokenizers"),Ym.forEach(r),Bm.forEach(r),Gg=n($e),gr=o($e,"TD",{align:!0});var Qm=l(gr);$g=h(Qm,"The differences between the tokenizers algorithm"),Qm.forEach(r),Ng=n($e),hr=o($e,"TD",{align:!0});var Vm=l(hr);It=o(Vm,"A",{href:!0,rel:!0});var Km=l(It);cr=o(Km,"IMG",{src:!0,alt:!0}),Km.forEach(r),Vm.forEach(r),Rg=n($e),ur=o($e,"TD",{align:!0});var Jm=l(ur);Ot=o(Jm,"A",{href:!0,rel:!0});var jm=l(Ot);fr=o(jm,"IMG",{src:!0,alt:!0}),jm.forEach(r),Jm.forEach(r),$e.forEach(r),Cg=n(v),x=o(v,"TR",{});var Ne=l(x);dr=o(Ne,"TD",{align:!0});var Zm=l(dr);Gt=o(Zm,"A",{href:!0,rel:!0});var eb=l(Gt);zg=h(eb,"Multilingual models"),eb.forEach(r),Zm.forEach(r),Wg=n(Ne),mr=o(Ne,"TD",{align:!0});var tb=l(mr);qg=h(tb,"How to use the multilingual models of the library"),tb.forEach(r),Pg=n(Ne),br=o(Ne,"TD",{align:!0});var ab=l(br);$t=o(ab,"A",{href:!0,rel:!0});var ob=l($t);pr=o(ob,"IMG",{src:!0,alt:!0}),ob.forEach(r),ab.forEach(r),Lg=n(Ne),_r=o(Ne,"TD",{align:!0});var rb=l(_r);Nt=o(rb,"A",{href:!0,rel:!0});var lb=l(Nt);vr=o(lb,"IMG",{src:!0,alt:!0}),lb.forEach(r),rb.forEach(r),Ne.forEach(r),Ug=n(v),H=o(v,"TR",{});var Re=l(H);wr=o(Re,"TD",{align:!0});var sb=l(wr);Rt=o(sb,"A",{href:!0,rel:!0});var nb=l(Rt);Fg=h(nb,"Fine-tuning with custom datasets"),nb.forEach(r),sb.forEach(r),Xg=n(Re),Er=o(Re,"TD",{align:!0});var ib=l(Er);Bg=h(ib,"How to fine-tune a pretrained model on various tasks"),ib.forEach(r),Yg=n(Re),kr=o(Re,"TD",{align:!0});var gb=l(kr);Ct=o(gb,"A",{href:!0,rel:!0});var hb=l(Ct);Tr=o(hb,"IMG",{src:!0,alt:!0}),hb.forEach(r),gb.forEach(r),Qg=n(Re),yr=o(Re,"TD",{align:!0});var cb=l(yr);zt=o(cb,"A",{href:!0,rel:!0});var ub=l(zt);Ar=o(ub,"IMG",{src:!0,alt:!0}),ub.forEach(r),cb.forEach(r),Re.forEach(r),v.forEach(r),Gi.forEach(r),wi=n(i),ce=o(i,"H3",{class:!0});var $i=l(ce);_e=o($i,"A",{id:!0,class:!0,href:!0});var fb=l(_e);Vn=o(fb,"SPAN",{});var db=l(Vn);Do(Wt.$$.fragment,db),db.forEach(r),fb.forEach(r),Vg=n($i),Kn=o($i,"SPAN",{});var mb=l(Kn);Kg=h(mb,"PyTorch Examples"),mb.forEach(r),$i.forEach(r),Ei=n(i),ve=o(i,"TABLE",{});var Ni=l(ve);Jn=o(Ni,"THEAD",{});var bb=l(Jn);M=o(bb,"TR",{});var Ce=l(M);Dr=o(Ce,"TH",{align:!0});var pb=l(Dr);Jg=h(pb,"Notebook"),pb.forEach(r),jg=n(Ce),Sr=o(Ce,"TH",{align:!0});var _b=l(Sr);Zg=h(_b,"Description"),_b.forEach(r),eh=n(Ce),jn=o(Ce,"TH",{align:!0}),l(jn).forEach(r),th=n(Ce),Zn=o(Ce,"TH",{align:!0}),l(Zn).forEach(r),Ce.forEach(r),bb.forEach(r),ah=n(Ni),u=o(Ni,"TBODY",{});var f=l(u);I=o(f,"TR",{});var ze=l(I);xr=o(ze,"TD",{align:!0});var vb=l(xr);qt=o(vb,"A",{href:!0,rel:!0});var wb=l(qt);oh=h(wb,"Train your tokenizer"),wb.forEach(r),vb.forEach(r),rh=n(ze),Hr=o(ze,"TD",{align:!0});var Eb=l(Hr);lh=h(Eb,"How to train and use your very own tokenizer"),Eb.forEach(r),sh=n(ze),Mr=o(ze,"TD",{align:!0});var kb=l(Mr);Pt=o(kb,"A",{href:!0,rel:!0});var Tb=l(Pt);Ir=o(Tb,"IMG",{src:!0,alt:!0}),Tb.forEach(r),kb.forEach(r),nh=n(ze),Or=o(ze,"TD",{align:!0});var yb=l(Or);Lt=o(yb,"A",{href:!0,rel:!0});var Ab=l(Lt);Gr=o(Ab,"IMG",{src:!0,alt:!0}),Ab.forEach(r),yb.forEach(r),ze.forEach(r),ih=n(f),O=o(f,"TR",{});var 
We=l(O);$r=o(We,"TD",{align:!0});var Db=l($r);Ut=o(Db,"A",{href:!0,rel:!0});var Sb=l(Ut);gh=h(Sb,"Train your language model"),Sb.forEach(r),Db.forEach(r),hh=n(We),Nr=o(We,"TD",{align:!0});var xb=l(Nr);ch=h(xb,"How to easily start using transformers"),xb.forEach(r),uh=n(We),Rr=o(We,"TD",{align:!0});var Hb=l(Rr);Ft=o(Hb,"A",{href:!0,rel:!0});var Mb=l(Ft);Cr=o(Mb,"IMG",{src:!0,alt:!0}),Mb.forEach(r),Hb.forEach(r),fh=n(We),zr=o(We,"TD",{align:!0});var Ib=l(zr);Xt=o(Ib,"A",{href:!0,rel:!0});var Ob=l(Xt);Wr=o(Ob,"IMG",{src:!0,alt:!0}),Ob.forEach(r),Ib.forEach(r),We.forEach(r),dh=n(f),G=o(f,"TR",{});var qe=l(G);qr=o(qe,"TD",{align:!0});var Gb=l(qr);Bt=o(Gb,"A",{href:!0,rel:!0});var $b=l(Bt);mh=h($b,"How to fine-tune a model on text classification"),$b.forEach(r),Gb.forEach(r),bh=n(qe),Pr=o(qe,"TD",{align:!0});var Nb=l(Pr);ph=h(Nb,"Show how to preprocess the data and fine-tune a pretrained model on any GLUE task."),Nb.forEach(r),_h=n(qe),Lr=o(qe,"TD",{align:!0});var Rb=l(Lr);Yt=o(Rb,"A",{href:!0,rel:!0});var Cb=l(Yt);Ur=o(Cb,"IMG",{src:!0,alt:!0}),Cb.forEach(r),Rb.forEach(r),vh=n(qe),Fr=o(qe,"TD",{align:!0});var zb=l(Fr);Qt=o(zb,"A",{href:!0,rel:!0});var Wb=l(Qt);Xr=o(Wb,"IMG",{src:!0,alt:!0}),Wb.forEach(r),zb.forEach(r),qe.forEach(r),wh=n(f),$=o(f,"TR",{});var Pe=l($);Br=o(Pe,"TD",{align:!0});var qb=l(Br);Vt=o(qb,"A",{href:!0,rel:!0});var Pb=l(Vt);Eh=h(Pb,"How to fine-tune a model on language modeling"),Pb.forEach(r),qb.forEach(r),kh=n(Pe),Yr=o(Pe,"TD",{align:!0});var Lb=l(Yr);Th=h(Lb,"Show how to preprocess the data and fine-tune a pretrained model on a causal or masked LM task."),Lb.forEach(r),yh=n(Pe),Qr=o(Pe,"TD",{align:!0});var Ub=l(Qr);Kt=o(Ub,"A",{href:!0,rel:!0});var Fb=l(Kt);Vr=o(Fb,"IMG",{src:!0,alt:!0}),Fb.forEach(r),Ub.forEach(r),Ah=n(Pe),Kr=o(Pe,"TD",{align:!0});var Xb=l(Kr);Jt=o(Xb,"A",{href:!0,rel:!0});var Bb=l(Jt);Jr=o(Bb,"IMG",{src:!0,alt:!0}),Bb.forEach(r),Xb.forEach(r),Pe.forEach(r),Dh=n(f),N=o(f,"TR",{});var Le=l(N);jr=o(Le,"TD",{align:!0});var Yb=l(jr);jt=o(Yb,"A",{href:!0,rel:!0});var Qb=l(jt);Sh=h(Qb,"How to fine-tune a model on token classification"),Qb.forEach(r),Yb.forEach(r),xh=n(Le),Zr=o(Le,"TD",{align:!0});var Vb=l(Zr);Hh=h(Vb,"Show how to preprocess the data and fine-tune a pretrained model on a token classification task (NER, PoS)."),Vb.forEach(r),Mh=n(Le),el=o(Le,"TD",{align:!0});var Kb=l(el);Zt=o(Kb,"A",{href:!0,rel:!0});var Jb=l(Zt);tl=o(Jb,"IMG",{src:!0,alt:!0}),Jb.forEach(r),Kb.forEach(r),Ih=n(Le),al=o(Le,"TD",{align:!0});var jb=l(al);ea=o(jb,"A",{href:!0,rel:!0});var Zb=l(ea);ol=o(Zb,"IMG",{src:!0,alt:!0}),Zb.forEach(r),jb.forEach(r),Le.forEach(r),Oh=n(f),R=o(f,"TR",{});var Ue=l(R);rl=o(Ue,"TD",{align:!0});var ep=l(rl);ta=o(ep,"A",{href:!0,rel:!0});var tp=l(ta);Gh=h(tp,"How to fine-tune a model on question answering"),tp.forEach(r),ep.forEach(r),$h=n(Ue),ll=o(Ue,"TD",{align:!0});var ap=l(ll);Nh=h(ap,"Show how to preprocess the data and fine-tune a pretrained model on SQUAD."),ap.forEach(r),Rh=n(Ue),sl=o(Ue,"TD",{align:!0});var op=l(sl);aa=o(op,"A",{href:!0,rel:!0});var rp=l(aa);nl=o(rp,"IMG",{src:!0,alt:!0}),rp.forEach(r),op.forEach(r),Ch=n(Ue),il=o(Ue,"TD",{align:!0});var lp=l(il);oa=o(lp,"A",{href:!0,rel:!0});var sp=l(oa);gl=o(sp,"IMG",{src:!0,alt:!0}),sp.forEach(r),lp.forEach(r),Ue.forEach(r),zh=n(f),C=o(f,"TR",{});var Fe=l(C);hl=o(Fe,"TD",{align:!0});var np=l(hl);ra=o(np,"A",{href:!0,rel:!0});var ip=l(ra);Wh=h(ip,"How to fine-tune a model on multiple choice"),ip.forEach(r),np.forEach(r),qh=n(Fe),cl=o(Fe,"TD",{align:!0});var gp=l(cl);Ph=h(gp,"Show how to 
preprocess the data and fine-tune a pretrained model on SWAG."),gp.forEach(r),Lh=n(Fe),ul=o(Fe,"TD",{align:!0});var hp=l(ul);la=o(hp,"A",{href:!0,rel:!0});var cp=l(la);fl=o(cp,"IMG",{src:!0,alt:!0}),cp.forEach(r),hp.forEach(r),Uh=n(Fe),dl=o(Fe,"TD",{align:!0});var up=l(dl);sa=o(up,"A",{href:!0,rel:!0});var fp=l(sa);ml=o(fp,"IMG",{src:!0,alt:!0}),fp.forEach(r),up.forEach(r),Fe.forEach(r),Fh=n(f),z=o(f,"TR",{});var Xe=l(z);bl=o(Xe,"TD",{align:!0});var dp=l(bl);na=o(dp,"A",{href:!0,rel:!0});var mp=l(na);Xh=h(mp,"How to fine-tune a model on translation"),mp.forEach(r),dp.forEach(r),Bh=n(Xe),pl=o(Xe,"TD",{align:!0});var bp=l(pl);Yh=h(bp,"Show how to preprocess the data and fine-tune a pretrained model on WMT."),bp.forEach(r),Qh=n(Xe),_l=o(Xe,"TD",{align:!0});var pp=l(_l);ia=o(pp,"A",{href:!0,rel:!0});var _p=l(ia);vl=o(_p,"IMG",{src:!0,alt:!0}),_p.forEach(r),pp.forEach(r),Vh=n(Xe),wl=o(Xe,"TD",{align:!0});var vp=l(wl);ga=o(vp,"A",{href:!0,rel:!0});var wp=l(ga);El=o(wp,"IMG",{src:!0,alt:!0}),wp.forEach(r),vp.forEach(r),Xe.forEach(r),Kh=n(f),W=o(f,"TR",{});var Be=l(W);kl=o(Be,"TD",{align:!0});var Ep=l(kl);ha=o(Ep,"A",{href:!0,rel:!0});var kp=l(ha);Jh=h(kp,"How to fine-tune a model on summarization"),kp.forEach(r),Ep.forEach(r),jh=n(Be),Tl=o(Be,"TD",{align:!0});var Tp=l(Tl);Zh=h(Tp,"Show how to preprocess the data and fine-tune a pretrained model on XSUM."),Tp.forEach(r),ec=n(Be),yl=o(Be,"TD",{align:!0});var yp=l(yl);ca=o(yp,"A",{href:!0,rel:!0});var Ap=l(ca);Al=o(Ap,"IMG",{src:!0,alt:!0}),Ap.forEach(r),yp.forEach(r),tc=n(Be),Dl=o(Be,"TD",{align:!0});var Dp=l(Dl);ua=o(Dp,"A",{href:!0,rel:!0});var Sp=l(ua);Sl=o(Sp,"IMG",{src:!0,alt:!0}),Sp.forEach(r),Dp.forEach(r),Be.forEach(r),ac=n(f),q=o(f,"TR",{});var Ye=l(q);xl=o(Ye,"TD",{align:!0});var xp=l(xl);fa=o(xp,"A",{href:!0,rel:!0});var Hp=l(fa);oc=h(Hp,"How to fine-tune a speech recognition model in English"),Hp.forEach(r),xp.forEach(r),rc=n(Ye),Hl=o(Ye,"TD",{align:!0});var Mp=l(Hl);lc=h(Mp,"Show how to preprocess the data and fine-tune a pretrained Speech model on TIMIT"),Mp.forEach(r),sc=n(Ye),Ml=o(Ye,"TD",{align:!0});var Ip=l(Ml);da=o(Ip,"A",{href:!0,rel:!0});var Op=l(da);Il=o(Op,"IMG",{src:!0,alt:!0}),Op.forEach(r),Ip.forEach(r),nc=n(Ye),Ol=o(Ye,"TD",{align:!0});var Gp=l(Ol);ma=o(Gp,"A",{href:!0,rel:!0});var $p=l(ma);Gl=o($p,"IMG",{src:!0,alt:!0}),$p.forEach(r),Gp.forEach(r),Ye.forEach(r),ic=n(f),P=o(f,"TR",{});var Qe=l(P);$l=o(Qe,"TD",{align:!0});var Np=l($l);ba=o(Np,"A",{href:!0,rel:!0});var Rp=l(ba);gc=h(Rp,"How to fine-tune a speech recognition model in any language"),Rp.forEach(r),Np.forEach(r),hc=n(Qe),Nl=o(Qe,"TD",{align:!0});var Cp=l(Nl);cc=h(Cp,"Show how to preprocess the data and fine-tune a multi-lingually pretrained speech model on Common Voice"),Cp.forEach(r),uc=n(Qe),Rl=o(Qe,"TD",{align:!0});var zp=l(Rl);pa=o(zp,"A",{href:!0,rel:!0});var Wp=l(pa);Cl=o(Wp,"IMG",{src:!0,alt:!0}),Wp.forEach(r),zp.forEach(r),fc=n(Qe),zl=o(Qe,"TD",{align:!0});var qp=l(zl);_a=o(qp,"A",{href:!0,rel:!0});var Pp=l(_a);Wl=o(Pp,"IMG",{src:!0,alt:!0}),Pp.forEach(r),qp.forEach(r),Qe.forEach(r),dc=n(f),L=o(f,"TR",{});var Ve=l(L);ql=o(Ve,"TD",{align:!0});var Lp=l(ql);va=o(Lp,"A",{href:!0,rel:!0});var Up=l(va);mc=h(Up,"How to fine-tune a model on audio classification"),Up.forEach(r),Lp.forEach(r),bc=n(Ve),Pl=o(Ve,"TD",{align:!0});var Fp=l(Pl);pc=h(Fp,"Show how to preprocess the data and fine-tune a pretrained Speech model on Keyword Spotting"),Fp.forEach(r),_c=n(Ve),Ll=o(Ve,"TD",{align:!0});var Xp=l(Ll);wa=o(Xp,"A",{href:!0,rel:!0});var 
Bp=l(wa);Ul=o(Bp,"IMG",{src:!0,alt:!0}),Bp.forEach(r),Xp.forEach(r),vc=n(Ve),Fl=o(Ve,"TD",{align:!0});var Yp=l(Fl);Ea=o(Yp,"A",{href:!0,rel:!0});var Qp=l(Ea);Xl=o(Qp,"IMG",{src:!0,alt:!0}),Qp.forEach(r),Yp.forEach(r),Ve.forEach(r),wc=n(f),U=o(f,"TR",{});var Ke=l(U);Bl=o(Ke,"TD",{align:!0});var Vp=l(Bl);ka=o(Vp,"A",{href:!0,rel:!0});var Kp=l(ka);Ec=h(Kp,"How to train a language model from scratch"),Kp.forEach(r),Vp.forEach(r),kc=n(Ke),Yl=o(Ke,"TD",{align:!0});var Jp=l(Yl);Tc=h(Jp,"Highlight all the steps to effectively train Transformer model on custom data"),Jp.forEach(r),yc=n(Ke),Ql=o(Ke,"TD",{align:!0});var jp=l(Ql);Ta=o(jp,"A",{href:!0,rel:!0});var Zp=l(Ta);Vl=o(Zp,"IMG",{src:!0,alt:!0}),Zp.forEach(r),jp.forEach(r),Ac=n(Ke),Kl=o(Ke,"TD",{align:!0});var e_=l(Kl);ya=o(e_,"A",{href:!0,rel:!0});var t_=l(ya);Jl=o(t_,"IMG",{src:!0,alt:!0}),t_.forEach(r),e_.forEach(r),Ke.forEach(r),Dc=n(f),F=o(f,"TR",{});var Je=l(F);jl=o(Je,"TD",{align:!0});var a_=l(jl);Aa=o(a_,"A",{href:!0,rel:!0});var o_=l(Aa);Sc=h(o_,"How to generate text"),o_.forEach(r),a_.forEach(r),xc=n(Je),Zl=o(Je,"TD",{align:!0});var r_=l(Zl);Hc=h(r_,"How to use different decoding methods for language generation with transformers"),r_.forEach(r),Mc=n(Je),es=o(Je,"TD",{align:!0});var l_=l(es);Da=o(l_,"A",{href:!0,rel:!0});var s_=l(Da);ts=o(s_,"IMG",{src:!0,alt:!0}),s_.forEach(r),l_.forEach(r),Ic=n(Je),as=o(Je,"TD",{align:!0});var n_=l(as);Sa=o(n_,"A",{href:!0,rel:!0});var i_=l(Sa);os=o(i_,"IMG",{src:!0,alt:!0}),i_.forEach(r),n_.forEach(r),Je.forEach(r),Oc=n(f),X=o(f,"TR",{});var je=l(X);rs=o(je,"TD",{align:!0});var g_=l(rs);xa=o(g_,"A",{href:!0,rel:!0});var h_=l(xa);Gc=h(h_,"How to export model to ONNX"),h_.forEach(r),g_.forEach(r),$c=n(je),ls=o(je,"TD",{align:!0});var c_=l(ls);Nc=h(c_,"Highlight how to export and run inference workloads through ONNX"),c_.forEach(r),Rc=n(je),ei=o(je,"TD",{align:!0}),l(ei).forEach(r),Cc=n(je),ti=o(je,"TD",{align:!0}),l(ti).forEach(r),je.forEach(r),zc=n(f),B=o(f,"TR",{});var Ze=l(B);ss=o(Ze,"TD",{align:!0});var u_=l(ss);Ha=o(u_,"A",{href:!0,rel:!0});var f_=l(Ha);Wc=h(f_,"How to use Benchmarks"),f_.forEach(r),u_.forEach(r),qc=n(Ze),ns=o(Ze,"TD",{align:!0});var d_=l(ns);Pc=h(d_,"How to benchmark models with transformers"),d_.forEach(r),Lc=n(Ze),is=o(Ze,"TD",{align:!0});var m_=l(is);Ma=o(m_,"A",{href:!0,rel:!0});var b_=l(Ma);gs=o(b_,"IMG",{src:!0,alt:!0}),b_.forEach(r),m_.forEach(r),Uc=n(Ze),hs=o(Ze,"TD",{align:!0});var p_=l(hs);Ia=o(p_,"A",{href:!0,rel:!0});var __=l(Ia);cs=o(__,"IMG",{src:!0,alt:!0}),__.forEach(r),p_.forEach(r),Ze.forEach(r),Fc=n(f),Y=o(f,"TR",{});var et=l(Y);us=o(et,"TD",{align:!0});var v_=l(us);Oa=o(v_,"A",{href:!0,rel:!0});var w_=l(Oa);Xc=h(w_,"Reformer"),w_.forEach(r),v_.forEach(r),Bc=n(et),fs=o(et,"TD",{align:!0});var E_=l(fs);Yc=h(E_,"How Reformer pushes the limits of language modeling"),E_.forEach(r),Qc=n(et),ds=o(et,"TD",{align:!0});var k_=l(ds);Ga=o(k_,"A",{href:!0,rel:!0});var T_=l(Ga);ms=o(T_,"IMG",{src:!0,alt:!0}),T_.forEach(r),k_.forEach(r),Vc=n(et),bs=o(et,"TD",{align:!0});var y_=l(bs);$a=o(y_,"A",{href:!0,rel:!0});var A_=l($a);ps=o(A_,"IMG",{src:!0,alt:!0}),A_.forEach(r),y_.forEach(r),et.forEach(r),Kc=n(f),Q=o(f,"TR",{});var tt=l(Q);_s=o(tt,"TD",{align:!0});var D_=l(_s);Na=o(D_,"A",{href:!0,rel:!0});var S_=l(Na);Jc=h(S_,"How to fine-tune a model on image classification"),S_.forEach(r),D_.forEach(r),jc=n(tt),vs=o(tt,"TD",{align:!0});var x_=l(vs);Zc=h(x_,"Show how to preprocess the data and fine-tune any pretrained Vision model on Image 
Classification"),x_.forEach(r),eu=n(tt),ws=o(tt,"TD",{align:!0});var H_=l(ws);Ra=o(H_,"A",{href:!0,rel:!0});var M_=l(Ra);Es=o(M_,"IMG",{src:!0,alt:!0}),M_.forEach(r),H_.forEach(r),tu=n(tt),ks=o(tt,"TD",{align:!0});var I_=l(ks);Ca=o(I_,"A",{href:!0,rel:!0});var O_=l(Ca);Ts=o(O_,"IMG",{src:!0,alt:!0}),O_.forEach(r),I_.forEach(r),tt.forEach(r),f.forEach(r),Ni.forEach(r),ki=n(i),ue=o(i,"H3",{class:!0});var Ri=l(ue);we=o(Ri,"A",{id:!0,class:!0,href:!0});var G_=l(we);ai=o(G_,"SPAN",{});var $_=l(ai);Do(za.$$.fragment,$_),$_.forEach(r),G_.forEach(r),au=n(Ri),oi=o(Ri,"SPAN",{});var N_=l(oi);ou=h(N_,"TensorFlow Examples"),N_.forEach(r),Ri.forEach(r),Ti=n(i),Ee=o(i,"TABLE",{});var Ci=l(Ee);ri=o(Ci,"THEAD",{});var R_=l(ri);V=o(R_,"TR",{});var at=l(V);ys=o(at,"TH",{align:!0});var C_=l(ys);ru=h(C_,"Notebook"),C_.forEach(r),lu=n(at),As=o(at,"TH",{align:!0});var z_=l(As);su=h(z_,"Description"),z_.forEach(r),nu=n(at),li=o(at,"TH",{align:!0}),l(li).forEach(r),iu=n(at),si=o(at,"TH",{align:!0}),l(si).forEach(r),at.forEach(r),R_.forEach(r),gu=n(Ci),b=o(Ci,"TBODY",{});var p=l(b);K=o(p,"TR",{});var ot=l(K);Ds=o(ot,"TD",{align:!0});var W_=l(Ds);Wa=o(W_,"A",{href:!0,rel:!0});var q_=l(Wa);hu=h(q_,"Train your tokenizer"),q_.forEach(r),W_.forEach(r),cu=n(ot),Ss=o(ot,"TD",{align:!0});var P_=l(Ss);uu=h(P_,"How to train and use your very own tokenizer"),P_.forEach(r),fu=n(ot),xs=o(ot,"TD",{align:!0});var L_=l(xs);qa=o(L_,"A",{href:!0,rel:!0});var U_=l(qa);Hs=o(U_,"IMG",{src:!0,alt:!0}),U_.forEach(r),L_.forEach(r),du=n(ot),Ms=o(ot,"TD",{align:!0});var F_=l(Ms);Pa=o(F_,"A",{href:!0,rel:!0});var X_=l(Pa);Is=o(X_,"IMG",{src:!0,alt:!0}),X_.forEach(r),F_.forEach(r),ot.forEach(r),mu=n(p),J=o(p,"TR",{});var rt=l(J);Os=o(rt,"TD",{align:!0});var B_=l(Os);La=o(B_,"A",{href:!0,rel:!0});var Y_=l(La);bu=h(Y_,"Train your language model"),Y_.forEach(r),B_.forEach(r),pu=n(rt),Gs=o(rt,"TD",{align:!0});var Q_=l(Gs);_u=h(Q_,"How to easily start using transformers"),Q_.forEach(r),vu=n(rt),$s=o(rt,"TD",{align:!0});var V_=l($s);Ua=o(V_,"A",{href:!0,rel:!0});var K_=l(Ua);Ns=o(K_,"IMG",{src:!0,alt:!0}),K_.forEach(r),V_.forEach(r),wu=n(rt),Rs=o(rt,"TD",{align:!0});var J_=l(Rs);Fa=o(J_,"A",{href:!0,rel:!0});var j_=l(Fa);Cs=o(j_,"IMG",{src:!0,alt:!0}),j_.forEach(r),J_.forEach(r),rt.forEach(r),Eu=n(p),j=o(p,"TR",{});var lt=l(j);zs=o(lt,"TD",{align:!0});var Z_=l(zs);Xa=o(Z_,"A",{href:!0,rel:!0});var ev=l(Xa);ku=h(ev,"How to fine-tune a model on text classification"),ev.forEach(r),Z_.forEach(r),Tu=n(lt),Ws=o(lt,"TD",{align:!0});var tv=l(Ws);yu=h(tv,"Show how to preprocess the data and fine-tune a pretrained model on any GLUE task."),tv.forEach(r),Au=n(lt),qs=o(lt,"TD",{align:!0});var av=l(qs);Ba=o(av,"A",{href:!0,rel:!0});var ov=l(Ba);Ps=o(ov,"IMG",{src:!0,alt:!0}),ov.forEach(r),av.forEach(r),Du=n(lt),Ls=o(lt,"TD",{align:!0});var rv=l(Ls);Ya=o(rv,"A",{href:!0,rel:!0});var lv=l(Ya);Us=o(lv,"IMG",{src:!0,alt:!0}),lv.forEach(r),rv.forEach(r),lt.forEach(r),Su=n(p),Z=o(p,"TR",{});var st=l(Z);Fs=o(st,"TD",{align:!0});var sv=l(Fs);Qa=o(sv,"A",{href:!0,rel:!0});var nv=l(Qa);xu=h(nv,"How to fine-tune a model on language modeling"),nv.forEach(r),sv.forEach(r),Hu=n(st),Xs=o(st,"TD",{align:!0});var iv=l(Xs);Mu=h(iv,"Show how to preprocess the data and fine-tune a pretrained model on a causal or masked LM task."),iv.forEach(r),Iu=n(st),Bs=o(st,"TD",{align:!0});var gv=l(Bs);Va=o(gv,"A",{href:!0,rel:!0});var hv=l(Va);Ys=o(hv,"IMG",{src:!0,alt:!0}),hv.forEach(r),gv.forEach(r),Ou=n(st),Qs=o(st,"TD",{align:!0});var cv=l(Qs);Ka=o(cv,"A",{href:!0,rel:!0});var 
uv=l(Ka);Vs=o(uv,"IMG",{src:!0,alt:!0}),uv.forEach(r),cv.forEach(r),st.forEach(r),Gu=n(p),ee=o(p,"TR",{});var nt=l(ee);Ks=o(nt,"TD",{align:!0});var fv=l(Ks);Ja=o(fv,"A",{href:!0,rel:!0});var dv=l(Ja);$u=h(dv,"How to fine-tune a model on token classification"),dv.forEach(r),fv.forEach(r),Nu=n(nt),Js=o(nt,"TD",{align:!0});var mv=l(Js);Ru=h(mv,"Show how to preprocess the data and fine-tune a pretrained model on a token classification task (NER, PoS)."),mv.forEach(r),Cu=n(nt),js=o(nt,"TD",{align:!0});var bv=l(js);ja=o(bv,"A",{href:!0,rel:!0});var pv=l(ja);Zs=o(pv,"IMG",{src:!0,alt:!0}),pv.forEach(r),bv.forEach(r),zu=n(nt),en=o(nt,"TD",{align:!0});var _v=l(en);Za=o(_v,"A",{href:!0,rel:!0});var vv=l(Za);tn=o(vv,"IMG",{src:!0,alt:!0}),vv.forEach(r),_v.forEach(r),nt.forEach(r),Wu=n(p),te=o(p,"TR",{});var it=l(te);an=o(it,"TD",{align:!0});var wv=l(an);eo=o(wv,"A",{href:!0,rel:!0});var Ev=l(eo);qu=h(Ev,"How to fine-tune a model on question answering"),Ev.forEach(r),wv.forEach(r),Pu=n(it),on=o(it,"TD",{align:!0});var kv=l(on);Lu=h(kv,"Show how to preprocess the data and fine-tune a pretrained model on SQUAD."),kv.forEach(r),Uu=n(it),rn=o(it,"TD",{align:!0});var Tv=l(rn);to=o(Tv,"A",{href:!0,rel:!0});var yv=l(to);ln=o(yv,"IMG",{src:!0,alt:!0}),yv.forEach(r),Tv.forEach(r),Fu=n(it),sn=o(it,"TD",{align:!0});var Av=l(sn);ao=o(Av,"A",{href:!0,rel:!0});var Dv=l(ao);nn=o(Dv,"IMG",{src:!0,alt:!0}),Dv.forEach(r),Av.forEach(r),it.forEach(r),Xu=n(p),ae=o(p,"TR",{});var gt=l(ae);gn=o(gt,"TD",{align:!0});var Sv=l(gn);oo=o(Sv,"A",{href:!0,rel:!0});var xv=l(oo);Bu=h(xv,"How to fine-tune a model on multiple choice"),xv.forEach(r),Sv.forEach(r),Yu=n(gt),hn=o(gt,"TD",{align:!0});var Hv=l(hn);Qu=h(Hv,"Show how to preprocess the data and fine-tune a pretrained model on SWAG."),Hv.forEach(r),Vu=n(gt),cn=o(gt,"TD",{align:!0});var Mv=l(cn);ro=o(Mv,"A",{href:!0,rel:!0});var Iv=l(ro);un=o(Iv,"IMG",{src:!0,alt:!0}),Iv.forEach(r),Mv.forEach(r),Ku=n(gt),fn=o(gt,"TD",{align:!0});var Ov=l(fn);lo=o(Ov,"A",{href:!0,rel:!0});var Gv=l(lo);dn=o(Gv,"IMG",{src:!0,alt:!0}),Gv.forEach(r),Ov.forEach(r),gt.forEach(r),Ju=n(p),oe=o(p,"TR",{});var ht=l(oe);mn=o(ht,"TD",{align:!0});var $v=l(mn);so=o($v,"A",{href:!0,rel:!0});var Nv=l(so);ju=h(Nv,"How to fine-tune a model on translation"),Nv.forEach(r),$v.forEach(r),Zu=n(ht),bn=o(ht,"TD",{align:!0});var Rv=l(bn);ef=h(Rv,"Show how to preprocess the data and fine-tune a pretrained model on WMT."),Rv.forEach(r),tf=n(ht),pn=o(ht,"TD",{align:!0});var Cv=l(pn);no=o(Cv,"A",{href:!0,rel:!0});var zv=l(no);_n=o(zv,"IMG",{src:!0,alt:!0}),zv.forEach(r),Cv.forEach(r),af=n(ht),vn=o(ht,"TD",{align:!0});var Wv=l(vn);io=o(Wv,"A",{href:!0,rel:!0});var qv=l(io);wn=o(qv,"IMG",{src:!0,alt:!0}),qv.forEach(r),Wv.forEach(r),ht.forEach(r),of=n(p),re=o(p,"TR",{});var ct=l(re);En=o(ct,"TD",{align:!0});var Pv=l(En);go=o(Pv,"A",{href:!0,rel:!0});var Lv=l(go);rf=h(Lv,"How to fine-tune a model on summarization"),Lv.forEach(r),Pv.forEach(r),lf=n(ct),kn=o(ct,"TD",{align:!0});var Uv=l(kn);sf=h(Uv,"Show how to preprocess the data and fine-tune a pretrained model on XSUM."),Uv.forEach(r),nf=n(ct),Tn=o(ct,"TD",{align:!0});var Fv=l(Tn);ho=o(Fv,"A",{href:!0,rel:!0});var Xv=l(ho);yn=o(Xv,"IMG",{src:!0,alt:!0}),Xv.forEach(r),Fv.forEach(r),gf=n(ct),An=o(ct,"TD",{align:!0});var Bv=l(An);co=o(Bv,"A",{href:!0,rel:!0});var Yv=l(co);Dn=o(Yv,"IMG",{src:!0,alt:!0}),Yv.forEach(r),Bv.forEach(r),ct.forEach(r),p.forEach(r),Ci.forEach(r),yi=n(i),fe=o(i,"H3",{class:!0});var zi=l(fe);ke=o(zi,"A",{id:!0,class:!0,href:!0});var 
Qv=l(ke);ni=o(Qv,"SPAN",{});var Vv=l(ni);Do(uo.$$.fragment,Vv),Vv.forEach(r),Qv.forEach(r),hf=n(zi),ii=o(zi,"SPAN",{});var Kv=l(ii);cf=h(Kv,"Optimum notebooks"),Kv.forEach(r),zi.forEach(r),Ai=n(i),Te=o(i,"P",{});var Wi=l(Te);uf=h(Wi,"\u{1F917} "),fo=o(Wi,"A",{href:!0,rel:!0});var Jv=l(fo);ff=h(Jv,"Optimum"),Jv.forEach(r),df=h(Wi," is an extension of \u{1F917} Transformers, providing a set of performance optimization tools enabling maximum efficiency to train and run models on targeted hardwares."),Wi.forEach(r),Di=n(i),ye=o(i,"TABLE",{});var qi=l(ye);gi=o(qi,"THEAD",{});var jv=l(gi);le=o(jv,"TR",{});var ut=l(le);Sn=o(ut,"TH",{align:!0});var Zv=l(Sn);mf=h(Zv,"Notebook"),Zv.forEach(r),bf=n(ut),xn=o(ut,"TH",{align:!0});var e1=l(xn);pf=h(e1,"Description"),e1.forEach(r),_f=n(ut),hi=o(ut,"TH",{align:!0}),l(hi).forEach(r),vf=n(ut),ci=o(ut,"TH",{align:!0}),l(ci).forEach(r),ut.forEach(r),jv.forEach(r),wf=n(qi),mo=o(qi,"TBODY",{});var Pi=l(mo);se=o(Pi,"TR",{});var ft=l(se);Hn=o(ft,"TD",{align:!0});var t1=l(Hn);bo=o(t1,"A",{href:!0,rel:!0});var a1=l(bo);Ef=h(a1,"How to quantize a model with ONNX Runtime for text classification"),a1.forEach(r),t1.forEach(r),kf=n(ft),Ae=o(ft,"TD",{align:!0});var Li=l(Ae);Tf=h(Li,"Show how to apply static and dynamic quantization on a model using "),po=o(Li,"A",{href:!0,rel:!0});var o1=l(po);yf=h(o1,"ONNX Runtime"),o1.forEach(r),Af=h(Li," for any GLUE task."),Li.forEach(r),Df=n(ft),Mn=o(ft,"TD",{align:!0});var r1=l(Mn);_o=o(r1,"A",{href:!0,rel:!0});var l1=l(_o);In=o(l1,"IMG",{src:!0,alt:!0}),l1.forEach(r),r1.forEach(r),Sf=n(ft),On=o(ft,"TD",{align:!0});var s1=l(On);vo=o(s1,"A",{href:!0,rel:!0});var n1=l(vo);Gn=o(n1,"IMG",{src:!0,alt:!0}),n1.forEach(r),s1.forEach(r),ft.forEach(r),xf=n(Pi),ne=o(Pi,"TR",{});var dt=l(ne);$n=o(dt,"TD",{align:!0});var i1=l($n);wo=o(i1,"A",{href:!0,rel:!0});var g1=l(wo);Hf=h(g1,"How to quantize a model with Intel Neural Compressor for text classification"),g1.forEach(r),i1.forEach(r),Mf=n(dt),De=o(dt,"TD",{align:!0});var Ui=l(De);If=h(Ui,"Show how to apply static, dynamic and aware training quantization on a model using "),Eo=o(Ui,"A",{href:!0,rel:!0});var h1=l(Eo);Of=h(h1,"Intel Neural Compressor (INC)"),h1.forEach(r),Gf=h(Ui," for any GLUE task."),Ui.forEach(r),$f=n(dt),Nn=o(dt,"TD",{align:!0});var c1=l(Nn);ko=o(c1,"A",{href:!0,rel:!0});var u1=l(ko);Rn=o(u1,"IMG",{src:!0,alt:!0}),u1.forEach(r),c1.forEach(r),Nf=n(dt),Cn=o(dt,"TD",{align:!0});var f1=l(Cn);To=o(f1,"A",{href:!0,rel:!0});var d1=l(To);zn=o(d1,"IMG",{src:!0,alt:!0}),d1.forEach(r),f1.forEach(r),dt.forEach(r),Pi.forEach(r),qi.forEach(r),Si=n(i),de=o(i,"H2",{class:!0});var Fi=l(de);Se=o(Fi,"A",{id:!0,class:!0,href:!0});var m1=l(Se);ui=o(m1,"SPAN",{});var b1=l(ui);Do(yo.$$.fragment,b1),b1.forEach(r),m1.forEach(r),Rf=n(Fi),fi=o(Fi,"SPAN",{});var p1=l(fi);Cf=h(p1,"Community notebooks:"),p1.forEach(r),Fi.forEach(r),xi=n(i),xe=o(i,"P",{});var Xi=l(xe);zf=h(Xi,"More notebooks developed by the community are available "),Wn=o(Xi,"A",{href:!0});var _1=l(Wn);Wf=h(_1,"here"),_1.forEach(r),qf=h(Xi,"."),Xi.forEach(r),this.h()},h(){t(ie,"name","hf:doc:metadata"),t(ie,"content",JSON.stringify(A1)),t(E,"id","transformers-notebooks"),t(E,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),t(E,"href","#transformers-notebooks"),t(w,"class","relative group"),t(me,"id","hugging-faces-notebooks"),t(me,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute 
with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),t(me,"href","#hugging-faces-notebooks"),t(ge,"class","relative group"),t(be,"id","documentation-notebooks"),t(be,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),t(be,"href","#documentation-notebooks"),t(he,"class","relative group"),t(Ro,"align","left"),t(Co,"align","left"),t(Yn,"align","left"),t(Qn,"align","right"),t(_t,"href","https://github.com/huggingface/notebooks/blob/master/transformers_doc/quicktour.ipynb"),t(_t,"rel","nofollow"),t(zo,"align","left"),t(Wo,"align","left"),c(Po.src,Pf="https://colab.research.google.com/assets/colab-badge.svg")||t(Po,"src",Pf),t(Po,"alt","Open in Colab"),t(vt,"href","https://colab.research.google.com/github/huggingface/notebooks/blob/master/transformers_doc/quicktour.ipynb"),t(vt,"rel","nofollow"),t(qo,"align","left"),c(Uo.src,Lf="https://studiolab.sagemaker.aws/studiolab.svg")||t(Uo,"src",Lf),t(Uo,"alt","Open in AWS Studio"),t(wt,"href","https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/transformers_doc/quicktour.ipynb"),t(wt,"rel","nofollow"),t(Lo,"align","right"),t(Et,"href","https://github.com/huggingface/notebooks/blob/master/transformers_doc/task_summary.ipynb"),t(Et,"rel","nofollow"),t(Fo,"align","left"),t(Xo,"align","left"),c(Yo.src,Uf="https://colab.research.google.com/assets/colab-badge.svg")||t(Yo,"src",Uf),t(Yo,"alt","Open in Colab"),t(kt,"href","https://colab.research.google.com/github/huggingface/notebooks/blob/master/transformers_doc/task_summary.ipynb"),t(kt,"rel","nofollow"),t(Bo,"align","left"),c(Vo.src,Ff="https://studiolab.sagemaker.aws/studiolab.svg")||t(Vo,"src",Ff),t(Vo,"alt","Open in AWS Studio"),t(Tt,"href","https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/transformers_doc/task_summary.ipynb"),t(Tt,"rel","nofollow"),t(Qo,"align","right"),t(yt,"href","https://github.com/huggingface/notebooks/blob/master/transformers_doc/preprocessing.ipynb"),t(yt,"rel","nofollow"),t(Ko,"align","left"),t(Jo,"align","left"),c(Zo.src,Xf="https://colab.research.google.com/assets/colab-badge.svg")||t(Zo,"src",Xf),t(Zo,"alt","Open in Colab"),t(At,"href","https://colab.research.google.com/github/huggingface/notebooks/blob/master/transformers_doc/preprocessing.ipynb"),t(At,"rel","nofollow"),t(jo,"align","left"),c(tr.src,Bf="https://studiolab.sagemaker.aws/studiolab.svg")||t(tr,"src",Bf),t(tr,"alt","Open in AWS Studio"),t(Dt,"href","https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/transformers_doc/preprocessing.ipynb"),t(Dt,"rel","nofollow"),t(er,"align","right"),t(St,"href","https://github.com/huggingface/notebooks/blob/master/transformers_doc/training.ipynb"),t(St,"rel","nofollow"),t(ar,"align","left"),t(or,"align","left"),c(lr.src,Yf="https://colab.research.google.com/assets/colab-badge.svg")||t(lr,"src",Yf),t(lr,"alt","Open in Colab"),t(xt,"href","https://colab.research.google.com/github/huggingface/notebooks/blob/master/transformers_doc/training.ipynb"),t(xt,"rel","nofollow"),t(rr,"align","left"),c(nr.src,Qf="https://studiolab.sagemaker.aws/studiolab.svg")||t(nr,"src",Qf),t(nr,"alt","Open in AWS 
Studio"),t(Ht,"href","https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/transformers_doc/training.ipynb"),t(Ht,"rel","nofollow"),t(sr,"align","right"),t(Mt,"href","https://github.com/huggingface/notebooks/blob/master/transformers_doc/tokenizer_summary.ipynb"),t(Mt,"rel","nofollow"),t(ir,"align","left"),t(gr,"align","left"),c(cr.src,Vf="https://colab.research.google.com/assets/colab-badge.svg")||t(cr,"src",Vf),t(cr,"alt","Open in Colab"),t(It,"href","https://colab.research.google.com/github/huggingface/notebooks/blob/master/transformers_doc/tokenizer_summary.ipynb"),t(It,"rel","nofollow"),t(hr,"align","left"),c(fr.src,Kf="https://studiolab.sagemaker.aws/studiolab.svg")||t(fr,"src",Kf),t(fr,"alt","Open in AWS Studio"),t(Ot,"href","https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/transformers_doc/tokenizer_summary.ipynb"),t(Ot,"rel","nofollow"),t(ur,"align","right"),t(Gt,"href","https://github.com/huggingface/notebooks/blob/master/transformers_doc/multilingual.ipynb"),t(Gt,"rel","nofollow"),t(dr,"align","left"),t(mr,"align","left"),c(pr.src,Jf="https://colab.research.google.com/assets/colab-badge.svg")||t(pr,"src",Jf),t(pr,"alt","Open in Colab"),t($t,"href","https://colab.research.google.com/github/huggingface/notebooks/blob/master/transformers_doc/multilingual.ipynb"),t($t,"rel","nofollow"),t(br,"align","left"),c(vr.src,jf="https://studiolab.sagemaker.aws/studiolab.svg")||t(vr,"src",jf),t(vr,"alt","Open in AWS Studio"),t(Nt,"href","https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/transformers_doc/multilingual.ipynb"),t(Nt,"rel","nofollow"),t(_r,"align","right"),t(Rt,"href","https://github.com/huggingface/notebooks/blob/master/transformers_doc/custom_datasets.ipynb"),t(Rt,"rel","nofollow"),t(wr,"align","left"),t(Er,"align","left"),c(Tr.src,Zf="https://colab.research.google.com/assets/colab-badge.svg")||t(Tr,"src",Zf),t(Tr,"alt","Open in Colab"),t(Ct,"href","https://colab.research.google.com/github/huggingface/notebooks/blob/master/transformers_doc/custom_datasets.ipynb"),t(Ct,"rel","nofollow"),t(kr,"align","left"),c(Ar.src,ed="https://studiolab.sagemaker.aws/studiolab.svg")||t(Ar,"src",ed),t(Ar,"alt","Open in AWS Studio"),t(zt,"href","https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/transformers_doc/custom_datasets.ipynb"),t(zt,"rel","nofollow"),t(yr,"align","right"),t(_e,"id","pytorch-examples"),t(_e,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),t(_e,"href","#pytorch-examples"),t(ce,"class","relative group"),t(Dr,"align","left"),t(Sr,"align","left"),t(jn,"align","left"),t(Zn,"align","right"),t(qt,"href","https://github.com/huggingface/notebooks/blob/master/examples/tokenizer_training.ipynb"),t(qt,"rel","nofollow"),t(xr,"align","left"),t(Hr,"align","left"),c(Ir.src,td="https://colab.research.google.com/assets/colab-badge.svg")||t(Ir,"src",td),t(Ir,"alt","Open in Colab"),t(Pt,"href","https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/tokenizer_training.ipynb"),t(Pt,"rel","nofollow"),t(Mr,"align","left"),c(Gr.src,ad="https://studiolab.sagemaker.aws/studiolab.svg")||t(Gr,"src",ad),t(Gr,"alt","Open in AWS 
Studio"),t(Lt,"href","https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/examples/tokenizer_training.ipynb"),t(Lt,"rel","nofollow"),t(Or,"align","right"),t(Ut,"href","https://github.com/huggingface/notebooks/blob/master/examples/language_modeling_from_scratch.ipynb"),t(Ut,"rel","nofollow"),t($r,"align","left"),t(Nr,"align","left"),c(Cr.src,od="https://colab.research.google.com/assets/colab-badge.svg")||t(Cr,"src",od),t(Cr,"alt","Open in Colab"),t(Ft,"href","https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/language_modeling_from_scratch.ipynb"),t(Ft,"rel","nofollow"),t(Rr,"align","left"),c(Wr.src,rd="https://studiolab.sagemaker.aws/studiolab.svg")||t(Wr,"src",rd),t(Wr,"alt","Open in AWS Studio"),t(Xt,"href","https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/examples/language_modeling_from_scratch.ipynb"),t(Xt,"rel","nofollow"),t(zr,"align","right"),t(Bt,"href","https://github.com/huggingface/notebooks/blob/master/examples/text_classification.ipynb"),t(Bt,"rel","nofollow"),t(qr,"align","left"),t(Pr,"align","left"),c(Ur.src,ld="https://colab.research.google.com/assets/colab-badge.svg")||t(Ur,"src",ld),t(Ur,"alt","Open in Colab"),t(Yt,"href","https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/text_classification.ipynb"),t(Yt,"rel","nofollow"),t(Lr,"align","left"),c(Xr.src,sd="https://studiolab.sagemaker.aws/studiolab.svg")||t(Xr,"src",sd),t(Xr,"alt","Open in AWS Studio"),t(Qt,"href","https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/examples/text_classification.ipynb"),t(Qt,"rel","nofollow"),t(Fr,"align","right"),t(Vt,"href","https://github.com/huggingface/notebooks/blob/master/examples/language_modeling.ipynb"),t(Vt,"rel","nofollow"),t(Br,"align","left"),t(Yr,"align","left"),c(Vr.src,nd="https://colab.research.google.com/assets/colab-badge.svg")||t(Vr,"src",nd),t(Vr,"alt","Open in Colab"),t(Kt,"href","https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/language_modeling.ipynb"),t(Kt,"rel","nofollow"),t(Qr,"align","left"),c(Jr.src,id="https://studiolab.sagemaker.aws/studiolab.svg")||t(Jr,"src",id),t(Jr,"alt","Open in AWS Studio"),t(Jt,"href","https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/examples/language_modeling.ipynb"),t(Jt,"rel","nofollow"),t(Kr,"align","right"),t(jt,"href","https://github.com/huggingface/notebooks/blob/master/examples/token_classification.ipynb"),t(jt,"rel","nofollow"),t(jr,"align","left"),t(Zr,"align","left"),c(tl.src,gd="https://colab.research.google.com/assets/colab-badge.svg")||t(tl,"src",gd),t(tl,"alt","Open in Colab"),t(Zt,"href","https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/token_classification.ipynb"),t(Zt,"rel","nofollow"),t(el,"align","left"),c(ol.src,hd="https://studiolab.sagemaker.aws/studiolab.svg")||t(ol,"src",hd),t(ol,"alt","Open in AWS Studio"),t(ea,"href","https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/examples/token_classification.ipynb"),t(ea,"rel","nofollow"),t(al,"align","right"),t(ta,"href","https://github.com/huggingface/notebooks/blob/master/examples/question_answering.ipynb"),t(ta,"rel","nofollow"),t(rl,"align","left"),t(ll,"align","left"),c(nl.src,cd="https://colab.research.google.com/assets/colab-badge.svg")||t(nl,"src",cd),t(nl,"alt","Open in 
Colab"),t(aa,"href","https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/question_answering.ipynb"),t(aa,"rel","nofollow"),t(sl,"align","left"),c(gl.src,ud="https://studiolab.sagemaker.aws/studiolab.svg")||t(gl,"src",ud),t(gl,"alt","Open in AWS Studio"),t(oa,"href","https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/examples/question_answering.ipynb"),t(oa,"rel","nofollow"),t(il,"align","right"),t(ra,"href","https://github.com/huggingface/notebooks/blob/master/examples/multiple_choice.ipynb"),t(ra,"rel","nofollow"),t(hl,"align","left"),t(cl,"align","left"),c(fl.src,fd="https://colab.research.google.com/assets/colab-badge.svg")||t(fl,"src",fd),t(fl,"alt","Open in Colab"),t(la,"href","https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/multiple_choice.ipynb"),t(la,"rel","nofollow"),t(ul,"align","left"),c(ml.src,dd="https://studiolab.sagemaker.aws/studiolab.svg")||t(ml,"src",dd),t(ml,"alt","Open in AWS Studio"),t(sa,"href","https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/examples/multiple_choice.ipynb"),t(sa,"rel","nofollow"),t(dl,"align","right"),t(na,"href","https://github.com/huggingface/notebooks/blob/master/examples/translation.ipynb"),t(na,"rel","nofollow"),t(bl,"align","left"),t(pl,"align","left"),c(vl.src,md="https://colab.research.google.com/assets/colab-badge.svg")||t(vl,"src",md),t(vl,"alt","Open in Colab"),t(ia,"href","https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/translation.ipynb"),t(ia,"rel","nofollow"),t(_l,"align","left"),c(El.src,bd="https://studiolab.sagemaker.aws/studiolab.svg")||t(El,"src",bd),t(El,"alt","Open in AWS Studio"),t(ga,"href","https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/examples/translation.ipynb"),t(ga,"rel","nofollow"),t(wl,"align","right"),t(ha,"href","https://github.com/huggingface/notebooks/blob/master/examples/summarization.ipynb"),t(ha,"rel","nofollow"),t(kl,"align","left"),t(Tl,"align","left"),c(Al.src,pd="https://colab.research.google.com/assets/colab-badge.svg")||t(Al,"src",pd),t(Al,"alt","Open in Colab"),t(ca,"href","https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/summarization.ipynb"),t(ca,"rel","nofollow"),t(yl,"align","left"),c(Sl.src,_d="https://studiolab.sagemaker.aws/studiolab.svg")||t(Sl,"src",_d),t(Sl,"alt","Open in AWS Studio"),t(ua,"href","https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/examples/summarization.ipynb"),t(ua,"rel","nofollow"),t(Dl,"align","right"),t(fa,"href","https://github.com/huggingface/notebooks/blob/master/examples/speech_recognition.ipynb"),t(fa,"rel","nofollow"),t(xl,"align","left"),t(Hl,"align","left"),c(Il.src,vd="https://colab.research.google.com/assets/colab-badge.svg")||t(Il,"src",vd),t(Il,"alt","Open in Colab"),t(da,"href","https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/speech_recognition.ipynb"),t(da,"rel","nofollow"),t(Ml,"align","left"),c(Gl.src,wd="https://studiolab.sagemaker.aws/studiolab.svg")||t(Gl,"src",wd),t(Gl,"alt","Open in AWS 
Studio"),t(ma,"href","https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/examples/speech_recognition.ipynb"),t(ma,"rel","nofollow"),t(Ol,"align","right"),t(ba,"href","https://github.com/huggingface/notebooks/blob/master/examples/multi_lingual_speech_recognition.ipynb"),t(ba,"rel","nofollow"),t($l,"align","left"),t(Nl,"align","left"),c(Cl.src,Ed="https://colab.research.google.com/assets/colab-badge.svg")||t(Cl,"src",Ed),t(Cl,"alt","Open in Colab"),t(pa,"href","https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/multi_lingual_speech_recognition.ipynb"),t(pa,"rel","nofollow"),t(Rl,"align","left"),c(Wl.src,kd="https://studiolab.sagemaker.aws/studiolab.svg")||t(Wl,"src",kd),t(Wl,"alt","Open in AWS Studio"),t(_a,"href","https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/examples/multi_lingual_speech_recognition.ipynb"),t(_a,"rel","nofollow"),t(zl,"align","right"),t(va,"href","https://github.com/huggingface/notebooks/blob/master/examples/audio_classification.ipynb"),t(va,"rel","nofollow"),t(ql,"align","left"),t(Pl,"align","left"),c(Ul.src,Td="https://colab.research.google.com/assets/colab-badge.svg")||t(Ul,"src",Td),t(Ul,"alt","Open in Colab"),t(wa,"href","https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/audio_classification.ipynb"),t(wa,"rel","nofollow"),t(Ll,"align","left"),c(Xl.src,yd="https://studiolab.sagemaker.aws/studiolab.svg")||t(Xl,"src",yd),t(Xl,"alt","Open in AWS Studio"),t(Ea,"href","https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/examples/audio_classification.ipynb"),t(Ea,"rel","nofollow"),t(Fl,"align","right"),t(ka,"href","https://github.com/huggingface/blog/blob/master/notebooks/01_how_to_train.ipynb"),t(ka,"rel","nofollow"),t(Bl,"align","left"),t(Yl,"align","left"),c(Vl.src,Ad="https://colab.research.google.com/assets/colab-badge.svg")||t(Vl,"src",Ad),t(Vl,"alt","Open in Colab"),t(Ta,"href","https://colab.research.google.com/github/huggingface/blog/blob/master/notebooks/01_how_to_train.ipynb"),t(Ta,"rel","nofollow"),t(Ql,"align","left"),c(Jl.src,Dd="https://studiolab.sagemaker.aws/studiolab.svg")||t(Jl,"src",Dd),t(Jl,"alt","Open in AWS Studio"),t(ya,"href","https://studiolab.sagemaker.aws/import/github/huggingface/blog/blob/master/notebooks/01_how_to_train.ipynb"),t(ya,"rel","nofollow"),t(Kl,"align","right"),t(Aa,"href","https://github.com/huggingface/blog/blob/master/notebooks/02_how_to_generate.ipynb"),t(Aa,"rel","nofollow"),t(jl,"align","left"),t(Zl,"align","left"),c(ts.src,Sd="https://colab.research.google.com/assets/colab-badge.svg")||t(ts,"src",Sd),t(ts,"alt","Open in Colab"),t(Da,"href","https://colab.research.google.com/github/huggingface/blog/blob/master/notebooks/02_how_to_generate.ipynb"),t(Da,"rel","nofollow"),t(es,"align","left"),c(os.src,xd="https://studiolab.sagemaker.aws/studiolab.svg")||t(os,"src",xd),t(os,"alt","Open in AWS 
Studio"),t(Sa,"href","https://studiolab.sagemaker.aws/import/github/huggingface/blog/blob/master/notebooks/02_how_to_generate.ipynb"),t(Sa,"rel","nofollow"),t(as,"align","right"),t(xa,"href","https://github.com/huggingface/notebooks/blob/master/examples/onnx-export.ipynb"),t(xa,"rel","nofollow"),t(rs,"align","left"),t(ls,"align","left"),t(ei,"align","left"),t(ti,"align","right"),t(Ha,"href","https://github.com/huggingface/notebooks/blob/master/examples/benchmark.ipynb"),t(Ha,"rel","nofollow"),t(ss,"align","left"),t(ns,"align","left"),c(gs.src,Hd="https://colab.research.google.com/assets/colab-badge.svg")||t(gs,"src",Hd),t(gs,"alt","Open in Colab"),t(Ma,"href","https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/benchmark.ipynb"),t(Ma,"rel","nofollow"),t(is,"align","left"),c(cs.src,Md="https://studiolab.sagemaker.aws/studiolab.svg")||t(cs,"src",Md),t(cs,"alt","Open in AWS Studio"),t(Ia,"href","https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/examples/benchmark.ipynb"),t(Ia,"rel","nofollow"),t(hs,"align","right"),t(Oa,"href","https://github.com/huggingface/blog/blob/master/notebooks/03_reformer.ipynb"),t(Oa,"rel","nofollow"),t(us,"align","left"),t(fs,"align","left"),c(ms.src,Id="https://colab.research.google.com/assets/colab-badge.svg")||t(ms,"src",Id),t(ms,"alt","Open in Colab"),t(Ga,"href","https://colab.research.google.com/github/patrickvonplaten/blog/blob/master/notebooks/03_reformer.ipynb"),t(Ga,"rel","nofollow"),t(ds,"align","left"),c(ps.src,Od="https://studiolab.sagemaker.aws/studiolab.svg")||t(ps,"src",Od),t(ps,"alt","Open in AWS Studio"),t($a,"href","https://studiolab.sagemaker.aws/import/github/patrickvonplaten/blog/blob/master/notebooks/03_reformer.ipynb"),t($a,"rel","nofollow"),t(bs,"align","right"),t(Na,"href","https://github.com/huggingface/notebooks/blob/master/examples/image_classification.ipynb"),t(Na,"rel","nofollow"),t(_s,"align","left"),t(vs,"align","left"),c(Es.src,Gd="https://colab.research.google.com/assets/colab-badge.svg")||t(Es,"src",Gd),t(Es,"alt","Open in Colab"),t(Ra,"href","https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/image_classification.ipynb"),t(Ra,"rel","nofollow"),t(ws,"align","left"),c(Ts.src,$d="https://studiolab.sagemaker.aws/studiolab.svg")||t(Ts,"src",$d),t(Ts,"alt","Open in AWS Studio"),t(Ca,"href","https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/examples/image_classification.ipynb"),t(Ca,"rel","nofollow"),t(ks,"align","right"),t(we,"id","tensorflow-examples"),t(we,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),t(we,"href","#tensorflow-examples"),t(ue,"class","relative group"),t(ys,"align","left"),t(As,"align","left"),t(li,"align","left"),t(si,"align","right"),t(Wa,"href","https://github.com/huggingface/notebooks/blob/master/examples/tokenizer_training.ipynb"),t(Wa,"rel","nofollow"),t(Ds,"align","left"),t(Ss,"align","left"),c(Hs.src,Nd="https://colab.research.google.com/assets/colab-badge.svg")||t(Hs,"src",Nd),t(Hs,"alt","Open in Colab"),t(qa,"href","https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/tokenizer_training.ipynb"),t(qa,"rel","nofollow"),t(xs,"align","left"),c(Is.src,Rd="https://studiolab.sagemaker.aws/studiolab.svg")||t(Is,"src",Rd),t(Is,"alt","Open in AWS 
Studio"),t(Pa,"href","https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/examples/tokenizer_training.ipynb"),t(Pa,"rel","nofollow"),t(Ms,"align","right"),t(La,"href","https://github.com/huggingface/notebooks/blob/master/examples/language_modeling_from_scratch-tf.ipynb"),t(La,"rel","nofollow"),t(Os,"align","left"),t(Gs,"align","left"),c(Ns.src,Cd="https://colab.research.google.com/assets/colab-badge.svg")||t(Ns,"src",Cd),t(Ns,"alt","Open in Colab"),t(Ua,"href","https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/language_modeling_from_scratch-tf.ipynb"),t(Ua,"rel","nofollow"),t($s,"align","left"),c(Cs.src,zd="https://studiolab.sagemaker.aws/studiolab.svg")||t(Cs,"src",zd),t(Cs,"alt","Open in AWS Studio"),t(Fa,"href","https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/examples/language_modeling_from_scratch-tf.ipynb"),t(Fa,"rel","nofollow"),t(Rs,"align","right"),t(Xa,"href","https://github.com/huggingface/notebooks/blob/master/examples/text_classification-tf.ipynb"),t(Xa,"rel","nofollow"),t(zs,"align","left"),t(Ws,"align","left"),c(Ps.src,Wd="https://colab.research.google.com/assets/colab-badge.svg")||t(Ps,"src",Wd),t(Ps,"alt","Open in Colab"),t(Ba,"href","https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/text_classification-tf.ipynb"),t(Ba,"rel","nofollow"),t(qs,"align","left"),c(Us.src,qd="https://studiolab.sagemaker.aws/studiolab.svg")||t(Us,"src",qd),t(Us,"alt","Open in AWS Studio"),t(Ya,"href","https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/examples/text_classification-tf.ipynb"),t(Ya,"rel","nofollow"),t(Ls,"align","right"),t(Qa,"href","https://github.com/huggingface/notebooks/blob/master/examples/language_modeling-tf.ipynb"),t(Qa,"rel","nofollow"),t(Fs,"align","left"),t(Xs,"align","left"),c(Ys.src,Pd="https://colab.research.google.com/assets/colab-badge.svg")||t(Ys,"src",Pd),t(Ys,"alt","Open in Colab"),t(Va,"href","https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/language_modeling-tf.ipynb"),t(Va,"rel","nofollow"),t(Bs,"align","left"),c(Vs.src,Ld="https://studiolab.sagemaker.aws/studiolab.svg")||t(Vs,"src",Ld),t(Vs,"alt","Open in AWS Studio"),t(Ka,"href","https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/examples/language_modeling-tf.ipynb"),t(Ka,"rel","nofollow"),t(Qs,"align","right"),t(Ja,"href","https://github.com/huggingface/notebooks/blob/master/examples/token_classification-tf.ipynb"),t(Ja,"rel","nofollow"),t(Ks,"align","left"),t(Js,"align","left"),c(Zs.src,Ud="https://colab.research.google.com/assets/colab-badge.svg")||t(Zs,"src",Ud),t(Zs,"alt","Open in Colab"),t(ja,"href","https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/token_classification-tf.ipynb"),t(ja,"rel","nofollow"),t(js,"align","left"),c(tn.src,Fd="https://studiolab.sagemaker.aws/studiolab.svg")||t(tn,"src",Fd),t(tn,"alt","Open in AWS Studio"),t(Za,"href","https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/examples/token_classification-tf.ipynb"),t(Za,"rel","nofollow"),t(en,"align","right"),t(eo,"href","https://github.com/huggingface/notebooks/blob/master/examples/question_answering-tf.ipynb"),t(eo,"rel","nofollow"),t(an,"align","left"),t(on,"align","left"),c(ln.src,Xd="https://colab.research.google.com/assets/colab-badge.svg")||t(ln,"src",Xd),t(ln,"alt","Open in 
Colab"),t(to,"href","https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/question_answering-tf.ipynb"),t(to,"rel","nofollow"),t(rn,"align","left"),c(nn.src,Bd="https://studiolab.sagemaker.aws/studiolab.svg")||t(nn,"src",Bd),t(nn,"alt","Open in AWS Studio"),t(ao,"href","https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/examples/question_answering-tf.ipynb"),t(ao,"rel","nofollow"),t(sn,"align","right"),t(oo,"href","https://github.com/huggingface/notebooks/blob/master/examples/multiple_choice-tf.ipynb"),t(oo,"rel","nofollow"),t(gn,"align","left"),t(hn,"align","left"),c(un.src,Yd="https://colab.research.google.com/assets/colab-badge.svg")||t(un,"src",Yd),t(un,"alt","Open in Colab"),t(ro,"href","https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/multiple_choice-tf.ipynb"),t(ro,"rel","nofollow"),t(cn,"align","left"),c(dn.src,Qd="https://studiolab.sagemaker.aws/studiolab.svg")||t(dn,"src",Qd),t(dn,"alt","Open in AWS Studio"),t(lo,"href","https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/examples/multiple_choice-tf.ipynb"),t(lo,"rel","nofollow"),t(fn,"align","right"),t(so,"href","https://github.com/huggingface/notebooks/blob/master/examples/translation-tf.ipynb"),t(so,"rel","nofollow"),t(mn,"align","left"),t(bn,"align","left"),c(_n.src,Vd="https://colab.research.google.com/assets/colab-badge.svg")||t(_n,"src",Vd),t(_n,"alt","Open in Colab"),t(no,"href","https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/translation-tf.ipynb"),t(no,"rel","nofollow"),t(pn,"align","left"),c(wn.src,Kd="https://studiolab.sagemaker.aws/studiolab.svg")||t(wn,"src",Kd),t(wn,"alt","Open in AWS Studio"),t(io,"href","https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/examples/translation-tf.ipynb"),t(io,"rel","nofollow"),t(vn,"align","right"),t(go,"href","https://github.com/huggingface/notebooks/blob/master/examples/summarization-tf.ipynb"),t(go,"rel","nofollow"),t(En,"align","left"),t(kn,"align","left"),c(yn.src,Jd="https://colab.research.google.com/assets/colab-badge.svg")||t(yn,"src",Jd),t(yn,"alt","Open in Colab"),t(ho,"href","https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/summarization-tf.ipynb"),t(ho,"rel","nofollow"),t(Tn,"align","left"),c(Dn.src,jd="https://studiolab.sagemaker.aws/studiolab.svg")||t(Dn,"src",jd),t(Dn,"alt","Open in AWS Studio"),t(co,"href","https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/examples/summarization-tf.ipynb"),t(co,"rel","nofollow"),t(An,"align","right"),t(ke,"id","optimum-notebooks"),t(ke,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),t(ke,"href","#optimum-notebooks"),t(fe,"class","relative group"),t(fo,"href","https://github.com/huggingface/optimum"),t(fo,"rel","nofollow"),t(Sn,"align","left"),t(xn,"align","left"),t(hi,"align","left"),t(ci,"align","right"),t(bo,"href","https://github.com/huggingface/notebooks/blob/master/examples/text_classification_quantization_ort.ipynb"),t(bo,"rel","nofollow"),t(Hn,"align","left"),t(po,"href","https://github.com/microsoft/onnxruntime"),t(po,"rel","nofollow"),t(Ae,"align","left"),c(In.src,Zd="https://colab.research.google.com/assets/colab-badge.svg")||t(In,"src",Zd),t(In,"alt","Open in 
Colab"),t(_o,"href","https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/text_classification_quantization_ort.ipynb"),t(_o,"rel","nofollow"),t(Mn,"align","left"),c(Gn.src,em="https://studiolab.sagemaker.aws/studiolab.svg")||t(Gn,"src",em),t(Gn,"alt","Open in AWS Studio"),t(vo,"href","https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/examples/text_classification_quantization_ort.ipynb"),t(vo,"rel","nofollow"),t(On,"align","right"),t(wo,"href","https://github.com/huggingface/notebooks/blob/master/examples/text_classification_quantization_inc.ipynb"),t(wo,"rel","nofollow"),t($n,"align","left"),t(Eo,"href","https://github.com/intel/neural-compressor"),t(Eo,"rel","nofollow"),t(De,"align","left"),c(Rn.src,tm="https://colab.research.google.com/assets/colab-badge.svg")||t(Rn,"src",tm),t(Rn,"alt","Open in Colab"),t(ko,"href","https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/text_classification_quantization_inc.ipynb"),t(ko,"rel","nofollow"),t(Nn,"align","left"),c(zn.src,am="https://studiolab.sagemaker.aws/studiolab.svg")||t(zn,"src",am),t(zn,"alt","Open in AWS Studio"),t(To,"href","https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/examples/text_classification_quantization_inc.ipynb"),t(To,"rel","nofollow"),t(Cn,"align","right"),t(Se,"id","community-notebooks"),t(Se,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),t(Se,"href","#community-notebooks"),t(de,"class","relative group"),t(Wn,"href","community#community-notebooks")},m(i,d){e(document.head,ie),m(i,Oo,d),m(i,w,d),e(w,E),e(E,qn),So(mt,qn,null),e(w,Yi),e(w,Pn),e(Pn,Qi),m(i,di,d),m(i,Go,d),e(Go,Vi),m(i,mi,d),m(i,$o,d),e($o,Ki),m(i,bi,d),m(i,ge,d),e(ge,me),e(me,Ln),So(bt,Ln,null),e(ge,Ji),e(ge,Un),e(Un,ji),m(i,pi,d),m(i,he,d),e(he,be),e(be,Fn),So(pt,Fn,null),e(he,Zi),e(he,Xn),e(Xn,eg),m(i,_i,d),m(i,No,d),e(No,tg),m(i,vi,d),m(i,pe,d),e(pe,Bn),e(Bn,k),e(k,Ro),e(Ro,ag),e(k,og),e(k,Co),e(Co,rg),e(k,lg),e(k,Yn),e(k,sg),e(k,Qn),e(pe,ng),e(pe,_),e(_,T),e(T,zo),e(zo,_t),e(_t,ig),e(T,gg),e(T,Wo),e(Wo,hg),e(T,cg),e(T,qo),e(qo,vt),e(vt,Po),e(T,ug),e(T,Lo),e(Lo,wt),e(wt,Uo),e(_,fg),e(_,y),e(y,Fo),e(Fo,Et),e(Et,dg),e(y,mg),e(y,Xo),e(Xo,bg),e(y,pg),e(y,Bo),e(Bo,kt),e(kt,Yo),e(y,_g),e(y,Qo),e(Qo,Tt),e(Tt,Vo),e(_,vg),e(_,A),e(A,Ko),e(Ko,yt),e(yt,wg),e(A,Eg),e(A,Jo),e(Jo,kg),e(A,Tg),e(A,jo),e(jo,At),e(At,Zo),e(A,yg),e(A,er),e(er,Dt),e(Dt,tr),e(_,Ag),e(_,D),e(D,ar),e(ar,St),e(St,Dg),e(D,Sg),e(D,or),e(or,xg),e(D,Hg),e(D,rr),e(rr,xt),e(xt,lr),e(D,Mg),e(D,sr),e(sr,Ht),e(Ht,nr),e(_,Ig),e(_,S),e(S,ir),e(ir,Mt),e(Mt,Og),e(S,Gg),e(S,gr),e(gr,$g),e(S,Ng),e(S,hr),e(hr,It),e(It,cr),e(S,Rg),e(S,ur),e(ur,Ot),e(Ot,fr),e(_,Cg),e(_,x),e(x,dr),e(dr,Gt),e(Gt,zg),e(x,Wg),e(x,mr),e(mr,qg),e(x,Pg),e(x,br),e(br,$t),e($t,pr),e(x,Lg),e(x,_r),e(_r,Nt),e(Nt,vr),e(_,Ug),e(_,H),e(H,wr),e(wr,Rt),e(Rt,Fg),e(H,Xg),e(H,Er),e(Er,Bg),e(H,Yg),e(H,kr),e(kr,Ct),e(Ct,Tr),e(H,Qg),e(H,yr),e(yr,zt),e(zt,Ar),m(i,wi,d),m(i,ce,d),e(ce,_e),e(_e,Vn),So(Wt,Vn,null),e(ce,Vg),e(ce,Kn),e(Kn,Kg),m(i,Ei,d),m(i,ve,d),e(ve,Jn),e(Jn,M),e(M,Dr),e(Dr,Jg),e(M,jg),e(M,Sr),e(Sr,Zg),e(M,eh),e(M,jn),e(M,th),e(M,Zn),e(ve,ah),e(ve,u),e(u,I),e(I,xr),e(xr,qt),e(qt,oh),e(I,rh),e(I,Hr),e(Hr,lh),e(I,sh),e(I,Mr),e(Mr,Pt),e(Pt,Ir),e(I,nh),e(I,Or),e(Or,Lt),e(Lt,Gr),e(u,ih),e(u,O),e(O,$r),e($r,Ut),e(Ut,gh),e(O,hh),e(O,Nr),e(Nr,ch),e(O,uh),e(O,Rr),e(Rr,Ft),e(Ft,Cr),e(O,fh),e(O,zr),e(zr,Xt
),e(Xt,Wr),e(u,dh),e(u,G),e(G,qr),e(qr,Bt),e(Bt,mh),e(G,bh),e(G,Pr),e(Pr,ph),e(G,_h),e(G,Lr),e(Lr,Yt),e(Yt,Ur),e(G,vh),e(G,Fr),e(Fr,Qt),e(Qt,Xr),e(u,wh),e(u,$),e($,Br),e(Br,Vt),e(Vt,Eh),e($,kh),e($,Yr),e(Yr,Th),e($,yh),e($,Qr),e(Qr,Kt),e(Kt,Vr),e($,Ah),e($,Kr),e(Kr,Jt),e(Jt,Jr),e(u,Dh),e(u,N),e(N,jr),e(jr,jt),e(jt,Sh),e(N,xh),e(N,Zr),e(Zr,Hh),e(N,Mh),e(N,el),e(el,Zt),e(Zt,tl),e(N,Ih),e(N,al),e(al,ea),e(ea,ol),e(u,Oh),e(u,R),e(R,rl),e(rl,ta),e(ta,Gh),e(R,$h),e(R,ll),e(ll,Nh),e(R,Rh),e(R,sl),e(sl,aa),e(aa,nl),e(R,Ch),e(R,il),e(il,oa),e(oa,gl),e(u,zh),e(u,C),e(C,hl),e(hl,ra),e(ra,Wh),e(C,qh),e(C,cl),e(cl,Ph),e(C,Lh),e(C,ul),e(ul,la),e(la,fl),e(C,Uh),e(C,dl),e(dl,sa),e(sa,ml),e(u,Fh),e(u,z),e(z,bl),e(bl,na),e(na,Xh),e(z,Bh),e(z,pl),e(pl,Yh),e(z,Qh),e(z,_l),e(_l,ia),e(ia,vl),e(z,Vh),e(z,wl),e(wl,ga),e(ga,El),e(u,Kh),e(u,W),e(W,kl),e(kl,ha),e(ha,Jh),e(W,jh),e(W,Tl),e(Tl,Zh),e(W,ec),e(W,yl),e(yl,ca),e(ca,Al),e(W,tc),e(W,Dl),e(Dl,ua),e(ua,Sl),e(u,ac),e(u,q),e(q,xl),e(xl,fa),e(fa,oc),e(q,rc),e(q,Hl),e(Hl,lc),e(q,sc),e(q,Ml),e(Ml,da),e(da,Il),e(q,nc),e(q,Ol),e(Ol,ma),e(ma,Gl),e(u,ic),e(u,P),e(P,$l),e($l,ba),e(ba,gc),e(P,hc),e(P,Nl),e(Nl,cc),e(P,uc),e(P,Rl),e(Rl,pa),e(pa,Cl),e(P,fc),e(P,zl),e(zl,_a),e(_a,Wl),e(u,dc),e(u,L),e(L,ql),e(ql,va),e(va,mc),e(L,bc),e(L,Pl),e(Pl,pc),e(L,_c),e(L,Ll),e(Ll,wa),e(wa,Ul),e(L,vc),e(L,Fl),e(Fl,Ea),e(Ea,Xl),e(u,wc),e(u,U),e(U,Bl),e(Bl,ka),e(ka,Ec),e(U,kc),e(U,Yl),e(Yl,Tc),e(U,yc),e(U,Ql),e(Ql,Ta),e(Ta,Vl),e(U,Ac),e(U,Kl),e(Kl,ya),e(ya,Jl),e(u,Dc),e(u,F),e(F,jl),e(jl,Aa),e(Aa,Sc),e(F,xc),e(F,Zl),e(Zl,Hc),e(F,Mc),e(F,es),e(es,Da),e(Da,ts),e(F,Ic),e(F,as),e(as,Sa),e(Sa,os),e(u,Oc),e(u,X),e(X,rs),e(rs,xa),e(xa,Gc),e(X,$c),e(X,ls),e(ls,Nc),e(X,Rc),e(X,ei),e(X,Cc),e(X,ti),e(u,zc),e(u,B),e(B,ss),e(ss,Ha),e(Ha,Wc),e(B,qc),e(B,ns),e(ns,Pc),e(B,Lc),e(B,is),e(is,Ma),e(Ma,gs),e(B,Uc),e(B,hs),e(hs,Ia),e(Ia,cs),e(u,Fc),e(u,Y),e(Y,us),e(us,Oa),e(Oa,Xc),e(Y,Bc),e(Y,fs),e(fs,Yc),e(Y,Qc),e(Y,ds),e(ds,Ga),e(Ga,ms),e(Y,Vc),e(Y,bs),e(bs,$a),e($a,ps),e(u,Kc),e(u,Q),e(Q,_s),e(_s,Na),e(Na,Jc),e(Q,jc),e(Q,vs),e(vs,Zc),e(Q,eu),e(Q,ws),e(ws,Ra),e(Ra,Es),e(Q,tu),e(Q,ks),e(ks,Ca),e(Ca,Ts),m(i,ki,d),m(i,ue,d),e(ue,we),e(we,ai),So(za,ai,null),e(ue,au),e(ue,oi),e(oi,ou),m(i,Ti,d),m(i,Ee,d),e(Ee,ri),e(ri,V),e(V,ys),e(ys,ru),e(V,lu),e(V,As),e(As,su),e(V,nu),e(V,li),e(V,iu),e(V,si),e(Ee,gu),e(Ee,b),e(b,K),e(K,Ds),e(Ds,Wa),e(Wa,hu),e(K,cu),e(K,Ss),e(Ss,uu),e(K,fu),e(K,xs),e(xs,qa),e(qa,Hs),e(K,du),e(K,Ms),e(Ms,Pa),e(Pa,Is),e(b,mu),e(b,J),e(J,Os),e(Os,La),e(La,bu),e(J,pu),e(J,Gs),e(Gs,_u),e(J,vu),e(J,$s),e($s,Ua),e(Ua,Ns),e(J,wu),e(J,Rs),e(Rs,Fa),e(Fa,Cs),e(b,Eu),e(b,j),e(j,zs),e(zs,Xa),e(Xa,ku),e(j,Tu),e(j,Ws),e(Ws,yu),e(j,Au),e(j,qs),e(qs,Ba),e(Ba,Ps),e(j,Du),e(j,Ls),e(Ls,Ya),e(Ya,Us),e(b,Su),e(b,Z),e(Z,Fs),e(Fs,Qa),e(Qa,xu),e(Z,Hu),e(Z,Xs),e(Xs,Mu),e(Z,Iu),e(Z,Bs),e(Bs,Va),e(Va,Ys),e(Z,Ou),e(Z,Qs),e(Qs,Ka),e(Ka,Vs),e(b,Gu),e(b,ee),e(ee,Ks),e(Ks,Ja),e(Ja,$u),e(ee,Nu),e(ee,Js),e(Js,Ru),e(ee,Cu),e(ee,js),e(js,ja),e(ja,Zs),e(ee,zu),e(ee,en),e(en,Za),e(Za,tn),e(b,Wu),e(b,te),e(te,an),e(an,eo),e(eo,qu),e(te,Pu),e(te,on),e(on,Lu),e(te,Uu),e(te,rn),e(rn,to),e(to,ln),e(te,Fu),e(te,sn),e(sn,ao),e(ao,nn),e(b,Xu),e(b,ae),e(ae,gn),e(gn,oo),e(oo,Bu),e(ae,Yu),e(ae,hn),e(hn,Qu),e(ae,Vu),e(ae,cn),e(cn,ro),e(ro,un),e(ae,Ku),e(ae,fn),e(fn,lo),e(lo,dn),e(b,Ju),e(b,oe),e(oe,mn),e(mn,so),e(so,ju),e(oe,Zu),e(oe,bn),e(bn,ef),e(oe,tf),e(oe,pn),e(pn,no),e(no,_n),e(oe,af),e(oe,vn),e(vn,io),e(io,wn),e(b,of),e(b,re),e(re,En),e(En,go),e(go,rf),e(re,lf),e(re,kn),e(kn,sf),e(re,nf),e(re,Tn),e(Tn,ho),e(ho,yn),e(re,gf),e(re,An),e(An,co)
,e(co,Dn),m(i,yi,d),m(i,fe,d),e(fe,ke),e(ke,ni),So(uo,ni,null),e(fe,hf),e(fe,ii),e(ii,cf),m(i,Ai,d),m(i,Te,d),e(Te,uf),e(Te,fo),e(fo,ff),e(Te,df),m(i,Di,d),m(i,ye,d),e(ye,gi),e(gi,le),e(le,Sn),e(Sn,mf),e(le,bf),e(le,xn),e(xn,pf),e(le,_f),e(le,hi),e(le,vf),e(le,ci),e(ye,wf),e(ye,mo),e(mo,se),e(se,Hn),e(Hn,bo),e(bo,Ef),e(se,kf),e(se,Ae),e(Ae,Tf),e(Ae,po),e(po,yf),e(Ae,Af),e(se,Df),e(se,Mn),e(Mn,_o),e(_o,In),e(se,Sf),e(se,On),e(On,vo),e(vo,Gn),e(mo,xf),e(mo,ne),e(ne,$n),e($n,wo),e(wo,Hf),e(ne,Mf),e(ne,De),e(De,If),e(De,Eo),e(Eo,Of),e(De,Gf),e(ne,$f),e(ne,Nn),e(Nn,ko),e(ko,Rn),e(ne,Nf),e(ne,Cn),e(Cn,To),e(To,zn),m(i,Si,d),m(i,de,d),e(de,Se),e(Se,ui),So(yo,ui,null),e(de,Rf),e(de,fi),e(fi,Cf),m(i,xi,d),m(i,xe,d),e(xe,zf),e(xe,Wn),e(Wn,Wf),e(xe,qf),Hi=!0},p:T1,i(i){Hi||(xo(mt.$$.fragment,i),xo(bt.$$.fragment,i),xo(pt.$$.fragment,i),xo(Wt.$$.fragment,i),xo(za.$$.fragment,i),xo(uo.$$.fragment,i),xo(yo.$$.fragment,i),Hi=!0)},o(i){Ho(mt.$$.fragment,i),Ho(bt.$$.fragment,i),Ho(pt.$$.fragment,i),Ho(Wt.$$.fragment,i),Ho(za.$$.fragment,i),Ho(uo.$$.fragment,i),Ho(yo.$$.fragment,i),Hi=!1},d(i){r(ie),i&&r(Oo),i&&r(w),Mo(mt),i&&r(di),i&&r(Go),i&&r(mi),i&&r($o),i&&r(bi),i&&r(ge),Mo(bt),i&&r(pi),i&&r(he),Mo(pt),i&&r(_i),i&&r(No),i&&r(vi),i&&r(pe),i&&r(wi),i&&r(ce),Mo(Wt),i&&r(Ei),i&&r(ve),i&&r(ki),i&&r(ue),Mo(za),i&&r(Ti),i&&r(Ee),i&&r(yi),i&&r(fe),Mo(uo),i&&r(Ai),i&&r(Te),i&&r(Di),i&&r(ye),i&&r(Si),i&&r(de),Mo(yo),i&&r(xi),i&&r(xe)}}}const A1={local:"transformers-notebooks",sections:[{local:"hugging-faces-notebooks",sections:[{local:"documentation-notebooks",title:"Documentation notebooks"},{local:"pytorch-examples",title:"PyTorch Examples"},{local:"tensorflow-examples",title:"TensorFlow Examples"},{local:"optimum-notebooks",title:"Optimum notebooks"}],title:"Hugging Face's notebooks \u{1F917}"},{local:"community-notebooks",title:"Community notebooks:"}],title:"\u{1F917} Transformers Notebooks"};function D1(Bi,ie,Oo){let{fw:w}=ie;return Bi.$$set=E=>{"fw"in E&&Oo(0,w=E.fw)},[w]}class H1 extends v1{constructor(ie){super();w1(this,ie,D1,y1,E1,{fw:0})}}export{H1 as default,A1 as metadata};
266
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages/performance.mdx-a6515fea.js
import{S as Yz,i as Qz,s as Jz,e as s,k as h,w as m,t as l,M as Zz,c as r,d as t,m as f,a as i,x as d,h as n,b as c,N as PA,F as a,g as p,y as u,q as v,o as w,B as g}from"../chunks/vendor-4833417e.js";import{T as Xz}from"../chunks/Tip-fffd6df1.js";import{I as _}from"../chunks/IconCopyLink-4b81c553.js";import{C as b}from"../chunks/CodeBlock-6a3d1b46.js";import{D as Kz}from"../chunks/DocNotebookDropdown-ecff2a90.js";import"../chunks/CopyButton-dacfbfaf.js";function eN(ys){let y,k;return{c(){y=s("p"),k=l("Note: In order to properly clear the memory after experiments we need restart the Python kernel between experiments. Run all steps above and then just one of the experiments below.")},l(E){y=r(E,"P",{});var A=i(y);k=n(A,"Note: In order to properly clear the memory after experiments we need restart the Python kernel between experiments. Run all steps above and then just one of the experiments below."),A.forEach(t)},m(E,A){p(E,y,A),a(y,k)},d(E){E&&t(y)}}}function tN(ys){let y;return{c(){y=l(`Note that in order to use the 8-bit optimizer with an existing pretrained model a change to the embedding layer is needed. Read [this issue](https://github.com/huggingface/transformers/issues/14819) for more information.`)},l(k){y=n(k,`Note that in order to use the 8-bit optimizer with an existing pretrained model a change to the embedding layer is needed. Read [this issue](https://github.com/huggingface/transformers/issues/14819) for more information.`)},m(k,E){p(k,y,E)},d(k){k&&t(y)}}}function aN(ys){let y,k,E,A,Th,Es,zy,jh,Ny,Zd,Io,xh,Dh,Cy,Kd,Ps,eu,te,Ly,Uo,My,Oy,$s,By,qy,tu,ks,au,ae,Vy,Gh,Hy,Fy,Ih,Wy,Ry,su,Pt,Xy,As,Uh,Yy,Qy,ru,Ts,iu,$t,Jy,So,Zy,Ky,ou,js,lu,zo,e2,nu,xs,pu,No,t2,hu,Ds,fu,Co,a2,cu,Pe,kt,Sh,Gs,s2,zh,r2,mu,At,i2,Nh,o2,l2,du,Is,uu,Tt,n2,Ch,p2,h2,vu,Us,wu,Ss,gu,Lo,f2,_u,zs,bu,jt,yu,$e,xt,Lh,Ns,c2,Mh,m2,Eu,Dt,d2,Mo,u2,v2,Pu,Cs,$u,Ls,ku,Oo,w2,Au,ke,Gt,Oh,Ms,g2,Bh,_2,Tu,Bo,b2,ju,N,y2,qo,E2,P2,qh,$2,k2,Vo,A2,T2,xu,Os,Du,Bs,Gu,G,j2,Vh,x2,D2,Hh,G2,I2,Fh,U2,S2,Wh,z2,N2,Iu,Ho,C2,Uu,Ae,It,Rh,qs,L2,Xh,M2,Su,Fo,O2,zu,Ut,B2,Vs,q2,V2,Nu,se,H2,Wo,F2,W2,Ro,R2,X2,Cu,Hs,Lu,Fs,Mu,Xo,Y2,Ou,Te,St,Yh,Ws,Q2,Qh,J2,Bu,re,Z2,Jh,K2,e0,Zh,t0,a0,qu,Rs,Vu,Xs,Hu,Yo,s0,Fu,Ys,Wu,Qs,Ru,Qo,r0,Xu,je,zt,Kh,Js,i0,ef,o0,Yu,Jo,l0,Qu,xe,Nt,tf,Zs,n0,af,p0,Ju,Ct,h0,sf,f0,c0,Zu,Ks,Ku,er,ev,Zo,m0,tv,tr,av,ar,sv,Ko,d0,rv,De,Lt,rf,sr,u0,of,v0,iv,el,w0,ov,I,g0,tl,_0,b0,al,y0,E0,rr,P0,$0,lf,k0,A0,lv,sl,T0,nv,Mt,pv,ir,hv,Ot,j0,nf,x0,D0,fv,or,cv,lr,mv,rl,G0,dv,nr,uv,pr,vv,il,I0,wv,ol,ll,$A,gv,Ge,Bt,pf,hr,U0,hf,S0,_v,ie,z0,nl,N0,C0,pl,L0,M0,bv,fr,yv,hl,O0,Ev,cr,Pv,T,B0,mr,ff,q0,V0,fl,H0,F0,dr,cf,W0,R0,mf,X0,Y0,ur,df,Q0,J0,$v,qt,Z0,uf,K0,e3,kv,vr,Av,cl,t3,Tv,ml,a3,jv,Ie,Vt,vf,wr,s3,wf,r3,xv,dl,i3,Dv,Ht,gf,o3,l3,_f,n3,Gv,ul,p3,Iv,vl,h3,Uv,wl,f3,Sv,Ue,Ft,bf,gr,c3,yf,m3,zv,C,d3,Ef,u3,v3,Pf,w3,g3,$f,_3,b3,Nv,L,y3,gl,E3,P3,_l,$3,k3,kf,A3,T3,Cv,_r,Lv,bl,j3,Mv,Se,Wt,Af,br,x3,Tf,D3,Ov,Rt,G3,yl,I3,U3,Bv,El,S3,qv,ze,Xt,jf,yr,z3,xf,N3,Vv,Pl,C3,Hv,Ne,Yt,Df,Er,L3,Gf,M3,Fv,$l,O3,Wv,kl,Al,B3,Pr,If,q3,V3,Uf,H3,Rv,Tl,F3,Xv,Qt,Sf,W3,R3,zf,X3,Yv,Ce,Jt,Nf,$r,Y3,Cf,Q3,Qv,jl,J3,Jv,oe,Lf,Z3,K3,Mf,e6,t6,kr,a6,xl,s6,r6,Zv,Dl,i6,Kv,P,Of,o6,l6,Bf,n6,p6,qf,h6,f6,Vf,c6,m6,Hf,d6,u6,Ff,v6,w6,Wf,g6,ew,Le,Zt,Rf,Ar,_6,Xf,b6,tw,Me,Kt,Yf,Tr,y6,Qf,E6,aw,Gl,P6,sw,jr,Jf,$6,k6,rw,Il,A6,iw,Ul,T6,ow,Sl,j6,lw,zl,x6,nw,Nl,D6,pw,Cl,G6,hw,xr,Zf,I6,U6,fw,Ll,S6,cw,Ml,z6,mw,Oe,ea,Kf,Dr,N6,ec,C6,dw,Ol,L6,uw,Bl,M6,vw,Gr,ww,ql,O6,gw,Vl,B6,_w,Ir,bw,Hl,q6,yw,Ur,Ew,Fl,V6,Pw,Sr,$w,le,H6,tc,F6,W6,ac,R6,X6,kw,Wl,Y6,Aw,Rl,Q6,Tw,Be,ta,sc,zr,J6,rc,Z6,jw,Nr,Cr,K6,eE,xw,aa,tE,Lr,aE,sE,Dw,Xl,ic,rE,Gw,M,iE,
oc,oE,lE,lc,nE,pE,nc,hE,fE,Iw,Yl,cE,Uw,Ql,mE,Sw,sa,pc,Mr,hc,dE,uE,Jl,vE,wE,Or,Br,fc,gE,_E,Zl,bE,yE,qr,cc,EE,PE,Kl,$E,zw,en,kE,Nw,ra,AE,mc,TE,jE,Cw,tn,xE,Lw,Vr,Mw,x,DE,dc,GE,IE,uc,UE,SE,vc,zE,NE,wc,CE,LE,gc,ME,Ow,qe,ia,_c,Hr,OE,bc,BE,Bw,Ve,oa,yc,Fr,qE,Ec,VE,qw,an,HE,Vw,Wr,FE,sn,WE,Hw,He,la,Pc,Rr,RE,$c,XE,Fw,rn,YE,Ww,ne,Xr,kc,Ac,QE,JE,Yr,ZE,Tc,KE,eP,tP,Qr,jc,xc,aP,sP,Jr,rP,Dc,iP,oP,lP,Zr,Gc,Ic,nP,pP,Kr,hP,Uc,fP,cP,Rw,on,mP,Xw,ei,dP,ti,uP,Yw,Fe,na,Sc,ai,vP,zc,wP,Qw,ln,gP,Jw,j,Nc,_P,bP,Cc,yP,EP,Lc,PP,$P,Mc,kP,AP,Oc,TP,jP,Bc,xP,Zw,nn,DP,Kw,pn,GP,eg,hn,IP,tg,We,pa,qc,si,UP,Vc,SP,ag,ha,Hc,zP,NP,Fc,CP,sg,Re,fa,Wc,ri,LP,Rc,MP,rg,pe,Xc,OP,BP,fn,qP,ii,VP,HP,Yc,FP,ig,Xe,ca,Qc,oi,WP,Jc,RP,og,cn,Zc,XP,lg,Ye,ma,Kc,li,YP,em,QP,ng,mn,tm,JP,pg,dn,ZP,hg,Qe,da,am,ni,KP,sm,e5,fg,un,t5,cg,Je,ua,rm,pi,a5,im,s5,mg,vn,r5,dg,Ze,va,om,hi,i5,wa,lm,o5,l5,nm,n5,p5,ug,wn,h5,vg,Ke,ga,pm,fi,f5,hm,c5,wg,gn,m5,gg,O,ci,d5,fm,u5,v5,w5,mi,g5,cm,_5,b5,y5,di,E5,mm,P5,$5,k5,dm,A5,_g,_n,T5,bg,bn,yn,kA,yg,_a,j5,ui,x5,D5,Eg,En,G5,Pg,et,ba,um,vi,I5,vm,U5,$g,Pn,S5,kg,$n,z5,Ag,B,wm,N5,C5,gm,L5,M5,wi,O5,_m,B5,q5,V5,bm,H5,Tg,kn,F5,jg,ya,W5,ym,R5,X5,xg,An,Y5,Dg,gi,Gg,Ea,Q5,Em,J5,Z5,Ig,Pa,Pm,R,Tn,K5,e4,jn,t4,a4,xn,s4,r4,Dn,i4,o4,X,Y,Gn,l4,n4,In,p4,h4,Un,f4,c4,Sn,m4,d4,Q,zn,u4,v4,Nn,w4,g4,Cn,_4,b4,Ln,y4,E4,J,Mn,P4,$4,On,k4,A4,Bn,T4,j4,qn,x4,D4,Z,Vn,G4,I4,Hn,U4,S4,Fn,z4,N4,Wn,C4,Ug,$a,L4,$m,M4,O4,Sg,Rn,B4,zg,Xn,q4,Ng,ka,V4,_i,H4,F4,Cg,Yn,W4,Lg,Aa,Qn,R4,bi,X4,Y4,Jn,Q4,yi,J4,Mg,he,Z4,Ei,K4,e$,Pi,t$,a$,Og,tt,Ta,km,$i,s$,Am,r$,Bg,fe,i$,Tm,o$,l$,ki,n$,p$,qg,Zn,h$,Vg,at,ja,jm,Ai,f$,xm,c$,Hg,Kn,m$,Fg,ep,d$,Wg,tp,u$,Rg,xa,v$,Dm,w$,g$,Xg,st,Da,Gm,Ti,_$,Im,b$,Yg,ce,y$,Um,E$,P$,Sm,$$,k$,Qg,ap,A$,Jg,sp,T$,Zg,rp,j$,Kg,ip,x$,e1,Ga,D$,zm,G$,I$,t1,op,U$,a1,ji,s1,xi,S$,Nm,z$,r1,lp,N$,i1,Di,o1,np,C$,l1,me,L$,Gi,M$,O$,Ii,B$,q$,n1,rt,Ia,Cm,Ui,V$,Lm,H$,p1,Ua,F$,pp,W$,R$,h1,Sa,X$,Mm,Y$,Q$,f1,it,za,Om,Si,J$,Bm,Z$,c1,hp,K$,m1,fp,ek,d1,zi,u1,cp,tk,v1,Na,ak,Ni,sk,rk,w1,mp,ik,g1,q,ok,qm,lk,nk,Vm,pk,hk,Hm,fk,ck,_1,de,mk,Fm,dk,uk,Wm,vk,wk,b1,Ca,gk,Rm,_k,bk,y1,ue,yk,Ci,Ek,Pk,Li,$k,kk,E1,ot,La,Xm,Mi,Ak,Ym,Tk,P1,ve,jk,Oi,xk,Dk,Bi,Gk,Ik,$1,Ma,Uk,Qm,Sk,zk,k1,dp,Nk,A1,lt,Oa,Jm,qi,Ck,Zm,Lk,T1,up,Mk,j1,V,Ok,Vi,Bk,qk,Km,Vk,Hk,ed,Fk,Wk,x1,vp,Rk,D1,Hi,G1,Ba,Xk,td,Yk,Qk,I1,nt,qa,ad,Fi,Jk,sd,Zk,U1,wp,Kk,S1,we,e8,Wi,t8,a8,Ri,s8,r8,z1,Xi,Yi,i8,o8,N1,Va,l8,Qi,n8,p8,C1,H,h8,gp,f8,c8,Ji,m8,d8,Zi,u8,v8,L1,pt,Ha,rd,Ki,w8,id,g8,M1,ht,od,_8,b8,ld,y8,E8,O1,Fa,nd,P8,$8,pd,k8,B1,_p,A8,q1,eo,to,T8,j8,V1,Wa,hd,x8,D8,bp,G8,ao,fd,I8,U8,so,S8,cd,z8,N8,H1,ro,io,C8,L8,F1,yp,M8,W1,U,md,O8,B8,dd,q8,V8,oo,H8,ud,F8,W8,R8,Ep,X8,vd,Y8,Q8,wd,J8,R1,Pp,Z8,X1,Ra,K8,lo,e7,t7,Y1,$p,a7,Q1,kp,s7,J1,Ap,r7,Z1,Xa,i7,no,o7,l7,K1,Tp,n7,e_,Ya,gd,ft,jp,p7,h7,_d,f7,c7,xp,m7,d7,ct,mt,Dp,u7,v7,bd,w7,g7,Gp,_7,b7,dt,Ip,y7,E7,yd,P7,$7,Up,k7,A7,ut,Sp,T7,j7,Ed,x7,D7,zp,G7,t_,Np,I7,a_,Cp,U7,s_,Lp,S7,r_,Mp,z7,i_,po,Pd,N7,C7,o_,ho,l_,D,L7,$d,M7,O7,kd,B7,q7,Ad,V7,H7,Td,F7,W7,jd,R7,n_,vt,Qa,xd,fo,X7,Dd,Y7,p_,Op,Q7,h_,Ja,Bp,Gd,J7,Z7,K7,qp,Id,e9,t9,f_,wt,Za,Ud,co,a9,Sd,s9,c_,gt,r9,zd,i9,o9,mo,l9,m_,_t,Ka,Nd,uo,n9,Cd,p9,d_,bt,es,Ld,vo,h9,Md,f9,u_,Vp,c9,v_,Hp,m9,w_,Fp,d9,g_,Wp,Rp,AA,__,ts,u9,wo,v9,w9,b_,Xp,g9,y_,Yp,_9,E_,Qp,b9,P_,Jp,y9,$_,ge,Od,go,E9,P9,Bd,_o,$9,k9,qd,bo,A9,k_,$,T9,yo,j9,x9,Eo,D9,G9,Po,I9,U9,$o,S9,z9,ko,N9,C9,Zp,L9,M9,A_,yt,as,Vd,Ao,O9,Hd,B9,T_,ss,q9,To,V9,H9,j_,rs,F9,Fd,W9,R9,x_,Kp,X9,D_,_e,Y9,jo,Q9,J9,xo,Z9,K9,G_,eh,eA,I_,Et,is,Wd,Do,tA,Rd,aA,U_,th,sA,S_,ah,rA,z_;return Es=new _({}),Ps=new Kz({props:{classNames:"absolute z-10 right-0 
top-0",options:[{label:"Mixed",value:"https://colab.research.google.com/github/huggingface/notebooks/blob/master/transformers_doc/performance.ipynb"},{label:"PyTorch",value:"https://colab.research.google.com/github/huggingface/notebooks/blob/master/transformers_doc/pytorch/performance.ipynb"},{label:"TensorFlow",value:"https://colab.research.google.com/github/huggingface/notebooks/blob/master/transformers_doc/tensorflow/performance.ipynb"},{label:"Mixed",value:"https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/transformers_doc/performance.ipynb"},{label:"PyTorch",value:"https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/transformers_doc/pytorch/performance.ipynb"},{label:"TensorFlow",value:"https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/transformers_doc/tensorflow/performance.ipynb"}]}}),ks=new b({props:{code:"pip install transformers datasets accelerate nvidia-ml-py3",highlighted:"pip install transformers datasets accelerate nvidia-ml-py3"}}),Ts=new b({props:{code:`import numpy as np from datasets import Dataset seq_len, dataset_size = 512, 512 dummy_data = { "input_ids": np.random.randint(100, 30000, (dataset_size, seq_len)), "labels": np.random.randint(0, 1, (dataset_size)), } ds = Dataset.from_dict(dummy_data) ds.set_format("pt")`,highlighted:`<span class="hljs-keyword">import</span> numpy <span class="hljs-keyword">as</span> np <span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> Dataset seq_len, dataset_size = <span class="hljs-number">512</span>, <span class="hljs-number">512</span> dummy_data = { <span class="hljs-string">&quot;input_ids&quot;</span>: np.random.randint(<span class="hljs-number">100</span>, <span class="hljs-number">30000</span>, (dataset_size, seq_len)), <span class="hljs-string">&quot;labels&quot;</span>: np.random.randint(<span class="hljs-number">0</span>, <span class="hljs-number">1</span>, (dataset_size)), } ds = Dataset.from_dict(dummy_data) ds.set_format(<span class="hljs-string">&quot;pt&quot;</span>)`}}),js=new b({props:{code:`from pynvml import * def print_gpu_utilization(): nvmlInit() handle = nvmlDeviceGetHandleByIndex(0) info = nvmlDeviceGetMemoryInfo(handle) print(f"GPU memory occupied: {info.used//1024**2} MB.") def print_summary(result): print(f"Time: {result.metrics['train_runtime']:.2f}") print(f"Samples/second: {result.metrics['train_samples_per_second']:.2f}") print_gpu_utilization()`,highlighted:`<span class="hljs-keyword">from</span> pynvml <span class="hljs-keyword">import</span> * <span class="hljs-keyword">def</span> <span class="hljs-title function_">print_gpu_utilization</span>(): nvmlInit() handle = nvmlDeviceGetHandleByIndex(<span class="hljs-number">0</span>) info = nvmlDeviceGetMemoryInfo(handle) <span class="hljs-built_in">print</span>(<span class="hljs-string">f&quot;GPU memory occupied: <span class="hljs-subst">{info.used//<span class="hljs-number">1024</span>**<span class="hljs-number">2</span>}</span> MB.&quot;</span>) <span class="hljs-keyword">def</span> <span class="hljs-title function_">print_summary</span>(<span class="hljs-params">result</span>): <span class="hljs-built_in">print</span>(<span class="hljs-string">f&quot;Time: <span class="hljs-subst">{result.metrics[<span class="hljs-string">&#x27;train_runtime&#x27;</span>]:<span class="hljs-number">.2</span>f}</span>&quot;</span>) <span class="hljs-built_in">print</span>(<span class="hljs-string">f&quot;Samples/second: <span 
class="hljs-subst">{result.metrics[<span class="hljs-string">&#x27;train_samples_per_second&#x27;</span>]:<span class="hljs-number">.2</span>f}</span>&quot;</span>) print_gpu_utilization()`}}),xs=new b({props:{code:"print_gpu_utilization()",highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>print_gpu_utilization() GPU memory occupied: <span class="hljs-number">0</span> MB.`}}),Ds=new b({props:{code:`import torch torch.ones((1, 1)).to("cuda") print_gpu_utilization()`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>torch.ones((<span class="hljs-number">1</span>, <span class="hljs-number">1</span>)).to(<span class="hljs-string">&quot;cuda&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>print_gpu_utilization() GPU memory occupied: <span class="hljs-number">1343</span> MB.`}}),Gs=new _({}),Is=new b({props:{code:`from transformers import AutoModelForSequenceClassification model = AutoModelForSequenceClassification.from_pretrained("bert-large-uncased").to("cuda") print_gpu_utilization()`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModelForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;bert-large-uncased&quot;</span>).to(<span class="hljs-string">&quot;cuda&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>print_gpu_utilization() GPU memory occupied: <span class="hljs-number">2631</span> MB.`}}),Us=new b({props:{code:"nvidia-smi",highlighted:"nvidia-smi"}}),Ss=new b({props:{code:`Tue Jan 11 08:58:05 2022 +-----------------------------------------------------------------------------+ | NVIDIA-SMI 460.91.03 Driver Version: 460.91.03 CUDA Version: 11.2 | |-------------------------------+----------------------+----------------------+ | GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC | | Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. | | | | MIG M. | |===============================+======================+======================| | 0 Tesla V100-SXM2... On | 00000000:00:04.0 Off | 0 | | N/A 37C P0 39W / 300W | 2631MiB / 16160MiB | 0% Default | | | | N/A | +-------------------------------+----------------------+----------------------+ +-----------------------------------------------------------------------------+ | Processes: | | GPU GI CI PID Type Process name GPU Memory | | ID ID Usage | |=============================================================================| | 0 N/A N/A 3721 C ...nvs/codeparrot/bin/python 2629MiB | +-----------------------------------------------------------------------------+`,highlighted:`Tue Jan 11 08:58:05 2022 +-----------------------------------------------------------------------------+ | NVIDIA-SMI 460.91.03 Driver Version: 460.91.03 CUDA Version: 11.2 | |-------------------------------+----------------------+----------------------+ | GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC | | Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. | | | | MIG M. | |===============================+======================+======================| | 0 Tesla V100-SXM2... 
On | 00000000:00:04.0 Off | 0 | | N/A 37C P0 39W / 300W | 2631MiB / 16160MiB | 0% Default | | | | N/A | +-------------------------------+----------------------+----------------------+ +-----------------------------------------------------------------------------+ | Processes: | | GPU GI CI PID Type Process name GPU Memory | | ID ID Usage | |=============================================================================| | 0 N/A N/A 3721 C ...nvs/codeparrot/bin/python 2629MiB | +-----------------------------------------------------------------------------+`}}),zs=new b({props:{code:`default_args = { "output_dir": "tmp", "evaluation_strategy": "steps", "num_train_epochs": 1, "log_level": "error", "report_to": "none", }`,highlighted:`default_args = { <span class="hljs-string">&quot;output_dir&quot;</span>: <span class="hljs-string">&quot;tmp&quot;</span>, <span class="hljs-string">&quot;evaluation_strategy&quot;</span>: <span class="hljs-string">&quot;steps&quot;</span>, <span class="hljs-string">&quot;num_train_epochs&quot;</span>: <span class="hljs-number">1</span>, <span class="hljs-string">&quot;log_level&quot;</span>: <span class="hljs-string">&quot;error&quot;</span>, <span class="hljs-string">&quot;report_to&quot;</span>: <span class="hljs-string">&quot;none&quot;</span>, }`}}),jt=new Xz({props:{$$slots:{default:[eN]},$$scope:{ctx:ys}}}),Ns=new _({}),Cs=new b({props:{code:`from transformers import TrainingArguments, Trainer, logging logging.set_verbosity_error() training_args = TrainingArguments(per_device_train_batch_size=4, **default_args) trainer = Trainer(model=model, args=training_args, train_dataset=ds) result = trainer.train() print_summary(result)`,highlighted:`<span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TrainingArguments, Trainer, logging logging.set_verbosity_error() training_args = TrainingArguments(per_device_train_batch_size=<span class="hljs-number">4</span>, **default_args) trainer = Trainer(model=model, args=training_args, train_dataset=ds) result = trainer.train() print_summary(result)`}}),Ls=new b({props:{code:`Time: 57.82 Samples/second: 8.86 GPU memory occupied: 14949 MB.`,highlighted:`<span class="hljs-keyword">Time:</span> 57.82 Samples/second: 8.86 GPU memory occupied: 14949 MB.`}}),Ms=new _({}),Os=new b({props:{code:`training_args = TrainingArguments(per_device_train_batch_size=1, gradient_accumulation_steps=4, **default_args) trainer = Trainer(model=model, args=training_args, train_dataset=ds) result = trainer.train() print_summary(result)`,highlighted:`training_args = TrainingArguments(per_device_train_batch_size=<span class="hljs-number">1</span>, gradient_accumulation_steps=<span class="hljs-number">4</span>, **default_args) trainer = Trainer(model=model, args=training_args, train_dataset=ds) result = trainer.train() print_summary(result)`}}),Bs=new b({props:{code:`Time: 66.03 Samples/second: 7.75 GPU memory occupied: 8681 MB.`,highlighted:`<span class="hljs-keyword">Time:</span> 66.03 Samples/second: 7.75 GPU memory occupied: 8681 MB.`}}),qs=new _({}),Hs=new b({props:{code:`training_args = TrainingArguments( per_device_train_batch_size=1, gradient_accumulation_steps=4, gradient_checkpointing=True, **default_args ) trainer = Trainer(model=model, args=training_args, train_dataset=ds) result = trainer.train() print_summary(result)`,highlighted:`training_args = TrainingArguments( per_device_train_batch_size=<span class="hljs-number">1</span>, gradient_accumulation_steps=<span class="hljs-number">4</span>, 
gradient_checkpointing=<span class="hljs-literal">True</span>, **default_args ) trainer = Trainer(model=model, args=training_args, train_dataset=ds) result = trainer.train() print_summary(result)`}}),Fs=new b({props:{code:`Time: 85.47 Samples/second: 5.99 GPU memory occupied: 6775 MB.`,highlighted:`<span class="hljs-keyword">Time:</span> 85.47 Samples/second: 5.99 GPU memory occupied: 6775 MB.`}}),Ws=new _({}),Rs=new b({props:{code:`training_args = TrainingArguments(per_device_train_batch_size=4, fp16=True, **default_args) trainer = Trainer(model=model, args=training_args, train_dataset=ds) result = trainer.train() print_summary(result)`,highlighted:`training_args = TrainingArguments(per_device_train_batch_size=<span class="hljs-number">4</span>, fp16=<span class="hljs-literal">True</span>, **default_args) trainer = Trainer(model=model, args=training_args, train_dataset=ds) result = trainer.train() print_summary(result)`}}),Xs=new b({props:{code:`Time: 27.46 Samples/second: 18.64 GPU memory occupied: 13939 MB.`,highlighted:`<span class="hljs-keyword">Time:</span> 27.46 Samples/second: 18.64 GPU memory occupied: 13939 MB.`}}),Ys=new b({props:{code:`training_args = TrainingArguments( per_device_train_batch_size=1, gradient_accumulation_steps=4, gradient_checkpointing=True, fp16=True, **default_args, ) trainer = Trainer(model=model, args=training_args, train_dataset=ds) result = trainer.train() print_summary(result)`,highlighted:`training_args = TrainingArguments( per_device_train_batch_size=<span class="hljs-number">1</span>, gradient_accumulation_steps=<span class="hljs-number">4</span>, gradient_checkpointing=<span class="hljs-literal">True</span>, fp16=<span class="hljs-literal">True</span>, **default_args, ) trainer = Trainer(model=model, args=training_args, train_dataset=ds) result = trainer.train() print_summary(result)`}}),Qs=new b({props:{code:`Time: 50.76 Samples/second: 10.09 GPU memory occupied: 7275 MB.`,highlighted:`<span class="hljs-keyword">Time:</span> 50.76 Samples/second: 10.09 GPU memory occupied: 7275 MB.`}}),Js=new _({}),Zs=new _({}),Ks=new b({props:{code:`training_args = TrainingArguments(per_device_train_batch_size=4, optim="adafactor", **default_args) trainer = Trainer(model=model, args=training_args, train_dataset=ds) result = trainer.train() print_summary(result)`,highlighted:`training_args = TrainingArguments(per_device_train_batch_size=<span class="hljs-number">4</span>, optim=<span class="hljs-string">&quot;adafactor&quot;</span>, **default_args) trainer = Trainer(model=model, args=training_args, train_dataset=ds) result = trainer.train() print_summary(result)`}}),er=new b({props:{code:`Time: 64.31 Samples/second: 7.96 GPU memory occupied: 12295 MB.`,highlighted:`<span class="hljs-keyword">Time:</span> 64.31 Samples/second: 7.96 GPU memory occupied: 12295 MB.`}}),tr=new b({props:{code:`training_args = TrainingArguments( per_device_train_batch_size=1, gradient_accumulation_steps=4, gradient_checkpointing=True, fp16=True, optim="adafactor", **default_args, ) trainer = Trainer(model=model, args=training_args, train_dataset=ds) result = trainer.train() print_summary(result)`,highlighted:`training_args = TrainingArguments( per_device_train_batch_size=<span class="hljs-number">1</span>, gradient_accumulation_steps=<span class="hljs-number">4</span>, gradient_checkpointing=<span class="hljs-literal">True</span>, fp16=<span class="hljs-literal">True</span>, optim=<span class="hljs-string">&quot;adafactor&quot;</span>, **default_args, ) trainer = Trainer(model=model, 
args=training_args, train_dataset=ds) result = trainer.train() print_summary(result)`}}),ar=new b({props:{code:`Time: 56.54 Samples/second: 9.06 GPU memory occupied: 4847 MB.`,highlighted:`<span class="hljs-keyword">Time:</span> 56.54 Samples/second: 9.06 GPU memory occupied: 4847 MB.`}}),sr=new _({}),Mt=new Xz({props:{$$slots:{default:[tN]},$$scope:{ctx:ys}}}),ir=new b({props:{code:`import bitsandbytes as bnb from torch import nn from transformers.trainer_pt_utils import get_parameter_names training_args = TrainingArguments(per_device_train_batch_size=4, **default_args) decay_parameters = get_parameter_names(model, [nn.LayerNorm]) decay_parameters = [name for name in decay_parameters if "bias" not in name] optimizer_grouped_parameters = [ { "params": [p for n, p in model.named_parameters() if n in decay_parameters], "weight_decay": training_args.weight_decay, }, { "params": [p for n, p in model.named_parameters() if n not in decay_parameters], "weight_decay": 0.0, }, ] optimizer_kwargs = { "betas": (training_args.adam_beta1, training_args.adam_beta2), "eps": training_args.adam_epsilon, } optimizer_kwargs["lr"] = training_args.learning_rate adam_bnb_optim = bnb.optim.Adam8bit( optimizer_grouped_parameters, betas=(training_args.adam_beta1, training_args.adam_beta2), eps=training_args.adam_epsilon, lr=training_args.learning_rate, )`,highlighted:`<span class="hljs-keyword">import</span> bitsandbytes <span class="hljs-keyword">as</span> bnb <span class="hljs-keyword">from</span> torch <span class="hljs-keyword">import</span> nn <span class="hljs-keyword">from</span> transformers.trainer_pt_utils <span class="hljs-keyword">import</span> get_parameter_names training_args = TrainingArguments(per_device_train_batch_size=<span class="hljs-number">4</span>, **default_args) decay_parameters = get_parameter_names(model, [nn.LayerNorm]) decay_parameters = [name <span class="hljs-keyword">for</span> name <span class="hljs-keyword">in</span> decay_parameters <span class="hljs-keyword">if</span> <span class="hljs-string">&quot;bias&quot;</span> <span class="hljs-keyword">not</span> <span class="hljs-keyword">in</span> name] optimizer_grouped_parameters = [ { <span class="hljs-string">&quot;params&quot;</span>: [p <span class="hljs-keyword">for</span> n, p <span class="hljs-keyword">in</span> model.named_parameters() <span class="hljs-keyword">if</span> n <span class="hljs-keyword">in</span> decay_parameters], <span class="hljs-string">&quot;weight_decay&quot;</span>: training_args.weight_decay, }, { <span class="hljs-string">&quot;params&quot;</span>: [p <span class="hljs-keyword">for</span> n, p <span class="hljs-keyword">in</span> model.named_parameters() <span class="hljs-keyword">if</span> n <span class="hljs-keyword">not</span> <span class="hljs-keyword">in</span> decay_parameters], <span class="hljs-string">&quot;weight_decay&quot;</span>: <span class="hljs-number">0.0</span>, }, ] optimizer_kwargs = { <span class="hljs-string">&quot;betas&quot;</span>: (training_args.adam_beta1, training_args.adam_beta2), <span class="hljs-string">&quot;eps&quot;</span>: training_args.adam_epsilon, } optimizer_kwargs[<span class="hljs-string">&quot;lr&quot;</span>] = training_args.learning_rate adam_bnb_optim = bnb.optim.Adam8bit( optimizer_grouped_parameters, betas=(training_args.adam_beta1, training_args.adam_beta2), eps=training_args.adam_epsilon, lr=training_args.learning_rate, )`}}),or=new b({props:{code:`trainer = Trainer(model=model, args=training_args, train_dataset=ds, optimizers=(adam_bnb_optim, None)) 
result = trainer.train() print_summary(result)`,highlighted:`trainer = Trainer(model=model, args=training_args, train_dataset=ds, optimizers=(adam_bnb_optim, <span class="hljs-literal">None</span>)) result = trainer.train() print_summary(result)`}}),lr=new b({props:{code:`Time: 55.95 Samples/second: 9.15 GPU memory occupied: 13085 MB.`,highlighted:`<span class="hljs-keyword">Time:</span> 55.95 Samples/second: 9.15 GPU memory occupied: 13085 MB.`}}),nr=new b({props:{code:`training_args = TrainingArguments( per_device_train_batch_size=1, gradient_accumulation_steps=4, gradient_checkpointing=True, fp16=True, **default_args, ) trainer = Trainer(model=model, args=training_args, train_dataset=ds, optimizers=(adam_bnb_optim, None)) result = trainer.train() print_summary(result)`,highlighted:`training_args = TrainingArguments( per_device_train_batch_size=<span class="hljs-number">1</span>, gradient_accumulation_steps=<span class="hljs-number">4</span>, gradient_checkpointing=<span class="hljs-literal">True</span>, fp16=<span class="hljs-literal">True</span>, **default_args, ) trainer = Trainer(model=model, args=training_args, train_dataset=ds, optimizers=(adam_bnb_optim, <span class="hljs-literal">None</span>)) result = trainer.train() print_summary(result)`}}),pr=new b({props:{code:`Time: 49.46 Samples/second: 10.35 GPU memory occupied: 5363 MB.`,highlighted:`<span class="hljs-keyword">Time:</span> 49.46 Samples/second: 10.35 GPU memory occupied: 5363 MB.`}}),hr=new _({}),fr=new b({props:{code:`training_args = TrainingArguments( per_device_train_batch_size=1, gradient_accumulation_steps=4, gradient_checkpointing=True, fp16=True, **default_args, )`,highlighted:`training_args = TrainingArguments( per_device_train_batch_size=<span class="hljs-number">1</span>, gradient_accumulation_steps=<span class="hljs-number">4</span>, gradient_checkpointing=<span class="hljs-literal">True</span>, fp16=<span class="hljs-literal">True</span>, **default_args, )`}}),cr=new b({props:{code:`from accelerate import Accelerator from torch.utils.data.dataloader import DataLoader dataloader = DataLoader(ds, batch_size=training_args.per_device_train_batch_size) if training_args.gradient_checkpointing: model.gradient_checkpointing_enable() accelerator = Accelerator(fp16=training_args.fp16) model, optimizer, dataloader = accelerator.prepare(model, adam_bnb_optim, dataloader) model.train() for step, batch in enumerate(dataloader, start=1): loss = model(**batch).loss loss = loss / training_args.gradient_accumulation_steps accelerator.backward(loss) if step % training_args.gradient_accumulation_steps == 0: optimizer.step() optimizer.zero_grad()`,highlighted:`<span class="hljs-keyword">from</span> accelerate <span class="hljs-keyword">import</span> Accelerator <span class="hljs-keyword">from</span> torch.utils.data.dataloader <span class="hljs-keyword">import</span> DataLoader dataloader = DataLoader(ds, batch_size=training_args.per_device_train_batch_size) <span class="hljs-keyword">if</span> training_args.gradient_checkpointing: model.gradient_checkpointing_enable() accelerator = Accelerator(fp16=training_args.fp16) model, optimizer, dataloader = accelerator.prepare(model, adam_bnb_optim, dataloader) model.train() <span class="hljs-keyword">for</span> step, batch <span class="hljs-keyword">in</span> <span class="hljs-built_in">enumerate</span>(dataloader, start=<span class="hljs-number">1</span>): loss = model(**batch).loss loss = loss / training_args.gradient_accumulation_steps accelerator.backward(loss) <span 
class="hljs-keyword">if</span> step % training_args.gradient_accumulation_steps == <span class="hljs-number">0</span>: optimizer.step() optimizer.zero_grad()`}}),vr=new b({props:{code:"print_gpu_utilization()",highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>print_gpu_utilization() GPU memory occupied: <span class="hljs-number">5363</span> MB.`}}),wr=new _({}),gr=new _({}),_r=new b({props:{code:"accelerate config",highlighted:"accelerate config"}}),br=new _({}),yr=new _({}),Er=new _({}),$r=new _({}),Ar=new _({}),Tr=new _({}),Dr=new _({}),Gr=new b({props:{code:"nvidia-smi topo -m",highlighted:'<span class="hljs-symbol">nvidia</span>-<span class="hljs-keyword">smi</span> topo -m'}}),Ir=new b({props:{code:` GPU0 GPU1 CPU Affinity NUMA Affinity GPU0 X NV2 0-23 N/A GPU1 NV2 X 0-23 N/A`,highlighted:` <span class="hljs-attribute">GPU0</span> GPU1 CPU Affinity NUMA Affinity <span class="hljs-attribute">GPU0</span> X NV2 <span class="hljs-number">0</span>-<span class="hljs-number">23</span> N/A <span class="hljs-attribute">GPU1</span> NV2 X <span class="hljs-number">0</span>-<span class="hljs-number">23</span> N/A`}}),Ur=new b({props:{code:` GPU0 GPU1 CPU Affinity NUMA Affinity GPU0 X PHB 0-11 N/A GPU1 PHB X 0-11 N/A`,highlighted:` <span class="hljs-attribute">GPU0</span> GPU1 CPU Affinity NUMA Affinity <span class="hljs-attribute">GPU0</span> X PHB <span class="hljs-number">0</span>-<span class="hljs-number">11</span> N/A <span class="hljs-attribute">GPU1</span> PHB X <span class="hljs-number">0</span>-<span class="hljs-number">11</span> N/A`}}),Sr=new b({props:{code:` X = Self SYS = Connection traversing PCIe as well as the SMP interconnect between NUMA nodes (e.g., QPI/UPI) NODE = Connection traversing PCIe as well as the interconnect between PCIe Host Bridges within a NUMA node PHB = Connection traversing PCIe as well as a PCIe Host Bridge (typically the CPU) PXB = Connection traversing multiple PCIe bridges (without traversing the PCIe Host Bridge) PIX = Connection traversing at most a single PCIe bridge NV# = Connection traversing a bonded set of # NVLinks`,highlighted:` X = Self SYS = Connection traversing PCIe <span class="hljs-keyword">as</span> well <span class="hljs-keyword">as</span> <span class="hljs-keyword">the</span> SMP interconnect between NUMA nodes (e.g., QPI/UPI) NODE = Connection traversing PCIe <span class="hljs-keyword">as</span> well <span class="hljs-keyword">as</span> <span class="hljs-keyword">the</span> interconnect between PCIe Host Bridges <span class="hljs-keyword">within</span> <span class="hljs-keyword">a</span> NUMA node PHB = Connection traversing PCIe <span class="hljs-keyword">as</span> well <span class="hljs-keyword">as</span> <span class="hljs-keyword">a</span> PCIe Host Bridge (typically <span class="hljs-keyword">the</span> CPU) PXB = Connection traversing multiple PCIe bridges (<span class="hljs-keyword">without</span> traversing <span class="hljs-keyword">the</span> PCIe Host Bridge) PIX = Connection traversing <span class="hljs-keyword">at</span> most <span class="hljs-keyword">a</span> single PCIe bridge NV<span class="hljs-comment"># = Connection traversing a bonded set of # NVLinks</span>`}}),zr=new _({}),Vr=new b({props:{code:`# DDP w/ NVLink rm -r /tmp/test-clm; CUDA_VISIBLE_DEVICES=0,1 python -m torch.distributed.launch \\ --nproc_per_node 2 examples/pytorch/language-modeling/run_clm.py --model_name_or_path gpt2 \\ --dataset_name wikitext --dataset_config_name wikitext-2-raw-v1 --do_train \\ --output_dir /tmp/test-clm 
--per_device_train_batch_size 4 --max_steps 200 {'train_runtime': 101.9003, 'train_samples_per_second': 1.963, 'epoch': 0.69} # DDP w/o NVLink rm -r /tmp/test-clm; CUDA_VISIBLE_DEVICES=0,1 NCCL_P2P_DISABLE=1 python -m torch.distributed.launch \\ --nproc_per_node 2 examples/pytorch/language-modeling/run_clm.py --model_name_or_path gpt2 \\ --dataset_name wikitext --dataset_config_name wikitext-2-raw-v1 --do_train --output_dir /tmp/test-clm --per_device_train_batch_size 4 --max_steps 200 {'train_runtime': 131.4367, 'train_samples_per_second': 1.522, 'epoch': 0.69}`,highlighted:`<span class="hljs-comment"># DDP w/ NVLink</span> <span class="hljs-string">rm</span> -<span class="hljs-string">r</span> /<span class="hljs-string">tmp</span>/<span class="hljs-string">test-clm</span>; <span class="hljs-string">CUDA_VISIBLE_DEVICES</span>=<span class="hljs-string">0</span>,<span class="hljs-string">1</span> <span class="hljs-string">python</span> -<span class="hljs-string">m</span> <span class="hljs-string">torch</span>.<span class="hljs-string">distributed</span>.<span class="hljs-string">launch</span> \\ <span class="hljs-built_in">--nproc_per_node</span> <span class="hljs-string">2</span> <span class="hljs-string">examples</span>/<span class="hljs-string">pytorch</span>/<span class="hljs-string">language-modeling</span>/<span class="hljs-string">run_clm</span>.<span class="hljs-string">py</span> <span class="hljs-built_in">--model_name_or_path</span> <span class="hljs-string">gpt2</span> \\ <span class="hljs-built_in">--dataset_name</span> <span class="hljs-string">wikitext</span> <span class="hljs-built_in">--dataset_config_name</span> <span class="hljs-string">wikitext-2-raw-v1</span> <span class="hljs-built_in">--do_train</span> \\ <span class="hljs-built_in">--output_dir</span> /<span class="hljs-string">tmp</span>/<span class="hljs-string">test-clm</span> <span class="hljs-built_in">--per_device_train_batch_size</span> <span class="hljs-string">4</span> <span class="hljs-built_in">--max_steps</span> <span class="hljs-string">200</span> {<span class="hljs-string">&#x27;train_runtime&#x27;</span>: <span class="hljs-string">101</span>.<span class="hljs-string">9003</span>, <span class="hljs-string">&#x27;train_samples_per_second&#x27;</span>: <span class="hljs-string">1</span>.<span class="hljs-string">963</span>, <span class="hljs-string">&#x27;epoch&#x27;</span>: <span class="hljs-string">0</span>.<span class="hljs-string">69</span>} <span class="hljs-comment"># DDP w/o NVLink</span> <span class="hljs-string">rm</span> -<span class="hljs-string">r</span> /<span class="hljs-string">tmp</span>/<span class="hljs-string">test-clm</span>; <span class="hljs-string">CUDA_VISIBLE_DEVICES</span>=<span class="hljs-string">0</span>,<span class="hljs-string">1</span> <span class="hljs-string">NCCL_P2P_DISABLE</span>=<span class="hljs-string">1</span> <span class="hljs-string">python</span> -<span class="hljs-string">m</span> <span class="hljs-string">torch</span>.<span class="hljs-string">distributed</span>.<span class="hljs-string">launch</span> \\ <span class="hljs-built_in">--nproc_per_node</span> <span class="hljs-string">2</span> <span class="hljs-string">examples</span>/<span class="hljs-string">pytorch</span>/<span class="hljs-string">language-modeling</span>/<span class="hljs-string">run_clm</span>.<span class="hljs-string">py</span> <span class="hljs-built_in">--model_name_or_path</span> <span class="hljs-string">gpt2</span> \\ <span class="hljs-built_in">--dataset_name</span> <span 
class="hljs-string">wikitext</span> <span class="hljs-built_in">--dataset_config_name</span> <span class="hljs-string">wikitext-2-raw-v1</span> <span class="hljs-built_in">--do_train</span> <span class="hljs-built_in">--output_dir</span> /<span class="hljs-string">tmp</span>/<span class="hljs-string">test-clm</span> <span class="hljs-built_in">--per_device_train_batch_size</span> <span class="hljs-string">4</span> <span class="hljs-built_in">--max_steps</span> <span class="hljs-string">200</span> {<span class="hljs-string">&#x27;train_runtime&#x27;</span>: <span class="hljs-string">131</span>.<span class="hljs-string">4367</span>, <span class="hljs-string">&#x27;train_samples_per_second&#x27;</span>: <span class="hljs-string">1</span>.<span class="hljs-string">522</span>, <span class="hljs-string">&#x27;epoch&#x27;</span>: <span class="hljs-string">0</span>.<span class="hljs-string">69</span>}`}}),Hr=new _({}),Fr=new _({}),Rr=new _({}),ai=new _({}),si=new _({}),ri=new _({}),oi=new _({}),li=new _({}),ni=new _({}),pi=new _({}),hi=new _({}),fi=new _({}),vi=new _({}),gi=new b({props:{code:`export BS=16 python -m torch.distributed.launch \\ --nproc_per_node 2 examples/pytorch/text-classification/run_glue.py \\ --model_name_or_path bert-base-cased \\ --task_name mrpc \\ --do_train \\ --do_eval \\ --max_seq_length 128 \\ --per_device_train_batch_size $BS \\ --learning_rate 2e-5 \\ --num_train_epochs 3.0 \\ --output_dir /tmp/mrpc \\ --overwrite_output_dir \\ --fp16`,highlighted:`export BS=<span class="hljs-number">16</span> python -m torch<span class="hljs-selector-class">.distributed</span><span class="hljs-selector-class">.launch</span> \\ <span class="hljs-attr">--nproc_per_node</span> <span class="hljs-number">2</span> examples/pytorch/text-classification/run_glue<span class="hljs-selector-class">.py</span> \\ <span class="hljs-attr">--model_name_or_path</span> bert-base-cased \\ <span class="hljs-attr">--task_name</span> mrpc \\ <span class="hljs-attr">--do_train</span> \\ <span class="hljs-attr">--do_eval</span> \\ <span class="hljs-attr">--max_seq_length</span> <span class="hljs-number">128</span> \\ <span class="hljs-attr">--per_device_train_batch_size</span> <span class="hljs-variable">$BS</span> \\ <span class="hljs-attr">--learning_rate</span> <span class="hljs-number">2</span>e-<span class="hljs-number">5</span> \\ <span class="hljs-attr">--num_train_epochs</span> <span class="hljs-number">3.0</span> \\ <span class="hljs-attr">--output_dir</span> /tmp/mrpc \\ <span class="hljs-attr">--overwrite_output_dir</span> \\ <span class="hljs-attr">--fp16</span>`}}),$i=new _({}),Ai=new _({}),Ti=new _({}),ji=new b({props:{code:`from torch.cuda.amp import autocast with autocast(dtype=torch.bfloat16): loss, outputs = ...`,highlighted:`<span class="hljs-keyword">from</span> torch.cuda.amp <span class="hljs-keyword">import</span> <span class="hljs-built_in">auto</span><span class="hljs-keyword">cast</span> with <span class="hljs-built_in">auto</span><span class="hljs-keyword">cast</span>(dtype=torch.bfloat16): loss, outputs = ...`}}),Di=new b({props:{code:`python -c 'import transformers; print(f"BF16 support is {transformers.file_utils.is_torch_bf16_available()}")'`,highlighted:'python -c &#x27;<span class="hljs-keyword">import</span> transformers; <span class="hljs-keyword">print</span>(f<span class="hljs-string">&quot;BF16 support is {transformers.file_utils.is_torch_bf16_available()}&quot;</span>)&#x27;'}}),Ui=new _({}),Si=new _({}),zi=new b({props:{code:`import torch 
torch.backends.cuda.matmul.allow_tf32 = True`,highlighted:`import torch torch<span class="hljs-selector-class">.backends</span><span class="hljs-selector-class">.cuda</span><span class="hljs-selector-class">.matmul</span><span class="hljs-selector-class">.allow_tf32</span> = True`}}),Mi=new _({}),qi=new _({}),Hi=new b({props:{code:"model.gradient_checkpointing_enable()",highlighted:"model.gradient_checkpointing_enable()"}}),Fi=new _({}),Ki=new _({}),ho=new b({props:{code:` # DP rm -r /tmp/test-clm; CUDA_VISIBLE_DEVICES=0,1 \\ python examples/pytorch/language-modeling/run_clm.py \\ --model_name_or_path gpt2 --dataset_name wikitext --dataset_config_name wikitext-2-raw-v1 \\ --do_train --output_dir /tmp/test-clm --per_device_train_batch_size 4 --max_steps 200 {'train_runtime': 110.5948, 'train_samples_per_second': 1.808, 'epoch': 0.69} # DDP w/ NVlink rm -r /tmp/test-clm; CUDA_VISIBLE_DEVICES=0,1 \\ python -m torch.distributed.launch --nproc_per_node 2 examples/pytorch/language-modeling/run_clm.py \\ --model_name_or_path gpt2 --dataset_name wikitext --dataset_config_name wikitext-2-raw-v1 \\ --do_train --output_dir /tmp/test-clm --per_device_train_batch_size 4 --max_steps 200 {'train_runtime': 101.9003, 'train_samples_per_second': 1.963, 'epoch': 0.69} # DDP w/o NVlink rm -r /tmp/test-clm; NCCL_P2P_DISABLE=1 CUDA_VISIBLE_DEVICES=0,1 \\ python -m torch.distributed.launch --nproc_per_node 2 examples/pytorch/language-modeling/run_clm.py \\ --model_name_or_path gpt2 --dataset_name wikitext --dataset_config_name wikitext-2-raw-v1 \\ --do_train --output_dir /tmp/test-clm --per_device_train_batch_size 4 --max_steps 200 {'train_runtime': 131.4367, 'train_samples_per_second': 1.522, 'epoch': 0.69}`,highlighted:` <span class="hljs-comment"># DP</span> <span class="hljs-string">rm</span> -<span class="hljs-string">r</span> /<span class="hljs-string">tmp</span>/<span class="hljs-string">test-clm</span>; <span class="hljs-string">CUDA_VISIBLE_DEVICES</span>=<span class="hljs-string">0</span>,<span class="hljs-string">1</span> \\ <span class="hljs-string">python</span> <span class="hljs-string">examples</span>/<span class="hljs-string">pytorch</span>/<span class="hljs-string">language-modeling</span>/<span class="hljs-string">run_clm</span>.<span class="hljs-string">py</span> \\ <span class="hljs-built_in">--model_name_or_path</span> <span class="hljs-string">gpt2</span> <span class="hljs-built_in">--dataset_name</span> <span class="hljs-string">wikitext</span> <span class="hljs-built_in">--dataset_config_name</span> <span class="hljs-string">wikitext-2-raw-v1</span> \\ <span class="hljs-built_in">--do_train</span> <span class="hljs-built_in">--output_dir</span> /<span class="hljs-string">tmp</span>/<span class="hljs-string">test-clm</span> <span class="hljs-built_in">--per_device_train_batch_size</span> <span class="hljs-string">4</span> <span class="hljs-built_in">--max_steps</span> <span class="hljs-string">200</span> {<span class="hljs-string">&#x27;train_runtime&#x27;</span>: <span class="hljs-string">110</span>.<span class="hljs-string">5948</span>, <span class="hljs-string">&#x27;train_samples_per_second&#x27;</span>: <span class="hljs-string">1</span>.<span class="hljs-string">808</span>, <span class="hljs-string">&#x27;epoch&#x27;</span>: <span class="hljs-string">0</span>.<span class="hljs-string">69</span>} <span class="hljs-comment"># DDP w/ NVlink</span> <span class="hljs-string">rm</span> -<span class="hljs-string">r</span> /<span class="hljs-string">tmp</span>/<span 
class="hljs-string">test-clm</span>; <span class="hljs-string">CUDA_VISIBLE_DEVICES</span>=<span class="hljs-string">0</span>,<span class="hljs-string">1</span> \\ <span class="hljs-string">python</span> -<span class="hljs-string">m</span> <span class="hljs-string">torch</span>.<span class="hljs-string">distributed</span>.<span class="hljs-string">launch</span> <span class="hljs-built_in">--nproc_per_node</span> <span class="hljs-string">2</span> <span class="hljs-string">examples</span>/<span class="hljs-string">pytorch</span>/<span class="hljs-string">language-modeling</span>/<span class="hljs-string">run_clm</span>.<span class="hljs-string">py</span> \\ <span class="hljs-built_in">--model_name_or_path</span> <span class="hljs-string">gpt2</span> <span class="hljs-built_in">--dataset_name</span> <span class="hljs-string">wikitext</span> <span class="hljs-built_in">--dataset_config_name</span> <span class="hljs-string">wikitext-2-raw-v1</span> \\ <span class="hljs-built_in">--do_train</span> <span class="hljs-built_in">--output_dir</span> /<span class="hljs-string">tmp</span>/<span class="hljs-string">test-clm</span> <span class="hljs-built_in">--per_device_train_batch_size</span> <span class="hljs-string">4</span> <span class="hljs-built_in">--max_steps</span> <span class="hljs-string">200</span> {<span class="hljs-string">&#x27;train_runtime&#x27;</span>: <span class="hljs-string">101</span>.<span class="hljs-string">9003</span>, <span class="hljs-string">&#x27;train_samples_per_second&#x27;</span>: <span class="hljs-string">1</span>.<span class="hljs-string">963</span>, <span class="hljs-string">&#x27;epoch&#x27;</span>: <span class="hljs-string">0</span>.<span class="hljs-string">69</span>} <span class="hljs-comment"># DDP w/o NVlink</span> <span class="hljs-string">rm</span> -<span class="hljs-string">r</span> /<span class="hljs-string">tmp</span>/<span class="hljs-string">test-clm</span>; <span class="hljs-string">NCCL_P2P_DISABLE</span>=<span class="hljs-string">1</span> <span class="hljs-string">CUDA_VISIBLE_DEVICES</span>=<span class="hljs-string">0</span>,<span class="hljs-string">1</span> \\ <span class="hljs-string">python</span> -<span class="hljs-string">m</span> <span class="hljs-string">torch</span>.<span class="hljs-string">distributed</span>.<span class="hljs-string">launch</span> <span class="hljs-built_in">--nproc_per_node</span> <span class="hljs-string">2</span> <span class="hljs-string">examples</span>/<span class="hljs-string">pytorch</span>/<span class="hljs-string">language-modeling</span>/<span class="hljs-string">run_clm</span>.<span class="hljs-string">py</span> \\ <span class="hljs-built_in">--model_name_or_path</span> <span class="hljs-string">gpt2</span> <span class="hljs-built_in">--dataset_name</span> <span class="hljs-string">wikitext</span> <span class="hljs-built_in">--dataset_config_name</span> <span class="hljs-string">wikitext-2-raw-v1</span> \\ <span class="hljs-built_in">--do_train</span> <span class="hljs-built_in">--output_dir</span> /<span class="hljs-string">tmp</span>/<span class="hljs-string">test-clm</span> <span class="hljs-built_in">--per_device_train_batch_size</span> <span class="hljs-string">4</span> <span class="hljs-built_in">--max_steps</span> <span class="hljs-string">200</span> {<span class="hljs-string">&#x27;train_runtime&#x27;</span>: <span class="hljs-string">131</span>.<span class="hljs-string">4367</span>, <span class="hljs-string">&#x27;train_samples_per_second&#x27;</span>: <span class="hljs-string">1</span>.<span 
class="hljs-string">522</span>, <span class="hljs-string">&#x27;epoch&#x27;</span>: <span class="hljs-string">0</span>.<span class="hljs-string">69</span>}`}}),fo=new _({}),co=new _({}),uo=new _({}),vo=new _({}),Ao=new _({}),Do=new _({}),{c(){y=s("meta"),k=h(),E=s("h1"),A=s("a"),Th=s("span"),m(Es.$$.fragment),zy=h(),jh=s("span"),Ny=l("Performance and Scalability: How To Fit a Bigger Model and Train It Faster"),Zd=h(),Io=s("blockquote"),xh=s("p"),Dh=s("em"),Cy=l("Or how to escape the dreaded \u201CRuntimeError: CUDA error: out of memory\u201D error."),Kd=h(),m(Ps.$$.fragment),eu=h(),te=s("p"),Ly=l("Training ever larger models can become challenging even on modern GPUs. Due to their immense size we often run out of GPU memory and training can take very long. In this section we have a look at a few tricks to reduce the memory footprint and speed up training for large models and how they are integrated in the "),Uo=s("a"),My=l("Trainer"),Oy=l(" and "),$s=s("a"),By=l("\u{1F917} Accelerate"),qy=l(". Before we start make sure you have installed the following libraries:"),tu=h(),m(ks.$$.fragment),au=h(),ae=s("p"),Vy=l("The "),Gh=s("code"),Hy=l("nvidia-ml-py3"),Fy=l(" library allows us to monitor the memory usage of the models from within Python. You might be familiar with the "),Ih=s("code"),Wy=l("nvidia-smi"),Ry=l(" command in the terminal - this library allows to access the same information in Python directly."),su=h(),Pt=s("p"),Xy=l("Then we create some dummy data. We create random token IDs between 100 and 30000 and binary labels for a classifier. In total we get 512 sequences each with length 512 and store them in a "),As=s("a"),Uh=s("code"),Yy=l("Dataset"),Qy=l(" with PyTorch format."),ru=h(),m(Ts.$$.fragment),iu=h(),$t=s("p"),Jy=l("We want to print some summary statistics for the GPU utilization and the training run with the "),So=s("a"),Zy=l("Trainer"),Ky=l(". We setup a two helper functions to do just that:"),ou=h(),m(js.$$.fragment),lu=h(),zo=s("p"),e2=l("Let\u2019s verify that we start with a free GPU memory:"),nu=h(),m(xs.$$.fragment),pu=h(),No=s("p"),t2=l("That looks good: the GPU memory is not occupied as we would expect before we load any models. If that\u2019s not the case on your machine make sure to stop all processes that are using GPU memory. However, not all free GPU memory can be used by the user. When a model is loaded to the GPU also the kernels are loaded which can take up 1-2GB of memory. To see how much it is we load a tiny tensor into the GPU which triggers the kernels to be loaded as well."),hu=h(),m(Ds.$$.fragment),fu=h(),Co=s("p"),a2=l("We see that the kernels alone take up 1.3GB of GPU memory. Now let\u2019s see how much space the model uses."),cu=h(),Pe=s("h2"),kt=s("a"),Sh=s("span"),m(Gs.$$.fragment),s2=h(),zh=s("span"),r2=l("Load Model"),mu=h(),At=s("p"),i2=l("First, we load the "),Nh=s("code"),o2=l("bert-large-uncased"),l2=l(" model. We load the model weights directly to the GPU so that we can check how much space just weights use."),du=h(),m(Is.$$.fragment),uu=h(),Tt=s("p"),n2=l("We can see that the model weights alone take up 1.3 GB of the GPU memory. The exact number depends on the specific GPU you are using. Note that on newer GPUs a model can sometimes take up more space since the weights are loaded in an optimized fashion that speeds up the usage of the model. 
Now we can also quickly check if we get the same result as with "),Ch=s("code"),p2=l("nvidia-smi"),h2=l(" CLI:"),vu=h(),m(Us.$$.fragment),wu=h(),m(Ss.$$.fragment),gu=h(),Lo=s("p"),f2=l("We get the same number as before and you can also see that we are using a V100 GPU with 16GB of memory. So now we can start training the model and see how the GPU memory consumption changes. First, we set up a few standard training arguments that we will use across all our experiments:"),_u=h(),m(zs.$$.fragment),bu=h(),m(jt.$$.fragment),yu=h(),$e=s("h2"),xt=s("a"),Lh=s("span"),m(Ns.$$.fragment),c2=h(),Mh=s("span"),m2=l("Vanilla Training"),Eu=h(),Dt=s("p"),d2=l("As a first experiment we will use the "),Mo=s("a"),u2=l("Trainer"),v2=l(" and train the model without any further modifications and a batch size of 4:"),Pu=h(),m(Cs.$$.fragment),$u=h(),m(Ls.$$.fragment),ku=h(),Oo=s("p"),w2=l("We see that already a relatively small batch size almost fills up our GPU\u2019s entire memory. However, a larger batch size can often result in faster model convergence or better end performance. So ideally we want to tune the batch size to our model\u2019s needs and not to the GPU limitations. A simple trick to effectively train larger batch size is gradient accumulation."),Au=h(),ke=s("h2"),Gt=s("a"),Oh=s("span"),m(Ms.$$.fragment),g2=h(),Bh=s("span"),_2=l("Gradient Accumulation"),Tu=h(),Bo=s("p"),b2=l("The idea behind gradient accumulation is to instead of calculating the gradients for the whole batch at once to do it in smaller steps. The way we do that is to calculate the gradients iteratively in smaller batches by doing a forward and backward pass through the model and accumulating the gradients in the process. When enough gradients are accumulated we run the model\u2019s optimization step. This way we can easily increase the overall batch size to numbers that would never fit into the GPU\u2019s memory. In turn, however, the added forward and backward passes can slow down the training a bit."),ju=h(),N=s("p"),y2=l("We can use gradient accumulation in the "),qo=s("a"),E2=l("Trainer"),P2=l(" by simply adding the "),qh=s("code"),$2=l("gradient_accumulation_steps"),k2=l(" argument to "),Vo=s("a"),A2=l("TrainingArguments"),T2=l(". Let\u2019s see how it impacts the models memory footprint:"),xu=h(),m(Os.$$.fragment),Du=h(),m(Bs.$$.fragment),Gu=h(),G=s("p"),j2=l("We can see that the memory footprint was dramatically reduced at the cost of being only slightly slower than the vanilla run. Of course, this would change as you increase the number of accumulation steps. In general you would want to max out the GPU usage as much as possible. So in our case, the batch_size of 4 was already pretty close to the GPU\u2019s limit. If we wanted to train with a batch size of 64 we should not use "),Vh=s("code"),x2=l("per_device_train_batch_size=1"),D2=l(" and "),Hh=s("code"),G2=l("gradient_accumulation_steps=64"),I2=l(" but instead "),Fh=s("code"),U2=l("per_device_train_batch_size=4"),S2=l(" and "),Wh=s("code"),z2=l("gradient_accumulation_steps=16"),N2=l(" which has the same effective batch size while making better use of the available GPU resources."),Iu=h(),Ho=s("p"),C2=l("Next we have a look at another trick to save a little bit more GPU memory called gradient checkpointing."),Uu=h(),Ae=s("h2"),It=s("a"),Rh=s("span"),m(qs.$$.fragment),L2=h(),Xh=s("span"),M2=l("Gradient Checkpointing"),Su=h(),Fo=s("p"),O2=l("Even when we set the batch size to 1 and use gradient accumulation we can still run out of memory when working with large models. 
In order to compute the gradients during the backward pass all activations from the forward pass are normally saved. This can create a big memory overhead. Alternatively, one could forget all activations during the forward pass and recompute them on demand during the backward pass. This would however add a significant computational overhead and slow down training."),zu=h(),Ut=s("p"),B2=l("Gradient checkpointing strikes a compromise between the two approaches and saves strategically selected activations throughout the computational graph so only a fraction of the activations need to be re-computed for the gradients. See "),Vs=s("a"),q2=l("this great article"),V2=l(" explaining the ideas behind gradient checkpointing."),Nu=h(),se=s("p"),H2=l("To enable gradient checkpointing in the "),Wo=s("a"),F2=l("Trainer"),W2=l(" we only need ot pass it as a flag to the "),Ro=s("a"),R2=l("TrainingArguments"),X2=l(". Everything else is handled under the hood:"),Cu=h(),m(Hs.$$.fragment),Lu=h(),m(Fs.$$.fragment),Mu=h(),Xo=s("p"),Y2=l("We can see that this saved some more memory but at the same time training became a bit slower. A general rule of thumb is that gradient checkpointing slows down training by about 20%. Let\u2019s have a look at another method with which we can regain some speed: mixed precision training."),Ou=h(),Te=s("h2"),St=s("a"),Yh=s("span"),m(Ws.$$.fragment),Q2=h(),Qh=s("span"),J2=l("FP16 Training"),Bu=h(),re=s("p"),Z2=l("The idea of mixed precision training is that no all variables need to be stored in full (32-bit) floating point precision. If we can reduce the precision the variales and their computations are faster. The main advantage comes from saving the activations in half (16-bit) precision. Although the gradients are also computed in half precision they are converted back to full precision for the optimization step so no memory is saved here. Since the model is present on the GPU in both 16-bit and 32-bit precision this can use more GPU memory (1.5x the original model is on the GPU), especially for small batch sizes. Since some computations are performed in full and some in half precision this approach is also called mixed precision training. Enabling mixed precision training is also just a matter of setting the "),Jh=s("code"),K2=l("fp16"),e0=l(" flag to "),Zh=s("code"),t0=l("True"),a0=l(":"),qu=h(),m(Rs.$$.fragment),Vu=h(),m(Xs.$$.fragment),Hu=h(),Yo=s("p"),s0=l("We can see that this is almost twice as fast as the vanilla training. Let\u2019s add it to the mix of the previous methods:"),Fu=h(),m(Ys.$$.fragment),Wu=h(),m(Qs.$$.fragment),Ru=h(),Qo=s("p"),r0=l("We can see that with these tweaks we use about half the GPU memory as at the beginning while also being slightly faster. But we are not done, yet! There is another area where we can save GPU memory: the optimizer."),Xu=h(),je=s("h2"),zt=s("a"),Kh=s("span"),m(Js.$$.fragment),i0=h(),ef=s("span"),o0=l("Optimizer"),Yu=h(),Jo=s("p"),l0=l("The most common optimizer used to train transformer model is Adam or AdamW (Adam with weight decay). Adam achieves good convergence by storing the rolling average of the previous gradients which, however, adds an additional memory footprint of the order of the number of model parameters. 
One remedy to this is to use an alternative optimizer such as Adafactor."),Qu=h(),xe=s("h3"),Nt=s("a"),tf=s("span"),m(Zs.$$.fragment),n0=h(),af=s("span"),p0=l("Adafactor"),Ju=h(),Ct=s("p"),h0=l("Instead of keeping the rolling average for each element in the weight matrices Adafactor only stores aggregated information (row- and column-wise sums of the rolling averages) which reduces the footprint considerably. One downside of Adafactor is that in some instances convergence can be slower than Adam\u2019s so some experimentation is advised here. We can use Adafactor simply by setting "),sf=s("code"),f0=l('optim="adafactor"'),c0=l(":"),Zu=h(),m(Ks.$$.fragment),Ku=h(),m(er.$$.fragment),ev=h(),Zo=s("p"),m0=l("We can see that this saves a few more GB on the GPU. Let\u2019s see how it looks when we add it to the other methods we introduced earlier:"),tv=h(),m(tr.$$.fragment),av=h(),m(ar.$$.fragment),sv=h(),Ko=s("p"),d0=l("We went from 15 GB memory usage to 5 GB - a 3x improvement while maintaining the throughput! However, as mentioned before, the convergence of Adafactor can be worse than Adam. There is an alternative to Adafactor called 8-bit Adam that takes a slightly different approach."),rv=h(),De=s("h3"),Lt=s("a"),rf=s("span"),m(sr.$$.fragment),u0=h(),of=s("span"),v0=l("8-bit Adam"),iv=h(),el=s("p"),w0=l("Instead of aggregating optimizer states like Adafactor, 8-bit Adam keeps the full state and quantizes it. Quantization means that it stores the state with lower precision and dequantizes it only for the optimization. This is similar to the idea behind FP16 training where using variables with lower precision saves memory."),ov=h(),I=s("p"),g0=l("In contrast to the previous approaches is this one not integrated into the "),tl=s("a"),_0=l("Trainer"),b0=l(" as a simple flag. We need to install the 8-bit optimizer and then pass it as a custom optimizer to the "),al=s("a"),y0=l("Trainer"),E0=l(". Follow the installation guide in the Github "),rr=s("a"),P0=l("repo"),$0=l(" to install the "),lf=s("code"),k0=l("bitsandbytes"),A0=l(" library that implements the 8-bit Adam optimizer."),lv=h(),sl=s("p"),T0=l("Once installed, we just need to initialize the the optimizer. Although this looks like a considerable amount of work it actually just involves two steps: first we need to group the model\u2019s parameters into two groups where to one group we apply weight decay and to the other we don\u2019t. Usually, biases and layer norm parameters are not weight decayed. Then in a second step we just do some argument housekeeping to use the same parameters as the previously used AdamW optimizer."),nv=h(),m(Mt.$$.fragment),pv=h(),m(ir.$$.fragment),hv=h(),Ot=s("p"),j0=l("We can now pass the custom optimizer as an argument to the "),nf=s("code"),x0=l("Trainer"),D0=l(":"),fv=h(),m(or.$$.fragment),cv=h(),m(lr.$$.fragment),mv=h(),rl=s("p"),G0=l("We can see that we get a similar memory improvement as with Adafactor while keeping the full rolling average of the gradients. Let\u2019s repeat the experiment with the full settings:"),dv=h(),m(nr.$$.fragment),uv=h(),m(pr.$$.fragment),vv=h(),il=s("p"),I0=l("Again, we get about a 3x memory improvement and even slightly higher throughput as using Adafactor. So we have seen how we can optimize the memory footprint of large models. 
The following plot summarizes all our experiments:"),wv=h(),ol=s("p"),ll=s("img"),gv=h(),Ge=s("h2"),Bt=s("a"),pf=s("span"),m(hr.$$.fragment),U0=h(),hf=s("span"),S0=l("Using \u{1F917} Accelerate"),_v=h(),ie=s("p"),z0=l("So far we have used the "),nl=s("a"),N0=l("Trainer"),C0=l(" to run the experiments but a more flexible alternative to that approach is to use \u{1F917} Accelerate. With \u{1F917} Accelerate you have full control over the training loop and can essentially write the loop in pure PyTorch with some minor modifications. In turn it allows you to easily scale across different infrastructures such as CPUs, GPUs, TPUs, or distributed multi-GPU setups without changing any code. Let\u2019s see what it takes to implement all of the above tweaks in \u{1F917} Accelerate. We can still use the "),pl=s("a"),L0=l("TrainingArguments"),M0=l(" to wrap the training settings:"),bv=h(),m(fr.$$.fragment),yv=h(),hl=s("p"),O0=l("The full example training loop with \u{1F917} Accelerate is only a handful of lines of code long:"),Ev=h(),m(cr.$$.fragment),Pv=h(),T=s("p"),B0=l("First we wrap the dataset in a "),mr=s("a"),ff=s("code"),q0=l("DataLoader"),V0=l(". Then we can enable gradient checkpointing by calling the model\u2019s "),fl=s("a"),H0=l("gradient_checkpointing_enable()"),F0=l(" method. When we initialize the "),dr=s("a"),cf=s("code"),W0=l("Accelerator"),R0=l(" we can specifiy if we want to use mixed precision training and it will take care of it for us in the "),mf=s("code"),X0=l("prepare"),Y0=l(" call. During the "),ur=s("a"),df=s("code"),Q0=l("prepare"),J0=l(" call the dataloader will also be distributed across workers should we use multiple GPUs. We use the same 8-bit optimizer from the earlier experiments."),$v=h(),qt=s("p"),Z0=l("Finally, we can write the main training loop. Note that the "),uf=s("code"),K0=l("backward"),e3=l(" call is handled by \u{1F917} Accelerate. We can also see how gradient accumulation works: we normalize the loss so we get the average at the end of accumulation and once we have enough steps we run the optimization. Now the question is: does this use the same amount of memory as the previous steps? Let\u2019s check:"),kv=h(),m(vr.$$.fragment),Av=h(),cl=s("p"),t3=l("Indeed it does. Implementing these optimization techniques with \u{1F917} Accelerate only takes a handful of lines of code and comes with the benefit of more flexiblity in the training loop."),Tv=h(),ml=s("p"),a3=l("Now, let\u2019s take a step back and discuss what we should optimize for when scaling the training of large models."),jv=h(),Ie=s("h2"),Vt=s("a"),vf=s("span"),m(wr.$$.fragment),s3=h(),wf=s("span"),r3=l("How to scale"),xv=h(),dl=s("p"),i3=l("When we train models there are a two aspects we want to optimize at the same time:"),Dv=h(),Ht=s("ul"),gf=s("li"),o3=l("Data throughput/training time"),l3=h(),_f=s("li"),n3=l("Model performance"),Gv=h(),ul=s("p"),p3=l("We have seen that each method changes the memory usage and throughput. In general we want to maximize the throughput (samples/second) to minimize the training cost. This is generally achieved by utilizing the GPU as much as possible and thus filling GPU memory to its limit. For example, as mentioned earlier, we only employ gradient accumulation when we want to use a batch size beyond the size of the GPU memory. If the desired batch size fits into memory then there is no reason to apply gradient accumulation which will only slow down training."),Iv=h(),vl=s("p"),h3=l("The second objective is model performance. 
Just because we can does not mean we should use a large batch size. As part of hyperparameter tuning you should determine which batch size yields the best result and then optimize the throughput accordingly."),Uv=h(),wl=s("p"),f3=l("Sometimes, even when applying all the above tweaks the throughput on a given GPU might still not be good enough. One easy solution is to change the type of GPU. For example switching from let\u2019s say a K80 (which you typically get on Google Colab) to a fancier GPU such as the V100 or A100. Although they are more expensive they are usually more cost effective than cheaper GPUs due to their larger memory and faster architecture. For some applications, such as pretraining, this might still not be fast enough. In this case you want to scale your experiment to several GPUs."),Sv=h(),Ue=s("h2"),Ft=s("a"),bf=s("span"),m(gr.$$.fragment),c3=h(),yf=s("span"),m3=l("Multi-GPU Training"),zv=h(),C=s("p"),d3=l("If your model fits on a single GPU scaling to many GPUs can be achieved fairly easily with data parallelism. The idea is very similar to gradient accumulation with the distinction that instead of running the forward and backward passes during the accumulation in sequence on a single machine they are performed in parallel on multiple machines. So each GPU gets a small batch, runs the forward and backward passes and then the gradients from all machines are aggregated and the model is optimized. You can combine this with all the methods we described before. For example, if you have 4 GPUs and use "),Ef=s("code"),u3=l("per_device_train_batch_size=12"),v3=l(" and "),Pf=s("code"),w3=l("gradient_accumulation_steps=3"),g3=l(" you will have an effective batch size of "),$f=s("code"),_3=l("4*12*3=144"),b3=l("."),Nv=h(),L=s("p"),y3=l("The "),gl=s("a"),E3=l("Trainer"),P3=l(" allows for distributed training and if you execute your "),_l=s("a"),$3=l("Trainer"),k3=l(" training script on a machine with multiple GPUs it will automatically utilize all of them, hence the name "),kf=s("code"),A3=l("per_device_train_batch_size"),T3=l(". In \u{1F917} Accelerate you can configure the infrastructure setup with the following command:"),Cv=h(),m(_r.$$.fragment),Lv=h(),bl=s("p"),j3=l("Until now we have opperated under the assumption that we can fit the model onto a single GPU without or with the introduced tricks . But what if this is not possible? We still have a few tricks up our sleeves!"),Mv=h(),Se=s("h2"),Wt=s("a"),Af=s("span"),m(br.$$.fragment),x3=h(),Tf=s("span"),D3=l("What if my model still does not fit?"),Ov=h(),Rt=s("p"),G3=l("If the model does not fit on a single GPU with all the mentioned tricks there are still more methods we can apply although life starts to get a bit more complicated. This usually involves some form of pipeline or tensor parallelism where the model itself is distributed across several GPUs. One can also make use of DeepSpeed which implements some of these parallelism strategies along with some more optimization to reduce the memory footprint such as partitioning the optimizer states. You can read more about this in the "),yl=s("a"),I3=l("\u201CModel Parallelism\u201D section"),U3=l("."),Bv=h(),El=s("p"),S3=l("This concludes the practical part of this guide for scaling the training of large models. 
The following section goes into more details on some of the aspects discussed above."),qv=h(),ze=s("h2"),Xt=s("a"),jf=s("span"),m(yr.$$.fragment),z3=h(),xf=s("span"),N3=l("Further discussions"),Vv=h(),Pl=s("p"),C3=l("This section gives brief ideas on how to make training faster and support bigger models. Later sections will expand, demonstrate and elucidate each of these."),Hv=h(),Ne=s("h2"),Yt=s("a"),Df=s("span"),m(Er.$$.fragment),L3=h(),Gf=s("span"),M3=l("Faster Training"),Fv=h(),$l=s("p"),O3=l("Hardware:"),Wv=h(),kl=s("ul"),Al=s("li"),B3=l("fast connectivity between GPUs"),Pr=s("ul"),If=s("li"),q3=l("intra-node: NVLink"),V3=h(),Uf=s("li"),H3=l("inter-node: Infiniband / Intel OPA"),Rv=h(),Tl=s("p"),F3=l("Software:"),Xv=h(),Qt=s("ul"),Sf=s("li"),W3=l("Data Parallel / Distributed Data Parallel"),R3=h(),zf=s("li"),X3=l("fp16 (autocast caching)"),Yv=h(),Ce=s("h2"),Jt=s("a"),Nf=s("span"),m($r.$$.fragment),Y3=h(),Cf=s("span"),Q3=l("Bigger Models"),Qv=h(),jl=s("p"),J3=l("Hardware:"),Jv=h(),oe=s("ul"),Lf=s("li"),Z3=l("bigger GPUs"),K3=h(),Mf=s("li"),e6=l("more GPUs"),t6=h(),kr=s("li"),a6=l("more CPU and NVMe (offloaded to by "),xl=s("a"),s6=l("DeepSpeed-Infinity"),r6=l(")"),Zv=h(),Dl=s("p"),i6=l("Software:"),Kv=h(),P=s("ul"),Of=s("li"),o6=l("Model Scalability (ZeRO and 3D Parallelism)"),l6=h(),Bf=s("li"),n6=l("Low-memory Optimizers"),p6=h(),qf=s("li"),h6=l("fp16/bf16 (smaller data/faster throughput)"),f6=h(),Vf=s("li"),c6=l("tf32 (faster throughput)"),m6=h(),Hf=s("li"),d6=l("Gradient accumulation"),u6=h(),Ff=s("li"),v6=l("Gradient checkpointing"),w6=h(),Wf=s("li"),g6=l("Sparsity"),ew=h(),Le=s("h2"),Zt=s("a"),Rf=s("span"),m(Ar.$$.fragment),_6=h(),Xf=s("span"),b6=l("Hardware"),tw=h(),Me=s("h3"),Kt=s("a"),Yf=s("span"),m(Tr.$$.fragment),y6=h(),Qf=s("span"),E6=l("Power and Cooling"),aw=h(),Gl=s("p"),P6=l("If you bought an expensive high end GPU make sure you give it the correct power and sufficient cooling."),sw=h(),jr=s("p"),Jf=s("strong"),$6=l("Power"),k6=l(":"),rw=h(),Il=s("p"),A6=l("Some high end consumer GPU cards have 2 and sometimes 3 PCI-E 8-Pin power sockets. Make sure you have as many independent 12V PCI-E 8-Pin cables plugged into the card as there are sockets. Do not use the 2 splits at one end of the same cable (also known as pigtail cable). That is if you have 2 sockets on the GPU, you want 2 PCI-E 8-Pin cables going from your PSU to the card and not one that has 2 PCI-E 8-Pin connectors at the end! You won\u2019t get the full performance out of your card otherwise."),iw=h(),Ul=s("p"),T6=l("Each PCI-E 8-Pin power cable needs to be plugged into a 12V rail on the PSU side and can supply up to 150W of power."),ow=h(),Sl=s("p"),j6=l("Some other cards may use a PCI-E 12-Pin connectors, and these can deliver up to 500-600W of power."),lw=h(),zl=s("p"),x6=l("Low end cards may use 6-Pin connectors, which supply up to 75W of power."),nw=h(),Nl=s("p"),D6=l("Additionally you want the high-end PSU that has stable voltage. Some lower quality ones may not give the card the stable voltage it needs to function at its peak."),pw=h(),Cl=s("p"),G6=l("And of course the PSU needs to have enough unused Watts to power the card."),hw=h(),xr=s("p"),Zf=s("strong"),I6=l("Cooling"),U6=l(":"),fw=h(),Ll=s("p"),S6=l("When a GPU gets overheated it would start throttling down and will not deliver full performance. 
And it will shutdown if it gets too hot."),cw=h(),Ml=s("p"),z6=l("It\u2019s hard to tell the exact best temperature to strive for when a GPU is heavily loaded, but probably anything under +80C is good, but lower is better - perhaps 70-75C is an excellent range to be in. The throttling down is likely to start at around 84-90C. But other than throttling performance a prolonged very higher temperature is likely to reduce the lifespan of a GPU."),mw=h(),Oe=s("h3"),ea=s("a"),Kf=s("span"),m(Dr.$$.fragment),N6=h(),ec=s("span"),C6=l("Multi-GPU Connectivity"),dw=h(),Ol=s("p"),L6=l("If you use multiple GPUs the way cards are inter-connected can have a huge impact on the total training time."),uw=h(),Bl=s("p"),M6=l("If the GPUs are on the same physical node, you can run:"),vw=h(),m(Gr.$$.fragment),ww=h(),ql=s("p"),O6=l("and it will tell you how the GPUs are inter-connected."),gw=h(),Vl=s("p"),B6=l("On a machine with dual-GPU and which are connected with NVLink, you will most likely see something like:"),_w=h(),m(Ir.$$.fragment),bw=h(),Hl=s("p"),q6=l("on a different machine w/o NVLink we may see:"),yw=h(),m(Ur.$$.fragment),Ew=h(),Fl=s("p"),V6=l("The report includes this legend:"),Pw=h(),m(Sr.$$.fragment),$w=h(),le=s("p"),H6=l("So the first report "),tc=s("code"),F6=l("NV2"),W6=l(" tells us the GPUs are interconnected with 2 NVLinks, and the second report "),ac=s("code"),R6=l("PHB"),X6=l(" we have a typical consumer-level PCIe+Bridge setup."),kw=h(),Wl=s("p"),Y6=l("Check what type of connectivity you have on your setup. Some of these will make the communication between cards faster (e.g. NVLink), others slower (e.g. PHB)."),Aw=h(),Rl=s("p"),Q6=l("Depending on the type of scalability solution used, the connectivity speed could have a major or a minor impact. If the GPUs need to sync rarely, as in DDP, the impact of a slower connection will be less significant. If the GPUs need to send messages to each other often, as in ZeRO-DP, then faster connectivity becomes super important to achieve faster training."),Tw=h(),Be=s("h3"),ta=s("a"),sc=s("span"),m(zr.$$.fragment),J6=h(),rc=s("span"),Z6=l("NVlink"),jw=h(),Nr=s("p"),Cr=s("a"),K6=l("NVLink"),eE=l(" is a wire-based serial multi-lane near-range communications link developed by Nvidia."),xw=h(),aa=s("p"),tE=l("Each new generation provides a faster bandwidth, e.g. here is a quote from "),Lr=s("a"),aE=l("Nvidia Ampere GA102 GPU Architecture"),sE=l(":"),Dw=h(),Xl=s("blockquote"),ic=s("p"),rE=l(`Third-Generation NVLink\xAE GA102 GPUs utilize NVIDIA\u2019s third-generation NVLink interface, which includes four x4 links, with each link providing 14.0625 GB/sec bandwidth in each direction between two GPUs. Four links provide 56.25 GB/sec bandwidth in each direction, and 112.5 GB/sec total bandwidth between two GPUs. Two RTX 3090 GPUs can be connected together for SLI using NVLink. (Note that 3-Way and 4-Way SLI configurations are not supported.)`),Gw=h(),M=s("p"),iE=l("So the higher "),oc=s("code"),oE=l("X"),lE=l(" you get in the report of "),lc=s("code"),nE=l("NVX"),pE=l(" in the output of "),nc=s("code"),hE=l("nvidia-smi topo -m"),fE=l(" the better. 
The generation will depend on your GPU architecture."),Iw=h(),Yl=s("p"),cE=l("Let\u2019s compare the execution of a gpt2 language model training over a small sample of wikitext."),Uw=h(),Ql=s("p"),mE=l("The results are:"),Sw=h(),sa=s("table"),pc=s("thead"),Mr=s("tr"),hc=s("th"),dE=l("NVlink"),uE=h(),Jl=s("th"),vE=l("Time"),wE=h(),Or=s("tbody"),Br=s("tr"),fc=s("td"),gE=l("Y"),_E=h(),Zl=s("td"),bE=l("101s"),yE=h(),qr=s("tr"),cc=s("td"),EE=l("N"),PE=h(),Kl=s("td"),$E=l("131s"),zw=h(),en=s("p"),kE=l("You can see that NVLink completes the training ~23% faster."),Nw=h(),ra=s("p"),AE=l("In the second benchmark we use "),mc=s("code"),TE=l("NCCL_P2P_DISABLE=1"),jE=l(" to tell the GPUs not to use NVLink."),Cw=h(),tn=s("p"),xE=l("Here is the full benchmark code and outputs:"),Lw=h(),m(Vr.$$.fragment),Mw=h(),x=s("p"),DE=l("Hardware: 2x TITAN RTX 24GB each + NVlink with 2 NVLinks ("),dc=s("code"),GE=l("NV2"),IE=l(" in "),uc=s("code"),UE=l("nvidia-smi topo -m"),SE=l(`) Software: `),vc=s("code"),zE=l("pytorch-1.8-to-be"),NE=l(" + "),wc=s("code"),CE=l("cuda-11.0"),LE=l(" / "),gc=s("code"),ME=l("transformers==4.3.0.dev0"),Ow=h(),qe=s("h2"),ia=s("a"),_c=s("span"),m(Hr.$$.fragment),OE=h(),bc=s("span"),BE=l("Software"),Bw=h(),Ve=s("h3"),oa=s("a"),yc=s("span"),m(Fr.$$.fragment),qE=h(),Ec=s("span"),VE=l("Model Scalability"),qw=h(),an=s("p"),HE=l("When you can\u2019t fit a model into the available GPU memory, you need to start using a solution that allows you to scale a large model to use multiple GPUs in parallel."),Vw=h(),Wr=s("p"),FE=l("For indepth details on ZeRO and various other model parallelism protocols please see: "),sn=s("a"),WE=l("Model Parallelism"),Hw=h(),He=s("h3"),la=s("a"),Pc=s("span"),m(Rr.$$.fragment),RE=h(),$c=s("span"),XE=l("Anatomy of Model's Operations"),Fw=h(),rn=s("p"),YE=l("Transformers architecture includes 3 main groups of operations grouped below by compute-intensity."),Ww=h(),ne=s("ol"),Xr=s("li"),kc=s("p"),Ac=s("strong"),QE=l("Tensor Contractions"),JE=h(),Yr=s("p"),ZE=l("Linear layers and components of Multi-Head Attention all do batched "),Tc=s("strong"),KE=l("matrix-matrix multiplications"),eP=l(". These operations are the most compute-intensive part of training a transformer."),tP=h(),Qr=s("li"),jc=s("p"),xc=s("strong"),aP=l("Statistical Normalizations"),sP=h(),Jr=s("p"),rP=l("Softmax and layer normalization are less compute-intensive than tensor contractions, and involve one or more "),Dc=s("strong"),iP=l("reduction operations"),oP=l(", the result of which is then applied via a map."),lP=h(),Zr=s("li"),Gc=s("p"),Ic=s("strong"),nP=l("Element-wise Operators"),pP=h(),Kr=s("p"),hP=l("These are the remaining operators: "),Uc=s("strong"),fP=l("biases, dropout, activations, and residual connections"),cP=l(". 
These are the least compute-intensive operations."),Rw=h(),on=s("p"),mP=l("This knowledge can be helpful to know when analyzing performance bottlenecks."),Xw=h(),ei=s("p"),dP=l("This summary is derived from "),ti=s("a"),uP=l("Data Movement Is All You Need: A Case Study on Optimizing Transformers 2020"),Yw=h(),Fe=s("h3"),na=s("a"),Sc=s("span"),m(ai.$$.fragment),vP=h(),zc=s("span"),wP=l("Anatomy of Model's Memory"),Qw=h(),ln=s("p"),gP=l("The components on GPU memory are the following:"),Jw=h(),j=s("ol"),Nc=s("li"),_P=l("model weights"),bP=h(),Cc=s("li"),yP=l("optimizer states"),EP=h(),Lc=s("li"),PP=l("gradients"),$P=h(),Mc=s("li"),kP=l("forward activations saved for gradient computation"),AP=h(),Oc=s("li"),TP=l("temporary buffers"),jP=h(),Bc=s("li"),xP=l("functionality-specific memory"),Zw=h(),nn=s("p"),DP=l("A typical model trained in mixed precision with AdamW requires 18 bytes per model parameter plus activation memory."),Kw=h(),pn=s("p"),GP=l("For inference there are no optimizer states and gradients, so we can subtract those. And thus we end up with 6 bytes per model parameter for mixed precision inference, plus activation memory."),eg=h(),hn=s("p"),IP=l("Let\u2019s look at the details."),tg=h(),We=s("h4"),pa=s("a"),qc=s("span"),m(si.$$.fragment),UP=h(),Vc=s("span"),SP=l("Model Weights"),ag=h(),ha=s("ul"),Hc=s("li"),zP=l("4 bytes * number of parameters for fp32 training"),NP=h(),Fc=s("li"),CP=l("6 bytes * number of parameters for mixed precision training"),sg=h(),Re=s("h4"),fa=s("a"),Wc=s("span"),m(ri.$$.fragment),LP=h(),Rc=s("span"),MP=l("Optimizer States"),rg=h(),pe=s("ul"),Xc=s("li"),OP=l("8 bytes * number of parameters for normal AdamW (maintains 2 states)"),BP=h(),fn=s("li"),qP=l("2 bytes * number of parameters for 8-bit AdamW optimizers like "),ii=s("a"),VP=l("bitsandbytes"),HP=h(),Yc=s("li"),FP=l("4 bytes * number of parameters for optimizers like SGD (maintains only 1 state)"),ig=h(),Xe=s("h4"),ca=s("a"),Qc=s("span"),m(oi.$$.fragment),WP=h(),Jc=s("span"),RP=l("Gradients"),og=h(),cn=s("ul"),Zc=s("li"),XP=l("4 bytes * number of parameters for either fp32 or mixed precision training"),lg=h(),Ye=s("h4"),ma=s("a"),Kc=s("span"),m(li.$$.fragment),YP=h(),em=s("span"),QP=l("Forward Activations"),ng=h(),mn=s("ul"),tm=s("li"),JP=l("size depends on many factors, the key ones being sequence length, hidden size and batch size."),pg=h(),dn=s("p"),ZP=l("There are the input and output that are being passed and returned by the forward and the backward functions and the forward activations saved for gradient computation."),hg=h(),Qe=s("h4"),da=s("a"),am=s("span"),m(ni.$$.fragment),KP=h(),sm=s("span"),e5=l("Temporary Memory"),fg=h(),un=s("p"),t5=l("Additionally there are all kinds of temporary variables which get released once the calculation is done, but in the moment these could require additional memory and could push to OOM. Therefore when coding it\u2019s crucial to think strategically about such temporary variables and sometimes to explicitly free those as soon as they are no longer needed."),cg=h(),Je=s("h4"),ua=s("a"),rm=s("span"),m(pi.$$.fragment),a5=h(),im=s("span"),s5=l("Functionality-specific memory"),mg=h(),vn=s("p"),r5=l("Then your software could have special memory needs. 
For example, when generating text using beam search, the software needs to maintain multiple copies of inputs and outputs."),dg=h(),Ze=s("h3"),va=s("a"),om=s("span"),m(hi.$$.fragment),i5=h(),wa=s("span"),lm=s("code"),o5=l("forward"),l5=l(" vs "),nm=s("code"),n5=l("backward"),p5=l(" Execution Speed"),ug=h(),wn=s("p"),h5=l("For convolutions and linear layers there are 2x flops in the backward compared to the forward, which generally translates into ~2x slower (sometimes more, because sizes in the backward tend to be more awkward). Activations are usually bandwidth-limited, and it\u2019s typical for an activation to have to read more data in the backward than in the forward (e.g. activation forward reads once, writes once, activation backward reads twice, gradOutput and output of the forward, and writes once, gradInput)."),vg=h(),Ke=s("h3"),ga=s("a"),pm=s("span"),m(fi.$$.fragment),f5=h(),hm=s("span"),c5=l("Floating Data Types"),wg=h(),gn=s("p"),m5=l("Here are the commonly used floating point data types choice of which impacts both memory usage and throughput:"),gg=h(),O=s("ul"),ci=s("li"),d5=l("fp32 ("),fm=s("code"),u5=l("float32"),v5=l(")"),w5=h(),mi=s("li"),g5=l("fp16 ("),cm=s("code"),_5=l("float16"),b5=l(")"),y5=h(),di=s("li"),E5=l("bf16 ("),mm=s("code"),P5=l("bfloat16"),$5=l(")"),k5=h(),dm=s("li"),A5=l("tf32 (CUDA internal data type)"),_g=h(),_n=s("p"),T5=l("Here is a diagram that shows how these data types correlate to each other."),bg=h(),bn=s("p"),yn=s("img"),yg=h(),_a=s("p"),j5=l("(source: "),ui=s("a"),x5=l("NVIDIA Blog"),D5=l(")"),Eg=h(),En=s("p"),G5=l("While fp16 and fp32 have been around for quite some time, bf16 and tf32 are only available on the Ampere architecture GPUS. TPUs support bf16 as well."),Pg=h(),et=s("h4"),ba=s("a"),um=s("span"),m(vi.$$.fragment),I5=h(),vm=s("span"),U5=l("fp16"),$g=h(),Pn=s("p"),S5=l("AMP = Automatic Mixed Precision"),kg=h(),$n=s("p"),z5=l("If we look at what\u2019s happening with FP16 training (mixed precision) we have:"),Ag=h(),B=s("ul"),wm=s("li"),N5=l("the model has two copies in memory: one in half-precision for the forward/backward computations and one in full precision - no memory saved here"),C5=h(),gm=s("li"),L5=l("the forward activations saved for gradient computation are in half-precision - memory is saved here"),M5=h(),wi=s("li"),O5=l("the gradients are computed in half-precision "),_m=s("em"),B5=l("but"),q5=l(" converted to full-precision for the update, no saving there"),V5=h(),bm=s("li"),H5=l("the optimizer states are in full precision as all the updates are done in full-precision"),Tg=h(),kn=s("p"),F5=l("So the savings only happen for the forward activations saved for the backward computation, and there is a slight overhead because the model weights are stored both in half- and full-precision."),jg=h(),ya=s("p"),W5=l("In \u{1F917} Transformers fp16 mixed precision is enabled by passing "),ym=s("code"),R5=l("--fp16"),X5=l(" to the \u{1F917} Trainer."),xg=h(),An=s("p"),Y5=l("Now let\u2019s look at a simple text-classification fine-tuning on 2 GPUs (I\u2019m giving the command for reference):"),Dg=h(),m(gi.$$.fragment),Gg=h(),Ea=s("p"),Q5=l("Since the only savings we get are in the model activations saved for the backward passed, it\u2019s logical that the bigger those activations are, the bigger the saving will be. 
If we try different batch sizes, I indeed get (this is with "),Em=s("code"),J5=l("nvidia-smi"),Z5=l(" so not completely reliable as said above but it will be a fair comparison):"),Ig=h(),Pa=s("table"),Pm=s("thead"),R=s("tr"),Tn=s("th"),K5=l("batch size"),e4=h(),jn=s("th"),t4=l("w/o \u2014fp16"),a4=h(),xn=s("th"),s4=l("w/ \u2014fp16"),r4=h(),Dn=s("th"),i4=l("savings"),o4=h(),X=s("tbody"),Y=s("tr"),Gn=s("td"),l4=l("8"),n4=h(),In=s("td"),p4=l("4247"),h4=h(),Un=s("td"),f4=l("4163"),c4=h(),Sn=s("td"),m4=l("84"),d4=h(),Q=s("tr"),zn=s("td"),u4=l("16"),v4=h(),Nn=s("td"),w4=l("4971"),g4=h(),Cn=s("td"),_4=l("4793"),b4=h(),Ln=s("td"),y4=l("178"),E4=h(),J=s("tr"),Mn=s("td"),P4=l("32"),$4=h(),On=s("td"),k4=l("6827"),A4=h(),Bn=s("td"),T4=l("6207"),j4=h(),qn=s("td"),x4=l("620"),D4=h(),Z=s("tr"),Vn=s("td"),G4=l("64"),I4=h(),Hn=s("td"),U4=l("10037"),S4=h(),Fn=s("td"),z4=l("8061"),N4=h(),Wn=s("td"),C4=l("1976"),Ug=h(),$a=s("p"),L4=l("So there is only a real memory saving if we train at a high batch size (and it\u2019s not half) and at batch sizes lower than 8, you actually get a bigger memory footprint (because of the overhead mentioned above). The gain for FP16 training is that in each of those cases, the training with the flag "),$m=s("code"),M4=l("--fp16"),O4=l(" is twice as fast, which does require every tensor to have every dimension be a multiple of 8 (examples pad the tensors to a sequence length that is a multiple of 8)."),Sg=h(),Rn=s("p"),B4=l("Summary: FP16 with apex or AMP will only give you some memory savings with a reasonably high batch size."),zg=h(),Xn=s("p"),q4=l("Additionally, under mixed precision when possible, it\u2019s important that the batch size is a multiple of 8 to efficiently use tensor cores."),Ng=h(),ka=s("p"),V4=l("Note that in some situations the speed up can be as big as 5x when using mixed precision. e.g. we have observed that while using "),_i=s("a"),H4=l("Megatron-Deepspeed"),F4=l("."),Cg=h(),Yn=s("p"),W4=l("Some amazing tutorials to read on mixed precision:"),Lg=h(),Aa=s("ul"),Qn=s("li"),R4=l("@sgugger wrote a great explanation of mixed precision "),bi=s("a"),X4=l("here"),Y4=h(),Jn=s("li"),Q4=l("Aleksey Bilogur\u2019s "),yi=s("a"),J4=l("A developer-friendly guide to mixed precision training with PyTorch"),Mg=h(),he=s("p"),Z4=l(`You can also see a variety of benchmarks on fp16 vs other precisions: `),Ei=s("a"),K4=l("RTX-3090"),e$=l(` and `),Pi=s("a"),t$=l("A100"),a$=l("."),Og=h(),tt=s("h5"),Ta=s("a"),km=s("span"),m($i.$$.fragment),s$=h(),Am=s("span"),r$=l("fp16 caching"),Bg=h(),fe=s("p"),i$=l("pytorch "),Tm=s("code"),o$=l("autocast"),l$=l(" which performs AMP include a caching feature, which speed things up by caching fp16-converted values. Here is the full description from this "),ki=s("a"),n$=l("comment"),p$=l(":"),qg=h(),Zn=s("p"),h$=l("Autocast maintains a cache of the FP16 casts of model parameters (leaves). This helps streamline parameter reuse: if the same FP32 param is used in several different FP16list ops, like several matmuls, instead of re-casting the param to FP16 on entering each matmul, the cast will occur on the first matmul, the casted FP16 copy will be cached, and for all later matmuls the FP16 copy will be reused. The cache is maintained only within a particular outermost autocast context. When you exit the autocast context the cache is dropped. 
For recommended usage, in which autocast wraps the forward pass, and then you exit the context before calling backward(), this means the cache only lasts the duration of the forward pass each iteration, and will be rebuilt next iteration. (The cache of FP16-casted copies MUST be rebuilt each iteration. The FP32 parameters get updated by the optimizer, so the FP16 copies must be recreated, otherwise the FP16 values will be stale.)"),Vg=h(),at=s("h5"),ja=s("a"),jm=s("span"),m(Ai.$$.fragment),f$=h(),xm=s("span"),c$=l("fp16 Inference"),Hg=h(),Kn=s("p"),m$=l("While normally inference is done with fp16/amp as with training, it\u2019s also possible to use the full fp16 mode without using mixed precision. This is especially a good fit if the pretrained model weights are already in fp16. So a lot less memory is used: 2 bytes per parameter vs 6 bytes with mixed precision!"),Fg=h(),ep=s("p"),d$=l("How good the results this will deliver will depend on the model. If it can handle fp16 without overflows and accuracy issues, then it\u2019ll definitely better to use the full fp16 mode."),Wg=h(),tp=s("p"),u$=l("For example, LayerNorm has to be done in fp32 and recent pytorch (1.10+) has been fixed to do that regardless of the input types, but earlier pytorch versions accumulate in the input type which can be an issue."),Rg=h(),xa=s("p"),v$=l("In \u{1F917} Transformers the full fp16 inference is enabled by passing "),Dm=s("code"),w$=l("--fp16_full_eval"),g$=l(" to the \u{1F917} Trainer."),Xg=h(),st=s("h4"),Da=s("a"),Gm=s("span"),m(Ti.$$.fragment),_$=h(),Im=s("span"),b$=l("bf16"),Yg=h(),ce=s("p"),y$=l("If you own Ampere or newer hardware you can start using bf16 for your training and evaluation. While bf16 has a worse precision than fp16, it has a much much bigger dynamic range. Therefore, if in the past you were experiencing overflow issues while training the model, bf16 will prevent this from happening most of the time. Remember that in fp16 the biggest number you can have is "),Um=s("code"),E$=l("65535"),P$=l(" and any number above that will overflow. A bf16 number can be as large as "),Sm=s("code"),$$=l("3.39e+38"),k$=l(" (!) which is about the same as fp32 - because both have 8-bits used for the numerical range."),Qg=h(),ap=s("p"),A$=l("Automatic Mixed Precision (AMP) is the same as with fp16, except it\u2019ll use bf16."),Jg=h(),sp=s("p"),T$=l("Thanks to the fp32-like dynamic range with bf16 mixed precision loss scaling is no longer needed."),Zg=h(),rp=s("p"),j$=l("If you have tried to finetune models pre-trained under bf16 mixed precision (e.g. T5) it\u2019s very likely that you have encountered overflow issues. Now you should be able to finetune those models without any issues."),Kg=h(),ip=s("p"),x$=l("That said, also be aware that if you pre-trained a model in bf16, it\u2019s likely to have overflow issues if someone tries to finetune it in fp16 down the road. 
So once started on the bf16-mode path it\u2019s best to remain on it and not switch to fp16."),e1=h(),Ga=s("p"),D$=l("In \u{1F917} Transformers bf16 mixed precision is enabled by passing "),zm=s("code"),G$=l("--bf16"),I$=l(" to the \u{1F917} Trainer."),t1=h(),op=s("p"),U$=l("If you use your own trainer, this is just:"),a1=h(),m(ji.$$.fragment),s1=h(),xi=s("p"),S$=l("If you need to switch a tensor to bf16, it\u2019s just: "),Nm=s("code"),z$=l("t.to(dtype=torch.bfloat16)"),r1=h(),lp=s("p"),N$=l("Here is how you can check if your setup supports bf16:"),i1=h(),m(Di.$$.fragment),o1=h(),np=s("p"),C$=l("On the other hand bf16 has a much worse precision than fp16, so there are certain situations where you\u2019d still want to use fp16 and not bf16."),l1=h(),me=s("p"),L$=l(`You can also see a variety of benchmarks on bf16 vs other precisions: `),Gi=s("a"),M$=l("RTX-3090"),O$=l(` and `),Ii=s("a"),B$=l("A100"),q$=l("."),n1=h(),rt=s("h5"),Ia=s("a"),Cm=s("span"),m(Ui.$$.fragment),V$=h(),Lm=s("span"),H$=l("bf16 Inference"),p1=h(),Ua=s("p"),F$=l("Same as with fp16, you can do inference in either the mixed precision bf16 or using the full bf16 mode. The same caveats apply. For details see "),pp=s("a"),W$=l("fp16 Inference"),R$=l("."),h1=h(),Sa=s("p"),X$=l("In \u{1F917} Transformers the full bf16 inference is enabled by passing "),Mm=s("code"),Y$=l("--bf16_full_eval"),Q$=l(" to the \u{1F917} Trainer."),f1=h(),it=s("h4"),za=s("a"),Om=s("span"),m(Si.$$.fragment),J$=h(),Bm=s("span"),Z$=l("tf32"),c1=h(),hp=s("p"),K$=l("The Ampere hardware uses a magical data type called tf32. It has the same numerical range as fp32 (8-bits), but instead of 23 bits precision it has only 10 bits (same as fp16). In total it uses only 19 bits."),m1=h(),fp=s("p"),ek=l("It\u2019s magical in the sense that you can use the normal fp32 training and/or inference code and by enabling tf32 support you can get up to 3x throughput improvement. All you need to do is to add this to your code:"),d1=h(),m(zi.$$.fragment),u1=h(),cp=s("p"),tk=l("When this is done CUDA will automatically switch to using tf32 instead of fp32 where it\u2019s possible. This, of course, assumes that the used GPU is from the Ampere series."),v1=h(),Na=s("p"),ak=l("Like all cases with reduced precision this may or may not be satisfactory for your needs, so you have to experiment and see. According to "),Ni=s("a"),sk=l("NVIDIA research"),rk=l(" the majority of machine learning training shouldn\u2019t be impacted and showed the same perplexity and convergence as the fp32 training."),w1=h(),mp=s("p"),ik=l("If you\u2019re already using fp16 or bf16 mixed precision it may help with the throughput as well."),g1=h(),q=s("p"),ok=l("You can enable this mode in the \u{1F917} Trainer with "),qm=s("code"),lk=l("--tf32"),nk=l(", or disable it with "),Vm=s("code"),pk=l("--tf32 0"),hk=l(" or "),Hm=s("code"),fk=l("--no_tf32"),ck=l(`. 
By default the PyTorch default is used.`),_1=h(),de=s("p"),mk=l("Note: tf32 mode is internal to CUDA and can\u2019t be accessed directly via "),Fm=s("code"),dk=l("tensor.to(dtype=torch.tf32)"),uk=l(" as "),Wm=s("code"),vk=l("torch.tf32"),wk=l(" doesn\u2019t exit."),b1=h(),Ca=s("p"),gk=l("Note: you need "),Rm=s("code"),_k=l("torch>=1.7"),bk=l(" to enjoy this feature."),y1=h(),ue=s("p"),yk=l(`You can also see a variety of benchmarks on tf32 vs other precisions: `),Ci=s("a"),Ek=l("RTX-3090"),Pk=l(` and `),Li=s("a"),$k=l("A100"),kk=l("."),E1=h(),ot=s("h3"),La=s("a"),Xm=s("span"),m(Mi.$$.fragment),Ak=h(),Ym=s("span"),Tk=l("Gradient Accumulation"),P1=h(),ve=s("p"),jk=l("Since gradient accumulation essentially is identical to having a larger batch size, just as with the larger batch size here you are likely to see a 20-30% speedup due to the optimizer running less often. For example, see benchmarks for "),Oi=s("a"),xk=l("RTX-3090"),Dk=l(` and `),Bi=s("a"),Gk=l("A100"),Ik=l("."),$1=h(),Ma=s("p"),Uk=l("To activate this feature in \u{1F917} Trainer add "),Qm=s("code"),Sk=l("--gradient_accumulation_steps 4"),zk=l(" to its arguments (experiment with the value to get the best performance)."),k1=h(),dp=s("p"),Nk=l("It\u2019s important to remember that using gradient accumulation you may end up with a much larger effective batch size, so you may need to adjust the learning rate, its warm up and for very short datasets it\u2019ll impact the loss as the training will end up doing less steps than normal."),A1=h(),lt=s("h3"),Oa=s("a"),Jm=s("span"),m(qi.$$.fragment),Ck=h(),Zm=s("span"),Lk=l("Gradient Checkpointing"),T1=h(),up=s("p"),Mk=l("One way to use significantly less GPU memory is to enabled \u201CGradient Checkpointing\u201D (also known as \u201Cactivation checkpointing\u201D). When enabled, a lot of memory can be freed at the cost of small decrease in the training speed due to recomputing parts of the graph during back-propagation. The slowdown will depend on the model but quite often it is around 20-30%."),j1=h(),V=s("p"),Ok=l("This technique was first shared in the paper: "),Vi=s("a"),Bk=l("Training Deep Nets with Sublinear Memory Cost"),qk=l(". The paper will also give you the exact details on the savings, but it\u2019s in the ballpark of "),Km=s("code"),Vk=l("O(sqrt(n))"),Hk=l(", where "),ed=s("code"),Fk=l("n"),Wk=l(" is the number of feed-forward layers."),x1=h(),vp=s("p"),Rk=l("To activate this feature in \u{1F917} Transformers for models that support it, use:"),D1=h(),m(Hi.$$.fragment),G1=h(),Ba=s("p"),Xk=l("or add "),td=s("code"),Yk=l("--gradient_checkpointing"),Qk=l(" to the Trainer arguments."),I1=h(),nt=s("h3"),qa=s("a"),ad=s("span"),m(Fi.$$.fragment),Jk=h(),sd=s("span"),Zk=l("Batch sizes"),U1=h(),wp=s("p"),Kk=l("One gets the most efficient performance when batch sizes and input/output neuron counts are divisible by a certain number, which typically starts at 8, but can be much higher as well. That number varies a lot depending on the specific hardware being used and the dtype of the model."),S1=h(),we=s("p"),e8=l("For example for fully connected layers (which correspond to GEMMs), NVIDIA provides recommendations for "),Wi=s("a"),t8=l("input/output neuron counts"),a8=l(" and "),Ri=s("a"),s8=l("batch size"),r8=l("."),z1=h(),Xi=s("p"),Yi=s("a"),i8=l("Tensor Core Requirements"),o8=l(" define the multiplier based on the dtype and the hardware. 
For example, for fp16 a multiple of 8 is recommended, but on A100 it\u2019s 64!"),N1=h(),Va=s("p"),l8=l("For parameters that are small, there is also "),Qi=s("a"),n8=l("Dimension Quantization Effects"),p8=l(" to consider, this is where tiling happens and the right multiplier can have a significant speedup."),C1=h(),H=s("p"),h8=l("Additionally, as explained in the "),gp=s("a"),f8=l("Gradient Accumulation"),c8=l(` section, the bigger the batch size the less often the optimizer is run, the faster the training is (considering the same dataset length). See benchmarks for `),Ji=s("a"),m8=l("RTX-3090"),d8=l(` and `),Zi=s("a"),u8=l("A100"),v8=l("."),L1=h(),pt=s("h3"),Ha=s("a"),rd=s("span"),m(Ki.$$.fragment),w8=h(),id=s("span"),g8=l("DP vs DDP"),M1=h(),ht=s("p"),od=s("code"),_8=l("DistributedDataParallel"),b8=l(" (DDP) is typically faster than "),ld=s("code"),y8=l("DataParallel"),E8=l(" (DP), but it is not always the case:"),O1=h(),Fa=s("ul"),nd=s("li"),P8=l("while DP is python threads-based, DDP is multiprocess-based - and as such it has no python threads limitations, such as GIL"),$8=h(),pd=s("li"),k8=l("on the other hand a slow inter-connectivity between the GPU cards could lead to an actual slower outcome with DDP"),B1=h(),_p=s("p"),A8=l("Here are the main differences in the inter-GPU communication overhead between the two modes:"),q1=h(),eo=s("p"),to=s("a"),T8=l("DDP"),j8=l(":"),V1=h(),Wa=s("ul"),hd=s("li"),x8=l("At the start time the main process replicates the model once from gpu 0 to the rest of gpus"),D8=h(),bp=s("li"),G8=l("Then for each batch:"),ao=s("ol"),fd=s("li"),I8=l("each gpu consumes each own mini-batch of data directly"),U8=h(),so=s("li"),S8=l("during "),cd=s("code"),z8=l("backward"),N8=l(", once the local gradients are ready, they are then averaged across all processes"),H1=h(),ro=s("p"),io=s("a"),C8=l("DP"),L8=l(":"),F1=h(),yp=s("p"),M8=l("For each batch:"),W1=h(),U=s("ol"),md=s("li"),O8=l("gpu 0 reads the batch of data and then sends a mini-batch to each gpu"),B8=h(),dd=s("li"),q8=l("replicates the up-to-date model from gpu 0 to each gpu"),V8=h(),oo=s("li"),H8=l("runs "),ud=s("code"),F8=l("forward"),W8=l(" and sends output from each gpu to gpu 0, computes loss"),R8=h(),Ep=s("li"),X8=l("scatters loss from gpu 0 to all gpus, runs "),vd=s("code"),Y8=l("backward"),Q8=h(),wd=s("li"),J8=l("sends gradients from each gpu to gpu 0 and averages those"),R1=h(),Pp=s("p"),Z8=l("The only communication DDP performs per batch is sending gradients, whereas DP does 5 different data exchanges per batch."),X1=h(),Ra=s("p"),K8=l("DP copies data within the process via python threads, whereas DDP copies data via "),lo=s("a"),e7=l("torch.distributed"),t7=l("."),Y1=h(),$p=s("p"),a7=l("Under DP gpu 0 performs a lot more work than the rest of the gpus, thus resulting in under-utilization of gpus."),Q1=h(),kp=s("p"),s7=l("You can use DDP across multiple machines, but this is not the case with DP."),J1=h(),Ap=s("p"),r7=l("There are other differences between DP and DDP but they aren\u2019t relevant to this discussion."),Z1=h(),Xa=s("p"),i7=l("If you want to go really deep into understanding these 2 modes, this "),no=s("a"),o7=l("article"),l7=l(" is highly recommended, as it has great diagrams, includes multiple benchmarks and profiler outputs on various hardware, explains all the nuances that you may need to know."),K1=h(),Tp=s("p"),n7=l("Let\u2019s look at an actual 
benchmark:"),e_=h(),Ya=s("table"),gd=s("thead"),ft=s("tr"),jp=s("th"),p7=l("Type"),h7=h(),_d=s("th"),f7=l("NVlink"),c7=h(),xp=s("th"),m7=l("Time"),d7=h(),ct=s("tbody"),mt=s("tr"),Dp=s("td"),u7=l("2:DP"),v7=h(),bd=s("td"),w7=l("Y"),g7=h(),Gp=s("td"),_7=l("110s"),b7=h(),dt=s("tr"),Ip=s("td"),y7=l("2:DDP"),E7=h(),yd=s("td"),P7=l("Y"),$7=h(),Up=s("td"),k7=l("101s"),A7=h(),ut=s("tr"),Sp=s("td"),T7=l("2:DDP"),j7=h(),Ed=s("td"),x7=l("N"),D7=h(),zp=s("td"),G7=l("131s"),t_=h(),Np=s("p"),I7=l("Analysis:"),a_=h(),Cp=s("p"),U7=l("Here DP is ~10% slower than DDP w/ NVlink, but ~15% faster than DDP w/o NVlink"),s_=h(),Lp=s("p"),S7=l("The real difference will depend on how much data each GPU needs to sync with the others - the more there is to sync, the more a slow link will slow down the total runtime."),r_=h(),Mp=s("p"),z7=l("Here is the full benchmark code and outputs:"),i_=h(),po=s("p"),Pd=s("code"),N7=l("NCCL_P2P_DISABLE=1"),C7=l(" was used to disable the NVLink feature on the corresponding benchmark."),o_=h(),m(ho.$$.fragment),l_=h(),D=s("p"),L7=l("Hardware: 2x TITAN RTX 24GB each + NVlink with 2 NVLinks ("),$d=s("code"),M7=l("NV2"),O7=l(" in "),kd=s("code"),B7=l("nvidia-smi topo -m"),q7=l(`) Software: `),Ad=s("code"),V7=l("pytorch-1.8-to-be"),H7=l(" + "),Td=s("code"),F7=l("cuda-11.0"),W7=l(" / "),jd=s("code"),R7=l("transformers==4.3.0.dev0"),n_=h(),vt=s("h3"),Qa=s("a"),xd=s("span"),m(fo.$$.fragment),X7=h(),Dd=s("span"),Y7=l("DataLoader"),p_=h(),Op=s("p"),Q7=l("One of the important requirements to reach great training speed is the ability to feed the GPU at the maximum speed it can handle. By default everything happens in the main process and it might not be able to read the data from disk fast enough, and thus create a bottleneck, leading to GPU under-utilization."),h_=h(),Ja=s("ul"),Bp=s("li"),Gd=s("code"),J7=l("DataLoader(pin_memory=True, ...)"),Z7=l(" which ensures that the data gets preloaded into the pinned memory on CPU and typically leads to much faster transfers from CPU to GPU memory."),K7=h(),qp=s("li"),Id=s("code"),e9=l("DataLoader(num_workers=4, ...)"),t9=l(" - spawn several workers to pre-load data faster - during training watch the GPU utilization stats and if it\u2019s far from 100% experiment with raising the number of workers. Of course, the problem could be elsewhere so a very big number of workers won\u2019t necessarily lead to a better performance."),f_=h(),wt=s("h2"),Za=s("a"),Ud=s("span"),m(co.$$.fragment),a9=h(),Sd=s("span"),s9=l("Faster optimizer"),c_=h(),gt=s("p"),r9=l("pytorch-nightly introduced "),zd=s("code"),i9=l("torch.optim._multi_tensor"),o9=l(" which should significantly speed up the optimizers for situations with lots of small feature tensors. 
It should eventually become the default, but if you want to experiment with it sooner and don\u2019t mind using the bleed-edge, see: "),mo=s("a"),l9=l("https://github.com/huggingface/transformers/issues/9965"),m_=h(),_t=s("h3"),Ka=s("a"),Nd=s("span"),m(uo.$$.fragment),n9=h(),Cd=s("span"),p9=l("Sparsity"),d_=h(),bt=s("h4"),es=s("a"),Ld=s("span"),m(vo.$$.fragment),h9=h(),Md=s("span"),f9=l("Mixture of Experts"),u_=h(),Vp=s("p"),c9=l(`Quite a few of the recent papers reported a 4-5x training speedup and a faster inference by integrating Mixture of Experts (MoE) into the Transformer models.`),v_=h(),Hp=s("p"),m9=l("Since it has been discovered that more parameters lead to better performance, this technique allows to increase the number of parameters by an order of magnitude without increasing training costs."),w_=h(),Fp=s("p"),d9=l("In this approach every other FFN layer is replaced with a MoE Layer which consists of many experts, with a gated function that trains each expert in a balanced way depending on the input token\u2019s position in a sequence."),g_=h(),Wp=s("p"),Rp=s("img"),__=h(),ts=s("p"),u9=l("(source: "),wo=s("a"),v9=l("GLAM"),w9=l(")"),b_=h(),Xp=s("p"),g9=l("You can find exhaustive details and comparison tables in the papers listed at the end of this section."),y_=h(),Yp=s("p"),_9=l("The main drawback of this approach is that it requires staggering amounts of GPU memory - almost an order of magnitude larger than its dense equivalent. Various distillation and approaches are proposed to how to overcome the much higher memory requirements."),E_=h(),Qp=s("p"),b9=l("There is direct trade-off though, you can use just a few experts with a 2-3x smaller base model instead of dozens or hundreds experts leading to a 5x smaller model and thus increase the training speed moderately while increasing the memory requirements moderately as well."),P_=h(),Jp=s("p"),y9=l("Most related papers and implementations are built around Tensorflow/TPUs:"),$_=h(),ge=s("ul"),Od=s("li"),go=s("a"),E9=l("GShard: Scaling Giant Models with Conditional Computation and Automatic Sharding"),P9=h(),Bd=s("li"),_o=s("a"),$9=l("Switch Transformers: Scaling to Trillion Parameter Models with Simple and Efficient Sparsity"),k9=h(),qd=s("li"),bo=s("a"),A9=l("GLaM: Generalist Language Model (GLaM)"),k_=h(),$=s("p"),T9=l("And for Pytorch DeepSpeed has built one as well: "),yo=s("a"),j9=l("DeepSpeed-MoE: Advancing Mixture-of-Experts Inference and Training to Power Next-Generation AI Scale"),x9=l(", "),Eo=s("a"),D9=l("Mixture of Experts"),G9=l(" - blog posts: "),Po=s("a"),I9=l("1"),U9=l(", "),$o=s("a"),S9=l("2"),z9=l(" and specific deployment with large transformer-based natural language generation models: "),ko=s("a"),N9=l("blog post"),C9=l(", "),Zp=s("a"),L9=l("Megatron-Deepspeed branch"),M9=l("."),A_=h(),yt=s("h3"),as=s("a"),Vd=s("span"),m(Ao.$$.fragment),O9=h(),Hd=s("span"),B9=l("Efficient Software Prebuilds"),T_=h(),ss=s("p"),q9=l("PyTorch\u2019s "),To=s("a"),V9=l("pip and conda builds"),H9=l(" come prebuit with the cuda toolkit which is enough to run PyTorch, but it is insufficient if you need to build cuda extensions."),j_=h(),rs=s("p"),F9=l("At times it may take an additional effort to pre-build some components, e.g., if you\u2019re using libraries like "),Fd=s("code"),W9=l("apex"),R9=l(" that don\u2019t come pre-compiled. In other situations figuring out how to install the right cuda toolkit system-wide can be complicated. 
To address these users\u2019 needs PyTorch and NVIDIA release a new version of NGC docker container which already comes with everything prebuilt and you just need to install your programs on it and it will run out of the box."),x_=h(),Kp=s("p"),X9=l("This approach is also useful if you want to tweak the pytorch source and/or make a new customized build."),D_=h(),_e=s("p"),Y9=l("To find the docker image version you want start "),jo=s("a"),Q9=l("here"),J9=l(", choose one of the latest monthly releases. Go into the release\u2019s notes for the desired release, check that the environment\u2019s components are matching your needs (including NVIDIA Driver requirements!) and then at the very top of that document go to the corresponding NGC page. If for some reason you get lost, here is "),xo=s("a"),Z9=l("the index of all PyTorch NGC images"),K9=l("."),G_=h(),eh=s("p"),eA=l("Next follow the instructions to download and deploy the docker image."),I_=h(),Et=s("h2"),is=s("a"),Wd=s("span"),m(Do.$$.fragment),tA=h(),Rd=s("span"),aA=l("Contribute"),U_=h(),th=s("p"),sA=l("This document is far from being complete and a lot more needs to be added, so if you have additions or corrections to make please don\u2019t hesitate to open a PR or if you aren\u2019t sure start an Issue and we can discuss the details there."),S_=h(),ah=s("p"),rA=l("When making contributions that A is better than B, please try to include a reproducible benchmark and/or a link to the source of that information (unless it comes directly from you)."),this.h()},l(e){const o=Zz('[data-svelte="svelte-1phssyn"]',document.head);y=r(o,"META",{name:!0,content:!0}),o.forEach(t),k=f(e),E=r(e,"H1",{class:!0});var Go=i(E);A=r(Go,"A",{id:!0,class:!0,href:!0});var Xd=i(A);Th=r(Xd,"SPAN",{});var TA=i(Th);d(Es.$$.fragment,TA),TA.forEach(t),Xd.forEach(t),zy=f(Go),jh=r(Go,"SPAN",{});var jA=i(jh);Ny=n(jA,"Performance and Scalability: How To Fit a Bigger Model and Train It Faster"),jA.forEach(t),Go.forEach(t),Zd=f(e),Io=r(e,"BLOCKQUOTE",{});var xA=i(Io);xh=r(xA,"P",{});var DA=i(xh);Dh=r(DA,"EM",{});var GA=i(Dh);Cy=n(GA,"Or how to escape the dreaded \u201CRuntimeError: CUDA error: out of memory\u201D error."),GA.forEach(t),DA.forEach(t),xA.forEach(t),Kd=f(e),d(Ps.$$.fragment,e),eu=f(e),te=r(e,"P",{});var sh=i(te);Ly=n(sh,"Training ever larger models can become challenging even on modern GPUs. Due to their immense size we often run out of GPU memory and training can take very long. In this section we have a look at a few tricks to reduce the memory footprint and speed up training for large models and how they are integrated in the "),Uo=r(sh,"A",{href:!0});var IA=i(Uo);My=n(IA,"Trainer"),IA.forEach(t),Oy=n(sh," and "),$s=r(sh,"A",{href:!0,rel:!0});var UA=i($s);By=n(UA,"\u{1F917} Accelerate"),UA.forEach(t),qy=n(sh,". Before we start make sure you have installed the following libraries:"),sh.forEach(t),tu=f(e),d(ks.$$.fragment,e),au=f(e),ae=r(e,"P",{});var rh=i(ae);Vy=n(rh,"The "),Gh=r(rh,"CODE",{});var SA=i(Gh);Hy=n(SA,"nvidia-ml-py3"),SA.forEach(t),Fy=n(rh," library allows us to monitor the memory usage of the models from within Python. You might be familiar with the "),Ih=r(rh,"CODE",{});var zA=i(Ih);Wy=n(zA,"nvidia-smi"),zA.forEach(t),Ry=n(rh," command in the terminal - this library allows to access the same information in Python directly."),rh.forEach(t),su=f(e),Pt=r(e,"P",{});var N_=i(Pt);Xy=n(N_,"Then we create some dummy data. We create random token IDs between 100 and 30000 and binary labels for a classifier. 
In total we get 512 sequences each with length 512 and store them in a "),As=r(N_,"A",{href:!0,rel:!0});var NA=i(As);Uh=r(NA,"CODE",{});var CA=i(Uh);Yy=n(CA,"Dataset"),CA.forEach(t),NA.forEach(t),Qy=n(N_," with PyTorch format."),N_.forEach(t),ru=f(e),d(Ts.$$.fragment,e),iu=f(e),$t=r(e,"P",{});var C_=i($t);Jy=n(C_,"We want to print some summary statistics for the GPU utilization and the training run with the "),So=r(C_,"A",{href:!0});var LA=i(So);Zy=n(LA,"Trainer"),LA.forEach(t),Ky=n(C_,". We setup a two helper functions to do just that:"),C_.forEach(t),ou=f(e),d(js.$$.fragment,e),lu=f(e),zo=r(e,"P",{});var MA=i(zo);e2=n(MA,"Let\u2019s verify that we start with a free GPU memory:"),MA.forEach(t),nu=f(e),d(xs.$$.fragment,e),pu=f(e),No=r(e,"P",{});var OA=i(No);t2=n(OA,"That looks good: the GPU memory is not occupied as we would expect before we load any models. If that\u2019s not the case on your machine make sure to stop all processes that are using GPU memory. However, not all free GPU memory can be used by the user. When a model is loaded to the GPU also the kernels are loaded which can take up 1-2GB of memory. To see how much it is we load a tiny tensor into the GPU which triggers the kernels to be loaded as well."),OA.forEach(t),hu=f(e),d(Ds.$$.fragment,e),fu=f(e),Co=r(e,"P",{});var BA=i(Co);a2=n(BA,"We see that the kernels alone take up 1.3GB of GPU memory. Now let\u2019s see how much space the model uses."),BA.forEach(t),cu=f(e),Pe=r(e,"H2",{class:!0});var L_=i(Pe);kt=r(L_,"A",{id:!0,class:!0,href:!0});var qA=i(kt);Sh=r(qA,"SPAN",{});var VA=i(Sh);d(Gs.$$.fragment,VA),VA.forEach(t),qA.forEach(t),s2=f(L_),zh=r(L_,"SPAN",{});var HA=i(zh);r2=n(HA,"Load Model"),HA.forEach(t),L_.forEach(t),mu=f(e),At=r(e,"P",{});var M_=i(At);i2=n(M_,"First, we load the "),Nh=r(M_,"CODE",{});var FA=i(Nh);o2=n(FA,"bert-large-uncased"),FA.forEach(t),l2=n(M_," model. We load the model weights directly to the GPU so that we can check how much space just weights use."),M_.forEach(t),du=f(e),d(Is.$$.fragment,e),uu=f(e),Tt=r(e,"P",{});var O_=i(Tt);n2=n(O_,"We can see that the model weights alone take up 1.3 GB of the GPU memory. The exact number depends on the specific GPU you are using. Note that on newer GPUs a model can sometimes take up more space since the weights are loaded in an optimized fashion that speeds up the usage of the model. Now we can also quickly check if we get the same result as with "),Ch=r(O_,"CODE",{});var WA=i(Ch);p2=n(WA,"nvidia-smi"),WA.forEach(t),h2=n(O_," CLI:"),O_.forEach(t),vu=f(e),d(Us.$$.fragment,e),wu=f(e),d(Ss.$$.fragment,e),gu=f(e),Lo=r(e,"P",{});var RA=i(Lo);f2=n(RA,"We get the same number as before and you can also see that we are using a V100 GPU with 16GB of memory. So now we can start training the model and see how the GPU memory consumption changes. 
First, we set up a few standard training arguments that we will use across all our experiments:"),RA.forEach(t),_u=f(e),d(zs.$$.fragment,e),bu=f(e),d(jt.$$.fragment,e),yu=f(e),$e=r(e,"H2",{class:!0});var B_=i($e);xt=r(B_,"A",{id:!0,class:!0,href:!0});var XA=i(xt);Lh=r(XA,"SPAN",{});var YA=i(Lh);d(Ns.$$.fragment,YA),YA.forEach(t),XA.forEach(t),c2=f(B_),Mh=r(B_,"SPAN",{});var QA=i(Mh);m2=n(QA,"Vanilla Training"),QA.forEach(t),B_.forEach(t),Eu=f(e),Dt=r(e,"P",{});var q_=i(Dt);d2=n(q_,"As a first experiment we will use the "),Mo=r(q_,"A",{href:!0});var JA=i(Mo);u2=n(JA,"Trainer"),JA.forEach(t),v2=n(q_," and train the model without any further modifications and a batch size of 4:"),q_.forEach(t),Pu=f(e),d(Cs.$$.fragment,e),$u=f(e),d(Ls.$$.fragment,e),ku=f(e),Oo=r(e,"P",{});var ZA=i(Oo);w2=n(ZA,"We see that already a relatively small batch size almost fills up our GPU\u2019s entire memory. However, a larger batch size can often result in faster model convergence or better end performance. So ideally we want to tune the batch size to our model\u2019s needs and not to the GPU limitations. A simple trick to effectively train larger batch size is gradient accumulation."),ZA.forEach(t),Au=f(e),ke=r(e,"H2",{class:!0});var V_=i(ke);Gt=r(V_,"A",{id:!0,class:!0,href:!0});var KA=i(Gt);Oh=r(KA,"SPAN",{});var eT=i(Oh);d(Ms.$$.fragment,eT),eT.forEach(t),KA.forEach(t),g2=f(V_),Bh=r(V_,"SPAN",{});var tT=i(Bh);_2=n(tT,"Gradient Accumulation"),tT.forEach(t),V_.forEach(t),Tu=f(e),Bo=r(e,"P",{});var aT=i(Bo);b2=n(aT,"The idea behind gradient accumulation is to instead of calculating the gradients for the whole batch at once to do it in smaller steps. The way we do that is to calculate the gradients iteratively in smaller batches by doing a forward and backward pass through the model and accumulating the gradients in the process. When enough gradients are accumulated we run the model\u2019s optimization step. This way we can easily increase the overall batch size to numbers that would never fit into the GPU\u2019s memory. In turn, however, the added forward and backward passes can slow down the training a bit."),aT.forEach(t),ju=f(e),N=r(e,"P",{});var os=i(N);y2=n(os,"We can use gradient accumulation in the "),qo=r(os,"A",{href:!0});var sT=i(qo);E2=n(sT,"Trainer"),sT.forEach(t),P2=n(os," by simply adding the "),qh=r(os,"CODE",{});var rT=i(qh);$2=n(rT,"gradient_accumulation_steps"),rT.forEach(t),k2=n(os," argument to "),Vo=r(os,"A",{href:!0});var iT=i(Vo);A2=n(iT,"TrainingArguments"),iT.forEach(t),T2=n(os,". Let\u2019s see how it impacts the models memory footprint:"),os.forEach(t),xu=f(e),d(Os.$$.fragment,e),Du=f(e),d(Bs.$$.fragment,e),Gu=f(e),G=r(e,"P",{});var be=i(G);j2=n(be,"We can see that the memory footprint was dramatically reduced at the cost of being only slightly slower than the vanilla run. Of course, this would change as you increase the number of accumulation steps. In general you would want to max out the GPU usage as much as possible. So in our case, the batch_size of 4 was already pretty close to the GPU\u2019s limit. 
If we wanted to train with a batch size of 64 we should not use "),Vh=r(be,"CODE",{});var oT=i(Vh);x2=n(oT,"per_device_train_batch_size=1"),oT.forEach(t),D2=n(be," and "),Hh=r(be,"CODE",{});var lT=i(Hh);G2=n(lT,"gradient_accumulation_steps=64"),lT.forEach(t),I2=n(be," but instead "),Fh=r(be,"CODE",{});var nT=i(Fh);U2=n(nT,"per_device_train_batch_size=4"),nT.forEach(t),S2=n(be," and "),Wh=r(be,"CODE",{});var pT=i(Wh);z2=n(pT,"gradient_accumulation_steps=16"),pT.forEach(t),N2=n(be," which has the same effective batch size while making better use of the available GPU resources."),be.forEach(t),Iu=f(e),Ho=r(e,"P",{});var hT=i(Ho);C2=n(hT,"Next we have a look at another trick to save a little bit more GPU memory called gradient checkpointing."),hT.forEach(t),Uu=f(e),Ae=r(e,"H2",{class:!0});var H_=i(Ae);It=r(H_,"A",{id:!0,class:!0,href:!0});var fT=i(It);Rh=r(fT,"SPAN",{});var cT=i(Rh);d(qs.$$.fragment,cT),cT.forEach(t),fT.forEach(t),L2=f(H_),Xh=r(H_,"SPAN",{});var mT=i(Xh);M2=n(mT,"Gradient Checkpointing"),mT.forEach(t),H_.forEach(t),Su=f(e),Fo=r(e,"P",{});var dT=i(Fo);O2=n(dT,"Even when we set the batch size to 1 and use gradient accumulation we can still run out of memory when working with large models. In order to compute the gradients during the backward pass all activations from the forward pass are normally saved. This can create a big memory overhead. Alternatively, one could forget all activations during the forward pass and recompute them on demand during the backward pass. This would however add a significant computational overhead and slow down training."),dT.forEach(t),zu=f(e),Ut=r(e,"P",{});var F_=i(Ut);B2=n(F_,"Gradient checkpointing strikes a compromise between the two approaches and saves strategically selected activations throughout the computational graph so only a fraction of the activations need to be re-computed for the gradients. See "),Vs=r(F_,"A",{href:!0,rel:!0});var uT=i(Vs);q2=n(uT,"this great article"),uT.forEach(t),V2=n(F_," explaining the ideas behind gradient checkpointing."),F_.forEach(t),Nu=f(e),se=r(e,"P",{});var ih=i(se);H2=n(ih,"To enable gradient checkpointing in the "),Wo=r(ih,"A",{href:!0});var vT=i(Wo);F2=n(vT,"Trainer"),vT.forEach(t),W2=n(ih," we only need ot pass it as a flag to the "),Ro=r(ih,"A",{href:!0});var wT=i(Ro);R2=n(wT,"TrainingArguments"),wT.forEach(t),X2=n(ih,". Everything else is handled under the hood:"),ih.forEach(t),Cu=f(e),d(Hs.$$.fragment,e),Lu=f(e),d(Fs.$$.fragment,e),Mu=f(e),Xo=r(e,"P",{});var gT=i(Xo);Y2=n(gT,"We can see that this saved some more memory but at the same time training became a bit slower. A general rule of thumb is that gradient checkpointing slows down training by about 20%. Let\u2019s have a look at another method with which we can regain some speed: mixed precision training."),gT.forEach(t),Ou=f(e),Te=r(e,"H2",{class:!0});var W_=i(Te);St=r(W_,"A",{id:!0,class:!0,href:!0});var _T=i(St);Yh=r(_T,"SPAN",{});var bT=i(Yh);d(Ws.$$.fragment,bT),bT.forEach(t),_T.forEach(t),Q2=f(W_),Qh=r(W_,"SPAN",{});var yT=i(Qh);J2=n(yT,"FP16 Training"),yT.forEach(t),W_.forEach(t),Bu=f(e),re=r(e,"P",{});var oh=i(re);Z2=n(oh,"The idea of mixed precision training is that no all variables need to be stored in full (32-bit) floating point precision. If we can reduce the precision the variales and their computations are faster. The main advantage comes from saving the activations in half (16-bit) precision. 
Although the gradients are also computed in half precision they are converted back to full precision for the optimization step so no memory is saved here. Since the model is present on the GPU in both 16-bit and 32-bit precision this can use more GPU memory (1.5x the original model is on the GPU), especially for small batch sizes. Since some computations are performed in full and some in half precision this approach is also called mixed precision training. Enabling mixed precision training is also just a matter of setting the "),Jh=r(oh,"CODE",{});var ET=i(Jh);K2=n(ET,"fp16"),ET.forEach(t),e0=n(oh," flag to "),Zh=r(oh,"CODE",{});var PT=i(Zh);t0=n(PT,"True"),PT.forEach(t),a0=n(oh,":"),oh.forEach(t),qu=f(e),d(Rs.$$.fragment,e),Vu=f(e),d(Xs.$$.fragment,e),Hu=f(e),Yo=r(e,"P",{});var $T=i(Yo);s0=n($T,"We can see that this is almost twice as fast as the vanilla training. Let\u2019s add it to the mix of the previous methods:"),$T.forEach(t),Fu=f(e),d(Ys.$$.fragment,e),Wu=f(e),d(Qs.$$.fragment,e),Ru=f(e),Qo=r(e,"P",{});var kT=i(Qo);r0=n(kT,"We can see that with these tweaks we use about half the GPU memory as at the beginning while also being slightly faster. But we are not done, yet! There is another area where we can save GPU memory: the optimizer."),kT.forEach(t),Xu=f(e),je=r(e,"H2",{class:!0});var R_=i(je);zt=r(R_,"A",{id:!0,class:!0,href:!0});var AT=i(zt);Kh=r(AT,"SPAN",{});var TT=i(Kh);d(Js.$$.fragment,TT),TT.forEach(t),AT.forEach(t),i0=f(R_),ef=r(R_,"SPAN",{});var jT=i(ef);o0=n(jT,"Optimizer"),jT.forEach(t),R_.forEach(t),Yu=f(e),Jo=r(e,"P",{});var xT=i(Jo);l0=n(xT,"The most common optimizer used to train transformer model is Adam or AdamW (Adam with weight decay). Adam achieves good convergence by storing the rolling average of the previous gradients which, however, adds an additional memory footprint of the order of the number of model parameters. One remedy to this is to use an alternative optimizer such as Adafactor."),xT.forEach(t),Qu=f(e),xe=r(e,"H3",{class:!0});var X_=i(xe);Nt=r(X_,"A",{id:!0,class:!0,href:!0});var DT=i(Nt);tf=r(DT,"SPAN",{});var GT=i(tf);d(Zs.$$.fragment,GT),GT.forEach(t),DT.forEach(t),n0=f(X_),af=r(X_,"SPAN",{});var IT=i(af);p0=n(IT,"Adafactor"),IT.forEach(t),X_.forEach(t),Ju=f(e),Ct=r(e,"P",{});var Y_=i(Ct);h0=n(Y_,"Instead of keeping the rolling average for each element in the weight matrices Adafactor only stores aggregated information (row- and column-wise sums of the rolling averages) which reduces the footprint considerably. One downside of Adafactor is that in some instances convergence can be slower than Adam\u2019s so some experimentation is advised here. We can use Adafactor simply by setting "),sf=r(Y_,"CODE",{});var UT=i(sf);f0=n(UT,'optim="adafactor"'),UT.forEach(t),c0=n(Y_,":"),Y_.forEach(t),Zu=f(e),d(Ks.$$.fragment,e),Ku=f(e),d(er.$$.fragment,e),ev=f(e),Zo=r(e,"P",{});var ST=i(Zo);m0=n(ST,"We can see that this saves a few more GB on the GPU. Let\u2019s see how it looks when we add it to the other methods we introduced earlier:"),ST.forEach(t),tv=f(e),d(tr.$$.fragment,e),av=f(e),d(ar.$$.fragment,e),sv=f(e),Ko=r(e,"P",{});var zT=i(Ko);d0=n(zT,"We went from 15 GB memory usage to 5 GB - a 3x improvement while maintaining the throughput! However, as mentioned before, the convergence of Adafactor can be worse than Adam. 
There is an alternative to Adafactor called 8-bit Adam that takes a slightly different approach."),zT.forEach(t),rv=f(e),De=r(e,"H3",{class:!0});var Q_=i(De);Lt=r(Q_,"A",{id:!0,class:!0,href:!0});var NT=i(Lt);rf=r(NT,"SPAN",{});var CT=i(rf);d(sr.$$.fragment,CT),CT.forEach(t),NT.forEach(t),u0=f(Q_),of=r(Q_,"SPAN",{});var LT=i(of);v0=n(LT,"8-bit Adam"),LT.forEach(t),Q_.forEach(t),iv=f(e),el=r(e,"P",{});var MT=i(el);w0=n(MT,"Instead of aggregating optimizer states like Adafactor, 8-bit Adam keeps the full state and quantizes it. Quantization means that it stores the state with lower precision and dequantizes it only for the optimization. This is similar to the idea behind FP16 training where using variables with lower precision saves memory."),MT.forEach(t),ov=f(e),I=r(e,"P",{});var ye=i(I);g0=n(ye,"In contrast to the previous approaches is this one not integrated into the "),tl=r(ye,"A",{href:!0});var OT=i(tl);_0=n(OT,"Trainer"),OT.forEach(t),b0=n(ye," as a simple flag. We need to install the 8-bit optimizer and then pass it as a custom optimizer to the "),al=r(ye,"A",{href:!0});var BT=i(al);y0=n(BT,"Trainer"),BT.forEach(t),E0=n(ye,". Follow the installation guide in the Github "),rr=r(ye,"A",{href:!0,rel:!0});var qT=i(rr);P0=n(qT,"repo"),qT.forEach(t),$0=n(ye," to install the "),lf=r(ye,"CODE",{});var VT=i(lf);k0=n(VT,"bitsandbytes"),VT.forEach(t),A0=n(ye," library that implements the 8-bit Adam optimizer."),ye.forEach(t),lv=f(e),sl=r(e,"P",{});var HT=i(sl);T0=n(HT,"Once installed, we just need to initialize the the optimizer. Although this looks like a considerable amount of work it actually just involves two steps: first we need to group the model\u2019s parameters into two groups where to one group we apply weight decay and to the other we don\u2019t. Usually, biases and layer norm parameters are not weight decayed. Then in a second step we just do some argument housekeeping to use the same parameters as the previously used AdamW optimizer."),HT.forEach(t),nv=f(e),d(Mt.$$.fragment,e),pv=f(e),d(ir.$$.fragment,e),hv=f(e),Ot=r(e,"P",{});var J_=i(Ot);j0=n(J_,"We can now pass the custom optimizer as an argument to the "),nf=r(J_,"CODE",{});var FT=i(nf);x0=n(FT,"Trainer"),FT.forEach(t),D0=n(J_,":"),J_.forEach(t),fv=f(e),d(or.$$.fragment,e),cv=f(e),d(lr.$$.fragment,e),mv=f(e),rl=r(e,"P",{});var WT=i(rl);G0=n(WT,"We can see that we get a similar memory improvement as with Adafactor while keeping the full rolling average of the gradients. Let\u2019s repeat the experiment with the full settings:"),WT.forEach(t),dv=f(e),d(nr.$$.fragment,e),uv=f(e),d(pr.$$.fragment,e),vv=f(e),il=r(e,"P",{});var RT=i(il);I0=n(RT,"Again, we get about a 3x memory improvement and even slightly higher throughput as using Adafactor. So we have seen how we can optimize the memory footprint of large models. The following plot summarizes all our experiments:"),RT.forEach(t),wv=f(e),ol=r(e,"P",{});var XT=i(ol);ll=r(XT,"IMG",{src:!0,alt:!0}),XT.forEach(t),gv=f(e),Ge=r(e,"H2",{class:!0});var Z_=i(Ge);Bt=r(Z_,"A",{id:!0,class:!0,href:!0});var YT=i(Bt);pf=r(YT,"SPAN",{});var QT=i(pf);d(hr.$$.fragment,QT),QT.forEach(t),YT.forEach(t),U0=f(Z_),hf=r(Z_,"SPAN",{});var JT=i(hf);S0=n(JT,"Using \u{1F917} Accelerate"),JT.forEach(t),Z_.forEach(t),_v=f(e),ie=r(e,"P",{});var lh=i(ie);z0=n(lh,"So far we have used the "),nl=r(lh,"A",{href:!0});var ZT=i(nl);N0=n(ZT,"Trainer"),ZT.forEach(t),C0=n(lh," to run the experiments but a more flexible alternative to that approach is to use \u{1F917} Accelerate. 
With \u{1F917} Accelerate you have full control over the training loop and can essentially write the loop in pure PyTorch with some minor modifications. In turn it allows you to easily scale across different infrastructures such as CPUs, GPUs, TPUs, or distributed multi-GPU setups without changing any code. Let\u2019s see what it takes to implement all of the above tweaks in \u{1F917} Accelerate. We can still use the "),pl=r(lh,"A",{href:!0});var KT=i(pl);L0=n(KT,"TrainingArguments"),KT.forEach(t),M0=n(lh," to wrap the training settings:"),lh.forEach(t),bv=f(e),d(fr.$$.fragment,e),yv=f(e),hl=r(e,"P",{});var ej=i(hl);O0=n(ej,"The full example training loop with \u{1F917} Accelerate is only a handful of lines of code long:"),ej.forEach(t),Ev=f(e),d(cr.$$.fragment,e),Pv=f(e),T=r(e,"P",{});var F=i(T);B0=n(F,"First we wrap the dataset in a "),mr=r(F,"A",{href:!0,rel:!0});var tj=i(mr);ff=r(tj,"CODE",{});var aj=i(ff);q0=n(aj,"DataLoader"),aj.forEach(t),tj.forEach(t),V0=n(F,". Then we can enable gradient checkpointing by calling the model\u2019s "),fl=r(F,"A",{href:!0});var sj=i(fl);H0=n(sj,"gradient_checkpointing_enable()"),sj.forEach(t),F0=n(F," method. When we initialize the "),dr=r(F,"A",{href:!0,rel:!0});var rj=i(dr);cf=r(rj,"CODE",{});var ij=i(cf);W0=n(ij,"Accelerator"),ij.forEach(t),rj.forEach(t),R0=n(F," we can specifiy if we want to use mixed precision training and it will take care of it for us in the "),mf=r(F,"CODE",{});var oj=i(mf);X0=n(oj,"prepare"),oj.forEach(t),Y0=n(F," call. During the "),ur=r(F,"A",{href:!0,rel:!0});var lj=i(ur);df=r(lj,"CODE",{});var nj=i(df);Q0=n(nj,"prepare"),nj.forEach(t),lj.forEach(t),J0=n(F," call the dataloader will also be distributed across workers should we use multiple GPUs. We use the same 8-bit optimizer from the earlier experiments."),F.forEach(t),$v=f(e),qt=r(e,"P",{});var K_=i(qt);Z0=n(K_,"Finally, we can write the main training loop. Note that the "),uf=r(K_,"CODE",{});var pj=i(uf);K0=n(pj,"backward"),pj.forEach(t),e3=n(K_," call is handled by \u{1F917} Accelerate. We can also see how gradient accumulation works: we normalize the loss so we get the average at the end of accumulation and once we have enough steps we run the optimization. Now the question is: does this use the same amount of memory as the previous steps? Let\u2019s check:"),K_.forEach(t),kv=f(e),d(vr.$$.fragment,e),Av=f(e),cl=r(e,"P",{});var hj=i(cl);t3=n(hj,"Indeed it does. Implementing these optimization techniques with \u{1F917} Accelerate only takes a handful of lines of code and comes with the benefit of more flexiblity in the training loop."),hj.forEach(t),Tv=f(e),ml=r(e,"P",{});var fj=i(ml);a3=n(fj,"Now, let\u2019s take a step back and discuss what we should optimize for when scaling the training of large models."),fj.forEach(t),jv=f(e),Ie=r(e,"H2",{class:!0});var eb=i(Ie);Vt=r(eb,"A",{id:!0,class:!0,href:!0});var cj=i(Vt);vf=r(cj,"SPAN",{});var mj=i(vf);d(wr.$$.fragment,mj),mj.forEach(t),cj.forEach(t),s3=f(eb),wf=r(eb,"SPAN",{});var dj=i(wf);r3=n(dj,"How to scale"),dj.forEach(t),eb.forEach(t),xv=f(e),dl=r(e,"P",{});var uj=i(dl);i3=n(uj,"When we train models there are a two aspects we want to optimize at the same time:"),uj.forEach(t),Dv=f(e),Ht=r(e,"UL",{});var tb=i(Ht);gf=r(tb,"LI",{});var vj=i(gf);o3=n(vj,"Data throughput/training time"),vj.forEach(t),l3=f(tb),_f=r(tb,"LI",{});var wj=i(_f);n3=n(wj,"Model performance"),wj.forEach(t),tb.forEach(t),Gv=f(e),ul=r(e,"P",{});var gj=i(ul);p3=n(gj,"We have seen that each method changes the memory usage and throughput. 
In general we want to maximize the throughput (samples/second) to minimize the training cost. This is generally achieved by utilizing the GPU as much as possible and thus filling GPU memory to its limit. For example, as mentioned earlier, we only employ gradient accumulation when we want to use a batch size beyond the size of the GPU memory. If the desired batch size fits into memory then there is no reason to apply gradient accumulation which will only slow down training."),gj.forEach(t),Iv=f(e),vl=r(e,"P",{});var _j=i(vl);h3=n(_j,"The second objective is model performance. Just because we can does not mean we should use a large batch size. As part of hyperparameter tuning you should determine which batch size yields the best result and then optimize the throughput accordingly."),_j.forEach(t),Uv=f(e),wl=r(e,"P",{});var bj=i(wl);f3=n(bj,"Sometimes, even when applying all the above tweaks the throughput on a given GPU might still not be good enough. One easy solution is to change the type of GPU. For example switching from let\u2019s say a K80 (which you typically get on Google Colab) to a fancier GPU such as the V100 or A100. Although they are more expensive they are usually more cost effective than cheaper GPUs due to their larger memory and faster architecture. For some applications, such as pretraining, this might still not be fast enough. In this case you want to scale your experiment to several GPUs."),bj.forEach(t),Sv=f(e),Ue=r(e,"H2",{class:!0});var ab=i(Ue);Ft=r(ab,"A",{id:!0,class:!0,href:!0});var yj=i(Ft);bf=r(yj,"SPAN",{});var Ej=i(bf);d(gr.$$.fragment,Ej),Ej.forEach(t),yj.forEach(t),c3=f(ab),yf=r(ab,"SPAN",{});var Pj=i(yf);m3=n(Pj,"Multi-GPU Training"),Pj.forEach(t),ab.forEach(t),zv=f(e),C=r(e,"P",{});var ls=i(C);d3=n(ls,"If your model fits on a single GPU scaling to many GPUs can be achieved fairly easily with data parallelism. The idea is very similar to gradient accumulation with the distinction that instead of running the forward and backward passes during the accumulation in sequence on a single machine they are performed in parallel on multiple machines. So each GPU gets a small batch, runs the forward and backward passes and then the gradients from all machines are aggregated and the model is optimized. You can combine this with all the methods we described before. For example, if you have 4 GPUs and use "),Ef=r(ls,"CODE",{});var $j=i(Ef);u3=n($j,"per_device_train_batch_size=12"),$j.forEach(t),v3=n(ls," and "),Pf=r(ls,"CODE",{});var kj=i(Pf);w3=n(kj,"gradient_accumulation_steps=3"),kj.forEach(t),g3=n(ls," you will have an effective batch size of "),$f=r(ls,"CODE",{});var Aj=i($f);_3=n(Aj,"4*12*3=144"),Aj.forEach(t),b3=n(ls,"."),ls.forEach(t),Nv=f(e),L=r(e,"P",{});var ns=i(L);y3=n(ns,"The "),gl=r(ns,"A",{href:!0});var Tj=i(gl);E3=n(Tj,"Trainer"),Tj.forEach(t),P3=n(ns," allows for distributed training and if you execute your "),_l=r(ns,"A",{href:!0});var jj=i(_l);$3=n(jj,"Trainer"),jj.forEach(t),k3=n(ns," training script on a machine with multiple GPUs it will automatically utilize all of them, hence the name "),kf=r(ns,"CODE",{});var xj=i(kf);A3=n(xj,"per_device_train_batch_size"),xj.forEach(t),T3=n(ns,". In \u{1F917} Accelerate you can configure the infrastructure setup with the following command:"),ns.forEach(t),Cv=f(e),d(_r.$$.fragment,e),Lv=f(e),bl=r(e,"P",{});var Dj=i(bl);j3=n(Dj,"Until now we have opperated under the assumption that we can fit the model onto a single GPU without or with the introduced tricks . But what if this is not possible? 
We still have a few tricks up our sleeves!"),Dj.forEach(t),Mv=f(e),Se=r(e,"H2",{class:!0});var sb=i(Se);Wt=r(sb,"A",{id:!0,class:!0,href:!0});var Gj=i(Wt);Af=r(Gj,"SPAN",{});var Ij=i(Af);d(br.$$.fragment,Ij),Ij.forEach(t),Gj.forEach(t),x3=f(sb),Tf=r(sb,"SPAN",{});var Uj=i(Tf);D3=n(Uj,"What if my model still does not fit?"),Uj.forEach(t),sb.forEach(t),Ov=f(e),Rt=r(e,"P",{});var rb=i(Rt);G3=n(rb,"If the model does not fit on a single GPU with all the mentioned tricks there are still more methods we can apply although life starts to get a bit more complicated. This usually involves some form of pipeline or tensor parallelism where the model itself is distributed across several GPUs. One can also make use of DeepSpeed which implements some of these parallelism strategies along with some more optimization to reduce the memory footprint such as partitioning the optimizer states. You can read more about this in the "),yl=r(rb,"A",{href:!0});var Sj=i(yl);I3=n(Sj,"\u201CModel Parallelism\u201D section"),Sj.forEach(t),U3=n(rb,"."),rb.forEach(t),Bv=f(e),El=r(e,"P",{});var zj=i(El);S3=n(zj,"This concludes the practical part of this guide for scaling the training of large models. The following section goes into more details on some of the aspects discussed above."),zj.forEach(t),qv=f(e),ze=r(e,"H2",{class:!0});var ib=i(ze);Xt=r(ib,"A",{id:!0,class:!0,href:!0});var Nj=i(Xt);jf=r(Nj,"SPAN",{});var Cj=i(jf);d(yr.$$.fragment,Cj),Cj.forEach(t),Nj.forEach(t),z3=f(ib),xf=r(ib,"SPAN",{});var Lj=i(xf);N3=n(Lj,"Further discussions"),Lj.forEach(t),ib.forEach(t),Vv=f(e),Pl=r(e,"P",{});var Mj=i(Pl);C3=n(Mj,"This section gives brief ideas on how to make training faster and support bigger models. Later sections will expand, demonstrate and elucidate each of these."),Mj.forEach(t),Hv=f(e),Ne=r(e,"H2",{class:!0});var ob=i(Ne);Yt=r(ob,"A",{id:!0,class:!0,href:!0});var Oj=i(Yt);Df=r(Oj,"SPAN",{});var Bj=i(Df);d(Er.$$.fragment,Bj),Bj.forEach(t),Oj.forEach(t),L3=f(ob),Gf=r(ob,"SPAN",{});var qj=i(Gf);M3=n(qj,"Faster Training"),qj.forEach(t),ob.forEach(t),Fv=f(e),$l=r(e,"P",{});var Vj=i($l);O3=n(Vj,"Hardware:"),Vj.forEach(t),Wv=f(e),kl=r(e,"UL",{});var Hj=i(kl);Al=r(Hj,"LI",{});var iA=i(Al);B3=n(iA,"fast connectivity between GPUs"),Pr=r(iA,"UL",{});var lb=i(Pr);If=r(lb,"LI",{});var Fj=i(If);q3=n(Fj,"intra-node: NVLink"),Fj.forEach(t),V3=f(lb),Uf=r(lb,"LI",{});var Wj=i(Uf);H3=n(Wj,"inter-node: Infiniband / Intel OPA"),Wj.forEach(t),lb.forEach(t),iA.forEach(t),Hj.forEach(t),Rv=f(e),Tl=r(e,"P",{});var Rj=i(Tl);F3=n(Rj,"Software:"),Rj.forEach(t),Xv=f(e),Qt=r(e,"UL",{});var nb=i(Qt);Sf=r(nb,"LI",{});var Xj=i(Sf);W3=n(Xj,"Data Parallel / Distributed Data Parallel"),Xj.forEach(t),R3=f(nb),zf=r(nb,"LI",{});var Yj=i(zf);X3=n(Yj,"fp16 (autocast caching)"),Yj.forEach(t),nb.forEach(t),Yv=f(e),Ce=r(e,"H2",{class:!0});var pb=i(Ce);Jt=r(pb,"A",{id:!0,class:!0,href:!0});var Qj=i(Jt);Nf=r(Qj,"SPAN",{});var Jj=i(Nf);d($r.$$.fragment,Jj),Jj.forEach(t),Qj.forEach(t),Y3=f(pb),Cf=r(pb,"SPAN",{});var Zj=i(Cf);Q3=n(Zj,"Bigger Models"),Zj.forEach(t),pb.forEach(t),Qv=f(e),jl=r(e,"P",{});var Kj=i(jl);J3=n(Kj,"Hardware:"),Kj.forEach(t),Jv=f(e),oe=r(e,"UL",{});var nh=i(oe);Lf=r(nh,"LI",{});var ex=i(Lf);Z3=n(ex,"bigger GPUs"),ex.forEach(t),K3=f(nh),Mf=r(nh,"LI",{});var tx=i(Mf);e6=n(tx,"more GPUs"),tx.forEach(t),t6=f(nh),kr=r(nh,"LI",{});var hb=i(kr);a6=n(hb,"more CPU and NVMe (offloaded to by "),xl=r(hb,"A",{href:!0});var ax=i(xl);s6=n(ax,"DeepSpeed-Infinity"),ax.forEach(t),r6=n(hb,")"),hb.forEach(t),nh.forEach(t),Zv=f(e),Dl=r(e,"P",{});var 
sx=i(Dl);i6=n(sx,"Software:"),sx.forEach(t),Kv=f(e),P=r(e,"UL",{});var S=i(P);Of=r(S,"LI",{});var rx=i(Of);o6=n(rx,"Model Scalability (ZeRO and 3D Parallelism)"),rx.forEach(t),l6=f(S),Bf=r(S,"LI",{});var ix=i(Bf);n6=n(ix,"Low-memory Optimizers"),ix.forEach(t),p6=f(S),qf=r(S,"LI",{});var ox=i(qf);h6=n(ox,"fp16/bf16 (smaller data/faster throughput)"),ox.forEach(t),f6=f(S),Vf=r(S,"LI",{});var lx=i(Vf);c6=n(lx,"tf32 (faster throughput)"),lx.forEach(t),m6=f(S),Hf=r(S,"LI",{});var nx=i(Hf);d6=n(nx,"Gradient accumulation"),nx.forEach(t),u6=f(S),Ff=r(S,"LI",{});var px=i(Ff);v6=n(px,"Gradient checkpointing"),px.forEach(t),w6=f(S),Wf=r(S,"LI",{});var hx=i(Wf);g6=n(hx,"Sparsity"),hx.forEach(t),S.forEach(t),ew=f(e),Le=r(e,"H2",{class:!0});var fb=i(Le);Zt=r(fb,"A",{id:!0,class:!0,href:!0});var fx=i(Zt);Rf=r(fx,"SPAN",{});var cx=i(Rf);d(Ar.$$.fragment,cx),cx.forEach(t),fx.forEach(t),_6=f(fb),Xf=r(fb,"SPAN",{});var mx=i(Xf);b6=n(mx,"Hardware"),mx.forEach(t),fb.forEach(t),tw=f(e),Me=r(e,"H3",{class:!0});var cb=i(Me);Kt=r(cb,"A",{id:!0,class:!0,href:!0});var dx=i(Kt);Yf=r(dx,"SPAN",{});var ux=i(Yf);d(Tr.$$.fragment,ux),ux.forEach(t),dx.forEach(t),y6=f(cb),Qf=r(cb,"SPAN",{});var vx=i(Qf);E6=n(vx,"Power and Cooling"),vx.forEach(t),cb.forEach(t),aw=f(e),Gl=r(e,"P",{});var wx=i(Gl);P6=n(wx,"If you bought an expensive high end GPU make sure you give it the correct power and sufficient cooling."),wx.forEach(t),sw=f(e),jr=r(e,"P",{});var oA=i(jr);Jf=r(oA,"STRONG",{});var gx=i(Jf);$6=n(gx,"Power"),gx.forEach(t),k6=n(oA,":"),oA.forEach(t),rw=f(e),Il=r(e,"P",{});var _x=i(Il);A6=n(_x,"Some high end consumer GPU cards have 2 and sometimes 3 PCI-E 8-Pin power sockets. Make sure you have as many independent 12V PCI-E 8-Pin cables plugged into the card as there are sockets. Do not use the 2 splits at one end of the same cable (also known as pigtail cable). That is if you have 2 sockets on the GPU, you want 2 PCI-E 8-Pin cables going from your PSU to the card and not one that has 2 PCI-E 8-Pin connectors at the end! You won\u2019t get the full performance out of your card otherwise."),_x.forEach(t),iw=f(e),Ul=r(e,"P",{});var bx=i(Ul);T6=n(bx,"Each PCI-E 8-Pin power cable needs to be plugged into a 12V rail on the PSU side and can supply up to 150W of power."),bx.forEach(t),ow=f(e),Sl=r(e,"P",{});var yx=i(Sl);j6=n(yx,"Some other cards may use a PCI-E 12-Pin connectors, and these can deliver up to 500-600W of power."),yx.forEach(t),lw=f(e),zl=r(e,"P",{});var Ex=i(zl);x6=n(Ex,"Low end cards may use 6-Pin connectors, which supply up to 75W of power."),Ex.forEach(t),nw=f(e),Nl=r(e,"P",{});var Px=i(Nl);D6=n(Px,"Additionally you want the high-end PSU that has stable voltage. Some lower quality ones may not give the card the stable voltage it needs to function at its peak."),Px.forEach(t),pw=f(e),Cl=r(e,"P",{});var $x=i(Cl);G6=n($x,"And of course the PSU needs to have enough unused Watts to power the card."),$x.forEach(t),hw=f(e),xr=r(e,"P",{});var lA=i(xr);Zf=r(lA,"STRONG",{});var kx=i(Zf);I6=n(kx,"Cooling"),kx.forEach(t),U6=n(lA,":"),lA.forEach(t),fw=f(e),Ll=r(e,"P",{});var Ax=i(Ll);S6=n(Ax,"When a GPU gets overheated it would start throttling down and will not deliver full performance. And it will shutdown if it gets too hot."),Ax.forEach(t),cw=f(e),Ml=r(e,"P",{});var Tx=i(Ml);z6=n(Tx,"It\u2019s hard to tell the exact best temperature to strive for when a GPU is heavily loaded, but probably anything under +80C is good, but lower is better - perhaps 70-75C is an excellent range to be in. 
The throttling down is likely to start at around 84-90C. But other than throttling performance a prolonged very higher temperature is likely to reduce the lifespan of a GPU."),Tx.forEach(t),mw=f(e),Oe=r(e,"H3",{class:!0});var mb=i(Oe);ea=r(mb,"A",{id:!0,class:!0,href:!0});var jx=i(ea);Kf=r(jx,"SPAN",{});var xx=i(Kf);d(Dr.$$.fragment,xx),xx.forEach(t),jx.forEach(t),N6=f(mb),ec=r(mb,"SPAN",{});var Dx=i(ec);C6=n(Dx,"Multi-GPU Connectivity"),Dx.forEach(t),mb.forEach(t),dw=f(e),Ol=r(e,"P",{});var Gx=i(Ol);L6=n(Gx,"If you use multiple GPUs the way cards are inter-connected can have a huge impact on the total training time."),Gx.forEach(t),uw=f(e),Bl=r(e,"P",{});var Ix=i(Bl);M6=n(Ix,"If the GPUs are on the same physical node, you can run:"),Ix.forEach(t),vw=f(e),d(Gr.$$.fragment,e),ww=f(e),ql=r(e,"P",{});var Ux=i(ql);O6=n(Ux,"and it will tell you how the GPUs are inter-connected."),Ux.forEach(t),gw=f(e),Vl=r(e,"P",{});var Sx=i(Vl);B6=n(Sx,"On a machine with dual-GPU and which are connected with NVLink, you will most likely see something like:"),Sx.forEach(t),_w=f(e),d(Ir.$$.fragment,e),bw=f(e),Hl=r(e,"P",{});var zx=i(Hl);q6=n(zx,"on a different machine w/o NVLink we may see:"),zx.forEach(t),yw=f(e),d(Ur.$$.fragment,e),Ew=f(e),Fl=r(e,"P",{});var Nx=i(Fl);V6=n(Nx,"The report includes this legend:"),Nx.forEach(t),Pw=f(e),d(Sr.$$.fragment,e),$w=f(e),le=r(e,"P",{});var ph=i(le);H6=n(ph,"So the first report "),tc=r(ph,"CODE",{});var Cx=i(tc);F6=n(Cx,"NV2"),Cx.forEach(t),W6=n(ph," tells us the GPUs are interconnected with 2 NVLinks, and the second report "),ac=r(ph,"CODE",{});var Lx=i(ac);R6=n(Lx,"PHB"),Lx.forEach(t),X6=n(ph," we have a typical consumer-level PCIe+Bridge setup."),ph.forEach(t),kw=f(e),Wl=r(e,"P",{});var Mx=i(Wl);Y6=n(Mx,"Check what type of connectivity you have on your setup. Some of these will make the communication between cards faster (e.g. NVLink), others slower (e.g. PHB)."),Mx.forEach(t),Aw=f(e),Rl=r(e,"P",{});var Ox=i(Rl);Q6=n(Ox,"Depending on the type of scalability solution used, the connectivity speed could have a major or a minor impact. If the GPUs need to sync rarely, as in DDP, the impact of a slower connection will be less significant. If the GPUs need to send messages to each other often, as in ZeRO-DP, then faster connectivity becomes super important to achieve faster training."),Ox.forEach(t),Tw=f(e),Be=r(e,"H3",{class:!0});var db=i(Be);ta=r(db,"A",{id:!0,class:!0,href:!0});var Bx=i(ta);sc=r(Bx,"SPAN",{});var qx=i(sc);d(zr.$$.fragment,qx),qx.forEach(t),Bx.forEach(t),J6=f(db),rc=r(db,"SPAN",{});var Vx=i(rc);Z6=n(Vx,"NVlink"),Vx.forEach(t),db.forEach(t),jw=f(e),Nr=r(e,"P",{});var nA=i(Nr);Cr=r(nA,"A",{href:!0,rel:!0});var Hx=i(Cr);K6=n(Hx,"NVLink"),Hx.forEach(t),eE=n(nA," is a wire-based serial multi-lane near-range communications link developed by Nvidia."),nA.forEach(t),xw=f(e),aa=r(e,"P",{});var ub=i(aa);tE=n(ub,"Each new generation provides a faster bandwidth, e.g. here is a quote from "),Lr=r(ub,"A",{href:!0,rel:!0});var Fx=i(Lr);aE=n(Fx,"Nvidia Ampere GA102 GPU Architecture"),Fx.forEach(t),sE=n(ub,":"),ub.forEach(t),Dw=f(e),Xl=r(e,"BLOCKQUOTE",{});var Wx=i(Xl);ic=r(Wx,"P",{});var Rx=i(ic);rE=n(Rx,`Third-Generation NVLink\xAE GA102 GPUs utilize NVIDIA\u2019s third-generation NVLink interface, which includes four x4 links, with each link providing 14.0625 GB/sec bandwidth in each direction between two GPUs. Four links provide 56.25 GB/sec bandwidth in each direction, and 112.5 GB/sec total bandwidth between two GPUs. 
Two RTX 3090 GPUs can be connected together for SLI using NVLink. (Note that 3-Way and 4-Way SLI configurations are not supported.)`),Rx.forEach(t),Wx.forEach(t),Gw=f(e),M=r(e,"P",{});var ps=i(M);iE=n(ps,"So the higher "),oc=r(ps,"CODE",{});var Xx=i(oc);oE=n(Xx,"X"),Xx.forEach(t),lE=n(ps," you get in the report of "),lc=r(ps,"CODE",{});var Yx=i(lc);nE=n(Yx,"NVX"),Yx.forEach(t),pE=n(ps," in the output of "),nc=r(ps,"CODE",{});var Qx=i(nc);hE=n(Qx,"nvidia-smi topo -m"),Qx.forEach(t),fE=n(ps," the better. The generation will depend on your GPU architecture."),ps.forEach(t),Iw=f(e),Yl=r(e,"P",{});var Jx=i(Yl);cE=n(Jx,"Let\u2019s compare the execution of a gpt2 language model training over a small sample of wikitext."),Jx.forEach(t),Uw=f(e),Ql=r(e,"P",{});var Zx=i(Ql);mE=n(Zx,"The results are:"),Zx.forEach(t),Sw=f(e),sa=r(e,"TABLE",{});var vb=i(sa);pc=r(vb,"THEAD",{});var Kx=i(pc);Mr=r(Kx,"TR",{});var wb=i(Mr);hc=r(wb,"TH",{});var eD=i(hc);dE=n(eD,"NVlink"),eD.forEach(t),uE=f(wb),Jl=r(wb,"TH",{align:!0});var tD=i(Jl);vE=n(tD,"Time"),tD.forEach(t),wb.forEach(t),Kx.forEach(t),wE=f(vb),Or=r(vb,"TBODY",{});var gb=i(Or);Br=r(gb,"TR",{});var _b=i(Br);fc=r(_b,"TD",{});var aD=i(fc);gE=n(aD,"Y"),aD.forEach(t),_E=f(_b),Zl=r(_b,"TD",{align:!0});var sD=i(Zl);bE=n(sD,"101s"),sD.forEach(t),_b.forEach(t),yE=f(gb),qr=r(gb,"TR",{});var bb=i(qr);cc=r(bb,"TD",{});var rD=i(cc);EE=n(rD,"N"),rD.forEach(t),PE=f(bb),Kl=r(bb,"TD",{align:!0});var iD=i(Kl);$E=n(iD,"131s"),iD.forEach(t),bb.forEach(t),gb.forEach(t),vb.forEach(t),zw=f(e),en=r(e,"P",{});var oD=i(en);kE=n(oD,"You can see that NVLink completes the training ~23% faster."),oD.forEach(t),Nw=f(e),ra=r(e,"P",{});var yb=i(ra);AE=n(yb,"In the second benchmark we use "),mc=r(yb,"CODE",{});var lD=i(mc);TE=n(lD,"NCCL_P2P_DISABLE=1"),lD.forEach(t),jE=n(yb," to tell the GPUs not to use NVLink."),yb.forEach(t),Cw=f(e),tn=r(e,"P",{});var nD=i(tn);xE=n(nD,"Here is the full benchmark code and outputs:"),nD.forEach(t),Lw=f(e),d(Vr.$$.fragment,e),Mw=f(e),x=r(e,"P",{});var K=i(x);DE=n(K,"Hardware: 2x TITAN RTX 24GB each + NVlink with 2 NVLinks ("),dc=r(K,"CODE",{});var pD=i(dc);GE=n(pD,"NV2"),pD.forEach(t),IE=n(K," in "),uc=r(K,"CODE",{});var hD=i(uc);UE=n(hD,"nvidia-smi topo -m"),hD.forEach(t),SE=n(K,`) Software: `),vc=r(K,"CODE",{});var fD=i(vc);zE=n(fD,"pytorch-1.8-to-be"),fD.forEach(t),NE=n(K," + "),wc=r(K,"CODE",{});var cD=i(wc);CE=n(cD,"cuda-11.0"),cD.forEach(t),LE=n(K," / "),gc=r(K,"CODE",{});var mD=i(gc);ME=n(mD,"transformers==4.3.0.dev0"),mD.forEach(t),K.forEach(t),Ow=f(e),qe=r(e,"H2",{class:!0});var Eb=i(qe);ia=r(Eb,"A",{id:!0,class:!0,href:!0});var dD=i(ia);_c=r(dD,"SPAN",{});var uD=i(_c);d(Hr.$$.fragment,uD),uD.forEach(t),dD.forEach(t),OE=f(Eb),bc=r(Eb,"SPAN",{});var vD=i(bc);BE=n(vD,"Software"),vD.forEach(t),Eb.forEach(t),Bw=f(e),Ve=r(e,"H3",{class:!0});var Pb=i(Ve);oa=r(Pb,"A",{id:!0,class:!0,href:!0});var wD=i(oa);yc=r(wD,"SPAN",{});var gD=i(yc);d(Fr.$$.fragment,gD),gD.forEach(t),wD.forEach(t),qE=f(Pb),Ec=r(Pb,"SPAN",{});var _D=i(Ec);VE=n(_D,"Model Scalability"),_D.forEach(t),Pb.forEach(t),qw=f(e),an=r(e,"P",{});var bD=i(an);HE=n(bD,"When you can\u2019t fit a model into the available GPU memory, you need to start using a solution that allows you to scale a large model to use multiple GPUs in parallel."),bD.forEach(t),Vw=f(e),Wr=r(e,"P",{});var pA=i(Wr);FE=n(pA,"For indepth details on ZeRO and various other model parallelism protocols please see: "),sn=r(pA,"A",{href:!0});var yD=i(sn);WE=n(yD,"Model 
Parallelism"),yD.forEach(t),pA.forEach(t),Hw=f(e),He=r(e,"H3",{class:!0});var $b=i(He);la=r($b,"A",{id:!0,class:!0,href:!0});var ED=i(la);Pc=r(ED,"SPAN",{});var PD=i(Pc);d(Rr.$$.fragment,PD),PD.forEach(t),ED.forEach(t),RE=f($b),$c=r($b,"SPAN",{});var $D=i($c);XE=n($D,"Anatomy of Model's Operations"),$D.forEach(t),$b.forEach(t),Fw=f(e),rn=r(e,"P",{});var kD=i(rn);YE=n(kD,"Transformers architecture includes 3 main groups of operations grouped below by compute-intensity."),kD.forEach(t),Ww=f(e),ne=r(e,"OL",{});var hh=i(ne);Xr=r(hh,"LI",{});var kb=i(Xr);kc=r(kb,"P",{});var AD=i(kc);Ac=r(AD,"STRONG",{});var TD=i(Ac);QE=n(TD,"Tensor Contractions"),TD.forEach(t),AD.forEach(t),JE=f(kb),Yr=r(kb,"P",{});var Ab=i(Yr);ZE=n(Ab,"Linear layers and components of Multi-Head Attention all do batched "),Tc=r(Ab,"STRONG",{});var jD=i(Tc);KE=n(jD,"matrix-matrix multiplications"),jD.forEach(t),eP=n(Ab,". These operations are the most compute-intensive part of training a transformer."),Ab.forEach(t),kb.forEach(t),tP=f(hh),Qr=r(hh,"LI",{});var Tb=i(Qr);jc=r(Tb,"P",{});var xD=i(jc);xc=r(xD,"STRONG",{});var DD=i(xc);aP=n(DD,"Statistical Normalizations"),DD.forEach(t),xD.forEach(t),sP=f(Tb),Jr=r(Tb,"P",{});var jb=i(Jr);rP=n(jb,"Softmax and layer normalization are less compute-intensive than tensor contractions, and involve one or more "),Dc=r(jb,"STRONG",{});var GD=i(Dc);iP=n(GD,"reduction operations"),GD.forEach(t),oP=n(jb,", the result of which is then applied via a map."),jb.forEach(t),Tb.forEach(t),lP=f(hh),Zr=r(hh,"LI",{});var xb=i(Zr);Gc=r(xb,"P",{});var ID=i(Gc);Ic=r(ID,"STRONG",{});var UD=i(Ic);nP=n(UD,"Element-wise Operators"),UD.forEach(t),ID.forEach(t),pP=f(xb),Kr=r(xb,"P",{});var Db=i(Kr);hP=n(Db,"These are the remaining operators: "),Uc=r(Db,"STRONG",{});var SD=i(Uc);fP=n(SD,"biases, dropout, activations, and residual connections"),SD.forEach(t),cP=n(Db,". 
These are the least compute-intensive operations."),Db.forEach(t),xb.forEach(t),hh.forEach(t),Rw=f(e),on=r(e,"P",{});var zD=i(on);mP=n(zD,"This knowledge can be helpful to know when analyzing performance bottlenecks."),zD.forEach(t),Xw=f(e),ei=r(e,"P",{});var hA=i(ei);dP=n(hA,"This summary is derived from "),ti=r(hA,"A",{href:!0,rel:!0});var ND=i(ti);uP=n(ND,"Data Movement Is All You Need: A Case Study on Optimizing Transformers 2020"),ND.forEach(t),hA.forEach(t),Yw=f(e),Fe=r(e,"H3",{class:!0});var Gb=i(Fe);na=r(Gb,"A",{id:!0,class:!0,href:!0});var CD=i(na);Sc=r(CD,"SPAN",{});var LD=i(Sc);d(ai.$$.fragment,LD),LD.forEach(t),CD.forEach(t),vP=f(Gb),zc=r(Gb,"SPAN",{});var MD=i(zc);wP=n(MD,"Anatomy of Model's Memory"),MD.forEach(t),Gb.forEach(t),Qw=f(e),ln=r(e,"P",{});var OD=i(ln);gP=n(OD,"The components on GPU memory are the following:"),OD.forEach(t),Jw=f(e),j=r(e,"OL",{});var W=i(j);Nc=r(W,"LI",{});var BD=i(Nc);_P=n(BD,"model weights"),BD.forEach(t),bP=f(W),Cc=r(W,"LI",{});var qD=i(Cc);yP=n(qD,"optimizer states"),qD.forEach(t),EP=f(W),Lc=r(W,"LI",{});var VD=i(Lc);PP=n(VD,"gradients"),VD.forEach(t),$P=f(W),Mc=r(W,"LI",{});var HD=i(Mc);kP=n(HD,"forward activations saved for gradient computation"),HD.forEach(t),AP=f(W),Oc=r(W,"LI",{});var FD=i(Oc);TP=n(FD,"temporary buffers"),FD.forEach(t),jP=f(W),Bc=r(W,"LI",{});var WD=i(Bc);xP=n(WD,"functionality-specific memory"),WD.forEach(t),W.forEach(t),Zw=f(e),nn=r(e,"P",{});var RD=i(nn);DP=n(RD,"A typical model trained in mixed precision with AdamW requires 18 bytes per model parameter plus activation memory."),RD.forEach(t),Kw=f(e),pn=r(e,"P",{});var XD=i(pn);GP=n(XD,"For inference there are no optimizer states and gradients, so we can subtract those. And thus we end up with 6 bytes per model parameter for mixed precision inference, plus activation memory."),XD.forEach(t),eg=f(e),hn=r(e,"P",{});var YD=i(hn);IP=n(YD,"Let\u2019s look at the details."),YD.forEach(t),tg=f(e),We=r(e,"H4",{class:!0});var Ib=i(We);pa=r(Ib,"A",{id:!0,class:!0,href:!0});var QD=i(pa);qc=r(QD,"SPAN",{});var JD=i(qc);d(si.$$.fragment,JD),JD.forEach(t),QD.forEach(t),UP=f(Ib),Vc=r(Ib,"SPAN",{});var ZD=i(Vc);SP=n(ZD,"Model Weights"),ZD.forEach(t),Ib.forEach(t),ag=f(e),ha=r(e,"UL",{});var Ub=i(ha);Hc=r(Ub,"LI",{});var KD=i(Hc);zP=n(KD,"4 bytes * number of parameters for fp32 training"),KD.forEach(t),NP=f(Ub),Fc=r(Ub,"LI",{});var eG=i(Fc);CP=n(eG,"6 bytes * number of parameters for mixed precision training"),eG.forEach(t),Ub.forEach(t),sg=f(e),Re=r(e,"H4",{class:!0});var Sb=i(Re);fa=r(Sb,"A",{id:!0,class:!0,href:!0});var tG=i(fa);Wc=r(tG,"SPAN",{});var aG=i(Wc);d(ri.$$.fragment,aG),aG.forEach(t),tG.forEach(t),LP=f(Sb),Rc=r(Sb,"SPAN",{});var sG=i(Rc);MP=n(sG,"Optimizer States"),sG.forEach(t),Sb.forEach(t),rg=f(e),pe=r(e,"UL",{});var fh=i(pe);Xc=r(fh,"LI",{});var rG=i(Xc);OP=n(rG,"8 bytes * number of parameters for normal AdamW (maintains 2 states)"),rG.forEach(t),BP=f(fh),fn=r(fh,"LI",{});var fA=i(fn);qP=n(fA,"2 bytes * number of parameters for 8-bit AdamW optimizers like "),ii=r(fA,"A",{href:!0,rel:!0});var iG=i(ii);VP=n(iG,"bitsandbytes"),iG.forEach(t),fA.forEach(t),HP=f(fh),Yc=r(fh,"LI",{});var oG=i(Yc);FP=n(oG,"4 bytes * number of parameters for optimizers like SGD (maintains only 1 state)"),oG.forEach(t),fh.forEach(t),ig=f(e),Xe=r(e,"H4",{class:!0});var zb=i(Xe);ca=r(zb,"A",{id:!0,class:!0,href:!0});var lG=i(ca);Qc=r(lG,"SPAN",{});var nG=i(Qc);d(oi.$$.fragment,nG),nG.forEach(t),lG.forEach(t),WP=f(zb),Jc=r(zb,"SPAN",{});var 
pG=i(Jc);RP=n(pG,"Gradients"),pG.forEach(t),zb.forEach(t),og=f(e),cn=r(e,"UL",{});var hG=i(cn);Zc=r(hG,"LI",{});var fG=i(Zc);XP=n(fG,"4 bytes * number of parameters for either fp32 or mixed precision training"),fG.forEach(t),hG.forEach(t),lg=f(e),Ye=r(e,"H4",{class:!0});var Nb=i(Ye);ma=r(Nb,"A",{id:!0,class:!0,href:!0});var cG=i(ma);Kc=r(cG,"SPAN",{});var mG=i(Kc);d(li.$$.fragment,mG),mG.forEach(t),cG.forEach(t),YP=f(Nb),em=r(Nb,"SPAN",{});var dG=i(em);QP=n(dG,"Forward Activations"),dG.forEach(t),Nb.forEach(t),ng=f(e),mn=r(e,"UL",{});var uG=i(mn);tm=r(uG,"LI",{});var vG=i(tm);JP=n(vG,"size depends on many factors, the key ones being sequence length, hidden size and batch size."),vG.forEach(t),uG.forEach(t),pg=f(e),dn=r(e,"P",{});var wG=i(dn);ZP=n(wG,"There are the input and output that are being passed and returned by the forward and the backward functions and the forward activations saved for gradient computation."),wG.forEach(t),hg=f(e),Qe=r(e,"H4",{class:!0});var Cb=i(Qe);da=r(Cb,"A",{id:!0,class:!0,href:!0});var gG=i(da);am=r(gG,"SPAN",{});var _G=i(am);d(ni.$$.fragment,_G),_G.forEach(t),gG.forEach(t),KP=f(Cb),sm=r(Cb,"SPAN",{});var bG=i(sm);e5=n(bG,"Temporary Memory"),bG.forEach(t),Cb.forEach(t),fg=f(e),un=r(e,"P",{});var yG=i(un);t5=n(yG,"Additionally there are all kinds of temporary variables which get released once the calculation is done, but in the moment these could require additional memory and could push to OOM. Therefore when coding it\u2019s crucial to think strategically about such temporary variables and sometimes to explicitly free those as soon as they are no longer needed."),yG.forEach(t),cg=f(e),Je=r(e,"H4",{class:!0});var Lb=i(Je);ua=r(Lb,"A",{id:!0,class:!0,href:!0});var EG=i(ua);rm=r(EG,"SPAN",{});var PG=i(rm);d(pi.$$.fragment,PG),PG.forEach(t),EG.forEach(t),a5=f(Lb),im=r(Lb,"SPAN",{});var $G=i(im);s5=n($G,"Functionality-specific memory"),$G.forEach(t),Lb.forEach(t),mg=f(e),vn=r(e,"P",{});var kG=i(vn);r5=n(kG,"Then your software could have special memory needs. For example, when generating text using beam search, the software needs to maintain multiple copies of inputs and outputs."),kG.forEach(t),dg=f(e),Ze=r(e,"H3",{class:!0});var Mb=i(Ze);va=r(Mb,"A",{id:!0,class:!0,href:!0});var AG=i(va);om=r(AG,"SPAN",{});var TG=i(om);d(hi.$$.fragment,TG),TG.forEach(t),AG.forEach(t),i5=f(Mb),wa=r(Mb,"SPAN",{});var Yd=i(wa);lm=r(Yd,"CODE",{});var jG=i(lm);o5=n(jG,"forward"),jG.forEach(t),l5=n(Yd," vs "),nm=r(Yd,"CODE",{});var xG=i(nm);n5=n(xG,"backward"),xG.forEach(t),p5=n(Yd," Execution Speed"),Yd.forEach(t),Mb.forEach(t),ug=f(e),wn=r(e,"P",{});var DG=i(wn);h5=n(DG,"For convolutions and linear layers there are 2x flops in the backward compared to the forward, which generally translates into ~2x slower (sometimes more, because sizes in the backward tend to be more awkward). Activations are usually bandwidth-limited, and it\u2019s typical for an activation to have to read more data in the backward than in the forward (e.g. 
activation forward reads once, writes once, activation backward reads twice, gradOutput and output of the forward, and writes once, gradInput)."),DG.forEach(t),vg=f(e),Ke=r(e,"H3",{class:!0});var Ob=i(Ke);ga=r(Ob,"A",{id:!0,class:!0,href:!0});var GG=i(ga);pm=r(GG,"SPAN",{});var IG=i(pm);d(fi.$$.fragment,IG),IG.forEach(t),GG.forEach(t),f5=f(Ob),hm=r(Ob,"SPAN",{});var UG=i(hm);c5=n(UG,"Floating Data Types"),UG.forEach(t),Ob.forEach(t),wg=f(e),gn=r(e,"P",{});var SG=i(gn);m5=n(SG,"Here are the commonly used floating point data types choice of which impacts both memory usage and throughput:"),SG.forEach(t),gg=f(e),O=r(e,"UL",{});var hs=i(O);ci=r(hs,"LI",{});var Bb=i(ci);d5=n(Bb,"fp32 ("),fm=r(Bb,"CODE",{});var zG=i(fm);u5=n(zG,"float32"),zG.forEach(t),v5=n(Bb,")"),Bb.forEach(t),w5=f(hs),mi=r(hs,"LI",{});var qb=i(mi);g5=n(qb,"fp16 ("),cm=r(qb,"CODE",{});var NG=i(cm);_5=n(NG,"float16"),NG.forEach(t),b5=n(qb,")"),qb.forEach(t),y5=f(hs),di=r(hs,"LI",{});var Vb=i(di);E5=n(Vb,"bf16 ("),mm=r(Vb,"CODE",{});var CG=i(mm);P5=n(CG,"bfloat16"),CG.forEach(t),$5=n(Vb,")"),Vb.forEach(t),k5=f(hs),dm=r(hs,"LI",{});var LG=i(dm);A5=n(LG,"tf32 (CUDA internal data type)"),LG.forEach(t),hs.forEach(t),_g=f(e),_n=r(e,"P",{});var MG=i(_n);T5=n(MG,"Here is a diagram that shows how these data types correlate to each other."),MG.forEach(t),bg=f(e),bn=r(e,"P",{});var OG=i(bn);yn=r(OG,"IMG",{src:!0,alt:!0}),OG.forEach(t),yg=f(e),_a=r(e,"P",{});var Hb=i(_a);j5=n(Hb,"(source: "),ui=r(Hb,"A",{href:!0,rel:!0});var BG=i(ui);x5=n(BG,"NVIDIA Blog"),BG.forEach(t),D5=n(Hb,")"),Hb.forEach(t),Eg=f(e),En=r(e,"P",{});var qG=i(En);G5=n(qG,"While fp16 and fp32 have been around for quite some time, bf16 and tf32 are only available on the Ampere architecture GPUS. TPUs support bf16 as well."),qG.forEach(t),Pg=f(e),et=r(e,"H4",{class:!0});var Fb=i(et);ba=r(Fb,"A",{id:!0,class:!0,href:!0});var VG=i(ba);um=r(VG,"SPAN",{});var HG=i(um);d(vi.$$.fragment,HG),HG.forEach(t),VG.forEach(t),I5=f(Fb),vm=r(Fb,"SPAN",{});var FG=i(vm);U5=n(FG,"fp16"),FG.forEach(t),Fb.forEach(t),$g=f(e),Pn=r(e,"P",{});var WG=i(Pn);S5=n(WG,"AMP = Automatic Mixed Precision"),WG.forEach(t),kg=f(e),$n=r(e,"P",{});var RG=i($n);z5=n(RG,"If we look at what\u2019s happening with FP16 training (mixed precision) we have:"),RG.forEach(t),Ag=f(e),B=r(e,"UL",{});var fs=i(B);wm=r(fs,"LI",{});var XG=i(wm);N5=n(XG,"the model has two copies in memory: one in half-precision for the forward/backward computations and one in full precision - no memory saved here"),XG.forEach(t),C5=f(fs),gm=r(fs,"LI",{});var YG=i(gm);L5=n(YG,"the forward activations saved for gradient computation are in half-precision - memory is saved here"),YG.forEach(t),M5=f(fs),wi=r(fs,"LI",{});var Wb=i(wi);O5=n(Wb,"the gradients are computed in half-precision "),_m=r(Wb,"EM",{});var QG=i(_m);B5=n(QG,"but"),QG.forEach(t),q5=n(Wb," converted to full-precision for the update, no saving there"),Wb.forEach(t),V5=f(fs),bm=r(fs,"LI",{});var JG=i(bm);H5=n(JG,"the optimizer states are in full precision as all the updates are done in full-precision"),JG.forEach(t),fs.forEach(t),Tg=f(e),kn=r(e,"P",{});var ZG=i(kn);F5=n(ZG,"So the savings only happen for the forward activations saved for the backward computation, and there is a slight overhead because the model weights are stored both in half- and full-precision."),ZG.forEach(t),jg=f(e),ya=r(e,"P",{});var Rb=i(ya);W5=n(Rb,"In \u{1F917} Transformers fp16 mixed precision is enabled by passing "),ym=r(Rb,"CODE",{});var KG=i(ym);R5=n(KG,"--fp16"),KG.forEach(t),X5=n(Rb," to the \u{1F917} 
Trainer."),Rb.forEach(t),xg=f(e),An=r(e,"P",{});var eI=i(An);Y5=n(eI,"Now let\u2019s look at a simple text-classification fine-tuning on 2 GPUs (I\u2019m giving the command for reference):"),eI.forEach(t),Dg=f(e),d(gi.$$.fragment,e),Gg=f(e),Ea=r(e,"P",{});var Xb=i(Ea);Q5=n(Xb,"Since the only savings we get are in the model activations saved for the backward passed, it\u2019s logical that the bigger those activations are, the bigger the saving will be. If we try different batch sizes, I indeed get (this is with "),Em=r(Xb,"CODE",{});var tI=i(Em);J5=n(tI,"nvidia-smi"),tI.forEach(t),Z5=n(Xb," so not completely reliable as said above but it will be a fair comparison):"),Xb.forEach(t),Ig=f(e),Pa=r(e,"TABLE",{});var Yb=i(Pa);Pm=r(Yb,"THEAD",{});var aI=i(Pm);R=r(aI,"TR",{});var cs=i(R);Tn=r(cs,"TH",{align:!0});var sI=i(Tn);K5=n(sI,"batch size"),sI.forEach(t),e4=f(cs),jn=r(cs,"TH",{align:!0});var rI=i(jn);t4=n(rI,"w/o \u2014fp16"),rI.forEach(t),a4=f(cs),xn=r(cs,"TH",{align:!0});var iI=i(xn);s4=n(iI,"w/ \u2014fp16"),iI.forEach(t),r4=f(cs),Dn=r(cs,"TH",{align:!0});var oI=i(Dn);i4=n(oI,"savings"),oI.forEach(t),cs.forEach(t),aI.forEach(t),o4=f(Yb),X=r(Yb,"TBODY",{});var ms=i(X);Y=r(ms,"TR",{});var ds=i(Y);Gn=r(ds,"TD",{align:!0});var lI=i(Gn);l4=n(lI,"8"),lI.forEach(t),n4=f(ds),In=r(ds,"TD",{align:!0});var nI=i(In);p4=n(nI,"4247"),nI.forEach(t),h4=f(ds),Un=r(ds,"TD",{align:!0});var pI=i(Un);f4=n(pI,"4163"),pI.forEach(t),c4=f(ds),Sn=r(ds,"TD",{align:!0});var hI=i(Sn);m4=n(hI,"84"),hI.forEach(t),ds.forEach(t),d4=f(ms),Q=r(ms,"TR",{});var us=i(Q);zn=r(us,"TD",{align:!0});var fI=i(zn);u4=n(fI,"16"),fI.forEach(t),v4=f(us),Nn=r(us,"TD",{align:!0});var cI=i(Nn);w4=n(cI,"4971"),cI.forEach(t),g4=f(us),Cn=r(us,"TD",{align:!0});var mI=i(Cn);_4=n(mI,"4793"),mI.forEach(t),b4=f(us),Ln=r(us,"TD",{align:!0});var dI=i(Ln);y4=n(dI,"178"),dI.forEach(t),us.forEach(t),E4=f(ms),J=r(ms,"TR",{});var vs=i(J);Mn=r(vs,"TD",{align:!0});var uI=i(Mn);P4=n(uI,"32"),uI.forEach(t),$4=f(vs),On=r(vs,"TD",{align:!0});var vI=i(On);k4=n(vI,"6827"),vI.forEach(t),A4=f(vs),Bn=r(vs,"TD",{align:!0});var wI=i(Bn);T4=n(wI,"6207"),wI.forEach(t),j4=f(vs),qn=r(vs,"TD",{align:!0});var gI=i(qn);x4=n(gI,"620"),gI.forEach(t),vs.forEach(t),D4=f(ms),Z=r(ms,"TR",{});var ws=i(Z);Vn=r(ws,"TD",{align:!0});var _I=i(Vn);G4=n(_I,"64"),_I.forEach(t),I4=f(ws),Hn=r(ws,"TD",{align:!0});var bI=i(Hn);U4=n(bI,"10037"),bI.forEach(t),S4=f(ws),Fn=r(ws,"TD",{align:!0});var yI=i(Fn);z4=n(yI,"8061"),yI.forEach(t),N4=f(ws),Wn=r(ws,"TD",{align:!0});var EI=i(Wn);C4=n(EI,"1976"),EI.forEach(t),ws.forEach(t),ms.forEach(t),Yb.forEach(t),Ug=f(e),$a=r(e,"P",{});var Qb=i($a);L4=n(Qb,"So there is only a real memory saving if we train at a high batch size (and it\u2019s not half) and at batch sizes lower than 8, you actually get a bigger memory footprint (because of the overhead mentioned above). 
The gain for FP16 training is that in each of those cases, the training with the flag "),$m=r(Qb,"CODE",{});var PI=i($m);M4=n(PI,"--fp16"),PI.forEach(t),O4=n(Qb," is twice as fast, which does require every tensor to have every dimension be a multiple of 8 (examples pad the tensors to a sequence length that is a multiple of 8)."),Qb.forEach(t),Sg=f(e),Rn=r(e,"P",{});var $I=i(Rn);B4=n($I,"Summary: FP16 with apex or AMP will only give you some memory savings with a reasonably high batch size."),$I.forEach(t),zg=f(e),Xn=r(e,"P",{});var kI=i(Xn);q4=n(kI,"Additionally, under mixed precision when possible, it\u2019s important that the batch size is a multiple of 8 to efficiently use tensor cores."),kI.forEach(t),Ng=f(e),ka=r(e,"P",{});var Jb=i(ka);V4=n(Jb,"Note that in some situations the speed up can be as big as 5x when using mixed precision. e.g. we have observed that while using "),_i=r(Jb,"A",{href:!0,rel:!0});var AI=i(_i);H4=n(AI,"Megatron-Deepspeed"),AI.forEach(t),F4=n(Jb,"."),Jb.forEach(t),Cg=f(e),Yn=r(e,"P",{});var TI=i(Yn);W4=n(TI,"Some amazing tutorials to read on mixed precision:"),TI.forEach(t),Lg=f(e),Aa=r(e,"UL",{});var Zb=i(Aa);Qn=r(Zb,"LI",{});var cA=i(Qn);R4=n(cA,"@sgugger wrote a great explanation of mixed precision "),bi=r(cA,"A",{href:!0,rel:!0});var jI=i(bi);X4=n(jI,"here"),jI.forEach(t),cA.forEach(t),Y4=f(Zb),Jn=r(Zb,"LI",{});var mA=i(Jn);Q4=n(mA,"Aleksey Bilogur\u2019s "),yi=r(mA,"A",{href:!0,rel:!0});var xI=i(yi);J4=n(xI,"A developer-friendly guide to mixed precision training with PyTorch"),xI.forEach(t),mA.forEach(t),Zb.forEach(t),Mg=f(e),he=r(e,"P",{});var ch=i(he);Z4=n(ch,`You can also see a variety of benchmarks on fp16 vs other precisions: `),Ei=r(ch,"A",{href:!0,rel:!0});var DI=i(Ei);K4=n(DI,"RTX-3090"),DI.forEach(t),e$=n(ch,` and `),Pi=r(ch,"A",{href:!0,rel:!0});var GI=i(Pi);t$=n(GI,"A100"),GI.forEach(t),a$=n(ch,"."),ch.forEach(t),Og=f(e),tt=r(e,"H5",{class:!0});var Kb=i(tt);Ta=r(Kb,"A",{id:!0,class:!0,href:!0});var II=i(Ta);km=r(II,"SPAN",{});var UI=i(km);d($i.$$.fragment,UI),UI.forEach(t),II.forEach(t),s$=f(Kb),Am=r(Kb,"SPAN",{});var SI=i(Am);r$=n(SI,"fp16 caching"),SI.forEach(t),Kb.forEach(t),Bg=f(e),fe=r(e,"P",{});var mh=i(fe);i$=n(mh,"pytorch "),Tm=r(mh,"CODE",{});var zI=i(Tm);o$=n(zI,"autocast"),zI.forEach(t),l$=n(mh," which performs AMP include a caching feature, which speed things up by caching fp16-converted values. Here is the full description from this "),ki=r(mh,"A",{href:!0,rel:!0});var NI=i(ki);n$=n(NI,"comment"),NI.forEach(t),p$=n(mh,":"),mh.forEach(t),qg=f(e),Zn=r(e,"P",{});var CI=i(Zn);h$=n(CI,"Autocast maintains a cache of the FP16 casts of model parameters (leaves). This helps streamline parameter reuse: if the same FP32 param is used in several different FP16list ops, like several matmuls, instead of re-casting the param to FP16 on entering each matmul, the cast will occur on the first matmul, the casted FP16 copy will be cached, and for all later matmuls the FP16 copy will be reused. The cache is maintained only within a particular outermost autocast context. When you exit the autocast context the cache is dropped. For recommended usage, in which autocast wraps the forward pass, and then you exit the context before calling backward(), this means the cache only lasts the duration of the forward pass each iteration, and will be rebuilt next iteration. (The cache of FP16-casted copies MUST be rebuilt each iteration. 
The FP32 parameters get updated by the optimizer, so the FP16 copies must be recreated, otherwise the FP16 values will be stale.)"),CI.forEach(t),Vg=f(e),at=r(e,"H5",{class:!0});var ey=i(at);ja=r(ey,"A",{id:!0,class:!0,href:!0});var LI=i(ja);jm=r(LI,"SPAN",{});var MI=i(jm);d(Ai.$$.fragment,MI),MI.forEach(t),LI.forEach(t),f$=f(ey),xm=r(ey,"SPAN",{});var OI=i(xm);c$=n(OI,"fp16 Inference"),OI.forEach(t),ey.forEach(t),Hg=f(e),Kn=r(e,"P",{});var BI=i(Kn);m$=n(BI,"While normally inference is done with fp16/amp as with training, it\u2019s also possible to use the full fp16 mode without using mixed precision. This is especially a good fit if the pretrained model weights are already in fp16. So a lot less memory is used: 2 bytes per parameter vs 6 bytes with mixed precision!"),BI.forEach(t),Fg=f(e),ep=r(e,"P",{});var qI=i(ep);d$=n(qI,"How good the results this will deliver will depend on the model. If it can handle fp16 without overflows and accuracy issues, then it\u2019ll definitely better to use the full fp16 mode."),qI.forEach(t),Wg=f(e),tp=r(e,"P",{});var VI=i(tp);u$=n(VI,"For example, LayerNorm has to be done in fp32 and recent pytorch (1.10+) has been fixed to do that regardless of the input types, but earlier pytorch versions accumulate in the input type which can be an issue."),VI.forEach(t),Rg=f(e),xa=r(e,"P",{});var ty=i(xa);v$=n(ty,"In \u{1F917} Transformers the full fp16 inference is enabled by passing "),Dm=r(ty,"CODE",{});var HI=i(Dm);w$=n(HI,"--fp16_full_eval"),HI.forEach(t),g$=n(ty," to the \u{1F917} Trainer."),ty.forEach(t),Xg=f(e),st=r(e,"H4",{class:!0});var ay=i(st);Da=r(ay,"A",{id:!0,class:!0,href:!0});var FI=i(Da);Gm=r(FI,"SPAN",{});var WI=i(Gm);d(Ti.$$.fragment,WI),WI.forEach(t),FI.forEach(t),_$=f(ay),Im=r(ay,"SPAN",{});var RI=i(Im);b$=n(RI,"bf16"),RI.forEach(t),ay.forEach(t),Yg=f(e),ce=r(e,"P",{});var dh=i(ce);y$=n(dh,"If you own Ampere or newer hardware you can start using bf16 for your training and evaluation. While bf16 has a worse precision than fp16, it has a much much bigger dynamic range. Therefore, if in the past you were experiencing overflow issues while training the model, bf16 will prevent this from happening most of the time. Remember that in fp16 the biggest number you can have is "),Um=r(dh,"CODE",{});var XI=i(Um);E$=n(XI,"65535"),XI.forEach(t),P$=n(dh," and any number above that will overflow. A bf16 number can be as large as "),Sm=r(dh,"CODE",{});var YI=i(Sm);$$=n(YI,"3.39e+38"),YI.forEach(t),k$=n(dh," (!) which is about the same as fp32 - because both have 8-bits used for the numerical range."),dh.forEach(t),Qg=f(e),ap=r(e,"P",{});var QI=i(ap);A$=n(QI,"Automatic Mixed Precision (AMP) is the same as with fp16, except it\u2019ll use bf16."),QI.forEach(t),Jg=f(e),sp=r(e,"P",{});var JI=i(sp);T$=n(JI,"Thanks to the fp32-like dynamic range with bf16 mixed precision loss scaling is no longer needed."),JI.forEach(t),Zg=f(e),rp=r(e,"P",{});var ZI=i(rp);j$=n(ZI,"If you have tried to finetune models pre-trained under bf16 mixed precision (e.g. T5) it\u2019s very likely that you have encountered overflow issues. Now you should be able to finetune those models without any issues."),ZI.forEach(t),Kg=f(e),ip=r(e,"P",{});var KI=i(ip);x$=n(KI,"That said, also be aware that if you pre-trained a model in bf16, it\u2019s likely to have overflow issues if someone tries to finetune it in fp16 down the road. 
So once started on the bf16-mode path it\u2019s best to remain on it and not switch to fp16."),KI.forEach(t),e1=f(e),Ga=r(e,"P",{});var sy=i(Ga);D$=n(sy,"In \u{1F917} Transformers bf16 mixed precision is enabled by passing "),zm=r(sy,"CODE",{});var eU=i(zm);G$=n(eU,"--bf16"),eU.forEach(t),I$=n(sy," to the \u{1F917} Trainer."),sy.forEach(t),t1=f(e),op=r(e,"P",{});var tU=i(op);U$=n(tU,"If you use your own trainer, this is just:"),tU.forEach(t),a1=f(e),d(ji.$$.fragment,e),s1=f(e),xi=r(e,"P",{});var dA=i(xi);S$=n(dA,"If you need to switch a tensor to bf16, it\u2019s just: "),Nm=r(dA,"CODE",{});var aU=i(Nm);z$=n(aU,"t.to(dtype=torch.bfloat16)"),aU.forEach(t),dA.forEach(t),r1=f(e),lp=r(e,"P",{});var sU=i(lp);N$=n(sU,"Here is how you can check if your setup supports bf16:"),sU.forEach(t),i1=f(e),d(Di.$$.fragment,e),o1=f(e),np=r(e,"P",{});var rU=i(np);C$=n(rU,"On the other hand bf16 has a much worse precision than fp16, so there are certain situations where you\u2019d still want to use fp16 and not bf16."),rU.forEach(t),l1=f(e),me=r(e,"P",{});var uh=i(me);L$=n(uh,`You can also see a variety of benchmarks on bf16 vs other precisions: `),Gi=r(uh,"A",{href:!0,rel:!0});var iU=i(Gi);M$=n(iU,"RTX-3090"),iU.forEach(t),O$=n(uh,` and `),Ii=r(uh,"A",{href:!0,rel:!0});var oU=i(Ii);B$=n(oU,"A100"),oU.forEach(t),q$=n(uh,"."),uh.forEach(t),n1=f(e),rt=r(e,"H5",{class:!0});var ry=i(rt);Ia=r(ry,"A",{id:!0,class:!0,href:!0});var lU=i(Ia);Cm=r(lU,"SPAN",{});var nU=i(Cm);d(Ui.$$.fragment,nU),nU.forEach(t),lU.forEach(t),V$=f(ry),Lm=r(ry,"SPAN",{});var pU=i(Lm);H$=n(pU,"bf16 Inference"),pU.forEach(t),ry.forEach(t),p1=f(e),Ua=r(e,"P",{});var iy=i(Ua);F$=n(iy,"Same as with fp16, you can do inference in either the mixed precision bf16 or using the full bf16 mode. The same caveats apply. For details see "),pp=r(iy,"A",{href:!0});var hU=i(pp);W$=n(hU,"fp16 Inference"),hU.forEach(t),R$=n(iy,"."),iy.forEach(t),h1=f(e),Sa=r(e,"P",{});var oy=i(Sa);X$=n(oy,"In \u{1F917} Transformers the full bf16 inference is enabled by passing "),Mm=r(oy,"CODE",{});var fU=i(Mm);Y$=n(fU,"--bf16_full_eval"),fU.forEach(t),Q$=n(oy," to the \u{1F917} Trainer."),oy.forEach(t),f1=f(e),it=r(e,"H4",{class:!0});var ly=i(it);za=r(ly,"A",{id:!0,class:!0,href:!0});var cU=i(za);Om=r(cU,"SPAN",{});var mU=i(Om);d(Si.$$.fragment,mU),mU.forEach(t),cU.forEach(t),J$=f(ly),Bm=r(ly,"SPAN",{});var dU=i(Bm);Z$=n(dU,"tf32"),dU.forEach(t),ly.forEach(t),c1=f(e),hp=r(e,"P",{});var uU=i(hp);K$=n(uU,"The Ampere hardware uses a magical data type called tf32. It has the same numerical range as fp32 (8-bits), but instead of 23 bits precision it has only 10 bits (same as fp16). In total it uses only 19 bits."),uU.forEach(t),m1=f(e),fp=r(e,"P",{});var vU=i(fp);ek=n(vU,"It\u2019s magical in the sense that you can use the normal fp32 training and/or inference code and by enabling tf32 support you can get up to 3x throughput improvement. All you need to do is to add this to your code:"),vU.forEach(t),d1=f(e),d(zi.$$.fragment,e),u1=f(e),cp=r(e,"P",{});var wU=i(cp);tk=n(wU,"When this is done CUDA will automatically switch to using tf32 instead of fp32 where it\u2019s possible. This, of course, assumes that the used GPU is from the Ampere series."),wU.forEach(t),v1=f(e),Na=r(e,"P",{});var ny=i(Na);ak=n(ny,"Like all cases with reduced precision this may or may not be satisfactory for your needs, so you have to experiment and see. 
According to "),Ni=r(ny,"A",{href:!0,rel:!0});var gU=i(Ni);sk=n(gU,"NVIDIA research"),gU.forEach(t),rk=n(ny," the majority of machine learning training shouldn\u2019t be impacted and showed the same perplexity and convergence as the fp32 training."),ny.forEach(t),w1=f(e),mp=r(e,"P",{});var _U=i(mp);ik=n(_U,"If you\u2019re already using fp16 or bf16 mixed precision it may help with the throughput as well."),_U.forEach(t),g1=f(e),q=r(e,"P",{});var gs=i(q);ok=n(gs,"You can enable this mode in the \u{1F917} Trainer with "),qm=r(gs,"CODE",{});var bU=i(qm);lk=n(bU,"--tf32"),bU.forEach(t),nk=n(gs,", or disable it with "),Vm=r(gs,"CODE",{});var yU=i(Vm);pk=n(yU,"--tf32 0"),yU.forEach(t),hk=n(gs," or "),Hm=r(gs,"CODE",{});var EU=i(Hm);fk=n(EU,"--no_tf32"),EU.forEach(t),ck=n(gs,`. By default the PyTorch default is used.`),gs.forEach(t),_1=f(e),de=r(e,"P",{});var vh=i(de);mk=n(vh,"Note: tf32 mode is internal to CUDA and can\u2019t be accessed directly via "),Fm=r(vh,"CODE",{});var PU=i(Fm);dk=n(PU,"tensor.to(dtype=torch.tf32)"),PU.forEach(t),uk=n(vh," as "),Wm=r(vh,"CODE",{});var $U=i(Wm);vk=n($U,"torch.tf32"),$U.forEach(t),wk=n(vh," doesn\u2019t exit."),vh.forEach(t),b1=f(e),Ca=r(e,"P",{});var py=i(Ca);gk=n(py,"Note: you need "),Rm=r(py,"CODE",{});var kU=i(Rm);_k=n(kU,"torch>=1.7"),kU.forEach(t),bk=n(py," to enjoy this feature."),py.forEach(t),y1=f(e),ue=r(e,"P",{});var wh=i(ue);yk=n(wh,`You can also see a variety of benchmarks on tf32 vs other precisions: `),Ci=r(wh,"A",{href:!0,rel:!0});var AU=i(Ci);Ek=n(AU,"RTX-3090"),AU.forEach(t),Pk=n(wh,` and `),Li=r(wh,"A",{href:!0,rel:!0});var TU=i(Li);$k=n(TU,"A100"),TU.forEach(t),kk=n(wh,"."),wh.forEach(t),E1=f(e),ot=r(e,"H3",{class:!0});var hy=i(ot);La=r(hy,"A",{id:!0,class:!0,href:!0});var jU=i(La);Xm=r(jU,"SPAN",{});var xU=i(Xm);d(Mi.$$.fragment,xU),xU.forEach(t),jU.forEach(t),Ak=f(hy),Ym=r(hy,"SPAN",{});var DU=i(Ym);Tk=n(DU,"Gradient Accumulation"),DU.forEach(t),hy.forEach(t),P1=f(e),ve=r(e,"P",{});var gh=i(ve);jk=n(gh,"Since gradient accumulation essentially is identical to having a larger batch size, just as with the larger batch size here you are likely to see a 20-30% speedup due to the optimizer running less often. For example, see benchmarks for "),Oi=r(gh,"A",{href:!0,rel:!0});var GU=i(Oi);xk=n(GU,"RTX-3090"),GU.forEach(t),Dk=n(gh,` and `),Bi=r(gh,"A",{href:!0,rel:!0});var IU=i(Bi);Gk=n(IU,"A100"),IU.forEach(t),Ik=n(gh,"."),gh.forEach(t),$1=f(e),Ma=r(e,"P",{});var fy=i(Ma);Uk=n(fy,"To activate this feature in \u{1F917} Trainer add "),Qm=r(fy,"CODE",{});var UU=i(Qm);Sk=n(UU,"--gradient_accumulation_steps 4"),UU.forEach(t),zk=n(fy," to its arguments (experiment with the value to get the best performance)."),fy.forEach(t),k1=f(e),dp=r(e,"P",{});var SU=i(dp);Nk=n(SU,"It\u2019s important to remember that using gradient accumulation you may end up with a much larger effective batch size, so you may need to adjust the learning rate, its warm up and for very short datasets it\u2019ll impact the loss as the training will end up doing less steps than normal."),SU.forEach(t),A1=f(e),lt=r(e,"H3",{class:!0});var cy=i(lt);Oa=r(cy,"A",{id:!0,class:!0,href:!0});var zU=i(Oa);Jm=r(zU,"SPAN",{});var NU=i(Jm);d(qi.$$.fragment,NU),NU.forEach(t),zU.forEach(t),Ck=f(cy),Zm=r(cy,"SPAN",{});var CU=i(Zm);Lk=n(CU,"Gradient Checkpointing"),CU.forEach(t),cy.forEach(t),T1=f(e),up=r(e,"P",{});var LU=i(up);Mk=n(LU,"One way to use significantly less GPU memory is to enabled \u201CGradient Checkpointing\u201D (also known as \u201Cactivation checkpointing\u201D). 
When enabled, a lot of memory can be freed at the cost of small decrease in the training speed due to recomputing parts of the graph during back-propagation. The slowdown will depend on the model but quite often it is around 20-30%."),LU.forEach(t),j1=f(e),V=r(e,"P",{});var _s=i(V);Ok=n(_s,"This technique was first shared in the paper: "),Vi=r(_s,"A",{href:!0,rel:!0});var MU=i(Vi);Bk=n(MU,"Training Deep Nets with Sublinear Memory Cost"),MU.forEach(t),qk=n(_s,". The paper will also give you the exact details on the savings, but it\u2019s in the ballpark of "),Km=r(_s,"CODE",{});var OU=i(Km);Vk=n(OU,"O(sqrt(n))"),OU.forEach(t),Hk=n(_s,", where "),ed=r(_s,"CODE",{});var BU=i(ed);Fk=n(BU,"n"),BU.forEach(t),Wk=n(_s," is the number of feed-forward layers."),_s.forEach(t),x1=f(e),vp=r(e,"P",{});var qU=i(vp);Rk=n(qU,"To activate this feature in \u{1F917} Transformers for models that support it, use:"),qU.forEach(t),D1=f(e),d(Hi.$$.fragment,e),G1=f(e),Ba=r(e,"P",{});var my=i(Ba);Xk=n(my,"or add "),td=r(my,"CODE",{});var VU=i(td);Yk=n(VU,"--gradient_checkpointing"),VU.forEach(t),Qk=n(my," to the Trainer arguments."),my.forEach(t),I1=f(e),nt=r(e,"H3",{class:!0});var dy=i(nt);qa=r(dy,"A",{id:!0,class:!0,href:!0});var HU=i(qa);ad=r(HU,"SPAN",{});var FU=i(ad);d(Fi.$$.fragment,FU),FU.forEach(t),HU.forEach(t),Jk=f(dy),sd=r(dy,"SPAN",{});var WU=i(sd);Zk=n(WU,"Batch sizes"),WU.forEach(t),dy.forEach(t),U1=f(e),wp=r(e,"P",{});var RU=i(wp);Kk=n(RU,"One gets the most efficient performance when batch sizes and input/output neuron counts are divisible by a certain number, which typically starts at 8, but can be much higher as well. That number varies a lot depending on the specific hardware being used and the dtype of the model."),RU.forEach(t),S1=f(e),we=r(e,"P",{});var _h=i(we);e8=n(_h,"For example for fully connected layers (which correspond to GEMMs), NVIDIA provides recommendations for "),Wi=r(_h,"A",{href:!0,rel:!0});var XU=i(Wi);t8=n(XU,"input/output neuron counts"),XU.forEach(t),a8=n(_h," and "),Ri=r(_h,"A",{href:!0,rel:!0});var YU=i(Ri);s8=n(YU,"batch size"),YU.forEach(t),r8=n(_h,"."),_h.forEach(t),z1=f(e),Xi=r(e,"P",{});var uA=i(Xi);Yi=r(uA,"A",{href:!0,rel:!0});var QU=i(Yi);i8=n(QU,"Tensor Core Requirements"),QU.forEach(t),o8=n(uA," define the multiplier based on the dtype and the hardware. For example, for fp16 a multiple of 8 is recommended, but on A100 it\u2019s 64!"),uA.forEach(t),N1=f(e),Va=r(e,"P",{});var uy=i(Va);l8=n(uy,"For parameters that are small, there is also "),Qi=r(uy,"A",{href:!0,rel:!0});var JU=i(Qi);n8=n(JU,"Dimension Quantization Effects"),JU.forEach(t),p8=n(uy," to consider, this is where tiling happens and the right multiplier can have a significant speedup."),uy.forEach(t),C1=f(e),H=r(e,"P",{});var bs=i(H);h8=n(bs,"Additionally, as explained in the "),gp=r(bs,"A",{href:!0});var ZU=i(gp);f8=n(ZU,"Gradient Accumulation"),ZU.forEach(t),c8=n(bs,` section, the bigger the batch size the less often the optimizer is run, the faster the training is (considering the same dataset length). 
See benchmarks for `),Ji=r(bs,"A",{href:!0,rel:!0});var KU=i(Ji);m8=n(KU,"RTX-3090"),KU.forEach(t),d8=n(bs,` and `),Zi=r(bs,"A",{href:!0,rel:!0});var eS=i(Zi);u8=n(eS,"A100"),eS.forEach(t),v8=n(bs,"."),bs.forEach(t),L1=f(e),pt=r(e,"H3",{class:!0});var vy=i(pt);Ha=r(vy,"A",{id:!0,class:!0,href:!0});var tS=i(Ha);rd=r(tS,"SPAN",{});var aS=i(rd);d(Ki.$$.fragment,aS),aS.forEach(t),tS.forEach(t),w8=f(vy),id=r(vy,"SPAN",{});var sS=i(id);g8=n(sS,"DP vs DDP"),sS.forEach(t),vy.forEach(t),M1=f(e),ht=r(e,"P",{});var Qd=i(ht);od=r(Qd,"CODE",{});var rS=i(od);_8=n(rS,"DistributedDataParallel"),rS.forEach(t),b8=n(Qd," (DDP) is typically faster than "),ld=r(Qd,"CODE",{});var iS=i(ld);y8=n(iS,"DataParallel"),iS.forEach(t),E8=n(Qd," (DP), but it is not always the case:"),Qd.forEach(t),O1=f(e),Fa=r(e,"UL",{});var wy=i(Fa);nd=r(wy,"LI",{});var oS=i(nd);P8=n(oS,"while DP is python threads-based, DDP is multiprocess-based - and as such it has no python threads limitations, such as GIL"),oS.forEach(t),$8=f(wy),pd=r(wy,"LI",{});var lS=i(pd);k8=n(lS,"on the other hand a slow inter-connectivity between the GPU cards could lead to an actual slower outcome with DDP"),lS.forEach(t),wy.forEach(t),B1=f(e),_p=r(e,"P",{});var nS=i(_p);A8=n(nS,"Here are the main differences in the inter-GPU communication overhead between the two modes:"),nS.forEach(t),q1=f(e),eo=r(e,"P",{});var vA=i(eo);to=r(vA,"A",{href:!0,rel:!0});var pS=i(to);T8=n(pS,"DDP"),pS.forEach(t),j8=n(vA,":"),vA.forEach(t),V1=f(e),Wa=r(e,"UL",{});var gy=i(Wa);hd=r(gy,"LI",{});var hS=i(hd);x8=n(hS,"At the start time the main process replicates the model once from gpu 0 to the rest of gpus"),hS.forEach(t),D8=f(gy),bp=r(gy,"LI",{});var wA=i(bp);G8=n(wA,"Then for each batch:"),ao=r(wA,"OL",{});var _y=i(ao);fd=r(_y,"LI",{});var fS=i(fd);I8=n(fS,"each gpu consumes each own mini-batch of data directly"),fS.forEach(t),U8=f(_y),so=r(_y,"LI",{});var by=i(so);S8=n(by,"during "),cd=r(by,"CODE",{});var cS=i(cd);z8=n(cS,"backward"),cS.forEach(t),N8=n(by,", once the local gradients are ready, they are then averaged across all processes"),by.forEach(t),_y.forEach(t),wA.forEach(t),gy.forEach(t),H1=f(e),ro=r(e,"P",{});var gA=i(ro);io=r(gA,"A",{href:!0,rel:!0});var mS=i(io);C8=n(mS,"DP"),mS.forEach(t),L8=n(gA,":"),gA.forEach(t),F1=f(e),yp=r(e,"P",{});var dS=i(yp);M8=n(dS,"For each batch:"),dS.forEach(t),W1=f(e),U=r(e,"OL",{});var Ee=i(U);md=r(Ee,"LI",{});var uS=i(md);O8=n(uS,"gpu 0 reads the batch of data and then sends a mini-batch to each gpu"),uS.forEach(t),B8=f(Ee),dd=r(Ee,"LI",{});var vS=i(dd);q8=n(vS,"replicates the up-to-date model from gpu 0 to each gpu"),vS.forEach(t),V8=f(Ee),oo=r(Ee,"LI",{});var yy=i(oo);H8=n(yy,"runs "),ud=r(yy,"CODE",{});var wS=i(ud);F8=n(wS,"forward"),wS.forEach(t),W8=n(yy," and sends output from each gpu to gpu 0, computes loss"),yy.forEach(t),R8=f(Ee),Ep=r(Ee,"LI",{});var _A=i(Ep);X8=n(_A,"scatters loss from gpu 0 to all gpus, runs "),vd=r(_A,"CODE",{});var gS=i(vd);Y8=n(gS,"backward"),gS.forEach(t),_A.forEach(t),Q8=f(Ee),wd=r(Ee,"LI",{});var _S=i(wd);J8=n(_S,"sends gradients from each gpu to gpu 0 and averages those"),_S.forEach(t),Ee.forEach(t),R1=f(e),Pp=r(e,"P",{});var bS=i(Pp);Z8=n(bS,"The only communication DDP performs per batch is sending gradients, whereas DP does 5 different data exchanges per batch."),bS.forEach(t),X1=f(e),Ra=r(e,"P",{});var Ey=i(Ra);K8=n(Ey,"DP copies data within the process via python threads, whereas DDP copies data via "),lo=r(Ey,"A",{href:!0,rel:!0});var 
yS=i(lo);e7=n(yS,"torch.distributed"),yS.forEach(t),t7=n(Ey,"."),Ey.forEach(t),Y1=f(e),$p=r(e,"P",{});var ES=i($p);a7=n(ES,"Under DP gpu 0 performs a lot more work than the rest of the gpus, thus resulting in under-utilization of gpus."),ES.forEach(t),Q1=f(e),kp=r(e,"P",{});var PS=i(kp);s7=n(PS,"You can use DDP across multiple machines, but this is not the case with DP."),PS.forEach(t),J1=f(e),Ap=r(e,"P",{});var $S=i(Ap);r7=n($S,"There are other differences between DP and DDP but they aren\u2019t relevant to this discussion."),$S.forEach(t),Z1=f(e),Xa=r(e,"P",{});var Py=i(Xa);i7=n(Py,"If you want to go really deep into understanding these 2 modes, this "),no=r(Py,"A",{href:!0,rel:!0});var kS=i(no);o7=n(kS,"article"),kS.forEach(t),l7=n(Py," is highly recommended, as it has great diagrams, includes multiple benchmarks and profiler outputs on various hardware, explains all the nuances that you may need to know."),Py.forEach(t),K1=f(e),Tp=r(e,"P",{});var AS=i(Tp);n7=n(AS,"Let\u2019s look at an actual benchmark:"),AS.forEach(t),e_=f(e),Ya=r(e,"TABLE",{});var $y=i(Ya);gd=r($y,"THEAD",{});var TS=i(gd);ft=r(TS,"TR",{});var bh=i(ft);jp=r(bh,"TH",{align:!0});var jS=i(jp);p7=n(jS,"Type"),jS.forEach(t),h7=f(bh),_d=r(bh,"TH",{});var xS=i(_d);f7=n(xS,"NVlink"),xS.forEach(t),c7=f(bh),xp=r(bh,"TH",{align:!0});var DS=i(xp);m7=n(DS,"Time"),DS.forEach(t),bh.forEach(t),TS.forEach(t),d7=f($y),ct=r($y,"TBODY",{});var yh=i(ct);mt=r(yh,"TR",{});var Eh=i(mt);Dp=r(Eh,"TD",{align:!0});var GS=i(Dp);u7=n(GS,"2:DP"),GS.forEach(t),v7=f(Eh),bd=r(Eh,"TD",{});var IS=i(bd);w7=n(IS,"Y"),IS.forEach(t),g7=f(Eh),Gp=r(Eh,"TD",{align:!0});var US=i(Gp);_7=n(US,"110s"),US.forEach(t),Eh.forEach(t),b7=f(yh),dt=r(yh,"TR",{});var Ph=i(dt);Ip=r(Ph,"TD",{align:!0});var SS=i(Ip);y7=n(SS,"2:DDP"),SS.forEach(t),E7=f(Ph),yd=r(Ph,"TD",{});var zS=i(yd);P7=n(zS,"Y"),zS.forEach(t),$7=f(Ph),Up=r(Ph,"TD",{align:!0});var NS=i(Up);k7=n(NS,"101s"),NS.forEach(t),Ph.forEach(t),A7=f(yh),ut=r(yh,"TR",{});var $h=i(ut);Sp=r($h,"TD",{align:!0});var CS=i(Sp);T7=n(CS,"2:DDP"),CS.forEach(t),j7=f($h),Ed=r($h,"TD",{});var LS=i(Ed);x7=n(LS,"N"),LS.forEach(t),D7=f($h),zp=r($h,"TD",{align:!0});var MS=i(zp);G7=n(MS,"131s"),MS.forEach(t),$h.forEach(t),yh.forEach(t),$y.forEach(t),t_=f(e),Np=r(e,"P",{});var OS=i(Np);I7=n(OS,"Analysis:"),OS.forEach(t),a_=f(e),Cp=r(e,"P",{});var BS=i(Cp);U7=n(BS,"Here DP is ~10% slower than DDP w/ NVlink, but ~15% faster than DDP w/o NVlink"),BS.forEach(t),s_=f(e),Lp=r(e,"P",{});var qS=i(Lp);S7=n(qS,"The real difference will depend on how much data each GPU needs to sync with the others - the more there is to sync, the more a slow link will slow down the total runtime."),qS.forEach(t),r_=f(e),Mp=r(e,"P",{});var VS=i(Mp);z7=n(VS,"Here is the full benchmark code and outputs:"),VS.forEach(t),i_=f(e),po=r(e,"P",{});var bA=i(po);Pd=r(bA,"CODE",{});var HS=i(Pd);N7=n(HS,"NCCL_P2P_DISABLE=1"),HS.forEach(t),C7=n(bA," was used to disable the NVLink feature on the corresponding benchmark."),bA.forEach(t),o_=f(e),d(ho.$$.fragment,e),l_=f(e),D=r(e,"P",{});var ee=i(D);L7=n(ee,"Hardware: 2x TITAN RTX 24GB each + NVlink with 2 NVLinks ("),$d=r(ee,"CODE",{});var FS=i($d);M7=n(FS,"NV2"),FS.forEach(t),O7=n(ee," in "),kd=r(ee,"CODE",{});var WS=i(kd);B7=n(WS,"nvidia-smi topo -m"),WS.forEach(t),q7=n(ee,`) Software: `),Ad=r(ee,"CODE",{});var RS=i(Ad);V7=n(RS,"pytorch-1.8-to-be"),RS.forEach(t),H7=n(ee," + "),Td=r(ee,"CODE",{});var XS=i(Td);F7=n(XS,"cuda-11.0"),XS.forEach(t),W7=n(ee," / "),jd=r(ee,"CODE",{});var 
YS=i(jd);R7=n(YS,"transformers==4.3.0.dev0"),YS.forEach(t),ee.forEach(t),n_=f(e),vt=r(e,"H3",{class:!0});var ky=i(vt);Qa=r(ky,"A",{id:!0,class:!0,href:!0});var QS=i(Qa);xd=r(QS,"SPAN",{});var JS=i(xd);d(fo.$$.fragment,JS),JS.forEach(t),QS.forEach(t),X7=f(ky),Dd=r(ky,"SPAN",{});var ZS=i(Dd);Y7=n(ZS,"DataLoader"),ZS.forEach(t),ky.forEach(t),p_=f(e),Op=r(e,"P",{});var KS=i(Op);Q7=n(KS,"One of the important requirements to reach great training speed is the ability to feed the GPU at the maximum speed it can handle. By default everything happens in the main process and it might not be able to read the data from disk fast enough, and thus create a bottleneck, leading to GPU under-utilization."),KS.forEach(t),h_=f(e),Ja=r(e,"UL",{});var Ay=i(Ja);Bp=r(Ay,"LI",{});var yA=i(Bp);Gd=r(yA,"CODE",{});var ez=i(Gd);J7=n(ez,"DataLoader(pin_memory=True, ...)"),ez.forEach(t),Z7=n(yA," which ensures that the data gets preloaded into the pinned memory on CPU and typically leads to much faster transfers from CPU to GPU memory."),yA.forEach(t),K7=f(Ay),qp=r(Ay,"LI",{});var EA=i(qp);Id=r(EA,"CODE",{});var tz=i(Id);e9=n(tz,"DataLoader(num_workers=4, ...)"),tz.forEach(t),t9=n(EA," - spawn several workers to pre-load data faster - during training watch the GPU utilization stats and if it\u2019s far from 100% experiment with raising the number of workers. Of course, the problem could be elsewhere so a very big number of workers won\u2019t necessarily lead to a better performance."),EA.forEach(t),Ay.forEach(t),f_=f(e),wt=r(e,"H2",{class:!0});var Ty=i(wt);Za=r(Ty,"A",{id:!0,class:!0,href:!0});var az=i(Za);Ud=r(az,"SPAN",{});var sz=i(Ud);d(co.$$.fragment,sz),sz.forEach(t),az.forEach(t),a9=f(Ty),Sd=r(Ty,"SPAN",{});var rz=i(Sd);s9=n(rz,"Faster optimizer"),rz.forEach(t),Ty.forEach(t),c_=f(e),gt=r(e,"P",{});var Jd=i(gt);r9=n(Jd,"pytorch-nightly introduced "),zd=r(Jd,"CODE",{});var iz=i(zd);i9=n(iz,"torch.optim._multi_tensor"),iz.forEach(t),o9=n(Jd," which should significantly speed up the optimizers for situations with lots of small feature tensors. 
It should eventually become the default, but if you want to experiment with it sooner and don\u2019t mind using the bleed-edge, see: "),mo=r(Jd,"A",{href:!0,rel:!0});var oz=i(mo);l9=n(oz,"https://github.com/huggingface/transformers/issues/9965"),oz.forEach(t),Jd.forEach(t),m_=f(e),_t=r(e,"H3",{class:!0});var jy=i(_t);Ka=r(jy,"A",{id:!0,class:!0,href:!0});var lz=i(Ka);Nd=r(lz,"SPAN",{});var nz=i(Nd);d(uo.$$.fragment,nz),nz.forEach(t),lz.forEach(t),n9=f(jy),Cd=r(jy,"SPAN",{});var pz=i(Cd);p9=n(pz,"Sparsity"),pz.forEach(t),jy.forEach(t),d_=f(e),bt=r(e,"H4",{class:!0});var xy=i(bt);es=r(xy,"A",{id:!0,class:!0,href:!0});var hz=i(es);Ld=r(hz,"SPAN",{});var fz=i(Ld);d(vo.$$.fragment,fz),fz.forEach(t),hz.forEach(t),h9=f(xy),Md=r(xy,"SPAN",{});var cz=i(Md);f9=n(cz,"Mixture of Experts"),cz.forEach(t),xy.forEach(t),u_=f(e),Vp=r(e,"P",{});var mz=i(Vp);c9=n(mz,`Quite a few of the recent papers reported a 4-5x training speedup and a faster inference by integrating Mixture of Experts (MoE) into the Transformer models.`),mz.forEach(t),v_=f(e),Hp=r(e,"P",{});var dz=i(Hp);m9=n(dz,"Since it has been discovered that more parameters lead to better performance, this technique allows to increase the number of parameters by an order of magnitude without increasing training costs."),dz.forEach(t),w_=f(e),Fp=r(e,"P",{});var uz=i(Fp);d9=n(uz,"In this approach every other FFN layer is replaced with a MoE Layer which consists of many experts, with a gated function that trains each expert in a balanced way depending on the input token\u2019s position in a sequence."),uz.forEach(t),g_=f(e),Wp=r(e,"P",{});var vz=i(Wp);Rp=r(vz,"IMG",{src:!0,alt:!0}),vz.forEach(t),__=f(e),ts=r(e,"P",{});var Dy=i(ts);u9=n(Dy,"(source: "),wo=r(Dy,"A",{href:!0,rel:!0});var wz=i(wo);v9=n(wz,"GLAM"),wz.forEach(t),w9=n(Dy,")"),Dy.forEach(t),b_=f(e),Xp=r(e,"P",{});var gz=i(Xp);g9=n(gz,"You can find exhaustive details and comparison tables in the papers listed at the end of this section."),gz.forEach(t),y_=f(e),Yp=r(e,"P",{});var _z=i(Yp);_9=n(_z,"The main drawback of this approach is that it requires staggering amounts of GPU memory - almost an order of magnitude larger than its dense equivalent. 
Various distillation and approaches are proposed to how to overcome the much higher memory requirements."),_z.forEach(t),E_=f(e),Qp=r(e,"P",{});var bz=i(Qp);b9=n(bz,"There is direct trade-off though, you can use just a few experts with a 2-3x smaller base model instead of dozens or hundreds experts leading to a 5x smaller model and thus increase the training speed moderately while increasing the memory requirements moderately as well."),bz.forEach(t),P_=f(e),Jp=r(e,"P",{});var yz=i(Jp);y9=n(yz,"Most related papers and implementations are built around Tensorflow/TPUs:"),yz.forEach(t),$_=f(e),ge=r(e,"UL",{});var kh=i(ge);Od=r(kh,"LI",{});var Ez=i(Od);go=r(Ez,"A",{href:!0,rel:!0});var Pz=i(go);E9=n(Pz,"GShard: Scaling Giant Models with Conditional Computation and Automatic Sharding"),Pz.forEach(t),Ez.forEach(t),P9=f(kh),Bd=r(kh,"LI",{});var $z=i(Bd);_o=r($z,"A",{href:!0,rel:!0});var kz=i(_o);$9=n(kz,"Switch Transformers: Scaling to Trillion Parameter Models with Simple and Efficient Sparsity"),kz.forEach(t),$z.forEach(t),k9=f(kh),qd=r(kh,"LI",{});var Az=i(qd);bo=r(Az,"A",{href:!0,rel:!0});var Tz=i(bo);A9=n(Tz,"GLaM: Generalist Language Model (GLaM)"),Tz.forEach(t),Az.forEach(t),kh.forEach(t),k_=f(e),$=r(e,"P",{});var z=i($);T9=n(z,"And for Pytorch DeepSpeed has built one as well: "),yo=r(z,"A",{href:!0,rel:!0});var jz=i(yo);j9=n(jz,"DeepSpeed-MoE: Advancing Mixture-of-Experts Inference and Training to Power Next-Generation AI Scale"),jz.forEach(t),x9=n(z,", "),Eo=r(z,"A",{href:!0,rel:!0});var xz=i(Eo);D9=n(xz,"Mixture of Experts"),xz.forEach(t),G9=n(z," - blog posts: "),Po=r(z,"A",{href:!0,rel:!0});var Dz=i(Po);I9=n(Dz,"1"),Dz.forEach(t),U9=n(z,", "),$o=r(z,"A",{href:!0,rel:!0});var Gz=i($o);S9=n(Gz,"2"),Gz.forEach(t),z9=n(z," and specific deployment with large transformer-based natural language generation models: "),ko=r(z,"A",{href:!0,rel:!0});var Iz=i(ko);N9=n(Iz,"blog post"),Iz.forEach(t),C9=n(z,", "),Zp=r(z,"A",{href:!0});var Uz=i(Zp);L9=n(Uz,"Megatron-Deepspeed branch"),Uz.forEach(t),M9=n(z,"."),z.forEach(t),A_=f(e),yt=r(e,"H3",{class:!0});var Gy=i(yt);as=r(Gy,"A",{id:!0,class:!0,href:!0});var Sz=i(as);Vd=r(Sz,"SPAN",{});var zz=i(Vd);d(Ao.$$.fragment,zz),zz.forEach(t),Sz.forEach(t),O9=f(Gy),Hd=r(Gy,"SPAN",{});var Nz=i(Hd);B9=n(Nz,"Efficient Software Prebuilds"),Nz.forEach(t),Gy.forEach(t),T_=f(e),ss=r(e,"P",{});var Iy=i(ss);q9=n(Iy,"PyTorch\u2019s "),To=r(Iy,"A",{href:!0,rel:!0});var Cz=i(To);V9=n(Cz,"pip and conda builds"),Cz.forEach(t),H9=n(Iy," come prebuit with the cuda toolkit which is enough to run PyTorch, but it is insufficient if you need to build cuda extensions."),Iy.forEach(t),j_=f(e),rs=r(e,"P",{});var Uy=i(rs);F9=n(Uy,"At times it may take an additional effort to pre-build some components, e.g., if you\u2019re using libraries like "),Fd=r(Uy,"CODE",{});var Lz=i(Fd);W9=n(Lz,"apex"),Lz.forEach(t),R9=n(Uy," that don\u2019t come pre-compiled. In other situations figuring out how to install the right cuda toolkit system-wide can be complicated. 
To address these users\u2019 needs PyTorch and NVIDIA release a new version of NGC docker container which already comes with everything prebuilt and you just need to install your programs on it and it will run out of the box."),Uy.forEach(t),x_=f(e),Kp=r(e,"P",{});var Mz=i(Kp);X9=n(Mz,"This approach is also useful if you want to tweak the pytorch source and/or make a new customized build."),Mz.forEach(t),D_=f(e),_e=r(e,"P",{});var Ah=i(_e);Y9=n(Ah,"To find the docker image version you want start "),jo=r(Ah,"A",{href:!0,rel:!0});var Oz=i(jo);Q9=n(Oz,"here"),Oz.forEach(t),J9=n(Ah,", choose one of the latest monthly releases. Go into the release\u2019s notes for the desired release, check that the environment\u2019s components are matching your needs (including NVIDIA Driver requirements!) and then at the very top of that document go to the corresponding NGC page. If for some reason you get lost, here is "),xo=r(Ah,"A",{href:!0,rel:!0});var Bz=i(xo);Z9=n(Bz,"the index of all PyTorch NGC images"),Bz.forEach(t),K9=n(Ah,"."),Ah.forEach(t),G_=f(e),eh=r(e,"P",{});var qz=i(eh);eA=n(qz,"Next follow the instructions to download and deploy the docker image."),qz.forEach(t),I_=f(e),Et=r(e,"H2",{class:!0});var Sy=i(Et);is=r(Sy,"A",{id:!0,class:!0,href:!0});var Vz=i(is);Wd=r(Vz,"SPAN",{});var Hz=i(Wd);d(Do.$$.fragment,Hz),Hz.forEach(t),Vz.forEach(t),tA=f(Sy),Rd=r(Sy,"SPAN",{});var Fz=i(Rd);aA=n(Fz,"Contribute"),Fz.forEach(t),Sy.forEach(t),U_=f(e),th=r(e,"P",{});var Wz=i(th);sA=n(Wz,"This document is far from being complete and a lot more needs to be added, so if you have additions or corrections to make please don\u2019t hesitate to open a PR or if you aren\u2019t sure start an Issue and we can discuss the details there."),Wz.forEach(t),S_=f(e),ah=r(e,"P",{});var Rz=i(ah);rA=n(Rz,"When making contributions that A is better than B, please try to include a reproducible benchmark and/or a link to the source of that information (unless it comes directly from you)."),Rz.forEach(t),this.h()},h(){c(y,"name","hf:doc:metadata"),c(y,"content",JSON.stringify(sN)),c(A,"id","performance-and-scalability-how-to-fit-a-bigger-model-and-train-it-faster"),c(A,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(A,"href","#performance-and-scalability-how-to-fit-a-bigger-model-and-train-it-faster"),c(E,"class","relative group"),c(Uo,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer"),c($s,"href","https://huggingface.co/docs/accelerate/"),c($s,"rel","nofollow"),c(As,"href","https://huggingface.co/docs/datasets/package_reference/main_classes.html?highlight=dataset#datasets.Dataset"),c(As,"rel","nofollow"),c(So,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer"),c(kt,"id","load-model"),c(kt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(kt,"href","#load-model"),c(Pe,"class","relative group"),c(xt,"id","vanilla-training"),c(xt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(xt,"href","#vanilla-training"),c($e,"class","relative group"),c(Mo,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer"),c(Gt,"id","gradient-accumulation"),c(Gt,"class","header-link block 
pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Gt,"href","#gradient-accumulation"),c(ke,"class","relative group"),c(qo,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer"),c(Vo,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.TrainingArguments"),c(It,"id","gradient-checkpointing"),c(It,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(It,"href","#gradient-checkpointing"),c(Ae,"class","relative group"),c(Vs,"href","https://medium.com/tensorflow/fitting-larger-networks-into-memory-583e3c758ff9"),c(Vs,"rel","nofollow"),c(Wo,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer"),c(Ro,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.TrainingArguments"),c(St,"id","fp16-training"),c(St,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(St,"href","#fp16-training"),c(Te,"class","relative group"),c(zt,"id","optimizer"),c(zt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(zt,"href","#optimizer"),c(je,"class","relative group"),c(Nt,"id","adafactor"),c(Nt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Nt,"href","#adafactor"),c(xe,"class","relative group"),c(Lt,"id","8bit-adam"),c(Lt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Lt,"href","#8bit-adam"),c(De,"class","relative group"),c(tl,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer"),c(al,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer"),c(rr,"href","https://github.com/facebookresearch/bitsandbytes"),c(rr,"rel","nofollow"),PA(ll.src,$A="https://huggingface.co/datasets/lvwerra/repo-images/raw/main/gpu-memory-savings.png")||c(ll,"src",$A),c(ll,"alt","png"),c(Bt,"id","using-accelerate"),c(Bt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Bt,"href","#using-accelerate"),c(Ge,"class","relative group"),c(nl,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer"),c(pl,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.TrainingArguments"),c(mr,"href","https://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader"),c(mr,"rel","nofollow"),c(fl,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.gradient_checkpointing_enable"),c(dr,"href","https://huggingface.co/docs/accelerate/accelerator.html#accelerate.Accelerator"),c(dr,"rel","nofollow"),c(ur,"href","https://huggingface.co/docs/accelerate/accelerator.html#accelerate.Accelerator.prepare"),c(ur,"rel","nofollow"),c(Vt,"id","how-to-scale"),c(Vt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full"),c(Vt,"href","#how-to-scale"),c(Ie,"class","relative group"),c(Ft,"id","multigpu-training"),c(Ft,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Ft,"href","#multigpu-training"),c(Ue,"class","relative group"),c(gl,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer"),c(_l,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer"),c(Wt,"id","what-if-my-model-still-does-not-fit"),c(Wt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Wt,"href","#what-if-my-model-still-does-not-fit"),c(Se,"class","relative group"),c(yl,"href","parallelism"),c(Xt,"id","further-discussions"),c(Xt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Xt,"href","#further-discussions"),c(ze,"class","relative group"),c(Yt,"id","faster-training"),c(Yt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Yt,"href","#faster-training"),c(Ne,"class","relative group"),c(Jt,"id","bigger-models"),c(Jt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Jt,"href","#bigger-models"),c(Ce,"class","relative group"),c(xl,"href","deepspeed#nvme-support"),c(Zt,"id","hardware"),c(Zt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Zt,"href","#hardware"),c(Le,"class","relative group"),c(Kt,"id","power-and-cooling"),c(Kt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Kt,"href","#power-and-cooling"),c(Me,"class","relative group"),c(ea,"id","multigpu-connectivity"),c(ea,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(ea,"href","#multigpu-connectivity"),c(Oe,"class","relative group"),c(ta,"id","nvlink"),c(ta,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(ta,"href","#nvlink"),c(Be,"class","relative group"),c(Cr,"href","https://en.wikipedia.org/wiki/NVLink"),c(Cr,"rel","nofollow"),c(Lr,"href","https://www.nvidia.com/content/dam/en-zz/Solutions/geforce/ampere/pdf/NVIDIA-ampere-GA102-GPU-Architecture-Whitepaper-V1.pdf"),c(Lr,"rel","nofollow"),c(Jl,"align","right"),c(Zl,"align","right"),c(Kl,"align","right"),c(ia,"id","software"),c(ia,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(ia,"href","#software"),c(qe,"class","relative group"),c(oa,"id","model-scalability"),c(oa,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full"),c(oa,"href","#model-scalability"),c(Ve,"class","relative group"),c(sn,"href","parallelism"),c(la,"id","anatomy-of-models-operations"),c(la,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(la,"href","#anatomy-of-models-operations"),c(He,"class","relative group"),c(ti,"href","https://arxiv.org/abs/2007.00072"),c(ti,"rel","nofollow"),c(na,"id","anatomy-of-models-memory"),c(na,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(na,"href","#anatomy-of-models-memory"),c(Fe,"class","relative group"),c(pa,"id","model-weights"),c(pa,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(pa,"href","#model-weights"),c(We,"class","relative group"),c(fa,"id","optimizer-states"),c(fa,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(fa,"href","#optimizer-states"),c(Re,"class","relative group"),c(ii,"href","https://github.com/facebookresearch/bitsandbytes"),c(ii,"rel","nofollow"),c(ca,"id","gradients"),c(ca,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(ca,"href","#gradients"),c(Xe,"class","relative group"),c(ma,"id","forward-activations"),c(ma,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(ma,"href","#forward-activations"),c(Ye,"class","relative group"),c(da,"id","temporary-memory"),c(da,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(da,"href","#temporary-memory"),c(Qe,"class","relative group"),c(ua,"id","functionalityspecific-memory"),c(ua,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(ua,"href","#functionalityspecific-memory"),c(Je,"class","relative group"),c(va,"id","forward-vs-backward-execution-speed"),c(va,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(va,"href","#forward-vs-backward-execution-speed"),c(Ze,"class","relative group"),c(ga,"id","floating-data-types"),c(ga,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(ga,"href","#floating-data-types"),c(Ke,"class","relative group"),PA(yn.src,kA="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/tf32-bf16-fp16-fp32.png")||c(yn,"src",kA),c(yn,"alt","data types"),c(ui,"href","https://developer.nvidia.com/blog/accelerating-ai-training-with-tf32-tensor-cores/"),c(ui,"rel","nofollow"),c(ba,"id","fp16"),c(ba,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 
with-hover:group-hover:opacity-100 with-hover:right-full"),c(ba,"href","#fp16"),c(et,"class","relative group"),c(Tn,"align","right"),c(jn,"align","right"),c(xn,"align","right"),c(Dn,"align","right"),c(Gn,"align","right"),c(In,"align","right"),c(Un,"align","right"),c(Sn,"align","right"),c(zn,"align","right"),c(Nn,"align","right"),c(Cn,"align","right"),c(Ln,"align","right"),c(Mn,"align","right"),c(On,"align","right"),c(Bn,"align","right"),c(qn,"align","right"),c(Vn,"align","right"),c(Hn,"align","right"),c(Fn,"align","right"),c(Wn,"align","right"),c(_i,"href","https://github.com/bigscience-workshop/Megatron-DeepSpeed"),c(_i,"rel","nofollow"),c(bi,"href","https://docs.fast.ai/callback.fp16.html#A-little-bit-of-theory"),c(bi,"rel","nofollow"),c(yi,"href","https://spell.ml/blog/mixed-precision-training-with-pytorch-Xuk7YBEAACAASJam"),c(yi,"rel","nofollow"),c(Ei,"href","https://github.com/huggingface/transformers/issues/14608#issuecomment-1004390803"),c(Ei,"rel","nofollow"),c(Pi,"href","https://github.com/huggingface/transformers/issues/15026#issuecomment-1004543189"),c(Pi,"rel","nofollow"),c(Ta,"id","fp16-caching"),c(Ta,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Ta,"href","#fp16-caching"),c(tt,"class","relative group"),c(ki,"href","https://discuss.pytorch.org/t/autocast-and-torch-no-grad-unexpected-behaviour/93475/3"),c(ki,"rel","nofollow"),c(ja,"id","fp16-inference"),c(ja,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(ja,"href","#fp16-inference"),c(at,"class","relative group"),c(Da,"id","bf16"),c(Da,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Da,"href","#bf16"),c(st,"class","relative group"),c(Gi,"href","https://github.com/huggingface/transformers/issues/14608#issuecomment-1004390803"),c(Gi,"rel","nofollow"),c(Ii,"href","https://github.com/huggingface/transformers/issues/15026#issuecomment-1004543189"),c(Ii,"rel","nofollow"),c(Ia,"id","bf16-inference"),c(Ia,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Ia,"href","#bf16-inference"),c(rt,"class","relative group"),c(pp,"href","#fp16-inference"),c(za,"id","tf32"),c(za,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(za,"href","#tf32"),c(it,"class","relative group"),c(Ni,"href","https://developer.nvidia.com/blog/accelerating-ai-training-with-tf32-tensor-cores/"),c(Ni,"rel","nofollow"),c(Ci,"href","https://github.com/huggingface/transformers/issues/14608#issuecomment-1004390803"),c(Ci,"rel","nofollow"),c(Li,"href","https://github.com/huggingface/transformers/issues/15026#issuecomment-1004543189"),c(Li,"rel","nofollow"),c(La,"id","gradient-accumulation"),c(La,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(La,"href","#gradient-accumulation"),c(ot,"class","relative 
group"),c(Oi,"href","https://github.com/huggingface/transformers/issues/14608#issuecomment-1004392537"),c(Oi,"rel","nofollow"),c(Bi,"href","https://github.com/huggingface/transformers/issues/15026#issuecomment-1004592231"),c(Bi,"rel","nofollow"),c(Oa,"id","gradient-checkpointing"),c(Oa,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Oa,"href","#gradient-checkpointing"),c(lt,"class","relative group"),c(Vi,"href","https://arxiv.org/abs/1604.06174"),c(Vi,"rel","nofollow"),c(qa,"id","batch-sizes"),c(qa,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(qa,"href","#batch-sizes"),c(nt,"class","relative group"),c(Wi,"href","https://docs.nvidia.com/deeplearning/performance/dl-performance-fully-connected/index.html#input-features"),c(Wi,"rel","nofollow"),c(Ri,"href","https://docs.nvidia.com/deeplearning/performance/dl-performance-fully-connected/index.html#batch-size"),c(Ri,"rel","nofollow"),c(Yi,"href","https://docs.nvidia.com/deeplearning/performance/dl-performance-matrix-multiplication/index.html#requirements-tc"),c(Yi,"rel","nofollow"),c(Qi,"href","https://docs.nvidia.com/deeplearning/performance/dl-performance-matrix-multiplication/index.html#dim-quantization"),c(Qi,"rel","nofollow"),c(gp,"href","#gradient-accumulation"),c(Ji,"href","https://github.com/huggingface/transformers/issues/14608#issuecomment-1004392537"),c(Ji,"rel","nofollow"),c(Zi,"href","https://github.com/huggingface/transformers/issues/15026#issuecomment-1005033957"),c(Zi,"rel","nofollow"),c(Ha,"id","dp-vs-ddp"),c(Ha,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Ha,"href","#dp-vs-ddp"),c(pt,"class","relative group"),c(to,"href","https://pytorch.org/docs/master/notes/ddp.html"),c(to,"rel","nofollow"),c(io,"href","https://pytorch.org/docs/master/generated/torch.nn.DataParallel.html"),c(io,"rel","nofollow"),c(lo,"href","https://pytorch.org/docs/master/distributed.html"),c(lo,"rel","nofollow"),c(no,"href","https://www.telesens.co/2019/04/04/distributed-data-parallel-training-using-pytorch-on-aws/"),c(no,"rel","nofollow"),c(jp,"align","left"),c(xp,"align","right"),c(Dp,"align","left"),c(Gp,"align","right"),c(Ip,"align","left"),c(Up,"align","right"),c(Sp,"align","left"),c(zp,"align","right"),c(Qa,"id","dataloader"),c(Qa,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Qa,"href","#dataloader"),c(vt,"class","relative group"),c(Za,"id","faster-optimizer"),c(Za,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Za,"href","#faster-optimizer"),c(wt,"class","relative group"),c(mo,"href","https://github.com/huggingface/transformers/issues/9965"),c(mo,"rel","nofollow"),c(Ka,"id","sparsity"),c(Ka,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Ka,"href","#sparsity"),c(_t,"class","relative group"),c(es,"id","mixture-of-experts"),c(es,"class","header-link block pr-1.5 text-lg no-hover:hidden 
with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(es,"href","#mixture-of-experts"),c(bt,"class","relative group"),PA(Rp.src,AA="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/perf-moe-transformer.png")||c(Rp,"src",AA),c(Rp,"alt","MoE Transformer 2x block"),c(wo,"href","https://ai.googleblog.com/2021/12/more-efficient-in-context-learning-with.html"),c(wo,"rel","nofollow"),c(go,"href","https://arxiv.org/abs/2006.16668"),c(go,"rel","nofollow"),c(_o,"href","https://arxiv.org/abs/2101.03961"),c(_o,"rel","nofollow"),c(bo,"href","https://ai.googleblog.com/2021/12/more-efficient-in-context-learning-with.html"),c(bo,"rel","nofollow"),c(yo,"href","https://arxiv.org/abs/2201.05596"),c(yo,"rel","nofollow"),c(Eo,"href","https://www.deepspeed.ai/tutorials/mixture-of-experts/"),c(Eo,"rel","nofollow"),c(Po,"href","https://www.microsoft.com/en-us/research/blog/deepspeed-powers-8x-larger-moe-model-training-with-high-performance/"),c(Po,"rel","nofollow"),c($o,"href","https://www.microsoft.com/en-us/research/publication/scalable-and-efficient-moe-training-for-multitask-multilingual-models/"),c($o,"rel","nofollow"),c(ko,"href","https://www.deepspeed.ai/news/2021/12/09/deepspeed-moe-nlg.html"),c(ko,"rel","nofollow"),c(Zp,"href","Thttps://github.com/microsoft/Megatron-DeepSpeed/tree/moe-training"),c(as,"id","efficient-software-prebuilds"),c(as,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(as,"href","#efficient-software-prebuilds"),c(yt,"class","relative group"),c(To,"href","https://pytorch.org/get-started/locally/#start-locally"),c(To,"rel","nofollow"),c(jo,"href","https://docs.nvidia.com/deeplearning/frameworks/pytorch-release-notes/"),c(jo,"rel","nofollow"),c(xo,"href","https://ngc.nvidia.com/catalog/containers/nvidia:pytorch"),c(xo,"rel","nofollow"),c(is,"id","contribute"),c(is,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(is,"href","#contribute"),c(Et,"class","relative 
group")},m(e,o){a(document.head,y),p(e,k,o),p(e,E,o),a(E,A),a(A,Th),u(Es,Th,null),a(E,zy),a(E,jh),a(jh,Ny),p(e,Zd,o),p(e,Io,o),a(Io,xh),a(xh,Dh),a(Dh,Cy),p(e,Kd,o),u(Ps,e,o),p(e,eu,o),p(e,te,o),a(te,Ly),a(te,Uo),a(Uo,My),a(te,Oy),a(te,$s),a($s,By),a(te,qy),p(e,tu,o),u(ks,e,o),p(e,au,o),p(e,ae,o),a(ae,Vy),a(ae,Gh),a(Gh,Hy),a(ae,Fy),a(ae,Ih),a(Ih,Wy),a(ae,Ry),p(e,su,o),p(e,Pt,o),a(Pt,Xy),a(Pt,As),a(As,Uh),a(Uh,Yy),a(Pt,Qy),p(e,ru,o),u(Ts,e,o),p(e,iu,o),p(e,$t,o),a($t,Jy),a($t,So),a(So,Zy),a($t,Ky),p(e,ou,o),u(js,e,o),p(e,lu,o),p(e,zo,o),a(zo,e2),p(e,nu,o),u(xs,e,o),p(e,pu,o),p(e,No,o),a(No,t2),p(e,hu,o),u(Ds,e,o),p(e,fu,o),p(e,Co,o),a(Co,a2),p(e,cu,o),p(e,Pe,o),a(Pe,kt),a(kt,Sh),u(Gs,Sh,null),a(Pe,s2),a(Pe,zh),a(zh,r2),p(e,mu,o),p(e,At,o),a(At,i2),a(At,Nh),a(Nh,o2),a(At,l2),p(e,du,o),u(Is,e,o),p(e,uu,o),p(e,Tt,o),a(Tt,n2),a(Tt,Ch),a(Ch,p2),a(Tt,h2),p(e,vu,o),u(Us,e,o),p(e,wu,o),u(Ss,e,o),p(e,gu,o),p(e,Lo,o),a(Lo,f2),p(e,_u,o),u(zs,e,o),p(e,bu,o),u(jt,e,o),p(e,yu,o),p(e,$e,o),a($e,xt),a(xt,Lh),u(Ns,Lh,null),a($e,c2),a($e,Mh),a(Mh,m2),p(e,Eu,o),p(e,Dt,o),a(Dt,d2),a(Dt,Mo),a(Mo,u2),a(Dt,v2),p(e,Pu,o),u(Cs,e,o),p(e,$u,o),u(Ls,e,o),p(e,ku,o),p(e,Oo,o),a(Oo,w2),p(e,Au,o),p(e,ke,o),a(ke,Gt),a(Gt,Oh),u(Ms,Oh,null),a(ke,g2),a(ke,Bh),a(Bh,_2),p(e,Tu,o),p(e,Bo,o),a(Bo,b2),p(e,ju,o),p(e,N,o),a(N,y2),a(N,qo),a(qo,E2),a(N,P2),a(N,qh),a(qh,$2),a(N,k2),a(N,Vo),a(Vo,A2),a(N,T2),p(e,xu,o),u(Os,e,o),p(e,Du,o),u(Bs,e,o),p(e,Gu,o),p(e,G,o),a(G,j2),a(G,Vh),a(Vh,x2),a(G,D2),a(G,Hh),a(Hh,G2),a(G,I2),a(G,Fh),a(Fh,U2),a(G,S2),a(G,Wh),a(Wh,z2),a(G,N2),p(e,Iu,o),p(e,Ho,o),a(Ho,C2),p(e,Uu,o),p(e,Ae,o),a(Ae,It),a(It,Rh),u(qs,Rh,null),a(Ae,L2),a(Ae,Xh),a(Xh,M2),p(e,Su,o),p(e,Fo,o),a(Fo,O2),p(e,zu,o),p(e,Ut,o),a(Ut,B2),a(Ut,Vs),a(Vs,q2),a(Ut,V2),p(e,Nu,o),p(e,se,o),a(se,H2),a(se,Wo),a(Wo,F2),a(se,W2),a(se,Ro),a(Ro,R2),a(se,X2),p(e,Cu,o),u(Hs,e,o),p(e,Lu,o),u(Fs,e,o),p(e,Mu,o),p(e,Xo,o),a(Xo,Y2),p(e,Ou,o),p(e,Te,o),a(Te,St),a(St,Yh),u(Ws,Yh,null),a(Te,Q2),a(Te,Qh),a(Qh,J2),p(e,Bu,o),p(e,re,o),a(re,Z2),a(re,Jh),a(Jh,K2),a(re,e0),a(re,Zh),a(Zh,t0),a(re,a0),p(e,qu,o),u(Rs,e,o),p(e,Vu,o),u(Xs,e,o),p(e,Hu,o),p(e,Yo,o),a(Yo,s0),p(e,Fu,o),u(Ys,e,o),p(e,Wu,o),u(Qs,e,o),p(e,Ru,o),p(e,Qo,o),a(Qo,r0),p(e,Xu,o),p(e,je,o),a(je,zt),a(zt,Kh),u(Js,Kh,null),a(je,i0),a(je,ef),a(ef,o0),p(e,Yu,o),p(e,Jo,o),a(Jo,l0),p(e,Qu,o),p(e,xe,o),a(xe,Nt),a(Nt,tf),u(Zs,tf,null),a(xe,n0),a(xe,af),a(af,p0),p(e,Ju,o),p(e,Ct,o),a(Ct,h0),a(Ct,sf),a(sf,f0),a(Ct,c0),p(e,Zu,o),u(Ks,e,o),p(e,Ku,o),u(er,e,o),p(e,ev,o),p(e,Zo,o),a(Zo,m0),p(e,tv,o),u(tr,e,o),p(e,av,o),u(ar,e,o),p(e,sv,o),p(e,Ko,o),a(Ko,d0),p(e,rv,o),p(e,De,o),a(De,Lt),a(Lt,rf),u(sr,rf,null),a(De,u0),a(De,of),a(of,v0),p(e,iv,o),p(e,el,o),a(el,w0),p(e,ov,o),p(e,I,o),a(I,g0),a(I,tl),a(tl,_0),a(I,b0),a(I,al),a(al,y0),a(I,E0),a(I,rr),a(rr,P0),a(I,$0),a(I,lf),a(lf,k0),a(I,A0),p(e,lv,o),p(e,sl,o),a(sl,T0),p(e,nv,o),u(Mt,e,o),p(e,pv,o),u(ir,e,o),p(e,hv,o),p(e,Ot,o),a(Ot,j0),a(Ot,nf),a(nf,x0),a(Ot,D0),p(e,fv,o),u(or,e,o),p(e,cv,o),u(lr,e,o),p(e,mv,o),p(e,rl,o),a(rl,G0),p(e,dv,o),u(nr,e,o),p(e,uv,o),u(pr,e,o),p(e,vv,o),p(e,il,o),a(il,I0),p(e,wv,o),p(e,ol,o),a(ol,ll),p(e,gv,o),p(e,Ge,o),a(Ge,Bt),a(Bt,pf),u(hr,pf,null),a(Ge,U0),a(Ge,hf),a(hf,S0),p(e,_v,o),p(e,ie,o),a(ie,z0),a(ie,nl),a(nl,N0),a(ie,C0),a(ie,pl),a(pl,L0),a(ie,M0),p(e,bv,o),u(fr,e,o),p(e,yv,o),p(e,hl,o),a(hl,O0),p(e,Ev,o),u(cr,e,o),p(e,Pv,o),p(e,T,o),a(T,B0),a(T,mr),a(mr,ff),a(ff,q0),a(T,V0),a(T,fl),a(fl,H0),a(T,F0),a(T,dr),a(dr,cf),a(cf,W0),a(T,R0),a(T,mf),a(mf,X0),a(T,Y0),a(T,ur),a(ur,df),a(df,Q0),a(T,J0),p(e,$v,o),p(e,qt,o),a(qt,Z0),a(qt,uf),a(uf,
K0),a(qt,e3),p(e,kv,o),u(vr,e,o),p(e,Av,o),p(e,cl,o),a(cl,t3),p(e,Tv,o),p(e,ml,o),a(ml,a3),p(e,jv,o),p(e,Ie,o),a(Ie,Vt),a(Vt,vf),u(wr,vf,null),a(Ie,s3),a(Ie,wf),a(wf,r3),p(e,xv,o),p(e,dl,o),a(dl,i3),p(e,Dv,o),p(e,Ht,o),a(Ht,gf),a(gf,o3),a(Ht,l3),a(Ht,_f),a(_f,n3),p(e,Gv,o),p(e,ul,o),a(ul,p3),p(e,Iv,o),p(e,vl,o),a(vl,h3),p(e,Uv,o),p(e,wl,o),a(wl,f3),p(e,Sv,o),p(e,Ue,o),a(Ue,Ft),a(Ft,bf),u(gr,bf,null),a(Ue,c3),a(Ue,yf),a(yf,m3),p(e,zv,o),p(e,C,o),a(C,d3),a(C,Ef),a(Ef,u3),a(C,v3),a(C,Pf),a(Pf,w3),a(C,g3),a(C,$f),a($f,_3),a(C,b3),p(e,Nv,o),p(e,L,o),a(L,y3),a(L,gl),a(gl,E3),a(L,P3),a(L,_l),a(_l,$3),a(L,k3),a(L,kf),a(kf,A3),a(L,T3),p(e,Cv,o),u(_r,e,o),p(e,Lv,o),p(e,bl,o),a(bl,j3),p(e,Mv,o),p(e,Se,o),a(Se,Wt),a(Wt,Af),u(br,Af,null),a(Se,x3),a(Se,Tf),a(Tf,D3),p(e,Ov,o),p(e,Rt,o),a(Rt,G3),a(Rt,yl),a(yl,I3),a(Rt,U3),p(e,Bv,o),p(e,El,o),a(El,S3),p(e,qv,o),p(e,ze,o),a(ze,Xt),a(Xt,jf),u(yr,jf,null),a(ze,z3),a(ze,xf),a(xf,N3),p(e,Vv,o),p(e,Pl,o),a(Pl,C3),p(e,Hv,o),p(e,Ne,o),a(Ne,Yt),a(Yt,Df),u(Er,Df,null),a(Ne,L3),a(Ne,Gf),a(Gf,M3),p(e,Fv,o),p(e,$l,o),a($l,O3),p(e,Wv,o),p(e,kl,o),a(kl,Al),a(Al,B3),a(Al,Pr),a(Pr,If),a(If,q3),a(Pr,V3),a(Pr,Uf),a(Uf,H3),p(e,Rv,o),p(e,Tl,o),a(Tl,F3),p(e,Xv,o),p(e,Qt,o),a(Qt,Sf),a(Sf,W3),a(Qt,R3),a(Qt,zf),a(zf,X3),p(e,Yv,o),p(e,Ce,o),a(Ce,Jt),a(Jt,Nf),u($r,Nf,null),a(Ce,Y3),a(Ce,Cf),a(Cf,Q3),p(e,Qv,o),p(e,jl,o),a(jl,J3),p(e,Jv,o),p(e,oe,o),a(oe,Lf),a(Lf,Z3),a(oe,K3),a(oe,Mf),a(Mf,e6),a(oe,t6),a(oe,kr),a(kr,a6),a(kr,xl),a(xl,s6),a(kr,r6),p(e,Zv,o),p(e,Dl,o),a(Dl,i6),p(e,Kv,o),p(e,P,o),a(P,Of),a(Of,o6),a(P,l6),a(P,Bf),a(Bf,n6),a(P,p6),a(P,qf),a(qf,h6),a(P,f6),a(P,Vf),a(Vf,c6),a(P,m6),a(P,Hf),a(Hf,d6),a(P,u6),a(P,Ff),a(Ff,v6),a(P,w6),a(P,Wf),a(Wf,g6),p(e,ew,o),p(e,Le,o),a(Le,Zt),a(Zt,Rf),u(Ar,Rf,null),a(Le,_6),a(Le,Xf),a(Xf,b6),p(e,tw,o),p(e,Me,o),a(Me,Kt),a(Kt,Yf),u(Tr,Yf,null),a(Me,y6),a(Me,Qf),a(Qf,E6),p(e,aw,o),p(e,Gl,o),a(Gl,P6),p(e,sw,o),p(e,jr,o),a(jr,Jf),a(Jf,$6),a(jr,k6),p(e,rw,o),p(e,Il,o),a(Il,A6),p(e,iw,o),p(e,Ul,o),a(Ul,T6),p(e,ow,o),p(e,Sl,o),a(Sl,j6),p(e,lw,o),p(e,zl,o),a(zl,x6),p(e,nw,o),p(e,Nl,o),a(Nl,D6),p(e,pw,o),p(e,Cl,o),a(Cl,G6),p(e,hw,o),p(e,xr,o),a(xr,Zf),a(Zf,I6),a(xr,U6),p(e,fw,o),p(e,Ll,o),a(Ll,S6),p(e,cw,o),p(e,Ml,o),a(Ml,z6),p(e,mw,o),p(e,Oe,o),a(Oe,ea),a(ea,Kf),u(Dr,Kf,null),a(Oe,N6),a(Oe,ec),a(ec,C6),p(e,dw,o),p(e,Ol,o),a(Ol,L6),p(e,uw,o),p(e,Bl,o),a(Bl,M6),p(e,vw,o),u(Gr,e,o),p(e,ww,o),p(e,ql,o),a(ql,O6),p(e,gw,o),p(e,Vl,o),a(Vl,B6),p(e,_w,o),u(Ir,e,o),p(e,bw,o),p(e,Hl,o),a(Hl,q6),p(e,yw,o),u(Ur,e,o),p(e,Ew,o),p(e,Fl,o),a(Fl,V6),p(e,Pw,o),u(Sr,e,o),p(e,$w,o),p(e,le,o),a(le,H6),a(le,tc),a(tc,F6),a(le,W6),a(le,ac),a(ac,R6),a(le,X6),p(e,kw,o),p(e,Wl,o),a(Wl,Y6),p(e,Aw,o),p(e,Rl,o),a(Rl,Q6),p(e,Tw,o),p(e,Be,o),a(Be,ta),a(ta,sc),u(zr,sc,null),a(Be,J6),a(Be,rc),a(rc,Z6),p(e,jw,o),p(e,Nr,o),a(Nr,Cr),a(Cr,K6),a(Nr,eE),p(e,xw,o),p(e,aa,o),a(aa,tE),a(aa,Lr),a(Lr,aE),a(aa,sE),p(e,Dw,o),p(e,Xl,o),a(Xl,ic),a(ic,rE),p(e,Gw,o),p(e,M,o),a(M,iE),a(M,oc),a(oc,oE),a(M,lE),a(M,lc),a(lc,nE),a(M,pE),a(M,nc),a(nc,hE),a(M,fE),p(e,Iw,o),p(e,Yl,o),a(Yl,cE),p(e,Uw,o),p(e,Ql,o),a(Ql,mE),p(e,Sw,o),p(e,sa,o),a(sa,pc),a(pc,Mr),a(Mr,hc),a(hc,dE),a(Mr,uE),a(Mr,Jl),a(Jl,vE),a(sa,wE),a(sa,Or),a(Or,Br),a(Br,fc),a(fc,gE),a(Br,_E),a(Br,Zl),a(Zl,bE),a(Or,yE),a(Or,qr),a(qr,cc),a(cc,EE),a(qr,PE),a(qr,Kl),a(Kl,$E),p(e,zw,o),p(e,en,o),a(en,kE),p(e,Nw,o),p(e,ra,o),a(ra,AE),a(ra,mc),a(mc,TE),a(ra,jE),p(e,Cw,o),p(e,tn,o),a(tn,xE),p(e,Lw,o),u(Vr,e,o),p(e,Mw,o),p(e,x,o),a(x,DE),a(x,dc),a(dc,GE),a(x,IE),a(x,uc),a(uc,UE),a(x,SE),a(x,vc),a(vc,zE),a(x,NE),a(x,wc),a(wc,CE),a(x,LE),a(x,gc),a(gc,ME),p
(e,Ow,o),p(e,qe,o),a(qe,ia),a(ia,_c),u(Hr,_c,null),a(qe,OE),a(qe,bc),a(bc,BE),p(e,Bw,o),p(e,Ve,o),a(Ve,oa),a(oa,yc),u(Fr,yc,null),a(Ve,qE),a(Ve,Ec),a(Ec,VE),p(e,qw,o),p(e,an,o),a(an,HE),p(e,Vw,o),p(e,Wr,o),a(Wr,FE),a(Wr,sn),a(sn,WE),p(e,Hw,o),p(e,He,o),a(He,la),a(la,Pc),u(Rr,Pc,null),a(He,RE),a(He,$c),a($c,XE),p(e,Fw,o),p(e,rn,o),a(rn,YE),p(e,Ww,o),p(e,ne,o),a(ne,Xr),a(Xr,kc),a(kc,Ac),a(Ac,QE),a(Xr,JE),a(Xr,Yr),a(Yr,ZE),a(Yr,Tc),a(Tc,KE),a(Yr,eP),a(ne,tP),a(ne,Qr),a(Qr,jc),a(jc,xc),a(xc,aP),a(Qr,sP),a(Qr,Jr),a(Jr,rP),a(Jr,Dc),a(Dc,iP),a(Jr,oP),a(ne,lP),a(ne,Zr),a(Zr,Gc),a(Gc,Ic),a(Ic,nP),a(Zr,pP),a(Zr,Kr),a(Kr,hP),a(Kr,Uc),a(Uc,fP),a(Kr,cP),p(e,Rw,o),p(e,on,o),a(on,mP),p(e,Xw,o),p(e,ei,o),a(ei,dP),a(ei,ti),a(ti,uP),p(e,Yw,o),p(e,Fe,o),a(Fe,na),a(na,Sc),u(ai,Sc,null),a(Fe,vP),a(Fe,zc),a(zc,wP),p(e,Qw,o),p(e,ln,o),a(ln,gP),p(e,Jw,o),p(e,j,o),a(j,Nc),a(Nc,_P),a(j,bP),a(j,Cc),a(Cc,yP),a(j,EP),a(j,Lc),a(Lc,PP),a(j,$P),a(j,Mc),a(Mc,kP),a(j,AP),a(j,Oc),a(Oc,TP),a(j,jP),a(j,Bc),a(Bc,xP),p(e,Zw,o),p(e,nn,o),a(nn,DP),p(e,Kw,o),p(e,pn,o),a(pn,GP),p(e,eg,o),p(e,hn,o),a(hn,IP),p(e,tg,o),p(e,We,o),a(We,pa),a(pa,qc),u(si,qc,null),a(We,UP),a(We,Vc),a(Vc,SP),p(e,ag,o),p(e,ha,o),a(ha,Hc),a(Hc,zP),a(ha,NP),a(ha,Fc),a(Fc,CP),p(e,sg,o),p(e,Re,o),a(Re,fa),a(fa,Wc),u(ri,Wc,null),a(Re,LP),a(Re,Rc),a(Rc,MP),p(e,rg,o),p(e,pe,o),a(pe,Xc),a(Xc,OP),a(pe,BP),a(pe,fn),a(fn,qP),a(fn,ii),a(ii,VP),a(pe,HP),a(pe,Yc),a(Yc,FP),p(e,ig,o),p(e,Xe,o),a(Xe,ca),a(ca,Qc),u(oi,Qc,null),a(Xe,WP),a(Xe,Jc),a(Jc,RP),p(e,og,o),p(e,cn,o),a(cn,Zc),a(Zc,XP),p(e,lg,o),p(e,Ye,o),a(Ye,ma),a(ma,Kc),u(li,Kc,null),a(Ye,YP),a(Ye,em),a(em,QP),p(e,ng,o),p(e,mn,o),a(mn,tm),a(tm,JP),p(e,pg,o),p(e,dn,o),a(dn,ZP),p(e,hg,o),p(e,Qe,o),a(Qe,da),a(da,am),u(ni,am,null),a(Qe,KP),a(Qe,sm),a(sm,e5),p(e,fg,o),p(e,un,o),a(un,t5),p(e,cg,o),p(e,Je,o),a(Je,ua),a(ua,rm),u(pi,rm,null),a(Je,a5),a(Je,im),a(im,s5),p(e,mg,o),p(e,vn,o),a(vn,r5),p(e,dg,o),p(e,Ze,o),a(Ze,va),a(va,om),u(hi,om,null),a(Ze,i5),a(Ze,wa),a(wa,lm),a(lm,o5),a(wa,l5),a(wa,nm),a(nm,n5),a(wa,p5),p(e,ug,o),p(e,wn,o),a(wn,h5),p(e,vg,o),p(e,Ke,o),a(Ke,ga),a(ga,pm),u(fi,pm,null),a(Ke,f5),a(Ke,hm),a(hm,c5),p(e,wg,o),p(e,gn,o),a(gn,m5),p(e,gg,o),p(e,O,o),a(O,ci),a(ci,d5),a(ci,fm),a(fm,u5),a(ci,v5),a(O,w5),a(O,mi),a(mi,g5),a(mi,cm),a(cm,_5),a(mi,b5),a(O,y5),a(O,di),a(di,E5),a(di,mm),a(mm,P5),a(di,$5),a(O,k5),a(O,dm),a(dm,A5),p(e,_g,o),p(e,_n,o),a(_n,T5),p(e,bg,o),p(e,bn,o),a(bn,yn),p(e,yg,o),p(e,_a,o),a(_a,j5),a(_a,ui),a(ui,x5),a(_a,D5),p(e,Eg,o),p(e,En,o),a(En,G5),p(e,Pg,o),p(e,et,o),a(et,ba),a(ba,um),u(vi,um,null),a(et,I5),a(et,vm),a(vm,U5),p(e,$g,o),p(e,Pn,o),a(Pn,S5),p(e,kg,o),p(e,$n,o),a($n,z5),p(e,Ag,o),p(e,B,o),a(B,wm),a(wm,N5),a(B,C5),a(B,gm),a(gm,L5),a(B,M5),a(B,wi),a(wi,O5),a(wi,_m),a(_m,B5),a(wi,q5),a(B,V5),a(B,bm),a(bm,H5),p(e,Tg,o),p(e,kn,o),a(kn,F5),p(e,jg,o),p(e,ya,o),a(ya,W5),a(ya,ym),a(ym,R5),a(ya,X5),p(e,xg,o),p(e,An,o),a(An,Y5),p(e,Dg,o),u(gi,e,o),p(e,Gg,o),p(e,Ea,o),a(Ea,Q5),a(Ea,Em),a(Em,J5),a(Ea,Z5),p(e,Ig,o),p(e,Pa,o),a(Pa,Pm),a(Pm,R),a(R,Tn),a(Tn,K5),a(R,e4),a(R,jn),a(jn,t4),a(R,a4),a(R,xn),a(xn,s4),a(R,r4),a(R,Dn),a(Dn,i4),a(Pa,o4),a(Pa,X),a(X,Y),a(Y,Gn),a(Gn,l4),a(Y,n4),a(Y,In),a(In,p4),a(Y,h4),a(Y,Un),a(Un,f4),a(Y,c4),a(Y,Sn),a(Sn,m4),a(X,d4),a(X,Q),a(Q,zn),a(zn,u4),a(Q,v4),a(Q,Nn),a(Nn,w4),a(Q,g4),a(Q,Cn),a(Cn,_4),a(Q,b4),a(Q,Ln),a(Ln,y4),a(X,E4),a(X,J),a(J,Mn),a(Mn,P4),a(J,$4),a(J,On),a(On,k4),a(J,A4),a(J,Bn),a(Bn,T4),a(J,j4),a(J,qn),a(qn,x4),a(X,D4),a(X,Z),a(Z,Vn),a(Vn,G4),a(Z,I4),a(Z,Hn),a(Hn,U4),a(Z,S4),a(Z,Fn),a(Fn,z4),a(Z,N4),a(Z,Wn),a(Wn,C4),p(e,Ug,o),p(e,$a,o),a($a,L4),a($a,$
m),a($m,M4),a($a,O4),p(e,Sg,o),p(e,Rn,o),a(Rn,B4),p(e,zg,o),p(e,Xn,o),a(Xn,q4),p(e,Ng,o),p(e,ka,o),a(ka,V4),a(ka,_i),a(_i,H4),a(ka,F4),p(e,Cg,o),p(e,Yn,o),a(Yn,W4),p(e,Lg,o),p(e,Aa,o),a(Aa,Qn),a(Qn,R4),a(Qn,bi),a(bi,X4),a(Aa,Y4),a(Aa,Jn),a(Jn,Q4),a(Jn,yi),a(yi,J4),p(e,Mg,o),p(e,he,o),a(he,Z4),a(he,Ei),a(Ei,K4),a(he,e$),a(he,Pi),a(Pi,t$),a(he,a$),p(e,Og,o),p(e,tt,o),a(tt,Ta),a(Ta,km),u($i,km,null),a(tt,s$),a(tt,Am),a(Am,r$),p(e,Bg,o),p(e,fe,o),a(fe,i$),a(fe,Tm),a(Tm,o$),a(fe,l$),a(fe,ki),a(ki,n$),a(fe,p$),p(e,qg,o),p(e,Zn,o),a(Zn,h$),p(e,Vg,o),p(e,at,o),a(at,ja),a(ja,jm),u(Ai,jm,null),a(at,f$),a(at,xm),a(xm,c$),p(e,Hg,o),p(e,Kn,o),a(Kn,m$),p(e,Fg,o),p(e,ep,o),a(ep,d$),p(e,Wg,o),p(e,tp,o),a(tp,u$),p(e,Rg,o),p(e,xa,o),a(xa,v$),a(xa,Dm),a(Dm,w$),a(xa,g$),p(e,Xg,o),p(e,st,o),a(st,Da),a(Da,Gm),u(Ti,Gm,null),a(st,_$),a(st,Im),a(Im,b$),p(e,Yg,o),p(e,ce,o),a(ce,y$),a(ce,Um),a(Um,E$),a(ce,P$),a(ce,Sm),a(Sm,$$),a(ce,k$),p(e,Qg,o),p(e,ap,o),a(ap,A$),p(e,Jg,o),p(e,sp,o),a(sp,T$),p(e,Zg,o),p(e,rp,o),a(rp,j$),p(e,Kg,o),p(e,ip,o),a(ip,x$),p(e,e1,o),p(e,Ga,o),a(Ga,D$),a(Ga,zm),a(zm,G$),a(Ga,I$),p(e,t1,o),p(e,op,o),a(op,U$),p(e,a1,o),u(ji,e,o),p(e,s1,o),p(e,xi,o),a(xi,S$),a(xi,Nm),a(Nm,z$),p(e,r1,o),p(e,lp,o),a(lp,N$),p(e,i1,o),u(Di,e,o),p(e,o1,o),p(e,np,o),a(np,C$),p(e,l1,o),p(e,me,o),a(me,L$),a(me,Gi),a(Gi,M$),a(me,O$),a(me,Ii),a(Ii,B$),a(me,q$),p(e,n1,o),p(e,rt,o),a(rt,Ia),a(Ia,Cm),u(Ui,Cm,null),a(rt,V$),a(rt,Lm),a(Lm,H$),p(e,p1,o),p(e,Ua,o),a(Ua,F$),a(Ua,pp),a(pp,W$),a(Ua,R$),p(e,h1,o),p(e,Sa,o),a(Sa,X$),a(Sa,Mm),a(Mm,Y$),a(Sa,Q$),p(e,f1,o),p(e,it,o),a(it,za),a(za,Om),u(Si,Om,null),a(it,J$),a(it,Bm),a(Bm,Z$),p(e,c1,o),p(e,hp,o),a(hp,K$),p(e,m1,o),p(e,fp,o),a(fp,ek),p(e,d1,o),u(zi,e,o),p(e,u1,o),p(e,cp,o),a(cp,tk),p(e,v1,o),p(e,Na,o),a(Na,ak),a(Na,Ni),a(Ni,sk),a(Na,rk),p(e,w1,o),p(e,mp,o),a(mp,ik),p(e,g1,o),p(e,q,o),a(q,ok),a(q,qm),a(qm,lk),a(q,nk),a(q,Vm),a(Vm,pk),a(q,hk),a(q,Hm),a(Hm,fk),a(q,ck),p(e,_1,o),p(e,de,o),a(de,mk),a(de,Fm),a(Fm,dk),a(de,uk),a(de,Wm),a(Wm,vk),a(de,wk),p(e,b1,o),p(e,Ca,o),a(Ca,gk),a(Ca,Rm),a(Rm,_k),a(Ca,bk),p(e,y1,o),p(e,ue,o),a(ue,yk),a(ue,Ci),a(Ci,Ek),a(ue,Pk),a(ue,Li),a(Li,$k),a(ue,kk),p(e,E1,o),p(e,ot,o),a(ot,La),a(La,Xm),u(Mi,Xm,null),a(ot,Ak),a(ot,Ym),a(Ym,Tk),p(e,P1,o),p(e,ve,o),a(ve,jk),a(ve,Oi),a(Oi,xk),a(ve,Dk),a(ve,Bi),a(Bi,Gk),a(ve,Ik),p(e,$1,o),p(e,Ma,o),a(Ma,Uk),a(Ma,Qm),a(Qm,Sk),a(Ma,zk),p(e,k1,o),p(e,dp,o),a(dp,Nk),p(e,A1,o),p(e,lt,o),a(lt,Oa),a(Oa,Jm),u(qi,Jm,null),a(lt,Ck),a(lt,Zm),a(Zm,Lk),p(e,T1,o),p(e,up,o),a(up,Mk),p(e,j1,o),p(e,V,o),a(V,Ok),a(V,Vi),a(Vi,Bk),a(V,qk),a(V,Km),a(Km,Vk),a(V,Hk),a(V,ed),a(ed,Fk),a(V,Wk),p(e,x1,o),p(e,vp,o),a(vp,Rk),p(e,D1,o),u(Hi,e,o),p(e,G1,o),p(e,Ba,o),a(Ba,Xk),a(Ba,td),a(td,Yk),a(Ba,Qk),p(e,I1,o),p(e,nt,o),a(nt,qa),a(qa,ad),u(Fi,ad,null),a(nt,Jk),a(nt,sd),a(sd,Zk),p(e,U1,o),p(e,wp,o),a(wp,Kk),p(e,S1,o),p(e,we,o),a(we,e8),a(we,Wi),a(Wi,t8),a(we,a8),a(we,Ri),a(Ri,s8),a(we,r8),p(e,z1,o),p(e,Xi,o),a(Xi,Yi),a(Yi,i8),a(Xi,o8),p(e,N1,o),p(e,Va,o),a(Va,l8),a(Va,Qi),a(Qi,n8),a(Va,p8),p(e,C1,o),p(e,H,o),a(H,h8),a(H,gp),a(gp,f8),a(H,c8),a(H,Ji),a(Ji,m8),a(H,d8),a(H,Zi),a(Zi,u8),a(H,v8),p(e,L1,o),p(e,pt,o),a(pt,Ha),a(Ha,rd),u(Ki,rd,null),a(pt,w8),a(pt,id),a(id,g8),p(e,M1,o),p(e,ht,o),a(ht,od),a(od,_8),a(ht,b8),a(ht,ld),a(ld,y8),a(ht,E8),p(e,O1,o),p(e,Fa,o),a(Fa,nd),a(nd,P8),a(Fa,$8),a(Fa,pd),a(pd,k8),p(e,B1,o),p(e,_p,o),a(_p,A8),p(e,q1,o),p(e,eo,o),a(eo,to),a(to,T8),a(eo,j8),p(e,V1,o),p(e,Wa,o),a(Wa,hd),a(hd,x8),a(Wa,D8),a(Wa,bp),a(bp,G8),a(bp,ao),a(ao,fd),a(fd,I8),a(ao,U8),a(ao,so),a(so,S8),a(so,cd),a(cd,z8),a(so,N8),p(e,H1,o),p(e,ro,o)
,a(ro,io),a(io,C8),a(ro,L8),p(e,F1,o),p(e,yp,o),a(yp,M8),p(e,W1,o),p(e,U,o),a(U,md),a(md,O8),a(U,B8),a(U,dd),a(dd,q8),a(U,V8),a(U,oo),a(oo,H8),a(oo,ud),a(ud,F8),a(oo,W8),a(U,R8),a(U,Ep),a(Ep,X8),a(Ep,vd),a(vd,Y8),a(U,Q8),a(U,wd),a(wd,J8),p(e,R1,o),p(e,Pp,o),a(Pp,Z8),p(e,X1,o),p(e,Ra,o),a(Ra,K8),a(Ra,lo),a(lo,e7),a(Ra,t7),p(e,Y1,o),p(e,$p,o),a($p,a7),p(e,Q1,o),p(e,kp,o),a(kp,s7),p(e,J1,o),p(e,Ap,o),a(Ap,r7),p(e,Z1,o),p(e,Xa,o),a(Xa,i7),a(Xa,no),a(no,o7),a(Xa,l7),p(e,K1,o),p(e,Tp,o),a(Tp,n7),p(e,e_,o),p(e,Ya,o),a(Ya,gd),a(gd,ft),a(ft,jp),a(jp,p7),a(ft,h7),a(ft,_d),a(_d,f7),a(ft,c7),a(ft,xp),a(xp,m7),a(Ya,d7),a(Ya,ct),a(ct,mt),a(mt,Dp),a(Dp,u7),a(mt,v7),a(mt,bd),a(bd,w7),a(mt,g7),a(mt,Gp),a(Gp,_7),a(ct,b7),a(ct,dt),a(dt,Ip),a(Ip,y7),a(dt,E7),a(dt,yd),a(yd,P7),a(dt,$7),a(dt,Up),a(Up,k7),a(ct,A7),a(ct,ut),a(ut,Sp),a(Sp,T7),a(ut,j7),a(ut,Ed),a(Ed,x7),a(ut,D7),a(ut,zp),a(zp,G7),p(e,t_,o),p(e,Np,o),a(Np,I7),p(e,a_,o),p(e,Cp,o),a(Cp,U7),p(e,s_,o),p(e,Lp,o),a(Lp,S7),p(e,r_,o),p(e,Mp,o),a(Mp,z7),p(e,i_,o),p(e,po,o),a(po,Pd),a(Pd,N7),a(po,C7),p(e,o_,o),u(ho,e,o),p(e,l_,o),p(e,D,o),a(D,L7),a(D,$d),a($d,M7),a(D,O7),a(D,kd),a(kd,B7),a(D,q7),a(D,Ad),a(Ad,V7),a(D,H7),a(D,Td),a(Td,F7),a(D,W7),a(D,jd),a(jd,R7),p(e,n_,o),p(e,vt,o),a(vt,Qa),a(Qa,xd),u(fo,xd,null),a(vt,X7),a(vt,Dd),a(Dd,Y7),p(e,p_,o),p(e,Op,o),a(Op,Q7),p(e,h_,o),p(e,Ja,o),a(Ja,Bp),a(Bp,Gd),a(Gd,J7),a(Bp,Z7),a(Ja,K7),a(Ja,qp),a(qp,Id),a(Id,e9),a(qp,t9),p(e,f_,o),p(e,wt,o),a(wt,Za),a(Za,Ud),u(co,Ud,null),a(wt,a9),a(wt,Sd),a(Sd,s9),p(e,c_,o),p(e,gt,o),a(gt,r9),a(gt,zd),a(zd,i9),a(gt,o9),a(gt,mo),a(mo,l9),p(e,m_,o),p(e,_t,o),a(_t,Ka),a(Ka,Nd),u(uo,Nd,null),a(_t,n9),a(_t,Cd),a(Cd,p9),p(e,d_,o),p(e,bt,o),a(bt,es),a(es,Ld),u(vo,Ld,null),a(bt,h9),a(bt,Md),a(Md,f9),p(e,u_,o),p(e,Vp,o),a(Vp,c9),p(e,v_,o),p(e,Hp,o),a(Hp,m9),p(e,w_,o),p(e,Fp,o),a(Fp,d9),p(e,g_,o),p(e,Wp,o),a(Wp,Rp),p(e,__,o),p(e,ts,o),a(ts,u9),a(ts,wo),a(wo,v9),a(ts,w9),p(e,b_,o),p(e,Xp,o),a(Xp,g9),p(e,y_,o),p(e,Yp,o),a(Yp,_9),p(e,E_,o),p(e,Qp,o),a(Qp,b9),p(e,P_,o),p(e,Jp,o),a(Jp,y9),p(e,$_,o),p(e,ge,o),a(ge,Od),a(Od,go),a(go,E9),a(ge,P9),a(ge,Bd),a(Bd,_o),a(_o,$9),a(ge,k9),a(ge,qd),a(qd,bo),a(bo,A9),p(e,k_,o),p(e,$,o),a($,T9),a($,yo),a(yo,j9),a($,x9),a($,Eo),a(Eo,D9),a($,G9),a($,Po),a(Po,I9),a($,U9),a($,$o),a($o,S9),a($,z9),a($,ko),a(ko,N9),a($,C9),a($,Zp),a(Zp,L9),a($,M9),p(e,A_,o),p(e,yt,o),a(yt,as),a(as,Vd),u(Ao,Vd,null),a(yt,O9),a(yt,Hd),a(Hd,B9),p(e,T_,o),p(e,ss,o),a(ss,q9),a(ss,To),a(To,V9),a(ss,H9),p(e,j_,o),p(e,rs,o),a(rs,F9),a(rs,Fd),a(Fd,W9),a(rs,R9),p(e,x_,o),p(e,Kp,o),a(Kp,X9),p(e,D_,o),p(e,_e,o),a(_e,Y9),a(_e,jo),a(jo,Q9),a(_e,J9),a(_e,xo),a(xo,Z9),a(_e,K9),p(e,G_,o),p(e,eh,o),a(eh,eA),p(e,I_,o),p(e,Et,o),a(Et,is),a(is,Wd),u(Do,Wd,null),a(Et,tA),a(Et,Rd),a(Rd,aA),p(e,U_,o),p(e,th,o),a(th,sA),p(e,S_,o),p(e,ah,o),a(ah,rA),z_=!0},p(e,[o]){const Go={};o&2&&(Go.$$scope={dirty:o,ctx:e}),jt.$set(Go);const 
Xd={};o&2&&(Xd.$$scope={dirty:o,ctx:e}),Mt.$set(Xd)},i(e){z_||(v(Es.$$.fragment,e),v(Ps.$$.fragment,e),v(ks.$$.fragment,e),v(Ts.$$.fragment,e),v(js.$$.fragment,e),v(xs.$$.fragment,e),v(Ds.$$.fragment,e),v(Gs.$$.fragment,e),v(Is.$$.fragment,e),v(Us.$$.fragment,e),v(Ss.$$.fragment,e),v(zs.$$.fragment,e),v(jt.$$.fragment,e),v(Ns.$$.fragment,e),v(Cs.$$.fragment,e),v(Ls.$$.fragment,e),v(Ms.$$.fragment,e),v(Os.$$.fragment,e),v(Bs.$$.fragment,e),v(qs.$$.fragment,e),v(Hs.$$.fragment,e),v(Fs.$$.fragment,e),v(Ws.$$.fragment,e),v(Rs.$$.fragment,e),v(Xs.$$.fragment,e),v(Ys.$$.fragment,e),v(Qs.$$.fragment,e),v(Js.$$.fragment,e),v(Zs.$$.fragment,e),v(Ks.$$.fragment,e),v(er.$$.fragment,e),v(tr.$$.fragment,e),v(ar.$$.fragment,e),v(sr.$$.fragment,e),v(Mt.$$.fragment,e),v(ir.$$.fragment,e),v(or.$$.fragment,e),v(lr.$$.fragment,e),v(nr.$$.fragment,e),v(pr.$$.fragment,e),v(hr.$$.fragment,e),v(fr.$$.fragment,e),v(cr.$$.fragment,e),v(vr.$$.fragment,e),v(wr.$$.fragment,e),v(gr.$$.fragment,e),v(_r.$$.fragment,e),v(br.$$.fragment,e),v(yr.$$.fragment,e),v(Er.$$.fragment,e),v($r.$$.fragment,e),v(Ar.$$.fragment,e),v(Tr.$$.fragment,e),v(Dr.$$.fragment,e),v(Gr.$$.fragment,e),v(Ir.$$.fragment,e),v(Ur.$$.fragment,e),v(Sr.$$.fragment,e),v(zr.$$.fragment,e),v(Vr.$$.fragment,e),v(Hr.$$.fragment,e),v(Fr.$$.fragment,e),v(Rr.$$.fragment,e),v(ai.$$.fragment,e),v(si.$$.fragment,e),v(ri.$$.fragment,e),v(oi.$$.fragment,e),v(li.$$.fragment,e),v(ni.$$.fragment,e),v(pi.$$.fragment,e),v(hi.$$.fragment,e),v(fi.$$.fragment,e),v(vi.$$.fragment,e),v(gi.$$.fragment,e),v($i.$$.fragment,e),v(Ai.$$.fragment,e),v(Ti.$$.fragment,e),v(ji.$$.fragment,e),v(Di.$$.fragment,e),v(Ui.$$.fragment,e),v(Si.$$.fragment,e),v(zi.$$.fragment,e),v(Mi.$$.fragment,e),v(qi.$$.fragment,e),v(Hi.$$.fragment,e),v(Fi.$$.fragment,e),v(Ki.$$.fragment,e),v(ho.$$.fragment,e),v(fo.$$.fragment,e),v(co.$$.fragment,e),v(uo.$$.fragment,e),v(vo.$$.fragment,e),v(Ao.$$.fragment,e),v(Do.$$.fragment,e),z_=!0)},o(e){w(Es.$$.fragment,e),w(Ps.$$.fragment,e),w(ks.$$.fragment,e),w(Ts.$$.fragment,e),w(js.$$.fragment,e),w(xs.$$.fragment,e),w(Ds.$$.fragment,e),w(Gs.$$.fragment,e),w(Is.$$.fragment,e),w(Us.$$.fragment,e),w(Ss.$$.fragment,e),w(zs.$$.fragment,e),w(jt.$$.fragment,e),w(Ns.$$.fragment,e),w(Cs.$$.fragment,e),w(Ls.$$.fragment,e),w(Ms.$$.fragment,e),w(Os.$$.fragment,e),w(Bs.$$.fragment,e),w(qs.$$.fragment,e),w(Hs.$$.fragment,e),w(Fs.$$.fragment,e),w(Ws.$$.fragment,e),w(Rs.$$.fragment,e),w(Xs.$$.fragment,e),w(Ys.$$.fragment,e),w(Qs.$$.fragment,e),w(Js.$$.fragment,e),w(Zs.$$.fragment,e),w(Ks.$$.fragment,e),w(er.$$.fragment,e),w(tr.$$.fragment,e),w(ar.$$.fragment,e),w(sr.$$.fragment,e),w(Mt.$$.fragment,e),w(ir.$$.fragment,e),w(or.$$.fragment,e),w(lr.$$.fragment,e),w(nr.$$.fragment,e),w(pr.$$.fragment,e),w(hr.$$.fragment,e),w(fr.$$.fragment,e),w(cr.$$.fragment,e),w(vr.$$.fragment,e),w(wr.$$.fragment,e),w(gr.$$.fragment,e),w(_r.$$.fragment,e),w(br.$$.fragment,e),w(yr.$$.fragment,e),w(Er.$$.fragment,e),w($r.$$.fragment,e),w(Ar.$$.fragment,e),w(Tr.$$.fragment,e),w(Dr.$$.fragment,e),w(Gr.$$.fragment,e),w(Ir.$$.fragment,e),w(Ur.$$.fragment,e),w(Sr.$$.fragment,e),w(zr.$$.fragment,e),w(Vr.$$.fragment,e),w(Hr.$$.fragment,e),w(Fr.$$.fragment,e),w(Rr.$$.fragment,e),w(ai.$$.fragment,e),w(si.$$.fragment,e),w(ri.$$.fragment,e),w(oi.$$.fragment,e),w(li.$$.fragment,e),w(ni.$$.fragment,e),w(pi.$$.fragment,e),w(hi.$$.fragment,e),w(fi.$$.fragment,e),w(vi.$$.fragment,e),w(gi.$$.fragment,e),w($i.$$.fragment,e),w(Ai.$$.fragment,e),w(Ti.$$.fragment,e),w(ji.$$.fragment,e),w(Di.$$.fragment,e),w(Ui.$$.fragment,e)
,w(Si.$$.fragment,e),w(zi.$$.fragment,e),w(Mi.$$.fragment,e),w(qi.$$.fragment,e),w(Hi.$$.fragment,e),w(Fi.$$.fragment,e),w(Ki.$$.fragment,e),w(ho.$$.fragment,e),w(fo.$$.fragment,e),w(co.$$.fragment,e),w(uo.$$.fragment,e),w(vo.$$.fragment,e),w(Ao.$$.fragment,e),w(Do.$$.fragment,e),z_=!1},d(e){t(y),e&&t(k),e&&t(E),g(Es),e&&t(Zd),e&&t(Io),e&&t(Kd),g(Ps,e),e&&t(eu),e&&t(te),e&&t(tu),g(ks,e),e&&t(au),e&&t(ae),e&&t(su),e&&t(Pt),e&&t(ru),g(Ts,e),e&&t(iu),e&&t($t),e&&t(ou),g(js,e),e&&t(lu),e&&t(zo),e&&t(nu),g(xs,e),e&&t(pu),e&&t(No),e&&t(hu),g(Ds,e),e&&t(fu),e&&t(Co),e&&t(cu),e&&t(Pe),g(Gs),e&&t(mu),e&&t(At),e&&t(du),g(Is,e),e&&t(uu),e&&t(Tt),e&&t(vu),g(Us,e),e&&t(wu),g(Ss,e),e&&t(gu),e&&t(Lo),e&&t(_u),g(zs,e),e&&t(bu),g(jt,e),e&&t(yu),e&&t($e),g(Ns),e&&t(Eu),e&&t(Dt),e&&t(Pu),g(Cs,e),e&&t($u),g(Ls,e),e&&t(ku),e&&t(Oo),e&&t(Au),e&&t(ke),g(Ms),e&&t(Tu),e&&t(Bo),e&&t(ju),e&&t(N),e&&t(xu),g(Os,e),e&&t(Du),g(Bs,e),e&&t(Gu),e&&t(G),e&&t(Iu),e&&t(Ho),e&&t(Uu),e&&t(Ae),g(qs),e&&t(Su),e&&t(Fo),e&&t(zu),e&&t(Ut),e&&t(Nu),e&&t(se),e&&t(Cu),g(Hs,e),e&&t(Lu),g(Fs,e),e&&t(Mu),e&&t(Xo),e&&t(Ou),e&&t(Te),g(Ws),e&&t(Bu),e&&t(re),e&&t(qu),g(Rs,e),e&&t(Vu),g(Xs,e),e&&t(Hu),e&&t(Yo),e&&t(Fu),g(Ys,e),e&&t(Wu),g(Qs,e),e&&t(Ru),e&&t(Qo),e&&t(Xu),e&&t(je),g(Js),e&&t(Yu),e&&t(Jo),e&&t(Qu),e&&t(xe),g(Zs),e&&t(Ju),e&&t(Ct),e&&t(Zu),g(Ks,e),e&&t(Ku),g(er,e),e&&t(ev),e&&t(Zo),e&&t(tv),g(tr,e),e&&t(av),g(ar,e),e&&t(sv),e&&t(Ko),e&&t(rv),e&&t(De),g(sr),e&&t(iv),e&&t(el),e&&t(ov),e&&t(I),e&&t(lv),e&&t(sl),e&&t(nv),g(Mt,e),e&&t(pv),g(ir,e),e&&t(hv),e&&t(Ot),e&&t(fv),g(or,e),e&&t(cv),g(lr,e),e&&t(mv),e&&t(rl),e&&t(dv),g(nr,e),e&&t(uv),g(pr,e),e&&t(vv),e&&t(il),e&&t(wv),e&&t(ol),e&&t(gv),e&&t(Ge),g(hr),e&&t(_v),e&&t(ie),e&&t(bv),g(fr,e),e&&t(yv),e&&t(hl),e&&t(Ev),g(cr,e),e&&t(Pv),e&&t(T),e&&t($v),e&&t(qt),e&&t(kv),g(vr,e),e&&t(Av),e&&t(cl),e&&t(Tv),e&&t(ml),e&&t(jv),e&&t(Ie),g(wr),e&&t(xv),e&&t(dl),e&&t(Dv),e&&t(Ht),e&&t(Gv),e&&t(ul),e&&t(Iv),e&&t(vl),e&&t(Uv),e&&t(wl),e&&t(Sv),e&&t(Ue),g(gr),e&&t(zv),e&&t(C),e&&t(Nv),e&&t(L),e&&t(Cv),g(_r,e),e&&t(Lv),e&&t(bl),e&&t(Mv),e&&t(Se),g(br),e&&t(Ov),e&&t(Rt),e&&t(Bv),e&&t(El),e&&t(qv),e&&t(ze),g(yr),e&&t(Vv),e&&t(Pl),e&&t(Hv),e&&t(Ne),g(Er),e&&t(Fv),e&&t($l),e&&t(Wv),e&&t(kl),e&&t(Rv),e&&t(Tl),e&&t(Xv),e&&t(Qt),e&&t(Yv),e&&t(Ce),g($r),e&&t(Qv),e&&t(jl),e&&t(Jv),e&&t(oe),e&&t(Zv),e&&t(Dl),e&&t(Kv),e&&t(P),e&&t(ew),e&&t(Le),g(Ar),e&&t(tw),e&&t(Me),g(Tr),e&&t(aw),e&&t(Gl),e&&t(sw),e&&t(jr),e&&t(rw),e&&t(Il),e&&t(iw),e&&t(Ul),e&&t(ow),e&&t(Sl),e&&t(lw),e&&t(zl),e&&t(nw),e&&t(Nl),e&&t(pw),e&&t(Cl),e&&t(hw),e&&t(xr),e&&t(fw),e&&t(Ll),e&&t(cw),e&&t(Ml),e&&t(mw),e&&t(Oe),g(Dr),e&&t(dw),e&&t(Ol),e&&t(uw),e&&t(Bl),e&&t(vw),g(Gr,e),e&&t(ww),e&&t(ql),e&&t(gw),e&&t(Vl),e&&t(_w),g(Ir,e),e&&t(bw),e&&t(Hl),e&&t(yw),g(Ur,e),e&&t(Ew),e&&t(Fl),e&&t(Pw),g(Sr,e),e&&t($w),e&&t(le),e&&t(kw),e&&t(Wl),e&&t(Aw),e&&t(Rl),e&&t(Tw),e&&t(Be),g(zr),e&&t(jw),e&&t(Nr),e&&t(xw),e&&t(aa),e&&t(Dw),e&&t(Xl),e&&t(Gw),e&&t(M),e&&t(Iw),e&&t(Yl),e&&t(Uw),e&&t(Ql),e&&t(Sw),e&&t(sa),e&&t(zw),e&&t(en),e&&t(Nw),e&&t(ra),e&&t(Cw),e&&t(tn),e&&t(Lw),g(Vr,e),e&&t(Mw),e&&t(x),e&&t(Ow),e&&t(qe),g(Hr),e&&t(Bw),e&&t(Ve),g(Fr),e&&t(qw),e&&t(an),e&&t(Vw),e&&t(Wr),e&&t(Hw),e&&t(He),g(Rr),e&&t(Fw),e&&t(rn),e&&t(Ww),e&&t(ne),e&&t(Rw),e&&t(on),e&&t(Xw),e&&t(ei),e&&t(Yw),e&&t(Fe),g(ai),e&&t(Qw),e&&t(ln),e&&t(Jw),e&&t(j),e&&t(Zw),e&&t(nn),e&&t(Kw),e&&t(pn),e&&t(eg),e&&t(hn),e&&t(tg),e&&t(We),g(si),e&&t(ag),e&&t(ha),e&&t(sg),e&&t(Re),g(ri),e&&t(rg),e&&t(pe),e&&t(ig),e&&t(Xe),g(oi),e&&t(og),e&&t(cn),e&&t(lg),e&&t(Ye),g(li),e&&t(ng),e&&t(mn),e&&t(pg),
e&&t(dn),e&&t(hg),e&&t(Qe),g(ni),e&&t(fg),e&&t(un),e&&t(cg),e&&t(Je),g(pi),e&&t(mg),e&&t(vn),e&&t(dg),e&&t(Ze),g(hi),e&&t(ug),e&&t(wn),e&&t(vg),e&&t(Ke),g(fi),e&&t(wg),e&&t(gn),e&&t(gg),e&&t(O),e&&t(_g),e&&t(_n),e&&t(bg),e&&t(bn),e&&t(yg),e&&t(_a),e&&t(Eg),e&&t(En),e&&t(Pg),e&&t(et),g(vi),e&&t($g),e&&t(Pn),e&&t(kg),e&&t($n),e&&t(Ag),e&&t(B),e&&t(Tg),e&&t(kn),e&&t(jg),e&&t(ya),e&&t(xg),e&&t(An),e&&t(Dg),g(gi,e),e&&t(Gg),e&&t(Ea),e&&t(Ig),e&&t(Pa),e&&t(Ug),e&&t($a),e&&t(Sg),e&&t(Rn),e&&t(zg),e&&t(Xn),e&&t(Ng),e&&t(ka),e&&t(Cg),e&&t(Yn),e&&t(Lg),e&&t(Aa),e&&t(Mg),e&&t(he),e&&t(Og),e&&t(tt),g($i),e&&t(Bg),e&&t(fe),e&&t(qg),e&&t(Zn),e&&t(Vg),e&&t(at),g(Ai),e&&t(Hg),e&&t(Kn),e&&t(Fg),e&&t(ep),e&&t(Wg),e&&t(tp),e&&t(Rg),e&&t(xa),e&&t(Xg),e&&t(st),g(Ti),e&&t(Yg),e&&t(ce),e&&t(Qg),e&&t(ap),e&&t(Jg),e&&t(sp),e&&t(Zg),e&&t(rp),e&&t(Kg),e&&t(ip),e&&t(e1),e&&t(Ga),e&&t(t1),e&&t(op),e&&t(a1),g(ji,e),e&&t(s1),e&&t(xi),e&&t(r1),e&&t(lp),e&&t(i1),g(Di,e),e&&t(o1),e&&t(np),e&&t(l1),e&&t(me),e&&t(n1),e&&t(rt),g(Ui),e&&t(p1),e&&t(Ua),e&&t(h1),e&&t(Sa),e&&t(f1),e&&t(it),g(Si),e&&t(c1),e&&t(hp),e&&t(m1),e&&t(fp),e&&t(d1),g(zi,e),e&&t(u1),e&&t(cp),e&&t(v1),e&&t(Na),e&&t(w1),e&&t(mp),e&&t(g1),e&&t(q),e&&t(_1),e&&t(de),e&&t(b1),e&&t(Ca),e&&t(y1),e&&t(ue),e&&t(E1),e&&t(ot),g(Mi),e&&t(P1),e&&t(ve),e&&t($1),e&&t(Ma),e&&t(k1),e&&t(dp),e&&t(A1),e&&t(lt),g(qi),e&&t(T1),e&&t(up),e&&t(j1),e&&t(V),e&&t(x1),e&&t(vp),e&&t(D1),g(Hi,e),e&&t(G1),e&&t(Ba),e&&t(I1),e&&t(nt),g(Fi),e&&t(U1),e&&t(wp),e&&t(S1),e&&t(we),e&&t(z1),e&&t(Xi),e&&t(N1),e&&t(Va),e&&t(C1),e&&t(H),e&&t(L1),e&&t(pt),g(Ki),e&&t(M1),e&&t(ht),e&&t(O1),e&&t(Fa),e&&t(B1),e&&t(_p),e&&t(q1),e&&t(eo),e&&t(V1),e&&t(Wa),e&&t(H1),e&&t(ro),e&&t(F1),e&&t(yp),e&&t(W1),e&&t(U),e&&t(R1),e&&t(Pp),e&&t(X1),e&&t(Ra),e&&t(Y1),e&&t($p),e&&t(Q1),e&&t(kp),e&&t(J1),e&&t(Ap),e&&t(Z1),e&&t(Xa),e&&t(K1),e&&t(Tp),e&&t(e_),e&&t(Ya),e&&t(t_),e&&t(Np),e&&t(a_),e&&t(Cp),e&&t(s_),e&&t(Lp),e&&t(r_),e&&t(Mp),e&&t(i_),e&&t(po),e&&t(o_),g(ho,e),e&&t(l_),e&&t(D),e&&t(n_),e&&t(vt),g(fo),e&&t(p_),e&&t(Op),e&&t(h_),e&&t(Ja),e&&t(f_),e&&t(wt),g(co),e&&t(c_),e&&t(gt),e&&t(m_),e&&t(_t),g(uo),e&&t(d_),e&&t(bt),g(vo),e&&t(u_),e&&t(Vp),e&&t(v_),e&&t(Hp),e&&t(w_),e&&t(Fp),e&&t(g_),e&&t(Wp),e&&t(__),e&&t(ts),e&&t(b_),e&&t(Xp),e&&t(y_),e&&t(Yp),e&&t(E_),e&&t(Qp),e&&t(P_),e&&t(Jp),e&&t($_),e&&t(ge),e&&t(k_),e&&t($),e&&t(A_),e&&t(yt),g(Ao),e&&t(T_),e&&t(ss),e&&t(j_),e&&t(rs),e&&t(x_),e&&t(Kp),e&&t(D_),e&&t(_e),e&&t(G_),e&&t(eh),e&&t(I_),e&&t(Et),g(Do),e&&t(U_),e&&t(th),e&&t(S_),e&&t(ah)}}}const sN={local:"performance-and-scalability-how-to-fit-a-bigger-model-and-train-it-faster",sections:[{local:"load-model",title:"Load Model"},{local:"vanilla-training",title:"Vanilla Training"},{local:"gradient-accumulation",title:"Gradient Accumulation"},{local:"gradient-checkpointing",title:"Gradient Checkpointing"},{local:"fp16-training",title:"FP16 Training"},{local:"optimizer",sections:[{local:"adafactor",title:"Adafactor"},{local:"8bit-adam",title:"8-bit Adam"}],title:"Optimizer"},{local:"using-accelerate",title:"Using \u{1F917} Accelerate"},{local:"how-to-scale",title:"How to scale"},{local:"multigpu-training",title:"Multi-GPU Training"},{local:"what-if-my-model-still-does-not-fit",title:"What if my model still does not fit?"},{local:"further-discussions",title:"Further discussions"},{local:"faster-training",title:"Faster Training"},{local:"bigger-models",title:"Bigger Models"},{local:"hardware",sections:[{local:"power-and-cooling",title:"Power and Cooling"},{local:"multigpu-connectivity",title:"Multi-GPU 
Connectivity"},{local:"nvlink",title:"NVlink"}],title:"Hardware"},{local:"software",sections:[{local:"model-scalability",title:"Model Scalability"},{local:"anatomy-of-models-operations",title:"Anatomy of Model's Operations"},{local:"anatomy-of-models-memory",sections:[{local:"model-weights",title:"Model Weights"},{local:"optimizer-states",title:"Optimizer States"},{local:"gradients",title:"Gradients"},{local:"forward-activations",title:"Forward Activations"},{local:"temporary-memory",title:"Temporary Memory"},{local:"functionalityspecific-memory",title:"Functionality-specific memory"}],title:"Anatomy of Model's Memory"},{local:"forward-vs-backward-execution-speed",title:"`forward` vs `backward` Execution Speed"},{local:"floating-data-types",sections:[{local:"fp16",sections:[{local:"fp16-caching",title:"fp16 caching"},{local:"fp16-inference",title:"fp16 Inference"}],title:"fp16"},{local:"bf16",sections:[{local:"bf16-inference",title:"bf16 Inference"}],title:"bf16"},{local:"tf32",title:"tf32"}],title:"Floating Data Types"},{local:"gradient-accumulation",title:"Gradient Accumulation"},{local:"gradient-checkpointing",title:"Gradient Checkpointing"},{local:"batch-sizes",title:"Batch sizes"},{local:"dp-vs-ddp",title:"DP vs DDP"},{local:"dataloader",title:"DataLoader"}],title:"Software"},{local:"faster-optimizer",sections:[{local:"sparsity",sections:[{local:"mixture-of-experts",title:"Mixture of Experts"}],title:"Sparsity"},{local:"efficient-software-prebuilds",title:"Efficient Software Prebuilds"}],title:"Faster optimizer"},{local:"contribute",title:"Contribute"}],title:"Performance and Scalability: How To Fit a Bigger Model and Train It Faster"};function rN(ys,y,k){let{fw:E}=y;return ys.$$set=A=>{"fw"in A&&k(0,E=A.fw)},[E]}class fN extends Yz{constructor(y){super();Qz(this,y,rN,aN,Jz,{fw:0})}}export{fN as default,sN as metadata};
267
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages/community.mdx-df0c9955.js
import{S as RD,i as CD,s as FD,e as a,k as n,w as u1,t as s,M as MD,c as r,d as o,m as i,a as l,x as m1,h,b as e,N as g,F as t,g as b,y as _1,L as ID,q as b1,o as v1,B as p1}from"../chunks/vendor-4833417e.js";import{I as E1}from"../chunks/IconCopyLink-4b81c553.js";function PD(Tc){let bt,No,m,v,Rh,Ne,wc,Ch,Dc,ec,Oo,yc,ac,wt,kt,Fh,Oe,Ac,Mh,kc,rc,Rt,Ih,Dt,xo,Rc,Cc,zo,Fc,Mc,Vo,Ic,Pc,Ph,yt,qo,xe,Hc,Lc,_,Bc,Qo,Sc,Gc,ze,Nc,Oc,Ve,xc,zc,Vc,jo,qe,qc,oc,At,Ct,Hh,Qe,Qc,Lh,jc,lc,Ft,Bh,p,Wo,Wc,Uc,Uo,$c,Kc,$o,Yc,Xc,Sh,Jc,c,E,Ko,je,Zc,tf,Yo,ef,af,Xo,We,rf,of,Jo,Ue,Zo,T1,lf,T,tl,$e,nf,sf,el,hf,cf,al,Ke,ff,gf,rl,Ye,ol,w1,df,w,ll,Xe,uf,mf,nl,_f,bf,il,Je,vf,pf,sl,Ze,hl,D1,Ef,D,cl,ta,Tf,wf,fl,Df,yf,gl,ea,Af,kf,dl,aa,ul,y1,Rf,y,ml,ra,Cf,Ff,_l,Mf,If,bl,oa,Pf,Hf,vl,la,pl,A1,Lf,A,El,na,Bf,Sf,Tl,Gf,Nf,wl,ia,Of,xf,Dl,sa,yl,k1,zf,k,Al,ha,Vf,qf,kl,Qf,jf,Rl,ca,Wf,Uf,Cl,fa,Fl,R1,$f,R,Ml,ga,Kf,Yf,Il,Xf,Jf,Pl,da,Zf,tg,Hl,ua,Ll,C1,eg,C,Bl,ma,ag,rg,Sl,og,lg,Gl,_a,ng,ig,Nl,ba,Ol,F1,sg,F,xl,va,hg,cg,zl,fg,gg,Vl,pa,dg,ug,ql,Ea,Ql,M1,mg,M,jl,Ta,_g,bg,Wl,vg,pg,Ul,wa,Eg,Tg,$l,Da,Kl,I1,wg,I,Yl,ya,Dg,yg,Aa,Ag,Gh,kg,Rg,Xl,ka,Cg,Fg,Jl,Ra,Zl,P1,Mg,P,tn,Ca,Ig,Pg,en,Hg,Lg,an,Fa,Bg,Sg,rn,Ma,on,H1,Gg,H,ln,Ia,Ng,Og,nn,xg,zg,sn,Pa,Vg,qg,hn,Ha,cn,L1,Qg,L,fn,La,jg,Wg,gn,Ug,$g,dn,Ba,Kg,Yg,un,Sa,mn,B1,Xg,B,_n,Ga,Jg,Zg,bn,td,ed,vn,Na,ad,rd,pn,Oa,En,S1,od,S,Tn,xa,ld,nd,wn,id,sd,Dn,za,hd,cd,yn,Va,An,G1,fd,G,kn,qa,gd,dd,Rn,ud,md,Cn,Qa,_d,bd,Fn,ja,Mn,N1,vd,N,In,Wa,pd,Ed,Pn,Td,wd,Hn,Ua,Dd,yd,Ln,$a,Bn,O1,Ad,O,Sn,Ka,kd,Rd,Gn,Cd,Fd,Nn,Ya,Md,Id,On,Xa,xn,x1,Pd,x,zn,Ja,Hd,Ld,Vn,Bd,Sd,qn,Za,Gd,Nd,Qn,tr,jn,z1,Od,z,Wn,er,xd,zd,Un,Vd,qd,$n,ar,Qd,jd,Kn,rr,Yn,V1,Wd,V,Xn,or,Ud,$d,Jn,Kd,Yd,Zn,lr,Xd,Jd,ti,nr,ei,q1,Zd,q,ai,ir,tu,eu,ri,au,ru,oi,sr,ou,lu,li,hr,ni,Q1,nu,Q,ii,cr,iu,su,si,hu,cu,hi,fr,fu,gu,ci,gr,fi,j1,du,j,gi,dr,uu,mu,di,_u,bu,ui,ur,vu,pu,mi,mr,_i,W1,Eu,W,bi,_r,Tu,wu,vi,Du,yu,pi,br,Au,ku,Ei,vr,Ti,U1,Ru,U,wi,pr,Cu,Fu,vt,Mu,Nh,Iu,Pu,Oh,Hu,Lu,Bu,Di,Er,Su,Gu,yi,Tr,Ai,$1,Nu,$,ki,wr,Ou,xu,pt,zu,xh,Vu,qu,zh,Qu,ju,Wu,Ri,Dr,Uu,$u,Ci,yr,Fi,K1,Ku,K,Mi,Ar,Yu,Xu,Et,Ju,Vh,Zu,tm,qh,em,am,rm,Ii,kr,om,lm,Pi,Rr,Hi,Y1,nm,Y,Li,Cr,im,sm,Tt,hm,Qh,cm,fm,jh,gm,dm,um,Bi,Fr,mm,_m,Si,Mr,Gi,X1,bm,X,Ni,Ir,vm,pm,Oi,Em,Tm,xi,Pr,wm,Dm,zi,Hr,Vi,J1,ym,J,qi,Lr,Am,km,Mt,Rm,Wh,Cm,Fm,Mm,Qi,Br,Im,Pm,ji,Sr,Wi,Z1,Hm,Z,Ui,Gr,Lm,Bm,$i,Sm,Gm,Ki,Nr,Nm,Om,Yi,Or,Xi,tv,xm,tt,Ji,xr,zm,Vm,Zi,qm,Qm,ts,zr,jm,Wm,es,Vr,as,ev,Um,et,rs,qr,$m,Km,os,Ym,Xm,ls,Qr,Jm,Zm,ns,jr,is,av,t_,at,ss,Wr,e_,a_,It,r_,Uh,o_,l_,n_,hs,Ur,i_,s_,cs,$r,fs,rv,h_,rt,gs,Kr,c_,f_,ds,g_,d_,us,Yr,u_,m_,ms,Xr,_s,ov,__,ot,bs,Jr,b_,v_,vs,p_,E_,ps,Zr,T_,w_,Es,to,Ts,lv,D_,lt,ws,eo,y_,A_,Ds,k_,R_,ys,ao,C_,F_,As,ro,ks,nv,M_,nt,Rs,oo,I_,P_,Cs,H_,L_,Fs,lo,B_,S_,Ms,no,Is,iv,G_,it,Ps,io,N_,O_,Hs,x_,z_,Ls,so,V_,q_,Bs,ho,Ss,sv,Q_,st,Gs,co,j_,W_,Ns,U_,$_,Os,fo,K_,Y_,xs,go,zs,hv,X_,ht,Vs,uo,J_,Z_,Pt,tb,$h,eb,ab,rb,qs,mo,ob,lb,Qs,_o,js,cv,nb,ct,Ws,bo,ib,sb,Ht,hb,Kh,cb,fb,gb,Us,vo,db,ub,$s,po,Ks,fv,mb,ft,Ys,Eo,_b,bb,Lt,vb,Yh,pb,Eb,Tb,Xs,To,wb,Db,Js,wo,Zs,gv,yb,gt,th,Do,Ab,kb,Bt,Rb,Xh,Cb,Fb,Mb,eh,yo,Ib,Pb,ah,Ao,rh,dv,Hb,dt,oh,ko,Lb,Bb,lh,Sb,Gb,nh,Ro,Nb,Ob,ih,Co,sh,uv,xb,ut,hh,Fo,zb,Vb,St,qb,Jh,Qb,jb,Wb,ch,Mo,Ub,$b,fh,Io,gh,mv,Kb,mt,dh,Po,Yb,Xb,Gt,Jb,Zh,Zb,t1,e1,uh,Ho,a1,r1,mh,Lo,_h,_v,o1,_t,bh,Bo,l1,n1,Nt,i1,tc,s1,h1,c1,vh,So,f1,g1,ph,Go,Eh,bv,nc;return Ne=new E1({}),Oe=new E1({}),Qe=new E1({}),{c(){bt=a("meta"),No=n(),m=a("h1"),v=a("a"),Rh=a("span"),u1(Ne.$$.fragment),wc=n(),Ch=a("span"),Dc=s("Community"),ec=n(),Oo=a("p"),yc=s("This page regroups resources around \u{1F917} Transformers developed by the 
community."),ac=n(),wt=a("h2"),kt=a("a"),Fh=a("span"),u1(Oe.$$.fragment),Ac=n(),Mh=a("span"),kc=s("Community resources:"),rc=n(),Rt=a("table"),Ih=a("thead"),Dt=a("tr"),xo=a("th"),Rc=s("Resource"),Cc=n(),zo=a("th"),Fc=s("Description"),Mc=n(),Vo=a("th"),Ic=s("Author"),Pc=n(),Ph=a("tbody"),yt=a("tr"),qo=a("td"),xe=a("a"),Hc=s("Hugging Face Transformers Glossary Flashcards"),Lc=n(),_=a("td"),Bc=s("A set of flashcards based on the "),Qo=a("a"),Sc=s("Transformers Docs Glossary"),Gc=s(" that has been put into a form which can be easily learnt/revised using "),ze=a("a"),Nc=s("Anki"),Oc=s(" an open source, cross platform app specifically designed for long term knowledge retention. See this "),Ve=a("a"),xc=s("Introductory video on how to use the flashcards"),zc=s("."),Vc=n(),jo=a("td"),qe=a("a"),qc=s("Darigov Research"),oc=n(),At=a("h2"),Ct=a("a"),Hh=a("span"),u1(Qe.$$.fragment),Qc=n(),Lh=a("span"),jc=s("Community notebooks:"),lc=n(),Ft=a("table"),Bh=a("thead"),p=a("tr"),Wo=a("th"),Wc=s("Notebook"),Uc=n(),Uo=a("th"),$c=s("Description"),Kc=n(),$o=a("th"),Yc=s("Author"),Xc=n(),Sh=a("th"),Jc=n(),c=a("tbody"),E=a("tr"),Ko=a("td"),je=a("a"),Zc=s("Fine-tune a pre-trained Transformer to generate lyrics"),tf=n(),Yo=a("td"),ef=s("How to generate lyrics in the style of your favorite artist by fine-tuning a GPT-2 model"),af=n(),Xo=a("td"),We=a("a"),rf=s("Aleksey Korshuk"),of=n(),Jo=a("td"),Ue=a("a"),Zo=a("img"),lf=n(),T=a("tr"),tl=a("td"),$e=a("a"),nf=s("Train T5 in Tensorflow 2"),sf=n(),el=a("td"),hf=s("How to train T5 for any task using Tensorflow 2. This notebook demonstrates a Question & Answer task implemented in Tensorflow 2 using SQUAD"),cf=n(),al=a("td"),Ke=a("a"),ff=s("Muhammad Harris"),gf=n(),rl=a("td"),Ye=a("a"),ol=a("img"),df=n(),w=a("tr"),ll=a("td"),Xe=a("a"),uf=s("Train T5 on TPU"),mf=n(),nl=a("td"),_f=s("How to train T5 on SQUAD with Transformers and Nlp"),bf=n(),il=a("td"),Je=a("a"),vf=s("Suraj Patil"),pf=n(),sl=a("td"),Ze=a("a"),hl=a("img"),Ef=n(),D=a("tr"),cl=a("td"),ta=a("a"),Tf=s("Fine-tune T5 for Classification and Multiple Choice"),wf=n(),fl=a("td"),Df=s("How to fine-tune T5 for classification and multiple choice tasks using a text-to-text format with PyTorch Lightning"),yf=n(),gl=a("td"),ea=a("a"),Af=s("Suraj Patil"),kf=n(),dl=a("td"),aa=a("a"),ul=a("img"),Rf=n(),y=a("tr"),ml=a("td"),ra=a("a"),Cf=s("Fine-tune DialoGPT on New Datasets and Languages"),Ff=n(),_l=a("td"),Mf=s("How to fine-tune the DialoGPT model on a new dataset for open-dialog conversational chatbots"),If=n(),bl=a("td"),oa=a("a"),Pf=s("Nathan Cooper"),Hf=n(),vl=a("td"),la=a("a"),pl=a("img"),Lf=n(),A=a("tr"),El=a("td"),na=a("a"),Bf=s("Long Sequence Modeling with Reformer"),Sf=n(),Tl=a("td"),Gf=s("How to train on sequences as long as 500,000 tokens with Reformer"),Nf=n(),wl=a("td"),ia=a("a"),Of=s("Patrick von Platen"),xf=n(),Dl=a("td"),sa=a("a"),yl=a("img"),zf=n(),k=a("tr"),Al=a("td"),ha=a("a"),Vf=s("Fine-tune BART for Summarization"),qf=n(),kl=a("td"),Qf=s("How to fine-tune BART for summarization with fastai using blurr"),jf=n(),Rl=a("td"),ca=a("a"),Wf=s("Wayde Gilliam"),Uf=n(),Cl=a("td"),fa=a("a"),Fl=a("img"),$f=n(),R=a("tr"),Ml=a("td"),ga=a("a"),Kf=s("Fine-tune a pre-trained Transformer on anyone\u2019s tweets"),Yf=n(),Il=a("td"),Xf=s("How to generate tweets in the style of your favorite Twitter account by fine-tuning a GPT-2 model"),Jf=n(),Pl=a("td"),da=a("a"),Zf=s("Boris Dayma"),tg=n(),Hl=a("td"),ua=a("a"),Ll=a("img"),eg=n(),C=a("tr"),Bl=a("td"),ma=a("a"),ag=s("Optimize \u{1F917} Hugging Face models with Weights & 
Biases"),rg=n(),Sl=a("td"),og=s("A complete tutorial showcasing W&B integration with Hugging Face"),lg=n(),Gl=a("td"),_a=a("a"),ng=s("Boris Dayma"),ig=n(),Nl=a("td"),ba=a("a"),Ol=a("img"),sg=n(),F=a("tr"),xl=a("td"),va=a("a"),hg=s("Pretrain Longformer"),cg=n(),zl=a("td"),fg=s("How to build a \u201Clong\u201D version of existing pretrained models"),gg=n(),Vl=a("td"),pa=a("a"),dg=s("Iz Beltagy"),ug=n(),ql=a("td"),Ea=a("a"),Ql=a("img"),mg=n(),M=a("tr"),jl=a("td"),Ta=a("a"),_g=s("Fine-tune Longformer for QA"),bg=n(),Wl=a("td"),vg=s("How to fine-tune longformer model for QA task"),pg=n(),Ul=a("td"),wa=a("a"),Eg=s("Suraj Patil"),Tg=n(),$l=a("td"),Da=a("a"),Kl=a("img"),wg=n(),I=a("tr"),Yl=a("td"),ya=a("a"),Dg=s("Evaluate Model with \u{1F917}nlp"),yg=n(),Aa=a("td"),Ag=s("How to evaluate longformer on TriviaQA with "),Gh=a("code"),kg=s("nlp"),Rg=n(),Xl=a("td"),ka=a("a"),Cg=s("Patrick von Platen"),Fg=n(),Jl=a("td"),Ra=a("a"),Zl=a("img"),Mg=n(),P=a("tr"),tn=a("td"),Ca=a("a"),Ig=s("Fine-tune T5 for Sentiment Span Extraction"),Pg=n(),en=a("td"),Hg=s("How to fine-tune T5 for sentiment span extraction using a text-to-text format with PyTorch Lightning"),Lg=n(),an=a("td"),Fa=a("a"),Bg=s("Lorenzo Ampil"),Sg=n(),rn=a("td"),Ma=a("a"),on=a("img"),Gg=n(),H=a("tr"),ln=a("td"),Ia=a("a"),Ng=s("Fine-tune DistilBert for Multiclass Classification"),Og=n(),nn=a("td"),xg=s("How to fine-tune DistilBert for multiclass classification with PyTorch"),zg=n(),sn=a("td"),Pa=a("a"),Vg=s("Abhishek Kumar Mishra"),qg=n(),hn=a("td"),Ha=a("a"),cn=a("img"),Qg=n(),L=a("tr"),fn=a("td"),La=a("a"),jg=s("Fine-tune BERT for Multi-label Classification"),Wg=n(),gn=a("td"),Ug=s("How to fine-tune BERT for multi-label classification using PyTorch"),$g=n(),dn=a("td"),Ba=a("a"),Kg=s("Abhishek Kumar Mishra"),Yg=n(),un=a("td"),Sa=a("a"),mn=a("img"),Xg=n(),B=a("tr"),_n=a("td"),Ga=a("a"),Jg=s("Fine-tune T5 for Summarization"),Zg=n(),bn=a("td"),td=s("How to fine-tune T5 for summarization in PyTorch and track experiments with WandB"),ed=n(),vn=a("td"),Na=a("a"),ad=s("Abhishek Kumar Mishra"),rd=n(),pn=a("td"),Oa=a("a"),En=a("img"),od=n(),S=a("tr"),Tn=a("td"),xa=a("a"),ld=s("Speed up Fine-Tuning in Transformers with Dynamic Padding / Bucketing"),nd=n(),wn=a("td"),id=s("How to speed up fine-tuning by a factor of 2 using dynamic padding / bucketing"),sd=n(),Dn=a("td"),za=a("a"),hd=s("Michael Benesty"),cd=n(),yn=a("td"),Va=a("a"),An=a("img"),fd=n(),G=a("tr"),kn=a("td"),qa=a("a"),gd=s("Pretrain Reformer for Masked Language Modeling"),dd=n(),Rn=a("td"),ud=s("How to train a Reformer model with bi-directional self-attention layers"),md=n(),Cn=a("td"),Qa=a("a"),_d=s("Patrick von Platen"),bd=n(),Fn=a("td"),ja=a("a"),Mn=a("img"),vd=n(),N=a("tr"),In=a("td"),Wa=a("a"),pd=s("Expand and Fine Tune Sci-BERT"),Ed=n(),Pn=a("td"),Td=s("How to increase vocabulary of a pretrained SciBERT model from AllenAI on the CORD dataset and pipeline it."),wd=n(),Hn=a("td"),Ua=a("a"),Dd=s("Tanmay Thakur"),yd=n(),Ln=a("td"),$a=a("a"),Bn=a("img"),Ad=n(),O=a("tr"),Sn=a("td"),Ka=a("a"),kd=s("Fine Tune BlenderBotSmall for Summarization using the Trainer API"),Rd=n(),Gn=a("td"),Cd=s("How to fine tune BlenderBotSmall for summarization on a custom dataset, using the Trainer API."),Fd=n(),Nn=a("td"),Ya=a("a"),Md=s("Tanmay Thakur"),Id=n(),On=a("td"),Xa=a("a"),xn=a("img"),Pd=n(),x=a("tr"),zn=a("td"),Ja=a("a"),Hd=s("Fine-tune Electra and interpret with Integrated Gradients"),Ld=n(),Vn=a("td"),Bd=s("How to fine-tune Electra for sentiment analysis and interpret predictions with Captum Integrated 
Gradients"),Sd=n(),qn=a("td"),Za=a("a"),Gd=s("Eliza Szczechla"),Nd=n(),Qn=a("td"),tr=a("a"),jn=a("img"),Od=n(),z=a("tr"),Wn=a("td"),er=a("a"),xd=s("fine-tune a non-English GPT-2 Model with Trainer class"),zd=n(),Un=a("td"),Vd=s("How to fine-tune a non-English GPT-2 Model with Trainer class"),qd=n(),$n=a("td"),ar=a("a"),Qd=s("Philipp Schmid"),jd=n(),Kn=a("td"),rr=a("a"),Yn=a("img"),Wd=n(),V=a("tr"),Xn=a("td"),or=a("a"),Ud=s("Fine-tune a DistilBERT Model for Multi Label Classification task"),$d=n(),Jn=a("td"),Kd=s("How to fine-tune a DistilBERT Model for Multi Label Classification task"),Yd=n(),Zn=a("td"),lr=a("a"),Xd=s("Dhaval Taunk"),Jd=n(),ti=a("td"),nr=a("a"),ei=a("img"),Zd=n(),q=a("tr"),ai=a("td"),ir=a("a"),tu=s("Fine-tune ALBERT for sentence-pair classification"),eu=n(),ri=a("td"),au=s("How to fine-tune an ALBERT model or another BERT-based model for the sentence-pair classification task"),ru=n(),oi=a("td"),sr=a("a"),ou=s("Nadir El Manouzi"),lu=n(),li=a("td"),hr=a("a"),ni=a("img"),nu=n(),Q=a("tr"),ii=a("td"),cr=a("a"),iu=s("Fine-tune Roberta for sentiment analysis"),su=n(),si=a("td"),hu=s("How to fine-tune a Roberta model for sentiment analysis"),cu=n(),hi=a("td"),fr=a("a"),fu=s("Dhaval Taunk"),gu=n(),ci=a("td"),gr=a("a"),fi=a("img"),du=n(),j=a("tr"),gi=a("td"),dr=a("a"),uu=s("Evaluating Question Generation Models"),mu=n(),di=a("td"),_u=s("How accurate are the answers to questions generated by your seq2seq transformer model?"),bu=n(),ui=a("td"),ur=a("a"),vu=s("Pascal Zoleko"),pu=n(),mi=a("td"),mr=a("a"),_i=a("img"),Eu=n(),W=a("tr"),bi=a("td"),_r=a("a"),Tu=s("Classify text with DistilBERT and Tensorflow"),wu=n(),vi=a("td"),Du=s("How to fine-tune DistilBERT for text classification in TensorFlow"),yu=n(),pi=a("td"),br=a("a"),Au=s("Peter Bayerle"),ku=n(),Ei=a("td"),vr=a("a"),Ti=a("img"),Ru=n(),U=a("tr"),wi=a("td"),pr=a("a"),Cu=s("Leverage BERT for Encoder-Decoder Summarization on CNN/Dailymail"),Fu=n(),vt=a("td"),Mu=s("How to warm-start a "),Nh=a("em"),Iu=s("EncoderDecoderModel"),Pu=s(" with a "),Oh=a("em"),Hu=s("bert-base-uncased"),Lu=s(" checkpoint for summarization on CNN/Dailymail"),Bu=n(),Di=a("td"),Er=a("a"),Su=s("Patrick von Platen"),Gu=n(),yi=a("td"),Tr=a("a"),Ai=a("img"),Nu=n(),$=a("tr"),ki=a("td"),wr=a("a"),Ou=s("Leverage RoBERTa for Encoder-Decoder Summarization on BBC XSum"),xu=n(),pt=a("td"),zu=s("How to warm-start a shared "),xh=a("em"),Vu=s("EncoderDecoderModel"),qu=s(" with a "),zh=a("em"),Qu=s("roberta-base"),ju=s(" checkpoint for summarization on BBC/XSum"),Wu=n(),Ri=a("td"),Dr=a("a"),Uu=s("Patrick von Platen"),$u=n(),Ci=a("td"),yr=a("a"),Fi=a("img"),Ku=n(),K=a("tr"),Mi=a("td"),Ar=a("a"),Yu=s("Fine-tune TAPAS on Sequential Question Answering (SQA)"),Xu=n(),Et=a("td"),Ju=s("How to fine-tune "),Vh=a("em"),Zu=s("TapasForQuestionAnswering"),tm=s(" with a "),qh=a("em"),em=s("tapas-base"),am=s(" checkpoint on the Sequential Question Answering (SQA) dataset"),rm=n(),Ii=a("td"),kr=a("a"),om=s("Niels Rogge"),lm=n(),Pi=a("td"),Rr=a("a"),Hi=a("img"),nm=n(),Y=a("tr"),Li=a("td"),Cr=a("a"),im=s("Evaluate TAPAS on Table Fact Checking (TabFact)"),sm=n(),Tt=a("td"),hm=s("How to evaluate a fine-tuned "),Qh=a("em"),cm=s("TapasForSequenceClassification"),fm=s(" with a "),jh=a("em"),gm=s("tapas-base-finetuned-tabfact"),dm=s(" checkpoint using a combination of the \u{1F917} datasets and \u{1F917} transformers libraries"),um=n(),Bi=a("td"),Fr=a("a"),mm=s("Niels Rogge"),_m=n(),Si=a("td"),Mr=a("a"),Gi=a("img"),bm=n(),X=a("tr"),Ni=a("td"),Ir=a("a"),vm=s("Fine-tuning mBART for 
translation"),pm=n(),Oi=a("td"),Em=s("How to fine-tune mBART using Seq2SeqTrainer for Hindi to English translation"),Tm=n(),xi=a("td"),Pr=a("a"),wm=s("Vasudev Gupta"),Dm=n(),zi=a("td"),Hr=a("a"),Vi=a("img"),ym=n(),J=a("tr"),qi=a("td"),Lr=a("a"),Am=s("Fine-tune LayoutLM on FUNSD (a form understanding dataset)"),km=n(),Mt=a("td"),Rm=s("How to fine-tune "),Wh=a("em"),Cm=s("LayoutLMForTokenClassification"),Fm=s(" on the FUNSD dataset for information extraction from scanned documents"),Mm=n(),Qi=a("td"),Br=a("a"),Im=s("Niels Rogge"),Pm=n(),ji=a("td"),Sr=a("a"),Wi=a("img"),Hm=n(),Z=a("tr"),Ui=a("td"),Gr=a("a"),Lm=s("Fine-Tune DistilGPT2 and Generate Text"),Bm=n(),$i=a("td"),Sm=s("How to fine-tune DistilGPT2 and generate text"),Gm=n(),Ki=a("td"),Nr=a("a"),Nm=s("Aakash Tripathi"),Om=n(),Yi=a("td"),Or=a("a"),Xi=a("img"),xm=n(),tt=a("tr"),Ji=a("td"),xr=a("a"),zm=s("Fine-Tune LED on up to 8K tokens"),Vm=n(),Zi=a("td"),qm=s("How to fine-tune LED on pubmed for long-range summarization"),Qm=n(),ts=a("td"),zr=a("a"),jm=s("Patrick von Platen"),Wm=n(),es=a("td"),Vr=a("a"),as=a("img"),Um=n(),et=a("tr"),rs=a("td"),qr=a("a"),$m=s("Evaluate LED on Arxiv"),Km=n(),os=a("td"),Ym=s("How to effectively evaluate LED on long-range summarization"),Xm=n(),ls=a("td"),Qr=a("a"),Jm=s("Patrick von Platen"),Zm=n(),ns=a("td"),jr=a("a"),is=a("img"),t_=n(),at=a("tr"),ss=a("td"),Wr=a("a"),e_=s("Fine-tune LayoutLM on RVL-CDIP (a document image classification dataset)"),a_=n(),It=a("td"),r_=s("How to fine-tune "),Uh=a("em"),o_=s("LayoutLMForSequenceClassification"),l_=s(" on the RVL-CDIP dataset for scanned document classification"),n_=n(),hs=a("td"),Ur=a("a"),i_=s("Niels Rogge"),s_=n(),cs=a("td"),$r=a("a"),fs=a("img"),h_=n(),rt=a("tr"),gs=a("td"),Kr=a("a"),c_=s("Wav2Vec2 CTC decoding with GPT2 adjustment"),f_=n(),ds=a("td"),g_=s("How to decode CTC sequence with language model adjustment"),d_=n(),us=a("td"),Yr=a("a"),u_=s("Eric Lam"),m_=n(),ms=a("td"),Xr=a("a"),_s=a("img"),__=n(),ot=a("tr"),bs=a("td"),Jr=a("a"),b_=s("Fine-tune BART for summarization in two languages with Trainer class"),v_=n(),vs=a("td"),p_=s("How to fine-tune BART for summarization in two languages with Trainer class"),E_=n(),ps=a("td"),Zr=a("a"),T_=s("Eliza Szczechla"),w_=n(),Es=a("td"),to=a("a"),Ts=a("img"),D_=n(),lt=a("tr"),ws=a("td"),eo=a("a"),y_=s("Evaluate Big Bird on Trivia QA"),A_=n(),Ds=a("td"),k_=s("How to evaluate BigBird on long document question answering on Trivia QA"),R_=n(),ys=a("td"),ao=a("a"),C_=s("Patrick von Platen"),F_=n(),As=a("td"),ro=a("a"),ks=a("img"),M_=n(),nt=a("tr"),Rs=a("td"),oo=a("a"),I_=s("Create video captions using Wav2Vec2"),P_=n(),Cs=a("td"),H_=s("How to create YouTube captions from any video by transcribing the audio with Wav2Vec"),L_=n(),Fs=a("td"),lo=a("a"),B_=s("Niklas Muennighoff"),S_=n(),Ms=a("td"),no=a("a"),Is=a("img"),G_=n(),it=a("tr"),Ps=a("td"),io=a("a"),N_=s("Fine-tune the Vision Transformer on CIFAR-10 using PyTorch Lightning"),O_=n(),Hs=a("td"),x_=s("How to fine-tune the Vision Transformer (ViT) on CIFAR-10 using HuggingFace Transformers, Datasets and PyTorch Lightning"),z_=n(),Ls=a("td"),so=a("a"),V_=s("Niels Rogge"),q_=n(),Bs=a("td"),ho=a("a"),Ss=a("img"),Q_=n(),st=a("tr"),Gs=a("td"),co=a("a"),j_=s("Fine-tune the Vision Transformer on CIFAR-10 using the \u{1F917} Trainer"),W_=n(),Ns=a("td"),U_=s("How to fine-tune the Vision Transformer (ViT) on CIFAR-10 using HuggingFace Transformers, Datasets and the \u{1F917} Trainer"),$_=n(),Os=a("td"),fo=a("a"),K_=s("Niels 
Rogge"),Y_=n(),xs=a("td"),go=a("a"),zs=a("img"),X_=n(),ht=a("tr"),Vs=a("td"),uo=a("a"),J_=s("Evaluate LUKE on Open Entity, an entity typing dataset"),Z_=n(),Pt=a("td"),tb=s("How to evaluate "),$h=a("em"),eb=s("LukeForEntityClassification"),ab=s(" on the Open Entity dataset"),rb=n(),qs=a("td"),mo=a("a"),ob=s("Ikuya Yamada"),lb=n(),Qs=a("td"),_o=a("a"),js=a("img"),nb=n(),ct=a("tr"),Ws=a("td"),bo=a("a"),ib=s("Evaluate LUKE on TACRED, a relation extraction dataset"),sb=n(),Ht=a("td"),hb=s("How to evaluate "),Kh=a("em"),cb=s("LukeForEntityPairClassification"),fb=s(" on the TACRED dataset"),gb=n(),Us=a("td"),vo=a("a"),db=s("Ikuya Yamada"),ub=n(),$s=a("td"),po=a("a"),Ks=a("img"),mb=n(),ft=a("tr"),Ys=a("td"),Eo=a("a"),_b=s("Evaluate LUKE on CoNLL-2003, an important NER benchmark"),bb=n(),Lt=a("td"),vb=s("How to evaluate "),Yh=a("em"),pb=s("LukeForEntitySpanClassification"),Eb=s(" on the CoNLL-2003 dataset"),Tb=n(),Xs=a("td"),To=a("a"),wb=s("Ikuya Yamada"),Db=n(),Js=a("td"),wo=a("a"),Zs=a("img"),yb=n(),gt=a("tr"),th=a("td"),Do=a("a"),Ab=s("Evaluate BigBird-Pegasus on PubMed dataset"),kb=n(),Bt=a("td"),Rb=s("How to evaluate "),Xh=a("em"),Cb=s("BigBirdPegasusForConditionalGeneration"),Fb=s(" on PubMed dataset"),Mb=n(),eh=a("td"),yo=a("a"),Ib=s("Vasudev Gupta"),Pb=n(),ah=a("td"),Ao=a("a"),rh=a("img"),Hb=n(),dt=a("tr"),oh=a("td"),ko=a("a"),Lb=s("Speech Emotion Classification with Wav2Vec2"),Bb=n(),lh=a("td"),Sb=s("How to leverage a pretrained Wav2Vec2 model for Emotion Classification on the MEGA dataset"),Gb=n(),nh=a("td"),Ro=a("a"),Nb=s("Mehrdad Farahani"),Ob=n(),ih=a("td"),Co=a("a"),sh=a("img"),xb=n(),ut=a("tr"),hh=a("td"),Fo=a("a"),zb=s("Detect objects in an image with DETR"),Vb=n(),St=a("td"),qb=s("How to use a trained "),Jh=a("em"),Qb=s("DetrForObjectDetection"),jb=s(" model to detect objects in an image and visualize attention"),Wb=n(),ch=a("td"),Mo=a("a"),Ub=s("Niels Rogge"),$b=n(),fh=a("td"),Io=a("a"),gh=a("img"),Kb=n(),mt=a("tr"),dh=a("td"),Po=a("a"),Yb=s("Fine-tune DETR on a custom object detection dataset"),Xb=n(),Gt=a("td"),Jb=s("How to fine-tune "),Zh=a("em"),Zb=s("DetrForObjectDetection"),t1=s(" on a custom object detection dataset"),e1=n(),uh=a("td"),Ho=a("a"),a1=s("Niels Rogge"),r1=n(),mh=a("td"),Lo=a("a"),_h=a("img"),o1=n(),_t=a("tr"),bh=a("td"),Bo=a("a"),l1=s("Finetune T5 for Named Entity Recognition"),n1=n(),Nt=a("td"),i1=s("How to fine-tune "),tc=a("em"),s1=s("T5"),h1=s(" on a Named Entity Recognition Task"),c1=n(),vh=a("td"),So=a("a"),f1=s("Ogundepo Odunayo"),g1=n(),ph=a("td"),Go=a("a"),Eh=a("img"),this.h()},l(d){const u=MD('[data-svelte="svelte-1phssyn"]',document.head);bt=r(u,"META",{name:!0,content:!0}),u.forEach(o),No=i(d),m=r(d,"H1",{class:!0});var ic=l(m);v=r(ic,"A",{id:!0,class:!0,href:!0});var vv=l(v);Rh=r(vv,"SPAN",{});var pv=l(Rh);m1(Ne.$$.fragment,pv),pv.forEach(o),vv.forEach(o),wc=i(ic),Ch=r(ic,"SPAN",{});var Ev=l(Ch);Dc=h(Ev,"Community"),Ev.forEach(o),ic.forEach(o),ec=i(d),Oo=r(d,"P",{});var Tv=l(Oo);yc=h(Tv,"This page regroups resources around \u{1F917} Transformers developed by the community."),Tv.forEach(o),ac=i(d),wt=r(d,"H2",{class:!0});var sc=l(wt);kt=r(sc,"A",{id:!0,class:!0,href:!0});var wv=l(kt);Fh=r(wv,"SPAN",{});var Dv=l(Fh);m1(Oe.$$.fragment,Dv),Dv.forEach(o),wv.forEach(o),Ac=i(sc),Mh=r(sc,"SPAN",{});var yv=l(Mh);kc=h(yv,"Community resources:"),yv.forEach(o),sc.forEach(o),rc=i(d),Rt=r(d,"TABLE",{});var hc=l(Rt);Ih=r(hc,"THEAD",{});var Av=l(Ih);Dt=r(Av,"TR",{});var Th=l(Dt);xo=r(Th,"TH",{align:!0});var 
kv=l(xo);Rc=h(kv,"Resource"),kv.forEach(o),Cc=i(Th),zo=r(Th,"TH",{align:!0});var Rv=l(zo);Fc=h(Rv,"Description"),Rv.forEach(o),Mc=i(Th),Vo=r(Th,"TH",{align:!0});var Cv=l(Vo);Ic=h(Cv,"Author"),Cv.forEach(o),Th.forEach(o),Av.forEach(o),Pc=i(hc),Ph=r(hc,"TBODY",{});var Fv=l(Ph);yt=r(Fv,"TR",{});var wh=l(yt);qo=r(wh,"TD",{align:!0});var Mv=l(qo);xe=r(Mv,"A",{href:!0,rel:!0});var Iv=l(xe);Hc=h(Iv,"Hugging Face Transformers Glossary Flashcards"),Iv.forEach(o),Mv.forEach(o),Lc=i(wh),_=r(wh,"TD",{align:!0});var Ot=l(_);Bc=h(Ot,"A set of flashcards based on the "),Qo=r(Ot,"A",{href:!0});var Pv=l(Qo);Sc=h(Pv,"Transformers Docs Glossary"),Pv.forEach(o),Gc=h(Ot," that has been put into a form which can be easily learnt/revised using "),ze=r(Ot,"A",{href:!0,rel:!0});var Hv=l(ze);Nc=h(Hv,"Anki"),Hv.forEach(o),Oc=h(Ot," an open source, cross platform app specifically designed for long term knowledge retention. See this "),Ve=r(Ot,"A",{href:!0,rel:!0});var Lv=l(Ve);xc=h(Lv,"Introductory video on how to use the flashcards"),Lv.forEach(o),zc=h(Ot,"."),Ot.forEach(o),Vc=i(wh),jo=r(wh,"TD",{align:!0});var Bv=l(jo);qe=r(Bv,"A",{href:!0,rel:!0});var Sv=l(qe);qc=h(Sv,"Darigov Research"),Sv.forEach(o),Bv.forEach(o),wh.forEach(o),Fv.forEach(o),hc.forEach(o),oc=i(d),At=r(d,"H2",{class:!0});var cc=l(At);Ct=r(cc,"A",{id:!0,class:!0,href:!0});var Gv=l(Ct);Hh=r(Gv,"SPAN",{});var Nv=l(Hh);m1(Qe.$$.fragment,Nv),Nv.forEach(o),Gv.forEach(o),Qc=i(cc),Lh=r(cc,"SPAN",{});var Ov=l(Lh);jc=h(Ov,"Community notebooks:"),Ov.forEach(o),cc.forEach(o),lc=i(d),Ft=r(d,"TABLE",{});var fc=l(Ft);Bh=r(fc,"THEAD",{});var xv=l(Bh);p=r(xv,"TR",{});var xt=l(p);Wo=r(xt,"TH",{align:!0});var zv=l(Wo);Wc=h(zv,"Notebook"),zv.forEach(o),Uc=i(xt),Uo=r(xt,"TH",{align:!0});var Vv=l(Uo);$c=h(Vv,"Description"),Vv.forEach(o),Kc=i(xt),$o=r(xt,"TH",{align:!0});var qv=l($o);Yc=h(qv,"Author"),qv.forEach(o),Xc=i(xt),Sh=r(xt,"TH",{align:!0}),l(Sh).forEach(o),xt.forEach(o),xv.forEach(o),Jc=i(fc),c=r(fc,"TBODY",{});var f=l(c);E=r(f,"TR",{});var zt=l(E);Ko=r(zt,"TD",{align:!0});var Qv=l(Ko);je=r(Qv,"A",{href:!0,rel:!0});var jv=l(je);Zc=h(jv,"Fine-tune a pre-trained Transformer to generate lyrics"),jv.forEach(o),Qv.forEach(o),tf=i(zt),Yo=r(zt,"TD",{align:!0});var Wv=l(Yo);ef=h(Wv,"How to generate lyrics in the style of your favorite artist by fine-tuning a GPT-2 model"),Wv.forEach(o),af=i(zt),Xo=r(zt,"TD",{align:!0});var Uv=l(Xo);We=r(Uv,"A",{href:!0,rel:!0});var $v=l(We);rf=h($v,"Aleksey Korshuk"),$v.forEach(o),Uv.forEach(o),of=i(zt),Jo=r(zt,"TD",{align:!0});var Kv=l(Jo);Ue=r(Kv,"A",{href:!0,rel:!0});var Yv=l(Ue);Zo=r(Yv,"IMG",{src:!0,alt:!0}),Yv.forEach(o),Kv.forEach(o),zt.forEach(o),lf=i(f),T=r(f,"TR",{});var Vt=l(T);tl=r(Vt,"TD",{align:!0});var Xv=l(tl);$e=r(Xv,"A",{href:!0,rel:!0});var Jv=l($e);nf=h(Jv,"Train T5 in Tensorflow 2"),Jv.forEach(o),Xv.forEach(o),sf=i(Vt),el=r(Vt,"TD",{align:!0});var Zv=l(el);hf=h(Zv,"How to train T5 for any task using Tensorflow 2. 
This notebook demonstrates a Question & Answer task implemented in Tensorflow 2 using SQUAD"),Zv.forEach(o),cf=i(Vt),al=r(Vt,"TD",{align:!0});var tp=l(al);Ke=r(tp,"A",{href:!0,rel:!0});var ep=l(Ke);ff=h(ep,"Muhammad Harris"),ep.forEach(o),tp.forEach(o),gf=i(Vt),rl=r(Vt,"TD",{align:!0});var ap=l(rl);Ye=r(ap,"A",{href:!0,rel:!0});var rp=l(Ye);ol=r(rp,"IMG",{src:!0,alt:!0}),rp.forEach(o),ap.forEach(o),Vt.forEach(o),df=i(f),w=r(f,"TR",{});var qt=l(w);ll=r(qt,"TD",{align:!0});var op=l(ll);Xe=r(op,"A",{href:!0,rel:!0});var lp=l(Xe);uf=h(lp,"Train T5 on TPU"),lp.forEach(o),op.forEach(o),mf=i(qt),nl=r(qt,"TD",{align:!0});var np=l(nl);_f=h(np,"How to train T5 on SQUAD with Transformers and Nlp"),np.forEach(o),bf=i(qt),il=r(qt,"TD",{align:!0});var ip=l(il);Je=r(ip,"A",{href:!0,rel:!0});var sp=l(Je);vf=h(sp,"Suraj Patil"),sp.forEach(o),ip.forEach(o),pf=i(qt),sl=r(qt,"TD",{align:!0});var hp=l(sl);Ze=r(hp,"A",{href:!0,rel:!0});var cp=l(Ze);hl=r(cp,"IMG",{src:!0,alt:!0}),cp.forEach(o),hp.forEach(o),qt.forEach(o),Ef=i(f),D=r(f,"TR",{});var Qt=l(D);cl=r(Qt,"TD",{align:!0});var fp=l(cl);ta=r(fp,"A",{href:!0,rel:!0});var gp=l(ta);Tf=h(gp,"Fine-tune T5 for Classification and Multiple Choice"),gp.forEach(o),fp.forEach(o),wf=i(Qt),fl=r(Qt,"TD",{align:!0});var dp=l(fl);Df=h(dp,"How to fine-tune T5 for classification and multiple choice tasks using a text-to-text format with PyTorch Lightning"),dp.forEach(o),yf=i(Qt),gl=r(Qt,"TD",{align:!0});var up=l(gl);ea=r(up,"A",{href:!0,rel:!0});var mp=l(ea);Af=h(mp,"Suraj Patil"),mp.forEach(o),up.forEach(o),kf=i(Qt),dl=r(Qt,"TD",{align:!0});var _p=l(dl);aa=r(_p,"A",{href:!0,rel:!0});var bp=l(aa);ul=r(bp,"IMG",{src:!0,alt:!0}),bp.forEach(o),_p.forEach(o),Qt.forEach(o),Rf=i(f),y=r(f,"TR",{});var jt=l(y);ml=r(jt,"TD",{align:!0});var vp=l(ml);ra=r(vp,"A",{href:!0,rel:!0});var pp=l(ra);Cf=h(pp,"Fine-tune DialoGPT on New Datasets and Languages"),pp.forEach(o),vp.forEach(o),Ff=i(jt),_l=r(jt,"TD",{align:!0});var Ep=l(_l);Mf=h(Ep,"How to fine-tune the DialoGPT model on a new dataset for open-dialog conversational chatbots"),Ep.forEach(o),If=i(jt),bl=r(jt,"TD",{align:!0});var Tp=l(bl);oa=r(Tp,"A",{href:!0,rel:!0});var wp=l(oa);Pf=h(wp,"Nathan Cooper"),wp.forEach(o),Tp.forEach(o),Hf=i(jt),vl=r(jt,"TD",{align:!0});var Dp=l(vl);la=r(Dp,"A",{href:!0,rel:!0});var yp=l(la);pl=r(yp,"IMG",{src:!0,alt:!0}),yp.forEach(o),Dp.forEach(o),jt.forEach(o),Lf=i(f),A=r(f,"TR",{});var Wt=l(A);El=r(Wt,"TD",{align:!0});var Ap=l(El);na=r(Ap,"A",{href:!0,rel:!0});var kp=l(na);Bf=h(kp,"Long Sequence Modeling with Reformer"),kp.forEach(o),Ap.forEach(o),Sf=i(Wt),Tl=r(Wt,"TD",{align:!0});var Rp=l(Tl);Gf=h(Rp,"How to train on sequences as long as 500,000 tokens with Reformer"),Rp.forEach(o),Nf=i(Wt),wl=r(Wt,"TD",{align:!0});var Cp=l(wl);ia=r(Cp,"A",{href:!0,rel:!0});var Fp=l(ia);Of=h(Fp,"Patrick von Platen"),Fp.forEach(o),Cp.forEach(o),xf=i(Wt),Dl=r(Wt,"TD",{align:!0});var Mp=l(Dl);sa=r(Mp,"A",{href:!0,rel:!0});var Ip=l(sa);yl=r(Ip,"IMG",{src:!0,alt:!0}),Ip.forEach(o),Mp.forEach(o),Wt.forEach(o),zf=i(f),k=r(f,"TR",{});var Ut=l(k);Al=r(Ut,"TD",{align:!0});var Pp=l(Al);ha=r(Pp,"A",{href:!0,rel:!0});var Hp=l(ha);Vf=h(Hp,"Fine-tune BART for Summarization"),Hp.forEach(o),Pp.forEach(o),qf=i(Ut),kl=r(Ut,"TD",{align:!0});var Lp=l(kl);Qf=h(Lp,"How to fine-tune BART for summarization with fastai using blurr"),Lp.forEach(o),jf=i(Ut),Rl=r(Ut,"TD",{align:!0});var Bp=l(Rl);ca=r(Bp,"A",{href:!0,rel:!0});var Sp=l(ca);Wf=h(Sp,"Wayde Gilliam"),Sp.forEach(o),Bp.forEach(o),Uf=i(Ut),Cl=r(Ut,"TD",{align:!0});var 
Gp=l(Cl);fa=r(Gp,"A",{href:!0,rel:!0});var Np=l(fa);Fl=r(Np,"IMG",{src:!0,alt:!0}),Np.forEach(o),Gp.forEach(o),Ut.forEach(o),$f=i(f),R=r(f,"TR",{});var $t=l(R);Ml=r($t,"TD",{align:!0});var Op=l(Ml);ga=r(Op,"A",{href:!0,rel:!0});var xp=l(ga);Kf=h(xp,"Fine-tune a pre-trained Transformer on anyone\u2019s tweets"),xp.forEach(o),Op.forEach(o),Yf=i($t),Il=r($t,"TD",{align:!0});var zp=l(Il);Xf=h(zp,"How to generate tweets in the style of your favorite Twitter account by fine-tuning a GPT-2 model"),zp.forEach(o),Jf=i($t),Pl=r($t,"TD",{align:!0});var Vp=l(Pl);da=r(Vp,"A",{href:!0,rel:!0});var qp=l(da);Zf=h(qp,"Boris Dayma"),qp.forEach(o),Vp.forEach(o),tg=i($t),Hl=r($t,"TD",{align:!0});var Qp=l(Hl);ua=r(Qp,"A",{href:!0,rel:!0});var jp=l(ua);Ll=r(jp,"IMG",{src:!0,alt:!0}),jp.forEach(o),Qp.forEach(o),$t.forEach(o),eg=i(f),C=r(f,"TR",{});var Kt=l(C);Bl=r(Kt,"TD",{align:!0});var Wp=l(Bl);ma=r(Wp,"A",{href:!0,rel:!0});var Up=l(ma);ag=h(Up,"Optimize \u{1F917} Hugging Face models with Weights & Biases"),Up.forEach(o),Wp.forEach(o),rg=i(Kt),Sl=r(Kt,"TD",{align:!0});var $p=l(Sl);og=h($p,"A complete tutorial showcasing W&B integration with Hugging Face"),$p.forEach(o),lg=i(Kt),Gl=r(Kt,"TD",{align:!0});var Kp=l(Gl);_a=r(Kp,"A",{href:!0,rel:!0});var Yp=l(_a);ng=h(Yp,"Boris Dayma"),Yp.forEach(o),Kp.forEach(o),ig=i(Kt),Nl=r(Kt,"TD",{align:!0});var Xp=l(Nl);ba=r(Xp,"A",{href:!0,rel:!0});var Jp=l(ba);Ol=r(Jp,"IMG",{src:!0,alt:!0}),Jp.forEach(o),Xp.forEach(o),Kt.forEach(o),sg=i(f),F=r(f,"TR",{});var Yt=l(F);xl=r(Yt,"TD",{align:!0});var Zp=l(xl);va=r(Zp,"A",{href:!0,rel:!0});var tE=l(va);hg=h(tE,"Pretrain Longformer"),tE.forEach(o),Zp.forEach(o),cg=i(Yt),zl=r(Yt,"TD",{align:!0});var eE=l(zl);fg=h(eE,"How to build a \u201Clong\u201D version of existing pretrained models"),eE.forEach(o),gg=i(Yt),Vl=r(Yt,"TD",{align:!0});var aE=l(Vl);pa=r(aE,"A",{href:!0,rel:!0});var rE=l(pa);dg=h(rE,"Iz Beltagy"),rE.forEach(o),aE.forEach(o),ug=i(Yt),ql=r(Yt,"TD",{align:!0});var oE=l(ql);Ea=r(oE,"A",{href:!0,rel:!0});var lE=l(Ea);Ql=r(lE,"IMG",{src:!0,alt:!0}),lE.forEach(o),oE.forEach(o),Yt.forEach(o),mg=i(f),M=r(f,"TR",{});var Xt=l(M);jl=r(Xt,"TD",{align:!0});var nE=l(jl);Ta=r(nE,"A",{href:!0,rel:!0});var iE=l(Ta);_g=h(iE,"Fine-tune Longformer for QA"),iE.forEach(o),nE.forEach(o),bg=i(Xt),Wl=r(Xt,"TD",{align:!0});var sE=l(Wl);vg=h(sE,"How to fine-tune longformer model for QA task"),sE.forEach(o),pg=i(Xt),Ul=r(Xt,"TD",{align:!0});var hE=l(Ul);wa=r(hE,"A",{href:!0,rel:!0});var cE=l(wa);Eg=h(cE,"Suraj Patil"),cE.forEach(o),hE.forEach(o),Tg=i(Xt),$l=r(Xt,"TD",{align:!0});var fE=l($l);Da=r(fE,"A",{href:!0,rel:!0});var gE=l(Da);Kl=r(gE,"IMG",{src:!0,alt:!0}),gE.forEach(o),fE.forEach(o),Xt.forEach(o),wg=i(f),I=r(f,"TR",{});var Jt=l(I);Yl=r(Jt,"TD",{align:!0});var dE=l(Yl);ya=r(dE,"A",{href:!0,rel:!0});var uE=l(ya);Dg=h(uE,"Evaluate Model with \u{1F917}nlp"),uE.forEach(o),dE.forEach(o),yg=i(Jt),Aa=r(Jt,"TD",{align:!0});var d1=l(Aa);Ag=h(d1,"How to evaluate longformer on TriviaQA with "),Gh=r(d1,"CODE",{});var mE=l(Gh);kg=h(mE,"nlp"),mE.forEach(o),d1.forEach(o),Rg=i(Jt),Xl=r(Jt,"TD",{align:!0});var _E=l(Xl);ka=r(_E,"A",{href:!0,rel:!0});var bE=l(ka);Cg=h(bE,"Patrick von Platen"),bE.forEach(o),_E.forEach(o),Fg=i(Jt),Jl=r(Jt,"TD",{align:!0});var vE=l(Jl);Ra=r(vE,"A",{href:!0,rel:!0});var pE=l(Ra);Zl=r(pE,"IMG",{src:!0,alt:!0}),pE.forEach(o),vE.forEach(o),Jt.forEach(o),Mg=i(f),P=r(f,"TR",{});var Zt=l(P);tn=r(Zt,"TD",{align:!0});var EE=l(tn);Ca=r(EE,"A",{href:!0,rel:!0});var TE=l(Ca);Ig=h(TE,"Fine-tune T5 for Sentiment Span 
Extraction"),TE.forEach(o),EE.forEach(o),Pg=i(Zt),en=r(Zt,"TD",{align:!0});var wE=l(en);Hg=h(wE,"How to fine-tune T5 for sentiment span extraction using a text-to-text format with PyTorch Lightning"),wE.forEach(o),Lg=i(Zt),an=r(Zt,"TD",{align:!0});var DE=l(an);Fa=r(DE,"A",{href:!0,rel:!0});var yE=l(Fa);Bg=h(yE,"Lorenzo Ampil"),yE.forEach(o),DE.forEach(o),Sg=i(Zt),rn=r(Zt,"TD",{align:!0});var AE=l(rn);Ma=r(AE,"A",{href:!0,rel:!0});var kE=l(Ma);on=r(kE,"IMG",{src:!0,alt:!0}),kE.forEach(o),AE.forEach(o),Zt.forEach(o),Gg=i(f),H=r(f,"TR",{});var te=l(H);ln=r(te,"TD",{align:!0});var RE=l(ln);Ia=r(RE,"A",{href:!0,rel:!0});var CE=l(Ia);Ng=h(CE,"Fine-tune DistilBert for Multiclass Classification"),CE.forEach(o),RE.forEach(o),Og=i(te),nn=r(te,"TD",{align:!0});var FE=l(nn);xg=h(FE,"How to fine-tune DistilBert for multiclass classification with PyTorch"),FE.forEach(o),zg=i(te),sn=r(te,"TD",{align:!0});var ME=l(sn);Pa=r(ME,"A",{href:!0,rel:!0});var IE=l(Pa);Vg=h(IE,"Abhishek Kumar Mishra"),IE.forEach(o),ME.forEach(o),qg=i(te),hn=r(te,"TD",{align:!0});var PE=l(hn);Ha=r(PE,"A",{href:!0,rel:!0});var HE=l(Ha);cn=r(HE,"IMG",{src:!0,alt:!0}),HE.forEach(o),PE.forEach(o),te.forEach(o),Qg=i(f),L=r(f,"TR",{});var ee=l(L);fn=r(ee,"TD",{align:!0});var LE=l(fn);La=r(LE,"A",{href:!0,rel:!0});var BE=l(La);jg=h(BE,"Fine-tune BERT for Multi-label Classification"),BE.forEach(o),LE.forEach(o),Wg=i(ee),gn=r(ee,"TD",{align:!0});var SE=l(gn);Ug=h(SE,"How to fine-tune BERT for multi-label classification using PyTorch"),SE.forEach(o),$g=i(ee),dn=r(ee,"TD",{align:!0});var GE=l(dn);Ba=r(GE,"A",{href:!0,rel:!0});var NE=l(Ba);Kg=h(NE,"Abhishek Kumar Mishra"),NE.forEach(o),GE.forEach(o),Yg=i(ee),un=r(ee,"TD",{align:!0});var OE=l(un);Sa=r(OE,"A",{href:!0,rel:!0});var xE=l(Sa);mn=r(xE,"IMG",{src:!0,alt:!0}),xE.forEach(o),OE.forEach(o),ee.forEach(o),Xg=i(f),B=r(f,"TR",{});var ae=l(B);_n=r(ae,"TD",{align:!0});var zE=l(_n);Ga=r(zE,"A",{href:!0,rel:!0});var VE=l(Ga);Jg=h(VE,"Fine-tune T5 for Summarization"),VE.forEach(o),zE.forEach(o),Zg=i(ae),bn=r(ae,"TD",{align:!0});var qE=l(bn);td=h(qE,"How to fine-tune T5 for summarization in PyTorch and track experiments with WandB"),qE.forEach(o),ed=i(ae),vn=r(ae,"TD",{align:!0});var QE=l(vn);Na=r(QE,"A",{href:!0,rel:!0});var jE=l(Na);ad=h(jE,"Abhishek Kumar Mishra"),jE.forEach(o),QE.forEach(o),rd=i(ae),pn=r(ae,"TD",{align:!0});var WE=l(pn);Oa=r(WE,"A",{href:!0,rel:!0});var UE=l(Oa);En=r(UE,"IMG",{src:!0,alt:!0}),UE.forEach(o),WE.forEach(o),ae.forEach(o),od=i(f),S=r(f,"TR",{});var re=l(S);Tn=r(re,"TD",{align:!0});var $E=l(Tn);xa=r($E,"A",{href:!0,rel:!0});var KE=l(xa);ld=h(KE,"Speed up Fine-Tuning in Transformers with Dynamic Padding / Bucketing"),KE.forEach(o),$E.forEach(o),nd=i(re),wn=r(re,"TD",{align:!0});var YE=l(wn);id=h(YE,"How to speed up fine-tuning by a factor of 2 using dynamic padding / bucketing"),YE.forEach(o),sd=i(re),Dn=r(re,"TD",{align:!0});var XE=l(Dn);za=r(XE,"A",{href:!0,rel:!0});var JE=l(za);hd=h(JE,"Michael Benesty"),JE.forEach(o),XE.forEach(o),cd=i(re),yn=r(re,"TD",{align:!0});var ZE=l(yn);Va=r(ZE,"A",{href:!0,rel:!0});var tT=l(Va);An=r(tT,"IMG",{src:!0,alt:!0}),tT.forEach(o),ZE.forEach(o),re.forEach(o),fd=i(f),G=r(f,"TR",{});var oe=l(G);kn=r(oe,"TD",{align:!0});var eT=l(kn);qa=r(eT,"A",{href:!0,rel:!0});var aT=l(qa);gd=h(aT,"Pretrain Reformer for Masked Language Modeling"),aT.forEach(o),eT.forEach(o),dd=i(oe),Rn=r(oe,"TD",{align:!0});var rT=l(Rn);ud=h(rT,"How to train a Reformer model with bi-directional self-attention 
layers"),rT.forEach(o),md=i(oe),Cn=r(oe,"TD",{align:!0});var oT=l(Cn);Qa=r(oT,"A",{href:!0,rel:!0});var lT=l(Qa);_d=h(lT,"Patrick von Platen"),lT.forEach(o),oT.forEach(o),bd=i(oe),Fn=r(oe,"TD",{align:!0});var nT=l(Fn);ja=r(nT,"A",{href:!0,rel:!0});var iT=l(ja);Mn=r(iT,"IMG",{src:!0,alt:!0}),iT.forEach(o),nT.forEach(o),oe.forEach(o),vd=i(f),N=r(f,"TR",{});var le=l(N);In=r(le,"TD",{align:!0});var sT=l(In);Wa=r(sT,"A",{href:!0,rel:!0});var hT=l(Wa);pd=h(hT,"Expand and Fine Tune Sci-BERT"),hT.forEach(o),sT.forEach(o),Ed=i(le),Pn=r(le,"TD",{align:!0});var cT=l(Pn);Td=h(cT,"How to increase vocabulary of a pretrained SciBERT model from AllenAI on the CORD dataset and pipeline it."),cT.forEach(o),wd=i(le),Hn=r(le,"TD",{align:!0});var fT=l(Hn);Ua=r(fT,"A",{href:!0,rel:!0});var gT=l(Ua);Dd=h(gT,"Tanmay Thakur"),gT.forEach(o),fT.forEach(o),yd=i(le),Ln=r(le,"TD",{align:!0});var dT=l(Ln);$a=r(dT,"A",{href:!0,rel:!0});var uT=l($a);Bn=r(uT,"IMG",{src:!0,alt:!0}),uT.forEach(o),dT.forEach(o),le.forEach(o),Ad=i(f),O=r(f,"TR",{});var ne=l(O);Sn=r(ne,"TD",{align:!0});var mT=l(Sn);Ka=r(mT,"A",{href:!0,rel:!0});var _T=l(Ka);kd=h(_T,"Fine Tune BlenderBotSmall for Summarization using the Trainer API"),_T.forEach(o),mT.forEach(o),Rd=i(ne),Gn=r(ne,"TD",{align:!0});var bT=l(Gn);Cd=h(bT,"How to fine tune BlenderBotSmall for summarization on a custom dataset, using the Trainer API."),bT.forEach(o),Fd=i(ne),Nn=r(ne,"TD",{align:!0});var vT=l(Nn);Ya=r(vT,"A",{href:!0,rel:!0});var pT=l(Ya);Md=h(pT,"Tanmay Thakur"),pT.forEach(o),vT.forEach(o),Id=i(ne),On=r(ne,"TD",{align:!0});var ET=l(On);Xa=r(ET,"A",{href:!0,rel:!0});var TT=l(Xa);xn=r(TT,"IMG",{src:!0,alt:!0}),TT.forEach(o),ET.forEach(o),ne.forEach(o),Pd=i(f),x=r(f,"TR",{});var ie=l(x);zn=r(ie,"TD",{align:!0});var wT=l(zn);Ja=r(wT,"A",{href:!0,rel:!0});var DT=l(Ja);Hd=h(DT,"Fine-tune Electra and interpret with Integrated Gradients"),DT.forEach(o),wT.forEach(o),Ld=i(ie),Vn=r(ie,"TD",{align:!0});var yT=l(Vn);Bd=h(yT,"How to fine-tune Electra for sentiment analysis and interpret predictions with Captum Integrated Gradients"),yT.forEach(o),Sd=i(ie),qn=r(ie,"TD",{align:!0});var AT=l(qn);Za=r(AT,"A",{href:!0,rel:!0});var kT=l(Za);Gd=h(kT,"Eliza Szczechla"),kT.forEach(o),AT.forEach(o),Nd=i(ie),Qn=r(ie,"TD",{align:!0});var RT=l(Qn);tr=r(RT,"A",{href:!0,rel:!0});var CT=l(tr);jn=r(CT,"IMG",{src:!0,alt:!0}),CT.forEach(o),RT.forEach(o),ie.forEach(o),Od=i(f),z=r(f,"TR",{});var se=l(z);Wn=r(se,"TD",{align:!0});var FT=l(Wn);er=r(FT,"A",{href:!0,rel:!0});var MT=l(er);xd=h(MT,"fine-tune a non-English GPT-2 Model with Trainer class"),MT.forEach(o),FT.forEach(o),zd=i(se),Un=r(se,"TD",{align:!0});var IT=l(Un);Vd=h(IT,"How to fine-tune a non-English GPT-2 Model with Trainer class"),IT.forEach(o),qd=i(se),$n=r(se,"TD",{align:!0});var PT=l($n);ar=r(PT,"A",{href:!0,rel:!0});var HT=l(ar);Qd=h(HT,"Philipp Schmid"),HT.forEach(o),PT.forEach(o),jd=i(se),Kn=r(se,"TD",{align:!0});var LT=l(Kn);rr=r(LT,"A",{href:!0,rel:!0});var BT=l(rr);Yn=r(BT,"IMG",{src:!0,alt:!0}),BT.forEach(o),LT.forEach(o),se.forEach(o),Wd=i(f),V=r(f,"TR",{});var he=l(V);Xn=r(he,"TD",{align:!0});var ST=l(Xn);or=r(ST,"A",{href:!0,rel:!0});var GT=l(or);Ud=h(GT,"Fine-tune a DistilBERT Model for Multi Label Classification task"),GT.forEach(o),ST.forEach(o),$d=i(he),Jn=r(he,"TD",{align:!0});var NT=l(Jn);Kd=h(NT,"How to fine-tune a DistilBERT Model for Multi Label Classification task"),NT.forEach(o),Yd=i(he),Zn=r(he,"TD",{align:!0});var OT=l(Zn);lr=r(OT,"A",{href:!0,rel:!0});var xT=l(lr);Xd=h(xT,"Dhaval 
Taunk"),xT.forEach(o),OT.forEach(o),Jd=i(he),ti=r(he,"TD",{align:!0});var zT=l(ti);nr=r(zT,"A",{href:!0,rel:!0});var VT=l(nr);ei=r(VT,"IMG",{src:!0,alt:!0}),VT.forEach(o),zT.forEach(o),he.forEach(o),Zd=i(f),q=r(f,"TR",{});var ce=l(q);ai=r(ce,"TD",{align:!0});var qT=l(ai);ir=r(qT,"A",{href:!0,rel:!0});var QT=l(ir);tu=h(QT,"Fine-tune ALBERT for sentence-pair classification"),QT.forEach(o),qT.forEach(o),eu=i(ce),ri=r(ce,"TD",{align:!0});var jT=l(ri);au=h(jT,"How to fine-tune an ALBERT model or another BERT-based model for the sentence-pair classification task"),jT.forEach(o),ru=i(ce),oi=r(ce,"TD",{align:!0});var WT=l(oi);sr=r(WT,"A",{href:!0,rel:!0});var UT=l(sr);ou=h(UT,"Nadir El Manouzi"),UT.forEach(o),WT.forEach(o),lu=i(ce),li=r(ce,"TD",{align:!0});var $T=l(li);hr=r($T,"A",{href:!0,rel:!0});var KT=l(hr);ni=r(KT,"IMG",{src:!0,alt:!0}),KT.forEach(o),$T.forEach(o),ce.forEach(o),nu=i(f),Q=r(f,"TR",{});var fe=l(Q);ii=r(fe,"TD",{align:!0});var YT=l(ii);cr=r(YT,"A",{href:!0,rel:!0});var XT=l(cr);iu=h(XT,"Fine-tune Roberta for sentiment analysis"),XT.forEach(o),YT.forEach(o),su=i(fe),si=r(fe,"TD",{align:!0});var JT=l(si);hu=h(JT,"How to fine-tune a Roberta model for sentiment analysis"),JT.forEach(o),cu=i(fe),hi=r(fe,"TD",{align:!0});var ZT=l(hi);fr=r(ZT,"A",{href:!0,rel:!0});var t2=l(fr);fu=h(t2,"Dhaval Taunk"),t2.forEach(o),ZT.forEach(o),gu=i(fe),ci=r(fe,"TD",{align:!0});var e2=l(ci);gr=r(e2,"A",{href:!0,rel:!0});var a2=l(gr);fi=r(a2,"IMG",{src:!0,alt:!0}),a2.forEach(o),e2.forEach(o),fe.forEach(o),du=i(f),j=r(f,"TR",{});var ge=l(j);gi=r(ge,"TD",{align:!0});var r2=l(gi);dr=r(r2,"A",{href:!0,rel:!0});var o2=l(dr);uu=h(o2,"Evaluating Question Generation Models"),o2.forEach(o),r2.forEach(o),mu=i(ge),di=r(ge,"TD",{align:!0});var l2=l(di);_u=h(l2,"How accurate are the answers to questions generated by your seq2seq transformer model?"),l2.forEach(o),bu=i(ge),ui=r(ge,"TD",{align:!0});var n2=l(ui);ur=r(n2,"A",{href:!0,rel:!0});var i2=l(ur);vu=h(i2,"Pascal Zoleko"),i2.forEach(o),n2.forEach(o),pu=i(ge),mi=r(ge,"TD",{align:!0});var s2=l(mi);mr=r(s2,"A",{href:!0,rel:!0});var h2=l(mr);_i=r(h2,"IMG",{src:!0,alt:!0}),h2.forEach(o),s2.forEach(o),ge.forEach(o),Eu=i(f),W=r(f,"TR",{});var de=l(W);bi=r(de,"TD",{align:!0});var c2=l(bi);_r=r(c2,"A",{href:!0,rel:!0});var f2=l(_r);Tu=h(f2,"Classify text with DistilBERT and Tensorflow"),f2.forEach(o),c2.forEach(o),wu=i(de),vi=r(de,"TD",{align:!0});var g2=l(vi);Du=h(g2,"How to fine-tune DistilBERT for text classification in TensorFlow"),g2.forEach(o),yu=i(de),pi=r(de,"TD",{align:!0});var d2=l(pi);br=r(d2,"A",{href:!0,rel:!0});var u2=l(br);Au=h(u2,"Peter Bayerle"),u2.forEach(o),d2.forEach(o),ku=i(de),Ei=r(de,"TD",{align:!0});var m2=l(Ei);vr=r(m2,"A",{href:!0,rel:!0});var _2=l(vr);Ti=r(_2,"IMG",{src:!0,alt:!0}),_2.forEach(o),m2.forEach(o),de.forEach(o),Ru=i(f),U=r(f,"TR",{});var ue=l(U);wi=r(ue,"TD",{align:!0});var b2=l(wi);pr=r(b2,"A",{href:!0,rel:!0});var v2=l(pr);Cu=h(v2,"Leverage BERT for Encoder-Decoder Summarization on CNN/Dailymail"),v2.forEach(o),b2.forEach(o),Fu=i(ue),vt=r(ue,"TD",{align:!0});var Dh=l(vt);Mu=h(Dh,"How to warm-start a "),Nh=r(Dh,"EM",{});var p2=l(Nh);Iu=h(p2,"EncoderDecoderModel"),p2.forEach(o),Pu=h(Dh," with a "),Oh=r(Dh,"EM",{});var E2=l(Oh);Hu=h(E2,"bert-base-uncased"),E2.forEach(o),Lu=h(Dh," checkpoint for summarization on CNN/Dailymail"),Dh.forEach(o),Bu=i(ue),Di=r(ue,"TD",{align:!0});var T2=l(Di);Er=r(T2,"A",{href:!0,rel:!0});var w2=l(Er);Su=h(w2,"Patrick von Platen"),w2.forEach(o),T2.forEach(o),Gu=i(ue),yi=r(ue,"TD",{align:!0});var 
D2=l(yi);Tr=r(D2,"A",{href:!0,rel:!0});var y2=l(Tr);Ai=r(y2,"IMG",{src:!0,alt:!0}),y2.forEach(o),D2.forEach(o),ue.forEach(o),Nu=i(f),$=r(f,"TR",{});var me=l($);ki=r(me,"TD",{align:!0});var A2=l(ki);wr=r(A2,"A",{href:!0,rel:!0});var k2=l(wr);Ou=h(k2,"Leverage RoBERTa for Encoder-Decoder Summarization on BBC XSum"),k2.forEach(o),A2.forEach(o),xu=i(me),pt=r(me,"TD",{align:!0});var yh=l(pt);zu=h(yh,"How to warm-start a shared "),xh=r(yh,"EM",{});var R2=l(xh);Vu=h(R2,"EncoderDecoderModel"),R2.forEach(o),qu=h(yh," with a "),zh=r(yh,"EM",{});var C2=l(zh);Qu=h(C2,"roberta-base"),C2.forEach(o),ju=h(yh," checkpoint for summarization on BBC/XSum"),yh.forEach(o),Wu=i(me),Ri=r(me,"TD",{align:!0});var F2=l(Ri);Dr=r(F2,"A",{href:!0,rel:!0});var M2=l(Dr);Uu=h(M2,"Patrick von Platen"),M2.forEach(o),F2.forEach(o),$u=i(me),Ci=r(me,"TD",{align:!0});var I2=l(Ci);yr=r(I2,"A",{href:!0,rel:!0});var P2=l(yr);Fi=r(P2,"IMG",{src:!0,alt:!0}),P2.forEach(o),I2.forEach(o),me.forEach(o),Ku=i(f),K=r(f,"TR",{});var _e=l(K);Mi=r(_e,"TD",{align:!0});var H2=l(Mi);Ar=r(H2,"A",{href:!0,rel:!0});var L2=l(Ar);Yu=h(L2,"Fine-tune TAPAS on Sequential Question Answering (SQA)"),L2.forEach(o),H2.forEach(o),Xu=i(_e),Et=r(_e,"TD",{align:!0});var Ah=l(Et);Ju=h(Ah,"How to fine-tune "),Vh=r(Ah,"EM",{});var B2=l(Vh);Zu=h(B2,"TapasForQuestionAnswering"),B2.forEach(o),tm=h(Ah," with a "),qh=r(Ah,"EM",{});var S2=l(qh);em=h(S2,"tapas-base"),S2.forEach(o),am=h(Ah," checkpoint on the Sequential Question Answering (SQA) dataset"),Ah.forEach(o),rm=i(_e),Ii=r(_e,"TD",{align:!0});var G2=l(Ii);kr=r(G2,"A",{href:!0,rel:!0});var N2=l(kr);om=h(N2,"Niels Rogge"),N2.forEach(o),G2.forEach(o),lm=i(_e),Pi=r(_e,"TD",{align:!0});var O2=l(Pi);Rr=r(O2,"A",{href:!0,rel:!0});var x2=l(Rr);Hi=r(x2,"IMG",{src:!0,alt:!0}),x2.forEach(o),O2.forEach(o),_e.forEach(o),nm=i(f),Y=r(f,"TR",{});var be=l(Y);Li=r(be,"TD",{align:!0});var z2=l(Li);Cr=r(z2,"A",{href:!0,rel:!0});var V2=l(Cr);im=h(V2,"Evaluate TAPAS on Table Fact Checking (TabFact)"),V2.forEach(o),z2.forEach(o),sm=i(be),Tt=r(be,"TD",{align:!0});var kh=l(Tt);hm=h(kh,"How to evaluate a fine-tuned "),Qh=r(kh,"EM",{});var q2=l(Qh);cm=h(q2,"TapasForSequenceClassification"),q2.forEach(o),fm=h(kh," with a "),jh=r(kh,"EM",{});var Q2=l(jh);gm=h(Q2,"tapas-base-finetuned-tabfact"),Q2.forEach(o),dm=h(kh," checkpoint using a combination of the \u{1F917} datasets and \u{1F917} transformers libraries"),kh.forEach(o),um=i(be),Bi=r(be,"TD",{align:!0});var j2=l(Bi);Fr=r(j2,"A",{href:!0,rel:!0});var W2=l(Fr);mm=h(W2,"Niels Rogge"),W2.forEach(o),j2.forEach(o),_m=i(be),Si=r(be,"TD",{align:!0});var U2=l(Si);Mr=r(U2,"A",{href:!0,rel:!0});var $2=l(Mr);Gi=r($2,"IMG",{src:!0,alt:!0}),$2.forEach(o),U2.forEach(o),be.forEach(o),bm=i(f),X=r(f,"TR",{});var ve=l(X);Ni=r(ve,"TD",{align:!0});var K2=l(Ni);Ir=r(K2,"A",{href:!0,rel:!0});var Y2=l(Ir);vm=h(Y2,"Fine-tuning mBART for translation"),Y2.forEach(o),K2.forEach(o),pm=i(ve),Oi=r(ve,"TD",{align:!0});var X2=l(Oi);Em=h(X2,"How to fine-tune mBART using Seq2SeqTrainer for Hindi to English translation"),X2.forEach(o),Tm=i(ve),xi=r(ve,"TD",{align:!0});var J2=l(xi);Pr=r(J2,"A",{href:!0,rel:!0});var Z2=l(Pr);wm=h(Z2,"Vasudev Gupta"),Z2.forEach(o),J2.forEach(o),Dm=i(ve),zi=r(ve,"TD",{align:!0});var tw=l(zi);Hr=r(tw,"A",{href:!0,rel:!0});var ew=l(Hr);Vi=r(ew,"IMG",{src:!0,alt:!0}),ew.forEach(o),tw.forEach(o),ve.forEach(o),ym=i(f),J=r(f,"TR",{});var pe=l(J);qi=r(pe,"TD",{align:!0});var aw=l(qi);Lr=r(aw,"A",{href:!0,rel:!0});var rw=l(Lr);Am=h(rw,"Fine-tune LayoutLM on FUNSD (a form understanding 
dataset)"),rw.forEach(o),aw.forEach(o),km=i(pe),Mt=r(pe,"TD",{align:!0});var gc=l(Mt);Rm=h(gc,"How to fine-tune "),Wh=r(gc,"EM",{});var ow=l(Wh);Cm=h(ow,"LayoutLMForTokenClassification"),ow.forEach(o),Fm=h(gc," on the FUNSD dataset for information extraction from scanned documents"),gc.forEach(o),Mm=i(pe),Qi=r(pe,"TD",{align:!0});var lw=l(Qi);Br=r(lw,"A",{href:!0,rel:!0});var nw=l(Br);Im=h(nw,"Niels Rogge"),nw.forEach(o),lw.forEach(o),Pm=i(pe),ji=r(pe,"TD",{align:!0});var iw=l(ji);Sr=r(iw,"A",{href:!0,rel:!0});var sw=l(Sr);Wi=r(sw,"IMG",{src:!0,alt:!0}),sw.forEach(o),iw.forEach(o),pe.forEach(o),Hm=i(f),Z=r(f,"TR",{});var Ee=l(Z);Ui=r(Ee,"TD",{align:!0});var hw=l(Ui);Gr=r(hw,"A",{href:!0,rel:!0});var cw=l(Gr);Lm=h(cw,"Fine-Tune DistilGPT2 and Generate Text"),cw.forEach(o),hw.forEach(o),Bm=i(Ee),$i=r(Ee,"TD",{align:!0});var fw=l($i);Sm=h(fw,"How to fine-tune DistilGPT2 and generate text"),fw.forEach(o),Gm=i(Ee),Ki=r(Ee,"TD",{align:!0});var gw=l(Ki);Nr=r(gw,"A",{href:!0,rel:!0});var dw=l(Nr);Nm=h(dw,"Aakash Tripathi"),dw.forEach(o),gw.forEach(o),Om=i(Ee),Yi=r(Ee,"TD",{align:!0});var uw=l(Yi);Or=r(uw,"A",{href:!0,rel:!0});var mw=l(Or);Xi=r(mw,"IMG",{src:!0,alt:!0}),mw.forEach(o),uw.forEach(o),Ee.forEach(o),xm=i(f),tt=r(f,"TR",{});var Te=l(tt);Ji=r(Te,"TD",{align:!0});var _w=l(Ji);xr=r(_w,"A",{href:!0,rel:!0});var bw=l(xr);zm=h(bw,"Fine-Tune LED on up to 8K tokens"),bw.forEach(o),_w.forEach(o),Vm=i(Te),Zi=r(Te,"TD",{align:!0});var vw=l(Zi);qm=h(vw,"How to fine-tune LED on pubmed for long-range summarization"),vw.forEach(o),Qm=i(Te),ts=r(Te,"TD",{align:!0});var pw=l(ts);zr=r(pw,"A",{href:!0,rel:!0});var Ew=l(zr);jm=h(Ew,"Patrick von Platen"),Ew.forEach(o),pw.forEach(o),Wm=i(Te),es=r(Te,"TD",{align:!0});var Tw=l(es);Vr=r(Tw,"A",{href:!0,rel:!0});var ww=l(Vr);as=r(ww,"IMG",{src:!0,alt:!0}),ww.forEach(o),Tw.forEach(o),Te.forEach(o),Um=i(f),et=r(f,"TR",{});var we=l(et);rs=r(we,"TD",{align:!0});var Dw=l(rs);qr=r(Dw,"A",{href:!0,rel:!0});var yw=l(qr);$m=h(yw,"Evaluate LED on Arxiv"),yw.forEach(o),Dw.forEach(o),Km=i(we),os=r(we,"TD",{align:!0});var Aw=l(os);Ym=h(Aw,"How to effectively evaluate LED on long-range summarization"),Aw.forEach(o),Xm=i(we),ls=r(we,"TD",{align:!0});var kw=l(ls);Qr=r(kw,"A",{href:!0,rel:!0});var Rw=l(Qr);Jm=h(Rw,"Patrick von Platen"),Rw.forEach(o),kw.forEach(o),Zm=i(we),ns=r(we,"TD",{align:!0});var Cw=l(ns);jr=r(Cw,"A",{href:!0,rel:!0});var Fw=l(jr);is=r(Fw,"IMG",{src:!0,alt:!0}),Fw.forEach(o),Cw.forEach(o),we.forEach(o),t_=i(f),at=r(f,"TR",{});var De=l(at);ss=r(De,"TD",{align:!0});var Mw=l(ss);Wr=r(Mw,"A",{href:!0,rel:!0});var Iw=l(Wr);e_=h(Iw,"Fine-tune LayoutLM on RVL-CDIP (a document image classification dataset)"),Iw.forEach(o),Mw.forEach(o),a_=i(De),It=r(De,"TD",{align:!0});var dc=l(It);r_=h(dc,"How to fine-tune "),Uh=r(dc,"EM",{});var Pw=l(Uh);o_=h(Pw,"LayoutLMForSequenceClassification"),Pw.forEach(o),l_=h(dc," on the RVL-CDIP dataset for scanned document classification"),dc.forEach(o),n_=i(De),hs=r(De,"TD",{align:!0});var Hw=l(hs);Ur=r(Hw,"A",{href:!0,rel:!0});var Lw=l(Ur);i_=h(Lw,"Niels Rogge"),Lw.forEach(o),Hw.forEach(o),s_=i(De),cs=r(De,"TD",{align:!0});var Bw=l(cs);$r=r(Bw,"A",{href:!0,rel:!0});var Sw=l($r);fs=r(Sw,"IMG",{src:!0,alt:!0}),Sw.forEach(o),Bw.forEach(o),De.forEach(o),h_=i(f),rt=r(f,"TR",{});var ye=l(rt);gs=r(ye,"TD",{align:!0});var Gw=l(gs);Kr=r(Gw,"A",{href:!0,rel:!0});var Nw=l(Kr);c_=h(Nw,"Wav2Vec2 CTC decoding with GPT2 adjustment"),Nw.forEach(o),Gw.forEach(o),f_=i(ye),ds=r(ye,"TD",{align:!0});var Ow=l(ds);g_=h(Ow,"How to decode CTC sequence with 
language model adjustment"),Ow.forEach(o),d_=i(ye),us=r(ye,"TD",{align:!0});var xw=l(us);Yr=r(xw,"A",{href:!0,rel:!0});var zw=l(Yr);u_=h(zw,"Eric Lam"),zw.forEach(o),xw.forEach(o),m_=i(ye),ms=r(ye,"TD",{align:!0});var Vw=l(ms);Xr=r(Vw,"A",{href:!0,rel:!0});var qw=l(Xr);_s=r(qw,"IMG",{src:!0,alt:!0}),qw.forEach(o),Vw.forEach(o),ye.forEach(o),__=i(f),ot=r(f,"TR",{});var Ae=l(ot);bs=r(Ae,"TD",{align:!0});var Qw=l(bs);Jr=r(Qw,"A",{href:!0,rel:!0});var jw=l(Jr);b_=h(jw,"Fine-tune BART for summarization in two languages with Trainer class"),jw.forEach(o),Qw.forEach(o),v_=i(Ae),vs=r(Ae,"TD",{align:!0});var Ww=l(vs);p_=h(Ww,"How to fine-tune BART for summarization in two languages with Trainer class"),Ww.forEach(o),E_=i(Ae),ps=r(Ae,"TD",{align:!0});var Uw=l(ps);Zr=r(Uw,"A",{href:!0,rel:!0});var $w=l(Zr);T_=h($w,"Eliza Szczechla"),$w.forEach(o),Uw.forEach(o),w_=i(Ae),Es=r(Ae,"TD",{align:!0});var Kw=l(Es);to=r(Kw,"A",{href:!0,rel:!0});var Yw=l(to);Ts=r(Yw,"IMG",{src:!0,alt:!0}),Yw.forEach(o),Kw.forEach(o),Ae.forEach(o),D_=i(f),lt=r(f,"TR",{});var ke=l(lt);ws=r(ke,"TD",{align:!0});var Xw=l(ws);eo=r(Xw,"A",{href:!0,rel:!0});var Jw=l(eo);y_=h(Jw,"Evaluate Big Bird on Trivia QA"),Jw.forEach(o),Xw.forEach(o),A_=i(ke),Ds=r(ke,"TD",{align:!0});var Zw=l(Ds);k_=h(Zw,"How to evaluate BigBird on long document question answering on Trivia QA"),Zw.forEach(o),R_=i(ke),ys=r(ke,"TD",{align:!0});var t3=l(ys);ao=r(t3,"A",{href:!0,rel:!0});var e3=l(ao);C_=h(e3,"Patrick von Platen"),e3.forEach(o),t3.forEach(o),F_=i(ke),As=r(ke,"TD",{align:!0});var a3=l(As);ro=r(a3,"A",{href:!0,rel:!0});var r3=l(ro);ks=r(r3,"IMG",{src:!0,alt:!0}),r3.forEach(o),a3.forEach(o),ke.forEach(o),M_=i(f),nt=r(f,"TR",{});var Re=l(nt);Rs=r(Re,"TD",{align:!0});var o3=l(Rs);oo=r(o3,"A",{href:!0,rel:!0});var l3=l(oo);I_=h(l3,"Create video captions using Wav2Vec2"),l3.forEach(o),o3.forEach(o),P_=i(Re),Cs=r(Re,"TD",{align:!0});var n3=l(Cs);H_=h(n3,"How to create YouTube captions from any video by transcribing the audio with Wav2Vec"),n3.forEach(o),L_=i(Re),Fs=r(Re,"TD",{align:!0});var i3=l(Fs);lo=r(i3,"A",{href:!0,rel:!0});var s3=l(lo);B_=h(s3,"Niklas Muennighoff"),s3.forEach(o),i3.forEach(o),S_=i(Re),Ms=r(Re,"TD",{align:!0});var h3=l(Ms);no=r(h3,"A",{href:!0,rel:!0});var c3=l(no);Is=r(c3,"IMG",{src:!0,alt:!0}),c3.forEach(o),h3.forEach(o),Re.forEach(o),G_=i(f),it=r(f,"TR",{});var Ce=l(it);Ps=r(Ce,"TD",{align:!0});var f3=l(Ps);io=r(f3,"A",{href:!0,rel:!0});var g3=l(io);N_=h(g3,"Fine-tune the Vision Transformer on CIFAR-10 using PyTorch Lightning"),g3.forEach(o),f3.forEach(o),O_=i(Ce),Hs=r(Ce,"TD",{align:!0});var d3=l(Hs);x_=h(d3,"How to fine-tune the Vision Transformer (ViT) on CIFAR-10 using HuggingFace Transformers, Datasets and PyTorch Lightning"),d3.forEach(o),z_=i(Ce),Ls=r(Ce,"TD",{align:!0});var u3=l(Ls);so=r(u3,"A",{href:!0,rel:!0});var m3=l(so);V_=h(m3,"Niels Rogge"),m3.forEach(o),u3.forEach(o),q_=i(Ce),Bs=r(Ce,"TD",{align:!0});var _3=l(Bs);ho=r(_3,"A",{href:!0,rel:!0});var b3=l(ho);Ss=r(b3,"IMG",{src:!0,alt:!0}),b3.forEach(o),_3.forEach(o),Ce.forEach(o),Q_=i(f),st=r(f,"TR",{});var Fe=l(st);Gs=r(Fe,"TD",{align:!0});var v3=l(Gs);co=r(v3,"A",{href:!0,rel:!0});var p3=l(co);j_=h(p3,"Fine-tune the Vision Transformer on CIFAR-10 using the \u{1F917} Trainer"),p3.forEach(o),v3.forEach(o),W_=i(Fe),Ns=r(Fe,"TD",{align:!0});var E3=l(Ns);U_=h(E3,"How to fine-tune the Vision Transformer (ViT) on CIFAR-10 using HuggingFace Transformers, Datasets and the \u{1F917} Trainer"),E3.forEach(o),$_=i(Fe),Os=r(Fe,"TD",{align:!0});var 
T3=l(Os);fo=r(T3,"A",{href:!0,rel:!0});var w3=l(fo);K_=h(w3,"Niels Rogge"),w3.forEach(o),T3.forEach(o),Y_=i(Fe),xs=r(Fe,"TD",{align:!0});var D3=l(xs);go=r(D3,"A",{href:!0,rel:!0});var y3=l(go);zs=r(y3,"IMG",{src:!0,alt:!0}),y3.forEach(o),D3.forEach(o),Fe.forEach(o),X_=i(f),ht=r(f,"TR",{});var Me=l(ht);Vs=r(Me,"TD",{align:!0});var A3=l(Vs);uo=r(A3,"A",{href:!0,rel:!0});var k3=l(uo);J_=h(k3,"Evaluate LUKE on Open Entity, an entity typing dataset"),k3.forEach(o),A3.forEach(o),Z_=i(Me),Pt=r(Me,"TD",{align:!0});var uc=l(Pt);tb=h(uc,"How to evaluate "),$h=r(uc,"EM",{});var R3=l($h);eb=h(R3,"LukeForEntityClassification"),R3.forEach(o),ab=h(uc," on the Open Entity dataset"),uc.forEach(o),rb=i(Me),qs=r(Me,"TD",{align:!0});var C3=l(qs);mo=r(C3,"A",{href:!0,rel:!0});var F3=l(mo);ob=h(F3,"Ikuya Yamada"),F3.forEach(o),C3.forEach(o),lb=i(Me),Qs=r(Me,"TD",{align:!0});var M3=l(Qs);_o=r(M3,"A",{href:!0,rel:!0});var I3=l(_o);js=r(I3,"IMG",{src:!0,alt:!0}),I3.forEach(o),M3.forEach(o),Me.forEach(o),nb=i(f),ct=r(f,"TR",{});var Ie=l(ct);Ws=r(Ie,"TD",{align:!0});var P3=l(Ws);bo=r(P3,"A",{href:!0,rel:!0});var H3=l(bo);ib=h(H3,"Evaluate LUKE on TACRED, a relation extraction dataset"),H3.forEach(o),P3.forEach(o),sb=i(Ie),Ht=r(Ie,"TD",{align:!0});var mc=l(Ht);hb=h(mc,"How to evaluate "),Kh=r(mc,"EM",{});var L3=l(Kh);cb=h(L3,"LukeForEntityPairClassification"),L3.forEach(o),fb=h(mc," on the TACRED dataset"),mc.forEach(o),gb=i(Ie),Us=r(Ie,"TD",{align:!0});var B3=l(Us);vo=r(B3,"A",{href:!0,rel:!0});var S3=l(vo);db=h(S3,"Ikuya Yamada"),S3.forEach(o),B3.forEach(o),ub=i(Ie),$s=r(Ie,"TD",{align:!0});var G3=l($s);po=r(G3,"A",{href:!0,rel:!0});var N3=l(po);Ks=r(N3,"IMG",{src:!0,alt:!0}),N3.forEach(o),G3.forEach(o),Ie.forEach(o),mb=i(f),ft=r(f,"TR",{});var Pe=l(ft);Ys=r(Pe,"TD",{align:!0});var O3=l(Ys);Eo=r(O3,"A",{href:!0,rel:!0});var x3=l(Eo);_b=h(x3,"Evaluate LUKE on CoNLL-2003, an important NER benchmark"),x3.forEach(o),O3.forEach(o),bb=i(Pe),Lt=r(Pe,"TD",{align:!0});var _c=l(Lt);vb=h(_c,"How to evaluate "),Yh=r(_c,"EM",{});var z3=l(Yh);pb=h(z3,"LukeForEntitySpanClassification"),z3.forEach(o),Eb=h(_c," on the CoNLL-2003 dataset"),_c.forEach(o),Tb=i(Pe),Xs=r(Pe,"TD",{align:!0});var V3=l(Xs);To=r(V3,"A",{href:!0,rel:!0});var q3=l(To);wb=h(q3,"Ikuya Yamada"),q3.forEach(o),V3.forEach(o),Db=i(Pe),Js=r(Pe,"TD",{align:!0});var Q3=l(Js);wo=r(Q3,"A",{href:!0,rel:!0});var j3=l(wo);Zs=r(j3,"IMG",{src:!0,alt:!0}),j3.forEach(o),Q3.forEach(o),Pe.forEach(o),yb=i(f),gt=r(f,"TR",{});var He=l(gt);th=r(He,"TD",{align:!0});var W3=l(th);Do=r(W3,"A",{href:!0,rel:!0});var U3=l(Do);Ab=h(U3,"Evaluate BigBird-Pegasus on PubMed dataset"),U3.forEach(o),W3.forEach(o),kb=i(He),Bt=r(He,"TD",{align:!0});var bc=l(Bt);Rb=h(bc,"How to evaluate "),Xh=r(bc,"EM",{});var $3=l(Xh);Cb=h($3,"BigBirdPegasusForConditionalGeneration"),$3.forEach(o),Fb=h(bc," on PubMed dataset"),bc.forEach(o),Mb=i(He),eh=r(He,"TD",{align:!0});var K3=l(eh);yo=r(K3,"A",{href:!0,rel:!0});var Y3=l(yo);Ib=h(Y3,"Vasudev Gupta"),Y3.forEach(o),K3.forEach(o),Pb=i(He),ah=r(He,"TD",{align:!0});var X3=l(ah);Ao=r(X3,"A",{href:!0,rel:!0});var J3=l(Ao);rh=r(J3,"IMG",{src:!0,alt:!0}),J3.forEach(o),X3.forEach(o),He.forEach(o),Hb=i(f),dt=r(f,"TR",{});var Le=l(dt);oh=r(Le,"TD",{align:!0});var Z3=l(oh);ko=r(Z3,"A",{href:!0,rel:!0});var tD=l(ko);Lb=h(tD,"Speech Emotion Classification with Wav2Vec2"),tD.forEach(o),Z3.forEach(o),Bb=i(Le),lh=r(Le,"TD",{align:!0});var eD=l(lh);Sb=h(eD,"How to leverage a pretrained Wav2Vec2 model for Emotion Classification on the MEGA 
dataset"),eD.forEach(o),Gb=i(Le),nh=r(Le,"TD",{align:!0});var aD=l(nh);Ro=r(aD,"A",{href:!0,rel:!0});var rD=l(Ro);Nb=h(rD,"Mehrdad Farahani"),rD.forEach(o),aD.forEach(o),Ob=i(Le),ih=r(Le,"TD",{align:!0});var oD=l(ih);Co=r(oD,"A",{href:!0,rel:!0});var lD=l(Co);sh=r(lD,"IMG",{src:!0,alt:!0}),lD.forEach(o),oD.forEach(o),Le.forEach(o),xb=i(f),ut=r(f,"TR",{});var Be=l(ut);hh=r(Be,"TD",{align:!0});var nD=l(hh);Fo=r(nD,"A",{href:!0,rel:!0});var iD=l(Fo);zb=h(iD,"Detect objects in an image with DETR"),iD.forEach(o),nD.forEach(o),Vb=i(Be),St=r(Be,"TD",{align:!0});var vc=l(St);qb=h(vc,"How to use a trained "),Jh=r(vc,"EM",{});var sD=l(Jh);Qb=h(sD,"DetrForObjectDetection"),sD.forEach(o),jb=h(vc," model to detect objects in an image and visualize attention"),vc.forEach(o),Wb=i(Be),ch=r(Be,"TD",{align:!0});var hD=l(ch);Mo=r(hD,"A",{href:!0,rel:!0});var cD=l(Mo);Ub=h(cD,"Niels Rogge"),cD.forEach(o),hD.forEach(o),$b=i(Be),fh=r(Be,"TD",{align:!0});var fD=l(fh);Io=r(fD,"A",{href:!0,rel:!0});var gD=l(Io);gh=r(gD,"IMG",{src:!0,alt:!0}),gD.forEach(o),fD.forEach(o),Be.forEach(o),Kb=i(f),mt=r(f,"TR",{});var Se=l(mt);dh=r(Se,"TD",{align:!0});var dD=l(dh);Po=r(dD,"A",{href:!0,rel:!0});var uD=l(Po);Yb=h(uD,"Fine-tune DETR on a custom object detection dataset"),uD.forEach(o),dD.forEach(o),Xb=i(Se),Gt=r(Se,"TD",{align:!0});var pc=l(Gt);Jb=h(pc,"How to fine-tune "),Zh=r(pc,"EM",{});var mD=l(Zh);Zb=h(mD,"DetrForObjectDetection"),mD.forEach(o),t1=h(pc," on a custom object detection dataset"),pc.forEach(o),e1=i(Se),uh=r(Se,"TD",{align:!0});var _D=l(uh);Ho=r(_D,"A",{href:!0,rel:!0});var bD=l(Ho);a1=h(bD,"Niels Rogge"),bD.forEach(o),_D.forEach(o),r1=i(Se),mh=r(Se,"TD",{align:!0});var vD=l(mh);Lo=r(vD,"A",{href:!0,rel:!0});var pD=l(Lo);_h=r(pD,"IMG",{src:!0,alt:!0}),pD.forEach(o),vD.forEach(o),Se.forEach(o),o1=i(f),_t=r(f,"TR",{});var Ge=l(_t);bh=r(Ge,"TD",{align:!0});var ED=l(bh);Bo=r(ED,"A",{href:!0,rel:!0});var TD=l(Bo);l1=h(TD,"Finetune T5 for Named Entity Recognition"),TD.forEach(o),ED.forEach(o),n1=i(Ge),Nt=r(Ge,"TD",{align:!0});var Ec=l(Nt);i1=h(Ec,"How to fine-tune "),tc=r(Ec,"EM",{});var wD=l(tc);s1=h(wD,"T5"),wD.forEach(o),h1=h(Ec," on a Named Entity Recognition Task"),Ec.forEach(o),c1=i(Ge),vh=r(Ge,"TD",{align:!0});var DD=l(vh);So=r(DD,"A",{href:!0,rel:!0});var yD=l(So);f1=h(yD,"Ogundepo Odunayo"),yD.forEach(o),DD.forEach(o),g1=i(Ge),ph=r(Ge,"TD",{align:!0});var AD=l(ph);Go=r(AD,"A",{href:!0,rel:!0});var kD=l(Go);Eh=r(kD,"IMG",{src:!0,alt:!0}),kD.forEach(o),AD.forEach(o),Ge.forEach(o),f.forEach(o),fc.forEach(o),this.h()},h(){e(bt,"name","hf:doc:metadata"),e(bt,"content",JSON.stringify(HD)),e(v,"id","community"),e(v,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),e(v,"href","#community"),e(m,"class","relative group"),e(kt,"id","community-resources"),e(kt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),e(kt,"href","#community-resources"),e(wt,"class","relative 
group"),e(xo,"align","left"),e(zo,"align","left"),e(Vo,"align","right"),e(xe,"href","https://www.darigovresearch.com/huggingface-transformers-glossary-flashcards"),e(xe,"rel","nofollow"),e(qo,"align","left"),e(Qo,"href","glossary"),e(ze,"href","https://apps.ankiweb.net/"),e(ze,"rel","nofollow"),e(Ve,"href","https://www.youtube.com/watch?v=Dji_h7PILrw"),e(Ve,"rel","nofollow"),e(_,"align","left"),e(qe,"href","https://www.darigovresearch.com/"),e(qe,"rel","nofollow"),e(jo,"align","right"),e(Ct,"id","community-notebooks"),e(Ct,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),e(Ct,"href","#community-notebooks"),e(At,"class","relative group"),e(Wo,"align","left"),e(Uo,"align","left"),e($o,"align","left"),e(Sh,"align","right"),e(je,"href","https://github.com/AlekseyKorshuk/huggingartists"),e(je,"rel","nofollow"),e(Ko,"align","left"),e(Yo,"align","left"),e(We,"href","https://github.com/AlekseyKorshuk"),e(We,"rel","nofollow"),e(Xo,"align","left"),g(Zo.src,T1="https://colab.research.google.com/assets/colab-badge.svg")||e(Zo,"src",T1),e(Zo,"alt","Open In Colab"),e(Ue,"href","https://colab.research.google.com/github/AlekseyKorshuk/huggingartists/blob/master/huggingartists-demo.ipynb"),e(Ue,"rel","nofollow"),e(Jo,"align","right"),e($e,"href","https://github.com/snapthat/TF-T5-text-to-text"),e($e,"rel","nofollow"),e(tl,"align","left"),e(el,"align","left"),e(Ke,"href","https://github.com/HarrisDePerceptron"),e(Ke,"rel","nofollow"),e(al,"align","left"),g(ol.src,w1="https://colab.research.google.com/assets/colab-badge.svg")||e(ol,"src",w1),e(ol,"alt","Open In Colab"),e(Ye,"href","https://colab.research.google.com/github/snapthat/TF-T5-text-to-text/blob/master/snapthatT5/notebooks/TF-T5-Datasets%20Training.ipynb"),e(Ye,"rel","nofollow"),e(rl,"align","right"),e(Xe,"href","https://github.com/patil-suraj/exploring-T5/blob/master/T5_on_TPU.ipynb"),e(Xe,"rel","nofollow"),e(ll,"align","left"),e(nl,"align","left"),e(Je,"href","https://github.com/patil-suraj"),e(Je,"rel","nofollow"),e(il,"align","left"),g(hl.src,D1="https://colab.research.google.com/assets/colab-badge.svg")||e(hl,"src",D1),e(hl,"alt","Open In Colab"),e(Ze,"href","https://colab.research.google.com/github/patil-suraj/exploring-T5/blob/master/T5_on_TPU.ipynb#scrollTo=QLGiFCDqvuil"),e(Ze,"rel","nofollow"),e(sl,"align","right"),e(ta,"href","https://github.com/patil-suraj/exploring-T5/blob/master/t5_fine_tuning.ipynb"),e(ta,"rel","nofollow"),e(cl,"align","left"),e(fl,"align","left"),e(ea,"href","https://github.com/patil-suraj"),e(ea,"rel","nofollow"),e(gl,"align","left"),g(ul.src,y1="https://colab.research.google.com/assets/colab-badge.svg")||e(ul,"src",y1),e(ul,"alt","Open In Colab"),e(aa,"href","https://colab.research.google.com/github/patil-suraj/exploring-T5/blob/master/t5_fine_tuning.ipynb"),e(aa,"rel","nofollow"),e(dl,"align","right"),e(ra,"href","https://github.com/ncoop57/i-am-a-nerd/blob/master/_notebooks/2020-05-12-chatbot-part-1.ipynb"),e(ra,"rel","nofollow"),e(ml,"align","left"),e(_l,"align","left"),e(oa,"href","https://github.com/ncoop57"),e(oa,"rel","nofollow"),e(bl,"align","left"),g(pl.src,A1="https://colab.research.google.com/assets/colab-badge.svg")||e(pl,"src",A1),e(pl,"alt","Open In 
Colab"),e(la,"href","https://colab.research.google.com/github/ncoop57/i-am-a-nerd/blob/master/_notebooks/2020-05-12-chatbot-part-1.ipynb"),e(la,"rel","nofollow"),e(vl,"align","right"),e(na,"href","https://github.com/patrickvonplaten/notebooks/blob/master/PyTorch_Reformer.ipynb"),e(na,"rel","nofollow"),e(El,"align","left"),e(Tl,"align","left"),e(ia,"href","https://github.com/patrickvonplaten"),e(ia,"rel","nofollow"),e(wl,"align","left"),g(yl.src,k1="https://colab.research.google.com/assets/colab-badge.svg")||e(yl,"src",k1),e(yl,"alt","Open In Colab"),e(sa,"href","https://colab.research.google.com/github/patrickvonplaten/notebooks/blob/master/PyTorch_Reformer.ipynb"),e(sa,"rel","nofollow"),e(Dl,"align","right"),e(ha,"href","https://github.com/ohmeow/ohmeow_website/blob/master/_notebooks/2020-05-23-text-generation-with-blurr.ipynb"),e(ha,"rel","nofollow"),e(Al,"align","left"),e(kl,"align","left"),e(ca,"href","https://ohmeow.com/"),e(ca,"rel","nofollow"),e(Rl,"align","left"),g(Fl.src,R1="https://colab.research.google.com/assets/colab-badge.svg")||e(Fl,"src",R1),e(Fl,"alt","Open In Colab"),e(fa,"href","https://colab.research.google.com/github/ohmeow/ohmeow_website/blob/master/_notebooks/2020-05-23-text-generation-with-blurr.ipynb"),e(fa,"rel","nofollow"),e(Cl,"align","right"),e(ga,"href","https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb"),e(ga,"rel","nofollow"),e(Ml,"align","left"),e(Il,"align","left"),e(da,"href","https://github.com/borisdayma"),e(da,"rel","nofollow"),e(Pl,"align","left"),g(Ll.src,C1="https://colab.research.google.com/assets/colab-badge.svg")||e(Ll,"src",C1),e(Ll,"alt","Open In Colab"),e(ua,"href","https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb"),e(ua,"rel","nofollow"),e(Hl,"align","right"),e(ma,"href","https://colab.research.google.com/github/wandb/examples/blob/master/colabs/huggingface/Optimize_Hugging_Face_models_with_Weights_%26_Biases.ipynb"),e(ma,"rel","nofollow"),e(Bl,"align","left"),e(Sl,"align","left"),e(_a,"href","https://github.com/borisdayma"),e(_a,"rel","nofollow"),e(Gl,"align","left"),g(Ol.src,F1="https://colab.research.google.com/assets/colab-badge.svg")||e(Ol,"src",F1),e(Ol,"alt","Open In Colab"),e(ba,"href","https://colab.research.google.com/github/wandb/examples/blob/master/colabs/huggingface/Optimize_Hugging_Face_models_with_Weights_%26_Biases.ipynb"),e(ba,"rel","nofollow"),e(Nl,"align","right"),e(va,"href","https://github.com/allenai/longformer/blob/master/scripts/convert_model_to_long.ipynb"),e(va,"rel","nofollow"),e(xl,"align","left"),e(zl,"align","left"),e(pa,"href","https://beltagy.net"),e(pa,"rel","nofollow"),e(Vl,"align","left"),g(Ql.src,M1="https://colab.research.google.com/assets/colab-badge.svg")||e(Ql,"src",M1),e(Ql,"alt","Open In Colab"),e(Ea,"href","https://colab.research.google.com/github/allenai/longformer/blob/master/scripts/convert_model_to_long.ipynb"),e(Ea,"rel","nofollow"),e(ql,"align","right"),e(Ta,"href","https://github.com/patil-suraj/Notebooks/blob/master/longformer_qa_training.ipynb"),e(Ta,"rel","nofollow"),e(jl,"align","left"),e(Wl,"align","left"),e(wa,"href","https://github.com/patil-suraj"),e(wa,"rel","nofollow"),e(Ul,"align","left"),g(Kl.src,I1="https://colab.research.google.com/assets/colab-badge.svg")||e(Kl,"src",I1),e(Kl,"alt","Open In 
Colab"),e(Da,"href","https://colab.research.google.com/github/patil-suraj/Notebooks/blob/master/longformer_qa_training.ipynb"),e(Da,"rel","nofollow"),e($l,"align","right"),e(ya,"href","https://github.com/patrickvonplaten/notebooks/blob/master/How_to_evaluate_Longformer_on_TriviaQA_using_NLP.ipynb"),e(ya,"rel","nofollow"),e(Yl,"align","left"),e(Aa,"align","left"),e(ka,"href","https://github.com/patrickvonplaten"),e(ka,"rel","nofollow"),e(Xl,"align","left"),g(Zl.src,P1="https://colab.research.google.com/assets/colab-badge.svg")||e(Zl,"src",P1),e(Zl,"alt","Open In Colab"),e(Ra,"href","https://colab.research.google.com/drive/1m7eTGlPmLRgoPkkA7rkhQdZ9ydpmsdLE?usp=sharing"),e(Ra,"rel","nofollow"),e(Jl,"align","right"),e(Ca,"href","https://github.com/enzoampil/t5-intro/blob/master/t5_qa_training_pytorch_span_extraction.ipynb"),e(Ca,"rel","nofollow"),e(tn,"align","left"),e(en,"align","left"),e(Fa,"href","https://github.com/enzoampil"),e(Fa,"rel","nofollow"),e(an,"align","left"),g(on.src,H1="https://colab.research.google.com/assets/colab-badge.svg")||e(on,"src",H1),e(on,"alt","Open In Colab"),e(Ma,"href","https://colab.research.google.com/github/enzoampil/t5-intro/blob/master/t5_qa_training_pytorch_span_extraction.ipynb"),e(Ma,"rel","nofollow"),e(rn,"align","right"),e(Ia,"href","https://github.com/abhimishra91/transformers-tutorials/blob/master/transformers_multiclass_classification.ipynb"),e(Ia,"rel","nofollow"),e(ln,"align","left"),e(nn,"align","left"),e(Pa,"href","https://github.com/abhimishra91"),e(Pa,"rel","nofollow"),e(sn,"align","left"),g(cn.src,L1="https://colab.research.google.com/assets/colab-badge.svg")||e(cn,"src",L1),e(cn,"alt","Open In Colab"),e(Ha,"href","https://colab.research.google.com/github/abhimishra91/transformers-tutorials/blob/master/transformers_multiclass_classification.ipynb"),e(Ha,"rel","nofollow"),e(hn,"align","right"),e(La,"href","https://github.com/abhimishra91/transformers-tutorials/blob/master/transformers_multi_label_classification.ipynb"),e(La,"rel","nofollow"),e(fn,"align","left"),e(gn,"align","left"),e(Ba,"href","https://github.com/abhimishra91"),e(Ba,"rel","nofollow"),e(dn,"align","left"),g(mn.src,B1="https://colab.research.google.com/assets/colab-badge.svg")||e(mn,"src",B1),e(mn,"alt","Open In Colab"),e(Sa,"href","https://colab.research.google.com/github/abhimishra91/transformers-tutorials/blob/master/transformers_multi_label_classification.ipynb"),e(Sa,"rel","nofollow"),e(un,"align","right"),e(Ga,"href","https://github.com/abhimishra91/transformers-tutorials/blob/master/transformers_summarization_wandb.ipynb"),e(Ga,"rel","nofollow"),e(_n,"align","left"),e(bn,"align","left"),e(Na,"href","https://github.com/abhimishra91"),e(Na,"rel","nofollow"),e(vn,"align","left"),g(En.src,S1="https://colab.research.google.com/assets/colab-badge.svg")||e(En,"src",S1),e(En,"alt","Open In Colab"),e(Oa,"href","https://colab.research.google.com/github/abhimishra91/transformers-tutorials/blob/master/transformers_summarization_wandb.ipynb"),e(Oa,"rel","nofollow"),e(pn,"align","right"),e(xa,"href","https://github.com/ELS-RD/transformers-notebook/blob/master/Divide_Hugging_Face_Transformers_training_time_by_2_or_more.ipynb"),e(xa,"rel","nofollow"),e(Tn,"align","left"),e(wn,"align","left"),e(za,"href","https://github.com/pommedeterresautee"),e(za,"rel","nofollow"),e(Dn,"align","left"),g(An.src,G1="https://colab.research.google.com/assets/colab-badge.svg")||e(An,"src",G1),e(An,"alt","Open In 
Colab"),e(Va,"href","https://colab.research.google.com/drive/1CBfRU1zbfu7-ijiOqAAQUA-RJaxfcJoO?usp=sharing"),e(Va,"rel","nofollow"),e(yn,"align","right"),e(qa,"href","https://github.com/patrickvonplaten/notebooks/blob/master/Reformer_For_Masked_LM.ipynb"),e(qa,"rel","nofollow"),e(kn,"align","left"),e(Rn,"align","left"),e(Qa,"href","https://github.com/patrickvonplaten"),e(Qa,"rel","nofollow"),e(Cn,"align","left"),g(Mn.src,N1="https://colab.research.google.com/assets/colab-badge.svg")||e(Mn,"src",N1),e(Mn,"alt","Open In Colab"),e(ja,"href","https://colab.research.google.com/drive/1tzzh0i8PgDQGV3SMFUGxM7_gGae3K-uW?usp=sharing"),e(ja,"rel","nofollow"),e(Fn,"align","right"),e(Wa,"href","https://github.com/lordtt13/word-embeddings/blob/master/COVID-19%20Research%20Data/COVID-SciBERT.ipynb"),e(Wa,"rel","nofollow"),e(In,"align","left"),e(Pn,"align","left"),e(Ua,"href","https://github.com/lordtt13"),e(Ua,"rel","nofollow"),e(Hn,"align","left"),g(Bn.src,O1="https://colab.research.google.com/assets/colab-badge.svg")||e(Bn,"src",O1),e(Bn,"alt","Open In Colab"),e($a,"href","https://colab.research.google.com/drive/1rqAR40goxbAfez1xvF3hBJphSCsvXmh8"),e($a,"rel","nofollow"),e(Ln,"align","right"),e(Ka,"href","https://github.com/lordtt13/transformers-experiments/blob/master/Custom%20Tasks/fine-tune-blenderbot_small-for-summarization.ipynb"),e(Ka,"rel","nofollow"),e(Sn,"align","left"),e(Gn,"align","left"),e(Ya,"href","https://github.com/lordtt13"),e(Ya,"rel","nofollow"),e(Nn,"align","left"),g(xn.src,x1="https://colab.research.google.com/assets/colab-badge.svg")||e(xn,"src",x1),e(xn,"alt","Open In Colab"),e(Xa,"href","https://colab.research.google.com/drive/19Wmupuls7mykSGyRN_Qo6lPQhgp56ymq?usp=sharing"),e(Xa,"rel","nofollow"),e(On,"align","right"),e(Ja,"href","https://github.com/elsanns/xai-nlp-notebooks/blob/master/electra_fine_tune_interpret_captum_ig.ipynb"),e(Ja,"rel","nofollow"),e(zn,"align","left"),e(Vn,"align","left"),e(Za,"href","https://elsanns.github.io"),e(Za,"rel","nofollow"),e(qn,"align","left"),g(jn.src,z1="https://colab.research.google.com/assets/colab-badge.svg")||e(jn,"src",z1),e(jn,"alt","Open In Colab"),e(tr,"href","https://colab.research.google.com/github/elsanns/xai-nlp-notebooks/blob/master/electra_fine_tune_interpret_captum_ig.ipynb"),e(tr,"rel","nofollow"),e(Qn,"align","right"),e(er,"href","https://github.com/philschmid/fine-tune-GPT-2/blob/master/Fine_tune_a_non_English_GPT_2_Model_with_Huggingface.ipynb"),e(er,"rel","nofollow"),e(Wn,"align","left"),e(Un,"align","left"),e(ar,"href","https://www.philschmid.de"),e(ar,"rel","nofollow"),e($n,"align","left"),g(Yn.src,V1="https://colab.research.google.com/assets/colab-badge.svg")||e(Yn,"src",V1),e(Yn,"alt","Open In Colab"),e(rr,"href","https://colab.research.google.com/github/philschmid/fine-tune-GPT-2/blob/master/Fine_tune_a_non_English_GPT_2_Model_with_Huggingface.ipynb"),e(rr,"rel","nofollow"),e(Kn,"align","right"),e(or,"href","https://github.com/DhavalTaunk08/Transformers_scripts/blob/master/Transformers_multilabel_distilbert.ipynb"),e(or,"rel","nofollow"),e(Xn,"align","left"),e(Jn,"align","left"),e(lr,"href","https://github.com/DhavalTaunk08"),e(lr,"rel","nofollow"),e(Zn,"align","left"),g(ei.src,q1="https://colab.research.google.com/assets/colab-badge.svg")||e(ei,"src",q1),e(ei,"alt","Open In 
Colab"),e(nr,"href","https://colab.research.google.com/github/DhavalTaunk08/Transformers_scripts/blob/master/Transformers_multilabel_distilbert.ipynb"),e(nr,"rel","nofollow"),e(ti,"align","right"),e(ir,"href","https://github.com/NadirEM/nlp-notebooks/blob/master/Fine_tune_ALBERT_sentence_pair_classification.ipynb"),e(ir,"rel","nofollow"),e(ai,"align","left"),e(ri,"align","left"),e(sr,"href","https://github.com/NadirEM"),e(sr,"rel","nofollow"),e(oi,"align","left"),g(ni.src,Q1="https://colab.research.google.com/assets/colab-badge.svg")||e(ni,"src",Q1),e(ni,"alt","Open In Colab"),e(hr,"href","https://colab.research.google.com/github/NadirEM/nlp-notebooks/blob/master/Fine_tune_ALBERT_sentence_pair_classification.ipynb"),e(hr,"rel","nofollow"),e(li,"align","right"),e(cr,"href","https://github.com/DhavalTaunk08/NLP_scripts/blob/master/sentiment_analysis_using_roberta.ipynb"),e(cr,"rel","nofollow"),e(ii,"align","left"),e(si,"align","left"),e(fr,"href","https://github.com/DhavalTaunk08"),e(fr,"rel","nofollow"),e(hi,"align","left"),g(fi.src,j1="https://colab.research.google.com/assets/colab-badge.svg")||e(fi,"src",j1),e(fi,"alt","Open In Colab"),e(gr,"href","https://colab.research.google.com/github/DhavalTaunk08/NLP_scripts/blob/master/sentiment_analysis_using_roberta.ipynb"),e(gr,"rel","nofollow"),e(ci,"align","right"),e(dr,"href","https://github.com/flexudy-pipe/qugeev"),e(dr,"rel","nofollow"),e(gi,"align","left"),e(di,"align","left"),e(ur,"href","https://github.com/zolekode"),e(ur,"rel","nofollow"),e(ui,"align","left"),g(_i.src,W1="https://colab.research.google.com/assets/colab-badge.svg")||e(_i,"src",W1),e(_i,"alt","Open In Colab"),e(mr,"href","https://colab.research.google.com/drive/1bpsSqCQU-iw_5nNoRm_crPq6FRuJthq_?usp=sharing"),e(mr,"rel","nofollow"),e(mi,"align","right"),e(_r,"href","https://github.com/peterbayerle/huggingface_notebook/blob/main/distilbert_tf.ipynb"),e(_r,"rel","nofollow"),e(bi,"align","left"),e(vi,"align","left"),e(br,"href","https://github.com/peterbayerle"),e(br,"rel","nofollow"),e(pi,"align","left"),g(Ti.src,U1="https://colab.research.google.com/assets/colab-badge.svg")||e(Ti,"src",U1),e(Ti,"alt","Open In Colab"),e(vr,"href","https://colab.research.google.com/github/peterbayerle/huggingface_notebook/blob/main/distilbert_tf.ipynb"),e(vr,"rel","nofollow"),e(Ei,"align","right"),e(pr,"href","https://github.com/patrickvonplaten/notebooks/blob/master/BERT2BERT_for_CNN_Dailymail.ipynb"),e(pr,"rel","nofollow"),e(wi,"align","left"),e(vt,"align","left"),e(Er,"href","https://github.com/patrickvonplaten"),e(Er,"rel","nofollow"),e(Di,"align","left"),g(Ai.src,$1="https://colab.research.google.com/assets/colab-badge.svg")||e(Ai,"src",$1),e(Ai,"alt","Open In Colab"),e(Tr,"href","https://colab.research.google.com/github/patrickvonplaten/notebooks/blob/master/BERT2BERT_for_CNN_Dailymail.ipynb"),e(Tr,"rel","nofollow"),e(yi,"align","right"),e(wr,"href","https://github.com/patrickvonplaten/notebooks/blob/master/RoBERTaShared_for_BBC_XSum.ipynb"),e(wr,"rel","nofollow"),e(ki,"align","left"),e(pt,"align","left"),e(Dr,"href","https://github.com/patrickvonplaten"),e(Dr,"rel","nofollow"),e(Ri,"align","left"),g(Fi.src,K1="https://colab.research.google.com/assets/colab-badge.svg")||e(Fi,"src",K1),e(Fi,"alt","Open In 
Colab"),e(yr,"href","https://colab.research.google.com/github/patrickvonplaten/notebooks/blob/master/RoBERTaShared_for_BBC_XSum.ipynb"),e(yr,"rel","nofollow"),e(Ci,"align","right"),e(Ar,"href","https://github.com/NielsRogge/Transformers-Tutorials/blob/master/TAPAS/Fine_tuning_TapasForQuestionAnswering_on_SQA.ipynb"),e(Ar,"rel","nofollow"),e(Mi,"align","left"),e(Et,"align","left"),e(kr,"href","https://github.com/nielsrogge"),e(kr,"rel","nofollow"),e(Ii,"align","left"),g(Hi.src,Y1="https://colab.research.google.com/assets/colab-badge.svg")||e(Hi,"src",Y1),e(Hi,"alt","Open In Colab"),e(Rr,"href","https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/TAPAS/Fine_tuning_TapasForQuestionAnswering_on_SQA.ipynb"),e(Rr,"rel","nofollow"),e(Pi,"align","right"),e(Cr,"href","https://github.com/NielsRogge/Transformers-Tutorials/blob/master/TAPAS/Evaluating_TAPAS_on_the_Tabfact_test_set.ipynb"),e(Cr,"rel","nofollow"),e(Li,"align","left"),e(Tt,"align","left"),e(Fr,"href","https://github.com/nielsrogge"),e(Fr,"rel","nofollow"),e(Bi,"align","left"),g(Gi.src,X1="https://colab.research.google.com/assets/colab-badge.svg")||e(Gi,"src",X1),e(Gi,"alt","Open In Colab"),e(Mr,"href","https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/TAPAS/Evaluating_TAPAS_on_the_Tabfact_test_set.ipynb"),e(Mr,"rel","nofollow"),e(Si,"align","right"),e(Ir,"href","https://colab.research.google.com/github/vasudevgupta7/huggingface-tutorials/blob/main/translation_training.ipynb"),e(Ir,"rel","nofollow"),e(Ni,"align","left"),e(Oi,"align","left"),e(Pr,"href","https://github.com/vasudevgupta7"),e(Pr,"rel","nofollow"),e(xi,"align","left"),g(Vi.src,J1="https://colab.research.google.com/assets/colab-badge.svg")||e(Vi,"src",J1),e(Vi,"alt","Open In Colab"),e(Hr,"href","https://colab.research.google.com/github/vasudevgupta7/huggingface-tutorials/blob/main/translation_training.ipynb"),e(Hr,"rel","nofollow"),e(zi,"align","right"),e(Lr,"href","https://github.com/NielsRogge/Transformers-Tutorials/blob/master/LayoutLM/Fine_tuning_LayoutLMForTokenClassification_on_FUNSD.ipynb"),e(Lr,"rel","nofollow"),e(qi,"align","left"),e(Mt,"align","left"),e(Br,"href","https://github.com/nielsrogge"),e(Br,"rel","nofollow"),e(Qi,"align","left"),g(Wi.src,Z1="https://colab.research.google.com/assets/colab-badge.svg")||e(Wi,"src",Z1),e(Wi,"alt","Open In Colab"),e(Sr,"href","https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/LayoutLM/Fine_tuning_LayoutLMForTokenClassification_on_FUNSD.ipynb"),e(Sr,"rel","nofollow"),e(ji,"align","right"),e(Gr,"href","https://colab.research.google.com/github/tripathiaakash/DistilGPT2-Tutorial/blob/main/distilgpt2_fine_tuning.ipynb"),e(Gr,"rel","nofollow"),e(Ui,"align","left"),e($i,"align","left"),e(Nr,"href","https://github.com/tripathiaakash"),e(Nr,"rel","nofollow"),e(Ki,"align","left"),g(Xi.src,tv="https://colab.research.google.com/assets/colab-badge.svg")||e(Xi,"src",tv),e(Xi,"alt","Open In 
Colab"),e(Or,"href","https://colab.research.google.com/github/tripathiaakash/DistilGPT2-Tutorial/blob/main/distilgpt2_fine_tuning.ipynb"),e(Or,"rel","nofollow"),e(Yi,"align","right"),e(xr,"href","https://github.com/patrickvonplaten/notebooks/blob/master/Fine_tune_Longformer_Encoder_Decoder_(LED)_for_Summarization_on_pubmed.ipynb"),e(xr,"rel","nofollow"),e(Ji,"align","left"),e(Zi,"align","left"),e(zr,"href","https://github.com/patrickvonplaten"),e(zr,"rel","nofollow"),e(ts,"align","left"),g(as.src,ev="https://colab.research.google.com/assets/colab-badge.svg")||e(as,"src",ev),e(as,"alt","Open In Colab"),e(Vr,"href","https://colab.research.google.com/github/patrickvonplaten/notebooks/blob/master/Fine_tune_Longformer_Encoder_Decoder_(LED)_for_Summarization_on_pubmed.ipynb"),e(Vr,"rel","nofollow"),e(es,"align","right"),e(qr,"href","https://github.com/patrickvonplaten/notebooks/blob/master/LED_on_Arxiv.ipynb"),e(qr,"rel","nofollow"),e(rs,"align","left"),e(os,"align","left"),e(Qr,"href","https://github.com/patrickvonplaten"),e(Qr,"rel","nofollow"),e(ls,"align","left"),g(is.src,av="https://colab.research.google.com/assets/colab-badge.svg")||e(is,"src",av),e(is,"alt","Open In Colab"),e(jr,"href","https://colab.research.google.com/github/patrickvonplaten/notebooks/blob/master/LED_on_Arxiv.ipynb"),e(jr,"rel","nofollow"),e(ns,"align","right"),e(Wr,"href","https://github.com/NielsRogge/Transformers-Tutorials/blob/master/LayoutLM/Fine_tuning_LayoutLMForSequenceClassification_on_RVL_CDIP.ipynb"),e(Wr,"rel","nofollow"),e(ss,"align","left"),e(It,"align","left"),e(Ur,"href","https://github.com/nielsrogge"),e(Ur,"rel","nofollow"),e(hs,"align","left"),g(fs.src,rv="https://colab.research.google.com/assets/colab-badge.svg")||e(fs,"src",rv),e(fs,"alt","Open In Colab"),e($r,"href","https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/LayoutLM/Fine_tuning_LayoutLMForSequenceClassification_on_RVL_CDIP.ipynb"),e($r,"rel","nofollow"),e(cs,"align","right"),e(Kr,"href","https://github.com/voidful/huggingface_notebook/blob/main/xlsr_gpt.ipynb"),e(Kr,"rel","nofollow"),e(gs,"align","left"),e(ds,"align","left"),e(Yr,"href","https://github.com/voidful"),e(Yr,"rel","nofollow"),e(us,"align","left"),g(_s.src,ov="https://colab.research.google.com/assets/colab-badge.svg")||e(_s,"src",ov),e(_s,"alt","Open In Colab"),e(Xr,"href","https://colab.research.google.com/drive/1e_z5jQHYbO2YKEaUgzb1ww1WwiAyydAj?usp=sharing"),e(Xr,"rel","nofollow"),e(ms,"align","right"),e(Jr,"href","https://github.com/elsanns/xai-nlp-notebooks/blob/master/fine_tune_bart_summarization_two_langs.ipynb"),e(Jr,"rel","nofollow"),e(bs,"align","left"),e(vs,"align","left"),e(Zr,"href","https://github.com/elsanns"),e(Zr,"rel","nofollow"),e(ps,"align","left"),g(Ts.src,lv="https://colab.research.google.com/assets/colab-badge.svg")||e(Ts,"src",lv),e(Ts,"alt","Open In Colab"),e(to,"href","https://colab.research.google.com/github/elsanns/xai-nlp-notebooks/blob/master/fine_tune_bart_summarization_two_langs.ipynb"),e(to,"rel","nofollow"),e(Es,"align","right"),e(eo,"href","https://github.com/patrickvonplaten/notebooks/blob/master/Evaluating_Big_Bird_on_TriviaQA.ipynb"),e(eo,"rel","nofollow"),e(ws,"align","left"),e(Ds,"align","left"),e(ao,"href","https://github.com/patrickvonplaten"),e(ao,"rel","nofollow"),e(ys,"align","left"),g(ks.src,nv="https://colab.research.google.com/assets/colab-badge.svg")||e(ks,"src",nv),e(ks,"alt","Open In 
Colab"),e(ro,"href","https://colab.research.google.com/github/patrickvonplaten/notebooks/blob/master/Evaluating_Big_Bird_on_TriviaQA.ipynb"),e(ro,"rel","nofollow"),e(As,"align","right"),e(oo,"href","https://github.com/Muennighoff/ytclipcc/blob/main/wav2vec_youtube_captions.ipynb"),e(oo,"rel","nofollow"),e(Rs,"align","left"),e(Cs,"align","left"),e(lo,"href","https://github.com/Muennighoff"),e(lo,"rel","nofollow"),e(Fs,"align","left"),g(Is.src,iv="https://colab.research.google.com/assets/colab-badge.svg")||e(Is,"src",iv),e(Is,"alt","Open In Colab"),e(no,"href","https://colab.research.google.com/github/Muennighoff/ytclipcc/blob/main/wav2vec_youtube_captions.ipynb"),e(no,"rel","nofollow"),e(Ms,"align","right"),e(io,"href","https://github.com/NielsRogge/Transformers-Tutorials/blob/master/VisionTransformer/Fine_tuning_the_Vision_Transformer_on_CIFAR_10_with_PyTorch_Lightning.ipynb"),e(io,"rel","nofollow"),e(Ps,"align","left"),e(Hs,"align","left"),e(so,"href","https://github.com/nielsrogge"),e(so,"rel","nofollow"),e(Ls,"align","left"),g(Ss.src,sv="https://colab.research.google.com/assets/colab-badge.svg")||e(Ss,"src",sv),e(Ss,"alt","Open In Colab"),e(ho,"href","https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/VisionTransformer/Fine_tuning_the_Vision_Transformer_on_CIFAR_10_with_PyTorch_Lightning.ipynb"),e(ho,"rel","nofollow"),e(Bs,"align","right"),e(co,"href","https://github.com/NielsRogge/Transformers-Tutorials/blob/master/VisionTransformer/Fine_tuning_the_Vision_Transformer_on_CIFAR_10_with_the_%F0%9F%A4%97_Trainer.ipynb"),e(co,"rel","nofollow"),e(Gs,"align","left"),e(Ns,"align","left"),e(fo,"href","https://github.com/nielsrogge"),e(fo,"rel","nofollow"),e(Os,"align","left"),g(zs.src,hv="https://colab.research.google.com/assets/colab-badge.svg")||e(zs,"src",hv),e(zs,"alt","Open In Colab"),e(go,"href","https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/VisionTransformer/Fine_tuning_the_Vision_Transformer_on_CIFAR_10_with_the_%F0%9F%A4%97_Trainer.ipynb"),e(go,"rel","nofollow"),e(xs,"align","right"),e(uo,"href","https://github.com/studio-ousia/luke/blob/master/notebooks/huggingface_open_entity.ipynb"),e(uo,"rel","nofollow"),e(Vs,"align","left"),e(Pt,"align","left"),e(mo,"href","https://github.com/ikuyamada"),e(mo,"rel","nofollow"),e(qs,"align","left"),g(js.src,cv="https://colab.research.google.com/assets/colab-badge.svg")||e(js,"src",cv),e(js,"alt","Open In Colab"),e(_o,"href","https://colab.research.google.com/github/studio-ousia/luke/blob/master/notebooks/huggingface_open_entity.ipynb"),e(_o,"rel","nofollow"),e(Qs,"align","right"),e(bo,"href","https://github.com/studio-ousia/luke/blob/master/notebooks/huggingface_tacred.ipynb"),e(bo,"rel","nofollow"),e(Ws,"align","left"),e(Ht,"align","left"),e(vo,"href","https://github.com/ikuyamada"),e(vo,"rel","nofollow"),e(Us,"align","left"),g(Ks.src,fv="https://colab.research.google.com/assets/colab-badge.svg")||e(Ks,"src",fv),e(Ks,"alt","Open In Colab"),e(po,"href","https://colab.research.google.com/github/studio-ousia/luke/blob/master/notebooks/huggingface_tacred.ipynb"),e(po,"rel","nofollow"),e($s,"align","right"),e(Eo,"href","https://github.com/studio-ousia/luke/blob/master/notebooks/huggingface_conll_2003.ipynb"),e(Eo,"rel","nofollow"),e(Ys,"align","left"),e(Lt,"align","left"),e(To,"href","https://github.com/ikuyamada"),e(To,"rel","nofollow"),e(Xs,"align","left"),g(Zs.src,gv="https://colab.research.google.com/assets/colab-badge.svg")||e(Zs,"src",gv),e(Zs,"alt","Open In 
Colab"),e(wo,"href","https://colab.research.google.com/github/studio-ousia/luke/blob/master/notebooks/huggingface_conll_2003.ipynb"),e(wo,"rel","nofollow"),e(Js,"align","right"),e(Do,"href","https://github.com/vasudevgupta7/bigbird/blob/main/notebooks/bigbird_pegasus_evaluation.ipynb"),e(Do,"rel","nofollow"),e(th,"align","left"),e(Bt,"align","left"),e(yo,"href","https://github.com/vasudevgupta7"),e(yo,"rel","nofollow"),e(eh,"align","left"),g(rh.src,dv="https://colab.research.google.com/assets/colab-badge.svg")||e(rh,"src",dv),e(rh,"alt","Open In Colab"),e(Ao,"href","https://colab.research.google.com/github/vasudevgupta7/bigbird/blob/main/notebooks/bigbird_pegasus_evaluation.ipynb"),e(Ao,"rel","nofollow"),e(ah,"align","right"),e(ko,"href","https://github/m3hrdadfi/soxan/blob/main/notebooks/Emotion_recognition_in_Greek_speech_using_Wav2Vec2.ipynb"),e(ko,"rel","nofollow"),e(oh,"align","left"),e(lh,"align","left"),e(Ro,"href","https://github.com/m3hrdadfi"),e(Ro,"rel","nofollow"),e(nh,"align","left"),g(sh.src,uv="https://colab.research.google.com/assets/colab-badge.svg")||e(sh,"src",uv),e(sh,"alt","Open In Colab"),e(Co,"href","https://colab.research.google.com/github/m3hrdadfi/soxan/blob/main/notebooks/Emotion_recognition_in_Greek_speech_using_Wav2Vec2.ipynb"),e(Co,"rel","nofollow"),e(ih,"align","right"),e(Fo,"href","https://github.com/NielsRogge/Transformers-Tutorials/blob/master/DETR/DETR_minimal_example_(with_DetrFeatureExtractor).ipynb"),e(Fo,"rel","nofollow"),e(hh,"align","left"),e(St,"align","left"),e(Mo,"href","https://github.com/NielsRogge"),e(Mo,"rel","nofollow"),e(ch,"align","left"),g(gh.src,mv="https://colab.research.google.com/assets/colab-badge.svg")||e(gh,"src",mv),e(gh,"alt","Open In Colab"),e(Io,"href","https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/DETR/DETR_minimal_example_(with_DetrFeatureExtractor).ipynb"),e(Io,"rel","nofollow"),e(fh,"align","right"),e(Po,"href","https://github.com/NielsRogge/Transformers-Tutorials/blob/master/DETR/Fine_tuning_DetrForObjectDetection_on_custom_dataset_(balloon).ipynb"),e(Po,"rel","nofollow"),e(dh,"align","left"),e(Gt,"align","left"),e(Ho,"href","https://github.com/NielsRogge"),e(Ho,"rel","nofollow"),e(uh,"align","left"),g(_h.src,_v="https://colab.research.google.com/assets/colab-badge.svg")||e(_h,"src",_v),e(_h,"alt","Open In Colab"),e(Lo,"href","https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/DETR/Fine_tuning_DetrForObjectDetection_on_custom_dataset_(balloon).ipynb"),e(Lo,"rel","nofollow"),e(mh,"align","right"),e(Bo,"href","https://github.com/ToluClassics/Notebooks/blob/main/T5_Ner_Finetuning.ipynb"),e(Bo,"rel","nofollow"),e(bh,"align","left"),e(Nt,"align","left"),e(So,"href","https://github.com/ToluClassics"),e(So,"rel","nofollow"),e(vh,"align","left"),g(Eh.src,bv="https://colab.research.google.com/assets/colab-badge.svg")||e(Eh,"src",bv),e(Eh,"alt","Open In 
Colab"),e(Go,"href","https://colab.research.google.com/drive/1obr78FY_cBmWY5ODViCmzdY6O1KB65Vc?usp=sharing"),e(Go,"rel","nofollow"),e(ph,"align","right")},m(d,u){t(document.head,bt),b(d,No,u),b(d,m,u),t(m,v),t(v,Rh),_1(Ne,Rh,null),t(m,wc),t(m,Ch),t(Ch,Dc),b(d,ec,u),b(d,Oo,u),t(Oo,yc),b(d,ac,u),b(d,wt,u),t(wt,kt),t(kt,Fh),_1(Oe,Fh,null),t(wt,Ac),t(wt,Mh),t(Mh,kc),b(d,rc,u),b(d,Rt,u),t(Rt,Ih),t(Ih,Dt),t(Dt,xo),t(xo,Rc),t(Dt,Cc),t(Dt,zo),t(zo,Fc),t(Dt,Mc),t(Dt,Vo),t(Vo,Ic),t(Rt,Pc),t(Rt,Ph),t(Ph,yt),t(yt,qo),t(qo,xe),t(xe,Hc),t(yt,Lc),t(yt,_),t(_,Bc),t(_,Qo),t(Qo,Sc),t(_,Gc),t(_,ze),t(ze,Nc),t(_,Oc),t(_,Ve),t(Ve,xc),t(_,zc),t(yt,Vc),t(yt,jo),t(jo,qe),t(qe,qc),b(d,oc,u),b(d,At,u),t(At,Ct),t(Ct,Hh),_1(Qe,Hh,null),t(At,Qc),t(At,Lh),t(Lh,jc),b(d,lc,u),b(d,Ft,u),t(Ft,Bh),t(Bh,p),t(p,Wo),t(Wo,Wc),t(p,Uc),t(p,Uo),t(Uo,$c),t(p,Kc),t(p,$o),t($o,Yc),t(p,Xc),t(p,Sh),t(Ft,Jc),t(Ft,c),t(c,E),t(E,Ko),t(Ko,je),t(je,Zc),t(E,tf),t(E,Yo),t(Yo,ef),t(E,af),t(E,Xo),t(Xo,We),t(We,rf),t(E,of),t(E,Jo),t(Jo,Ue),t(Ue,Zo),t(c,lf),t(c,T),t(T,tl),t(tl,$e),t($e,nf),t(T,sf),t(T,el),t(el,hf),t(T,cf),t(T,al),t(al,Ke),t(Ke,ff),t(T,gf),t(T,rl),t(rl,Ye),t(Ye,ol),t(c,df),t(c,w),t(w,ll),t(ll,Xe),t(Xe,uf),t(w,mf),t(w,nl),t(nl,_f),t(w,bf),t(w,il),t(il,Je),t(Je,vf),t(w,pf),t(w,sl),t(sl,Ze),t(Ze,hl),t(c,Ef),t(c,D),t(D,cl),t(cl,ta),t(ta,Tf),t(D,wf),t(D,fl),t(fl,Df),t(D,yf),t(D,gl),t(gl,ea),t(ea,Af),t(D,kf),t(D,dl),t(dl,aa),t(aa,ul),t(c,Rf),t(c,y),t(y,ml),t(ml,ra),t(ra,Cf),t(y,Ff),t(y,_l),t(_l,Mf),t(y,If),t(y,bl),t(bl,oa),t(oa,Pf),t(y,Hf),t(y,vl),t(vl,la),t(la,pl),t(c,Lf),t(c,A),t(A,El),t(El,na),t(na,Bf),t(A,Sf),t(A,Tl),t(Tl,Gf),t(A,Nf),t(A,wl),t(wl,ia),t(ia,Of),t(A,xf),t(A,Dl),t(Dl,sa),t(sa,yl),t(c,zf),t(c,k),t(k,Al),t(Al,ha),t(ha,Vf),t(k,qf),t(k,kl),t(kl,Qf),t(k,jf),t(k,Rl),t(Rl,ca),t(ca,Wf),t(k,Uf),t(k,Cl),t(Cl,fa),t(fa,Fl),t(c,$f),t(c,R),t(R,Ml),t(Ml,ga),t(ga,Kf),t(R,Yf),t(R,Il),t(Il,Xf),t(R,Jf),t(R,Pl),t(Pl,da),t(da,Zf),t(R,tg),t(R,Hl),t(Hl,ua),t(ua,Ll),t(c,eg),t(c,C),t(C,Bl),t(Bl,ma),t(ma,ag),t(C,rg),t(C,Sl),t(Sl,og),t(C,lg),t(C,Gl),t(Gl,_a),t(_a,ng),t(C,ig),t(C,Nl),t(Nl,ba),t(ba,Ol),t(c,sg),t(c,F),t(F,xl),t(xl,va),t(va,hg),t(F,cg),t(F,zl),t(zl,fg),t(F,gg),t(F,Vl),t(Vl,pa),t(pa,dg),t(F,ug),t(F,ql),t(ql,Ea),t(Ea,Ql),t(c,mg),t(c,M),t(M,jl),t(jl,Ta),t(Ta,_g),t(M,bg),t(M,Wl),t(Wl,vg),t(M,pg),t(M,Ul),t(Ul,wa),t(wa,Eg),t(M,Tg),t(M,$l),t($l,Da),t(Da,Kl),t(c,wg),t(c,I),t(I,Yl),t(Yl,ya),t(ya,Dg),t(I,yg),t(I,Aa),t(Aa,Ag),t(Aa,Gh),t(Gh,kg),t(I,Rg),t(I,Xl),t(Xl,ka),t(ka,Cg),t(I,Fg),t(I,Jl),t(Jl,Ra),t(Ra,Zl),t(c,Mg),t(c,P),t(P,tn),t(tn,Ca),t(Ca,Ig),t(P,Pg),t(P,en),t(en,Hg),t(P,Lg),t(P,an),t(an,Fa),t(Fa,Bg),t(P,Sg),t(P,rn),t(rn,Ma),t(Ma,on),t(c,Gg),t(c,H),t(H,ln),t(ln,Ia),t(Ia,Ng),t(H,Og),t(H,nn),t(nn,xg),t(H,zg),t(H,sn),t(sn,Pa),t(Pa,Vg),t(H,qg),t(H,hn),t(hn,Ha),t(Ha,cn),t(c,Qg),t(c,L),t(L,fn),t(fn,La),t(La,jg),t(L,Wg),t(L,gn),t(gn,Ug),t(L,$g),t(L,dn),t(dn,Ba),t(Ba,Kg),t(L,Yg),t(L,un),t(un,Sa),t(Sa,mn),t(c,Xg),t(c,B),t(B,_n),t(_n,Ga),t(Ga,Jg),t(B,Zg),t(B,bn),t(bn,td),t(B,ed),t(B,vn),t(vn,Na),t(Na,ad),t(B,rd),t(B,pn),t(pn,Oa),t(Oa,En),t(c,od),t(c,S),t(S,Tn),t(Tn,xa),t(xa,ld),t(S,nd),t(S,wn),t(wn,id),t(S,sd),t(S,Dn),t(Dn,za),t(za,hd),t(S,cd),t(S,yn),t(yn,Va),t(Va,An),t(c,fd),t(c,G),t(G,kn),t(kn,qa),t(qa,gd),t(G,dd),t(G,Rn),t(Rn,ud),t(G,md),t(G,Cn),t(Cn,Qa),t(Qa,_d),t(G,bd),t(G,Fn),t(Fn,ja),t(ja,Mn),t(c,vd),t(c,N),t(N,In),t(In,Wa),t(Wa,pd),t(N,Ed),t(N,Pn),t(Pn,Td),t(N,wd),t(N,Hn),t(Hn,Ua),t(Ua,Dd),t(N,yd),t(N,Ln),t(Ln,$a),t($a,Bn),t(c,Ad),t(c,O),t(O,Sn),t(Sn,Ka),t(Ka,kd),t(O,Rd),t(O,Gn),t(Gn,Cd),t(O,Fd),t(O,Nn),t(Nn,Ya),t(Ya,Md),t(O,Id),t(O,On),t
(On,Xa),t(Xa,xn),t(c,Pd),t(c,x),t(x,zn),t(zn,Ja),t(Ja,Hd),t(x,Ld),t(x,Vn),t(Vn,Bd),t(x,Sd),t(x,qn),t(qn,Za),t(Za,Gd),t(x,Nd),t(x,Qn),t(Qn,tr),t(tr,jn),t(c,Od),t(c,z),t(z,Wn),t(Wn,er),t(er,xd),t(z,zd),t(z,Un),t(Un,Vd),t(z,qd),t(z,$n),t($n,ar),t(ar,Qd),t(z,jd),t(z,Kn),t(Kn,rr),t(rr,Yn),t(c,Wd),t(c,V),t(V,Xn),t(Xn,or),t(or,Ud),t(V,$d),t(V,Jn),t(Jn,Kd),t(V,Yd),t(V,Zn),t(Zn,lr),t(lr,Xd),t(V,Jd),t(V,ti),t(ti,nr),t(nr,ei),t(c,Zd),t(c,q),t(q,ai),t(ai,ir),t(ir,tu),t(q,eu),t(q,ri),t(ri,au),t(q,ru),t(q,oi),t(oi,sr),t(sr,ou),t(q,lu),t(q,li),t(li,hr),t(hr,ni),t(c,nu),t(c,Q),t(Q,ii),t(ii,cr),t(cr,iu),t(Q,su),t(Q,si),t(si,hu),t(Q,cu),t(Q,hi),t(hi,fr),t(fr,fu),t(Q,gu),t(Q,ci),t(ci,gr),t(gr,fi),t(c,du),t(c,j),t(j,gi),t(gi,dr),t(dr,uu),t(j,mu),t(j,di),t(di,_u),t(j,bu),t(j,ui),t(ui,ur),t(ur,vu),t(j,pu),t(j,mi),t(mi,mr),t(mr,_i),t(c,Eu),t(c,W),t(W,bi),t(bi,_r),t(_r,Tu),t(W,wu),t(W,vi),t(vi,Du),t(W,yu),t(W,pi),t(pi,br),t(br,Au),t(W,ku),t(W,Ei),t(Ei,vr),t(vr,Ti),t(c,Ru),t(c,U),t(U,wi),t(wi,pr),t(pr,Cu),t(U,Fu),t(U,vt),t(vt,Mu),t(vt,Nh),t(Nh,Iu),t(vt,Pu),t(vt,Oh),t(Oh,Hu),t(vt,Lu),t(U,Bu),t(U,Di),t(Di,Er),t(Er,Su),t(U,Gu),t(U,yi),t(yi,Tr),t(Tr,Ai),t(c,Nu),t(c,$),t($,ki),t(ki,wr),t(wr,Ou),t($,xu),t($,pt),t(pt,zu),t(pt,xh),t(xh,Vu),t(pt,qu),t(pt,zh),t(zh,Qu),t(pt,ju),t($,Wu),t($,Ri),t(Ri,Dr),t(Dr,Uu),t($,$u),t($,Ci),t(Ci,yr),t(yr,Fi),t(c,Ku),t(c,K),t(K,Mi),t(Mi,Ar),t(Ar,Yu),t(K,Xu),t(K,Et),t(Et,Ju),t(Et,Vh),t(Vh,Zu),t(Et,tm),t(Et,qh),t(qh,em),t(Et,am),t(K,rm),t(K,Ii),t(Ii,kr),t(kr,om),t(K,lm),t(K,Pi),t(Pi,Rr),t(Rr,Hi),t(c,nm),t(c,Y),t(Y,Li),t(Li,Cr),t(Cr,im),t(Y,sm),t(Y,Tt),t(Tt,hm),t(Tt,Qh),t(Qh,cm),t(Tt,fm),t(Tt,jh),t(jh,gm),t(Tt,dm),t(Y,um),t(Y,Bi),t(Bi,Fr),t(Fr,mm),t(Y,_m),t(Y,Si),t(Si,Mr),t(Mr,Gi),t(c,bm),t(c,X),t(X,Ni),t(Ni,Ir),t(Ir,vm),t(X,pm),t(X,Oi),t(Oi,Em),t(X,Tm),t(X,xi),t(xi,Pr),t(Pr,wm),t(X,Dm),t(X,zi),t(zi,Hr),t(Hr,Vi),t(c,ym),t(c,J),t(J,qi),t(qi,Lr),t(Lr,Am),t(J,km),t(J,Mt),t(Mt,Rm),t(Mt,Wh),t(Wh,Cm),t(Mt,Fm),t(J,Mm),t(J,Qi),t(Qi,Br),t(Br,Im),t(J,Pm),t(J,ji),t(ji,Sr),t(Sr,Wi),t(c,Hm),t(c,Z),t(Z,Ui),t(Ui,Gr),t(Gr,Lm),t(Z,Bm),t(Z,$i),t($i,Sm),t(Z,Gm),t(Z,Ki),t(Ki,Nr),t(Nr,Nm),t(Z,Om),t(Z,Yi),t(Yi,Or),t(Or,Xi),t(c,xm),t(c,tt),t(tt,Ji),t(Ji,xr),t(xr,zm),t(tt,Vm),t(tt,Zi),t(Zi,qm),t(tt,Qm),t(tt,ts),t(ts,zr),t(zr,jm),t(tt,Wm),t(tt,es),t(es,Vr),t(Vr,as),t(c,Um),t(c,et),t(et,rs),t(rs,qr),t(qr,$m),t(et,Km),t(et,os),t(os,Ym),t(et,Xm),t(et,ls),t(ls,Qr),t(Qr,Jm),t(et,Zm),t(et,ns),t(ns,jr),t(jr,is),t(c,t_),t(c,at),t(at,ss),t(ss,Wr),t(Wr,e_),t(at,a_),t(at,It),t(It,r_),t(It,Uh),t(Uh,o_),t(It,l_),t(at,n_),t(at,hs),t(hs,Ur),t(Ur,i_),t(at,s_),t(at,cs),t(cs,$r),t($r,fs),t(c,h_),t(c,rt),t(rt,gs),t(gs,Kr),t(Kr,c_),t(rt,f_),t(rt,ds),t(ds,g_),t(rt,d_),t(rt,us),t(us,Yr),t(Yr,u_),t(rt,m_),t(rt,ms),t(ms,Xr),t(Xr,_s),t(c,__),t(c,ot),t(ot,bs),t(bs,Jr),t(Jr,b_),t(ot,v_),t(ot,vs),t(vs,p_),t(ot,E_),t(ot,ps),t(ps,Zr),t(Zr,T_),t(ot,w_),t(ot,Es),t(Es,to),t(to,Ts),t(c,D_),t(c,lt),t(lt,ws),t(ws,eo),t(eo,y_),t(lt,A_),t(lt,Ds),t(Ds,k_),t(lt,R_),t(lt,ys),t(ys,ao),t(ao,C_),t(lt,F_),t(lt,As),t(As,ro),t(ro,ks),t(c,M_),t(c,nt),t(nt,Rs),t(Rs,oo),t(oo,I_),t(nt,P_),t(nt,Cs),t(Cs,H_),t(nt,L_),t(nt,Fs),t(Fs,lo),t(lo,B_),t(nt,S_),t(nt,Ms),t(Ms,no),t(no,Is),t(c,G_),t(c,it),t(it,Ps),t(Ps,io),t(io,N_),t(it,O_),t(it,Hs),t(Hs,x_),t(it,z_),t(it,Ls),t(Ls,so),t(so,V_),t(it,q_),t(it,Bs),t(Bs,ho),t(ho,Ss),t(c,Q_),t(c,st),t(st,Gs),t(Gs,co),t(co,j_),t(st,W_),t(st,Ns),t(Ns,U_),t(st,$_),t(st,Os),t(Os,fo),t(fo,K_),t(st,Y_),t(st,xs),t(xs,go),t(go,zs),t(c,X_),t(c,ht),t(ht,Vs),t(Vs,uo),t(uo,J_),t(ht,Z_),t(ht,Pt),t(Pt,tb),t(Pt,$h),t($h,eb),t(Pt,ab),t(ht,rb),t(ht,qs)
,t(qs,mo),t(mo,ob),t(ht,lb),t(ht,Qs),t(Qs,_o),t(_o,js),t(c,nb),t(c,ct),t(ct,Ws),t(Ws,bo),t(bo,ib),t(ct,sb),t(ct,Ht),t(Ht,hb),t(Ht,Kh),t(Kh,cb),t(Ht,fb),t(ct,gb),t(ct,Us),t(Us,vo),t(vo,db),t(ct,ub),t(ct,$s),t($s,po),t(po,Ks),t(c,mb),t(c,ft),t(ft,Ys),t(Ys,Eo),t(Eo,_b),t(ft,bb),t(ft,Lt),t(Lt,vb),t(Lt,Yh),t(Yh,pb),t(Lt,Eb),t(ft,Tb),t(ft,Xs),t(Xs,To),t(To,wb),t(ft,Db),t(ft,Js),t(Js,wo),t(wo,Zs),t(c,yb),t(c,gt),t(gt,th),t(th,Do),t(Do,Ab),t(gt,kb),t(gt,Bt),t(Bt,Rb),t(Bt,Xh),t(Xh,Cb),t(Bt,Fb),t(gt,Mb),t(gt,eh),t(eh,yo),t(yo,Ib),t(gt,Pb),t(gt,ah),t(ah,Ao),t(Ao,rh),t(c,Hb),t(c,dt),t(dt,oh),t(oh,ko),t(ko,Lb),t(dt,Bb),t(dt,lh),t(lh,Sb),t(dt,Gb),t(dt,nh),t(nh,Ro),t(Ro,Nb),t(dt,Ob),t(dt,ih),t(ih,Co),t(Co,sh),t(c,xb),t(c,ut),t(ut,hh),t(hh,Fo),t(Fo,zb),t(ut,Vb),t(ut,St),t(St,qb),t(St,Jh),t(Jh,Qb),t(St,jb),t(ut,Wb),t(ut,ch),t(ch,Mo),t(Mo,Ub),t(ut,$b),t(ut,fh),t(fh,Io),t(Io,gh),t(c,Kb),t(c,mt),t(mt,dh),t(dh,Po),t(Po,Yb),t(mt,Xb),t(mt,Gt),t(Gt,Jb),t(Gt,Zh),t(Zh,Zb),t(Gt,t1),t(mt,e1),t(mt,uh),t(uh,Ho),t(Ho,a1),t(mt,r1),t(mt,mh),t(mh,Lo),t(Lo,_h),t(c,o1),t(c,_t),t(_t,bh),t(bh,Bo),t(Bo,l1),t(_t,n1),t(_t,Nt),t(Nt,i1),t(Nt,tc),t(tc,s1),t(Nt,h1),t(_t,c1),t(_t,vh),t(vh,So),t(So,f1),t(_t,g1),t(_t,ph),t(ph,Go),t(Go,Eh),nc=!0},p:ID,i(d){nc||(b1(Ne.$$.fragment,d),b1(Oe.$$.fragment,d),b1(Qe.$$.fragment,d),nc=!0)},o(d){v1(Ne.$$.fragment,d),v1(Oe.$$.fragment,d),v1(Qe.$$.fragment,d),nc=!1},d(d){o(bt),d&&o(No),d&&o(m),p1(Ne),d&&o(ec),d&&o(Oo),d&&o(ac),d&&o(wt),p1(Oe),d&&o(rc),d&&o(Rt),d&&o(oc),d&&o(At),p1(Qe),d&&o(lc),d&&o(Ft)}}}const HD={local:"community",sections:[{local:"community-resources",title:"Community resources:"},{local:"community-notebooks",title:"Community notebooks:"}],title:"Community"};function LD(Tc,bt,No){let{fw:m}=bt;return Tc.$$set=v=>{"fw"in v&&No(0,m=v.fw)},[m]}class GD extends RD{constructor(bt){super();CD(this,bt,LD,PD,FD,{fw:0})}}export{GD as default,HD as metadata};
268
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages/fast_tokenizers.mdx-1a58673f.js
import{S as jt,i as $t,s as bt,e as a,k as h,w as N,t as i,M as Pt,c as n,d as t,m,a as o,x as B,h as l,b as p,F as r,g as f,y as L,L as Et,q as x,o as O,B as J}from"../chunks/vendor-4833417e.js";import{I as st}from"../chunks/IconCopyLink-4b81c553.js";import{C as $e}from"../chunks/CodeBlock-6a3d1b46.js";import"../chunks/CopyButton-dacfbfaf.js";function St(be){let d,W,k,c,X,$,Pe,Y,Ee,ne,z,Se,C,Ae,qe,b,Fe,Ne,oe,H,Be,ie,P,le,U,Le,fe,_,v,Z,E,xe,ee,Oe,pe,u,Je,K,We,Ce,te,He,Ue,he,S,me,w,Ke,M,Me,Ie,ke,T,y,re,A,De,se,Ge,ce,I,Qe,de,q,ze,g,Re,D,Ve,Xe,ae,Ye,Ze,ue,F,ge,j,et,G,tt,rt,_e;return $=new st({}),P=new $e({props:{code:`from tokenizers import Tokenizer from tokenizers.models import BPE from tokenizers.trainers import BpeTrainer from tokenizers.pre_tokenizers import Whitespace tokenizer = Tokenizer(BPE(unk_token="[UNK]")) trainer = BpeTrainer(special_tokens=["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"]) tokenizer.pre_tokenizer = Whitespace() files = [...] tokenizer.train(files, trainer)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> tokenizers <span class="hljs-keyword">import</span> Tokenizer <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> tokenizers.models <span class="hljs-keyword">import</span> BPE <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> tokenizers.trainers <span class="hljs-keyword">import</span> BpeTrainer <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> tokenizers.pre_tokenizers <span class="hljs-keyword">import</span> Whitespace <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = Tokenizer(BPE(unk_token=<span class="hljs-string">&quot;[UNK]&quot;</span>)) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer = BpeTrainer(special_tokens=[<span class="hljs-string">&quot;[UNK]&quot;</span>, <span class="hljs-string">&quot;[CLS]&quot;</span>, <span class="hljs-string">&quot;[SEP]&quot;</span>, <span class="hljs-string">&quot;[PAD]&quot;</span>, <span class="hljs-string">&quot;[MASK]&quot;</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.pre_tokenizer = Whitespace() <span class="hljs-meta">&gt;&gt;&gt; </span>files = [...] 
<span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.train(files, trainer)`}}),E=new st({}),S=new $e({props:{code:`from transformers import PreTrainedTokenizerFast fast_tokenizer = PreTrainedTokenizerFast(tokenizer_object=tokenizer)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> PreTrainedTokenizerFast <span class="hljs-meta">&gt;&gt;&gt; </span>fast_tokenizer = PreTrainedTokenizerFast(tokenizer_object=tokenizer)`}}),A=new st({}),q=new $e({props:{code:'tokenizer.save("tokenizer.json")',highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.save(<span class="hljs-string">&quot;tokenizer.json&quot;</span>)'}}),F=new $e({props:{code:`from transformers import PreTrainedTokenizerFast fast_tokenizer = PreTrainedTokenizerFast(tokenizer_file="tokenizer.json")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> PreTrainedTokenizerFast <span class="hljs-meta">&gt;&gt;&gt; </span>fast_tokenizer = PreTrainedTokenizerFast(tokenizer_file=<span class="hljs-string">&quot;tokenizer.json&quot;</span>)`}}),{c(){d=a("meta"),W=h(),k=a("h1"),c=a("a"),X=a("span"),N($.$$.fragment),Pe=h(),Y=a("span"),Ee=i("Using tokenizers from \u{1F917} Tokenizers"),ne=h(),z=a("p"),Se=i("The "),C=a("a"),Ae=i("PreTrainedTokenizerFast"),qe=i(" depends on the "),b=a("a"),Fe=i("\u{1F917} Tokenizers"),Ne=i(` library. The tokenizers obtained from the \u{1F917} Tokenizers library can be loaded very simply into \u{1F917} Transformers.`),oe=h(),H=a("p"),Be=i("Before getting in the specifics, let\u2019s first start by creating a dummy tokenizer in a few lines:"),ie=h(),N(P.$$.fragment),le=h(),U=a("p"),Le=i(`We now have a tokenizer trained on the files we defined. We can either continue using it in that runtime, or save it to a JSON file for future re-use.`),fe=h(),_=a("h2"),v=a("a"),Z=a("span"),N(E.$$.fragment),xe=h(),ee=a("span"),Oe=i("Loading directly from the tokenizer object"),pe=h(),u=a("p"),Je=i(`Let\u2019s see how to leverage this tokenizer object in the \u{1F917} Transformers library. The `),K=a("a"),We=i("PreTrainedTokenizerFast"),Ce=i(` class allows for easy instantiation, by accepting the instantiated `),te=a("em"),He=i("tokenizer"),Ue=i(" object as an argument:"),he=h(),N(S.$$.fragment),me=h(),w=a("p"),Ke=i("This object can now be used with all the methods shared by the \u{1F917} Transformers tokenizers! Head to "),M=a("a"),Me=i(`the tokenizer page`),Ie=i(" for more information."),ke=h(),T=a("h2"),y=a("a"),re=a("span"),N(A.$$.fragment),De=h(),se=a("span"),Ge=i("Loading from a JSON file"),ce=h(),I=a("p"),Qe=i("In order to load a tokenizer from a JSON file, let\u2019s first start by saving our tokenizer:"),de=h(),N(q.$$.fragment),ze=h(),g=a("p"),Re=i("The path to which we saved this file can be passed to the "),D=a("a"),Ve=i("PreTrainedTokenizerFast"),Xe=i(` initialization method using the `),ae=a("code"),Ye=i("tokenizer_file"),Ze=i(" parameter:"),ue=h(),N(F.$$.fragment),ge=h(),j=a("p"),et=i("This object can now be used with all the methods shared by the \u{1F917} Transformers tokenizers! 
Head to "),G=a("a"),tt=i(`the tokenizer page`),rt=i(" for more information."),this.h()},l(e){const s=Pt('[data-svelte="svelte-1phssyn"]',document.head);d=n(s,"META",{name:!0,content:!0}),s.forEach(t),W=m(e),k=n(e,"H1",{class:!0});var Te=o(k);c=n(Te,"A",{id:!0,class:!0,href:!0});var at=o(c);X=n(at,"SPAN",{});var nt=o(X);B($.$$.fragment,nt),nt.forEach(t),at.forEach(t),Pe=m(Te),Y=n(Te,"SPAN",{});var ot=o(Y);Ee=l(ot,"Using tokenizers from \u{1F917} Tokenizers"),ot.forEach(t),Te.forEach(t),ne=m(e),z=n(e,"P",{});var Q=o(z);Se=l(Q,"The "),C=n(Q,"A",{href:!0});var it=o(C);Ae=l(it,"PreTrainedTokenizerFast"),it.forEach(t),qe=l(Q," depends on the "),b=n(Q,"A",{href:!0,rel:!0});var lt=o(b);Fe=l(lt,"\u{1F917} Tokenizers"),lt.forEach(t),Ne=l(Q,` library. The tokenizers obtained from the \u{1F917} Tokenizers library can be loaded very simply into \u{1F917} Transformers.`),Q.forEach(t),oe=m(e),H=n(e,"P",{});var ft=o(H);Be=l(ft,"Before getting in the specifics, let\u2019s first start by creating a dummy tokenizer in a few lines:"),ft.forEach(t),ie=m(e),B(P.$$.fragment,e),le=m(e),U=n(e,"P",{});var pt=o(U);Le=l(pt,`We now have a tokenizer trained on the files we defined. We can either continue using it in that runtime, or save it to a JSON file for future re-use.`),pt.forEach(t),fe=m(e),_=n(e,"H2",{class:!0});var ve=o(_);v=n(ve,"A",{id:!0,class:!0,href:!0});var ht=o(v);Z=n(ht,"SPAN",{});var mt=o(Z);B(E.$$.fragment,mt),mt.forEach(t),ht.forEach(t),xe=m(ve),ee=n(ve,"SPAN",{});var kt=o(ee);Oe=l(kt,"Loading directly from the tokenizer object"),kt.forEach(t),ve.forEach(t),pe=m(e),u=n(e,"P",{});var R=o(u);Je=l(R,`Let\u2019s see how to leverage this tokenizer object in the \u{1F917} Transformers library. The `),K=n(R,"A",{href:!0});var ct=o(K);We=l(ct,"PreTrainedTokenizerFast"),ct.forEach(t),Ce=l(R,` class allows for easy instantiation, by accepting the instantiated `),te=n(R,"EM",{});var dt=o(te);He=l(dt,"tokenizer"),dt.forEach(t),Ue=l(R," object as an argument:"),R.forEach(t),he=m(e),B(S.$$.fragment,e),me=m(e),w=n(e,"P",{});var we=o(w);Ke=l(we,"This object can now be used with all the methods shared by the \u{1F917} Transformers tokenizers! Head to "),M=n(we,"A",{href:!0});var zt=o(M);Me=l(zt,`the tokenizer page`),zt.forEach(t),Ie=l(we," for more information."),we.forEach(t),ke=m(e),T=n(e,"H2",{class:!0});var ye=o(T);y=n(ye,"A",{id:!0,class:!0,href:!0});var ut=o(y);re=n(ut,"SPAN",{});var gt=o(re);B(A.$$.fragment,gt),gt.forEach(t),ut.forEach(t),De=m(ye),se=n(ye,"SPAN",{});var _t=o(se);Ge=l(_t,"Loading from a JSON file"),_t.forEach(t),ye.forEach(t),ce=m(e),I=n(e,"P",{});var Tt=o(I);Qe=l(Tt,"In order to load a tokenizer from a JSON file, let\u2019s first start by saving our tokenizer:"),Tt.forEach(t),de=m(e),B(q.$$.fragment,e),ze=m(e),g=n(e,"P",{});var V=o(g);Re=l(V,"The path to which we saved this file can be passed to the "),D=n(V,"A",{href:!0});var vt=o(D);Ve=l(vt,"PreTrainedTokenizerFast"),vt.forEach(t),Xe=l(V,` initialization method using the `),ae=n(V,"CODE",{});var wt=o(ae);Ye=l(wt,"tokenizer_file"),wt.forEach(t),Ze=l(V," parameter:"),V.forEach(t),ue=m(e),B(F.$$.fragment,e),ge=m(e),j=n(e,"P",{});var je=o(j);et=l(je,"This object can now be used with all the methods shared by the \u{1F917} Transformers tokenizers! 
Head to "),G=n(je,"A",{href:!0});var yt=o(G);tt=l(yt,`the tokenizer page`),yt.forEach(t),rt=l(je," for more information."),je.forEach(t),this.h()},h(){p(d,"name","hf:doc:metadata"),p(d,"content",JSON.stringify(At)),p(c,"id","using-tokenizers-from-tokenizers"),p(c,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(c,"href","#using-tokenizers-from-tokenizers"),p(k,"class","relative group"),p(C,"href","/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast"),p(b,"href","https://huggingface.co/docs/tokenizers"),p(b,"rel","nofollow"),p(v,"id","loading-directly-from-the-tokenizer-object"),p(v,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(v,"href","#loading-directly-from-the-tokenizer-object"),p(_,"class","relative group"),p(K,"href","/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast"),p(M,"href","main_classes/tokenizer"),p(y,"id","loading-from-a-json-file"),p(y,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(y,"href","#loading-from-a-json-file"),p(T,"class","relative group"),p(D,"href","/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast"),p(G,"href","main_classes/tokenizer")},m(e,s){r(document.head,d),f(e,W,s),f(e,k,s),r(k,c),r(c,X),L($,X,null),r(k,Pe),r(k,Y),r(Y,Ee),f(e,ne,s),f(e,z,s),r(z,Se),r(z,C),r(C,Ae),r(z,qe),r(z,b),r(b,Fe),r(z,Ne),f(e,oe,s),f(e,H,s),r(H,Be),f(e,ie,s),L(P,e,s),f(e,le,s),f(e,U,s),r(U,Le),f(e,fe,s),f(e,_,s),r(_,v),r(v,Z),L(E,Z,null),r(_,xe),r(_,ee),r(ee,Oe),f(e,pe,s),f(e,u,s),r(u,Je),r(u,K),r(K,We),r(u,Ce),r(u,te),r(te,He),r(u,Ue),f(e,he,s),L(S,e,s),f(e,me,s),f(e,w,s),r(w,Ke),r(w,M),r(M,Me),r(w,Ie),f(e,ke,s),f(e,T,s),r(T,y),r(y,re),L(A,re,null),r(T,De),r(T,se),r(se,Ge),f(e,ce,s),f(e,I,s),r(I,Qe),f(e,de,s),L(q,e,s),f(e,ze,s),f(e,g,s),r(g,Re),r(g,D),r(D,Ve),r(g,Xe),r(g,ae),r(ae,Ye),r(g,Ze),f(e,ue,s),L(F,e,s),f(e,ge,s),f(e,j,s),r(j,et),r(j,G),r(G,tt),r(j,rt),_e=!0},p:Et,i(e){_e||(x($.$$.fragment,e),x(P.$$.fragment,e),x(E.$$.fragment,e),x(S.$$.fragment,e),x(A.$$.fragment,e),x(q.$$.fragment,e),x(F.$$.fragment,e),_e=!0)},o(e){O($.$$.fragment,e),O(P.$$.fragment,e),O(E.$$.fragment,e),O(S.$$.fragment,e),O(A.$$.fragment,e),O(q.$$.fragment,e),O(F.$$.fragment,e),_e=!1},d(e){t(d),e&&t(W),e&&t(k),J($),e&&t(ne),e&&t(z),e&&t(oe),e&&t(H),e&&t(ie),J(P,e),e&&t(le),e&&t(U),e&&t(fe),e&&t(_),J(E),e&&t(pe),e&&t(u),e&&t(he),J(S,e),e&&t(me),e&&t(w),e&&t(ke),e&&t(T),J(A),e&&t(ce),e&&t(I),e&&t(de),J(q,e),e&&t(ze),e&&t(g),e&&t(ue),J(F,e),e&&t(ge),e&&t(j)}}}const At={local:"using-tokenizers-from-tokenizers",sections:[{local:"loading-directly-from-the-tokenizer-object",title:"Loading directly from the tokenizer object"},{local:"loading-from-a-json-file",title:"Loading from a JSON file"}],title:"Using tokenizers from \u{1F917} Tokenizers"};function qt(be,d,W){let{fw:k}=d;return be.$$set=c=>{"fw"in c&&W(0,k=c.fw)},[k]}class xt extends jt{constructor(d){super();$t(this,d,qt,St,bt,{fw:0})}}export{xt as default,At as metadata};
269
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages/index.mdx-78213d4b.js
import{S as Bhe,i as Che,s as Nhe,e as r,k as d,w as Mm,t as n,M as Ihe,c as a,d as t,m as s,a as l,x as Pm,h as o,b as i,N as xhe,f as i0,F as e,g as E,y as Gm,L as Ohe,q as km,o as Bm,B as Cm}from"../chunks/vendor-4833417e.js";import{I as Nm}from"../chunks/IconCopyLink-4b81c553.js";function Fhe(d0){let $r,Mi,ot,Qr,f_,hn,s0,u_,h0,Im,Pi,c0,xm,Gi,g0,Om,it,v_,f0,u0,E_,v0,E0,T_,T0,_0,__,m0,Fm,dt,p0,cn,D0,y0,gn,A0,R0,fn,L0,b0,Wm,ki,S0,Ym,da,va,m_,un,w0,p_,M0,Jm,sa,ea,mz,P0,Zm,Vm,ha,Ea,D_,vn,G0,y_,k0,Km,Bi,B0,Xm,lt,A_,Ci,R_,C0,N0,I0,L_,Ni,b_,x0,O0,F0,S_,Ii,w_,W0,Y0,J0,M_,xi,P_,Z0,V0,K0,En,Oi,G_,X0,z0,H0,ca,Fi,k_,U0,q0,Q0,Wi,B_,j0,$0,ep,Yi,C_,tp,rp,zm,Ji,ap,Hm,ga,Ta,N_,Tn,lp,I_,np,Um,h,_a,x_,Zi,op,ip,_n,dp,sp,hp,ma,O_,Vi,cp,gp,mn,fp,up,vp,pa,F_,Ki,Ep,Tp,pn,_p,mp,pp,Da,W_,Xi,Dp,yp,Dn,Ap,Rp,Lp,ya,Y_,zi,bp,Sp,yn,wp,Mp,Pp,Aa,J_,Hi,Gp,kp,An,Bp,Cp,Np,Ra,Z_,Ui,Ip,xp,Rn,Op,Fp,Wp,La,V_,qi,Yp,Jp,Ln,Zp,Vp,Kp,ba,K_,Qi,Xp,zp,bn,Hp,Up,qp,Sa,X_,ji,Qp,jp,Sn,$p,e6,t6,wa,z_,$i,r6,a6,wn,l6,n6,o6,Ma,H_,ed,i6,d6,Mn,s6,h6,c6,Pa,U_,td,g6,f6,Pn,u6,v6,E6,Ga,q_,rd,T6,_6,Gn,m6,p6,D6,ta,Q_,ad,y6,A6,kn,R6,L6,j_,b6,S6,w6,ka,$_,ld,M6,P6,Bn,G6,k6,B6,Ba,e3,nd,C6,N6,Cn,I6,x6,O6,Ca,t3,od,F6,W6,Nn,Y6,J6,Z6,Na,r3,id,V6,K6,In,X6,z6,H6,Ia,a3,dd,U6,q6,xn,Q6,j6,$6,ra,l3,sd,e7,t7,On,r7,a7,n3,l7,n7,o7,xa,o3,hd,i7,d7,Fn,s7,h7,c7,Oa,i3,cd,g7,f7,Wn,u7,v7,E7,Fa,d3,gd,T7,_7,Yn,m7,p7,D7,Wa,s3,fd,y7,A7,Jn,R7,L7,b7,Ya,h3,ud,S7,w7,Zn,M7,P7,G7,Ja,c3,vd,k7,B7,Vn,C7,N7,I7,Za,g3,Ed,x7,O7,Kn,F7,W7,Y7,nt,f3,Td,J7,Z7,Xn,V7,K7,zn,X7,z7,Hn,H7,U7,Un,q7,Q7,j7,Va,u3,_d,$7,e9,qn,t9,r9,a9,Ka,v3,md,l9,n9,Qn,o9,i9,d9,Xa,E3,pd,s9,h9,jn,c9,g9,f9,za,T3,Dd,u9,v9,$n,E9,T9,_9,Ha,_3,yd,m9,p9,eo,D9,y9,A9,Ua,m3,Ad,R9,L9,to,b9,S9,w9,qa,p3,Rd,M9,P9,ro,G9,k9,B9,st,D3,Ld,C9,N9,ao,I9,x9,y3,O9,F9,A3,W9,Y9,J9,Qa,R3,bd,Z9,V9,lo,K9,X9,z9,ja,L3,Sd,H9,U9,no,q9,Q9,j9,$a,b3,wd,$9,e8,oo,t8,r8,a8,el,S3,Md,l8,n8,io,o8,i8,d8,tl,w3,Pd,s8,h8,so,c8,g8,f8,rl,M3,Gd,u8,v8,ho,E8,T8,_8,al,P3,kd,m8,p8,co,D8,y8,A8,ll,G3,Bd,R8,L8,go,b8,S8,w8,nl,k3,Cd,M8,P8,fo,G8,k8,B8,ol,B3,Nd,C8,N8,uo,I8,x8,O8,il,C3,Id,F8,W8,vo,Y8,J8,Z8,dl,N3,xd,V8,K8,Eo,X8,z8,H8,sl,I3,Od,U8,q8,To,Q8,j8,$8,hl,x3,Fd,eD,tD,_o,rD,aD,lD,aa,O3,Wd,nD,oD,mo,iD,dD,po,sD,hD,cD,cl,F3,Yd,gD,fD,Do,uD,vD,ED,gl,W3,Jd,TD,_D,yo,mD,pD,DD,fl,Y3,Zd,yD,AD,Ao,RD,LD,bD,ul,J3,Vd,SD,wD,Ro,MD,PD,GD,vl,Z3,Kd,kD,BD,Lo,CD,ND,ID,El,V3,Xd,xD,OD,bo,FD,WD,YD,Tl,K3,zd,JD,ZD,So,VD,KD,XD,_l,X3,Hd,zD,HD,wo,UD,qD,QD,ml,z3,Ud,jD,$D,Mo,ey,ty,ry,pl,H3,qd,ay,ly,Po,ny,oy,iy,Dl,U3,Qd,dy,sy,Go,hy,cy,gy,yl,q3,jd,fy,uy,ko,vy,Ey,Ty,Al,Q3,$d,_y,my,Bo,py,Dy,yy,Rl,j3,es,Ay,Ry,Co,Ly,by,Sy,Ll,$3,ts,wy,My,No,Py,Gy,ky,bl,e4,Io,By,Cy,xo,Ny,Iy,xy,Sl,t4,rs,Oy,Fy,Oo,Wy,Yy,Jy,wl,r4,as,Zy,Vy,Fo,Ky,Xy,zy,Ml,a4,ls,Hy,Uy,Wo,qy,Qy,jy,Pl,l4,ns,$y,eA,Yo,tA,rA,aA,Gl,n4,os,lA,nA,Jo,oA,iA,dA,kl,o4,is,sA,hA,Zo,cA,gA,fA,Bl,i4,ds,uA,vA,Vo,EA,TA,_A,Cl,d4,ss,mA,pA,Ko,DA,yA,AA,Nl,s4,hs,RA,LA,Xo,bA,SA,wA,Il,h4,cs,MA,PA,zo,GA,kA,BA,xl,c4,gs,CA,NA,Ho,IA,xA,OA,Ol,g4,fs,FA,WA,Uo,YA,JA,ZA,Fl,f4,us,VA,KA,qo,XA,zA,HA,Wl,u4,vs,UA,qA,Qo,QA,jA,$A,Yl,v4,Es,eR,tR,jo,rR,aR,lR,la,E4,Ts,nR,oR,$o,iR,dR,T4,sR,hR,cR,Jl,_4,_s,gR,fR,ei,uR,vR,ER,Zl,m4,ms,TR,_R,ti,mR,pR,DR,Vl,p4,ps,yR,AR,ri,RR,LR,bR,Kl,D4,Ds,SR,wR,ai,MR,PR,GR,Xl,y4,ys,kR,BR,li,CR,NR,IR,zl,A4,As,xR,OR,ni,FR,WR,YR,Hl,R4,Rs,JR,ZR,oi,VR,KR,XR,Ul,L4,Ls,zR,HR,ii,UR,qR,QR,ql,b4,bs,jR,$R,di,eL,tL,rL,Ql,S4,si,aL,lL,hi,nL,oL,iL,jl,w4,ci,dL,sL,gi,hL,cL,gL,$l,M4,Ss,fL,uL,fi,vL,EL,TL,en,P4,ws,_L,mL,ui,pL,DL,yL,na,G4,Ms,AL,RL,vi,LL,bL,k4,SL,wL,ML,tn,B4,Ps,PL,GL,Ei,kL,BL,CL,oa,C4,Gs,NL,IL,Ti,xL,OL,N4,FL,WL,YL,rn,I4,ks,JL,ZL,_i,VL,KL,XL,an,x4,mi,zL,HL,pi,UL,qL,QL,ln,O4,Bs,jL,$
L,Di,eb,tb,qm,fa,nn,F4,yi,rb,W4,ab,Qm,Cs,lb,jm,on,Y4,T,Ns,nb,ob,Is,ib,db,xs,sb,hb,Os,cb,gb,Fs,fb,ub,Ws,vb,Eb,g,_,Ys,Tb,_b,Js,mb,pb,Zs,Db,yb,Vs,Ab,Rb,Ks,Lb,bb,Xs,Sb,wb,m,zs,Mb,Pb,Hs,Gb,kb,Us,Bb,Cb,qs,Nb,Ib,Qs,xb,Ob,js,Fb,Wb,p,$s,Yb,Jb,eh,Zb,Vb,th,Kb,Xb,rh,zb,Hb,ah,Ub,qb,lh,Qb,jb,D,nh,$b,eS,oh,tS,rS,ih,aS,lS,dh,nS,oS,sh,iS,dS,hh,sS,hS,y,ch,cS,gS,gh,fS,uS,fh,vS,ES,uh,TS,_S,vh,mS,pS,Eh,DS,yS,A,Th,AS,RS,_h,LS,bS,mh,SS,wS,ph,MS,PS,Dh,GS,kS,yh,BS,CS,R,Ah,NS,IS,Rh,xS,OS,Lh,FS,WS,bh,YS,JS,Sh,ZS,VS,wh,KS,XS,L,Mh,zS,HS,Ph,US,qS,Gh,QS,jS,kh,$S,ew,Bh,tw,rw,Ch,aw,lw,b,Nh,nw,ow,Ih,iw,dw,xh,sw,hw,Oh,cw,gw,Fh,fw,uw,Wh,vw,Ew,S,Yh,Tw,_w,Jh,mw,pw,Zh,Dw,yw,Vh,Aw,Rw,Kh,Lw,bw,Xh,Sw,ww,w,zh,Mw,Pw,Hh,Gw,kw,Uh,Bw,Cw,qh,Nw,Iw,Qh,xw,Ow,jh,Fw,Ww,M,$h,Yw,Jw,ec,Zw,Vw,tc,Kw,Xw,rc,zw,Hw,ac,Uw,qw,lc,Qw,jw,P,nc,$w,eM,oc,tM,rM,ic,aM,lM,dc,nM,oM,sc,iM,dM,hc,sM,hM,G,cc,cM,gM,gc,fM,uM,fc,vM,EM,uc,TM,_M,vc,mM,pM,Ec,DM,yM,k,Tc,AM,RM,_c,LM,bM,mc,SM,wM,pc,MM,PM,Dc,GM,kM,yc,BM,CM,B,Ac,NM,IM,Rc,xM,OM,Lc,FM,WM,bc,YM,JM,Sc,ZM,VM,wc,KM,XM,C,Mc,zM,HM,Pc,UM,qM,Gc,QM,jM,kc,$M,eP,Bc,tP,rP,Cc,aP,lP,N,Nc,nP,oP,Ic,iP,dP,xc,sP,hP,Oc,cP,gP,Fc,fP,uP,Wc,vP,EP,I,Yc,TP,_P,Jc,mP,pP,Zc,DP,yP,Vc,AP,RP,Kc,LP,bP,Xc,SP,wP,x,zc,MP,PP,Hc,GP,kP,Uc,BP,CP,qc,NP,IP,Qc,xP,OP,jc,FP,WP,O,$c,YP,JP,eg,ZP,VP,tg,KP,XP,rg,zP,HP,ag,UP,qP,lg,QP,jP,F,ng,$P,eG,og,tG,rG,ig,aG,lG,dg,nG,oG,sg,iG,dG,hg,sG,hG,W,cg,cG,gG,gg,fG,uG,fg,vG,EG,ug,TG,_G,vg,mG,pG,Eg,DG,yG,Y,Tg,AG,RG,_g,LG,bG,mg,SG,wG,pg,MG,PG,Dg,GG,kG,yg,BG,CG,J,Ag,NG,IG,Rg,xG,OG,Lg,FG,WG,bg,YG,JG,Sg,ZG,VG,wg,KG,XG,Z,Mg,zG,HG,Pg,UG,qG,Gg,QG,jG,kg,$G,ek,Bg,tk,rk,Cg,ak,lk,V,Ng,nk,ok,Ig,ik,dk,xg,sk,hk,Og,ck,gk,Fg,fk,uk,Wg,vk,Ek,K,Yg,Tk,_k,Jg,mk,pk,Zg,Dk,yk,Vg,Ak,Rk,Kg,Lk,bk,Xg,Sk,wk,X,zg,Mk,Pk,Hg,Gk,kk,Ug,Bk,Ck,qg,Nk,Ik,Qg,xk,Ok,jg,Fk,Wk,z,$g,Yk,Jk,ef,Zk,Vk,tf,Kk,Xk,rf,zk,Hk,af,Uk,qk,lf,Qk,jk,H,nf,$k,eB,of,tB,rB,df,aB,lB,sf,nB,oB,hf,iB,dB,cf,sB,hB,U,gf,cB,gB,ff,fB,uB,uf,vB,EB,vf,TB,_B,Ef,mB,pB,Tf,DB,yB,q,_f,AB,RB,mf,LB,bB,pf,SB,wB,Df,MB,PB,yf,GB,kB,Af,BB,CB,Q,Rf,NB,IB,Lf,xB,OB,bf,FB,WB,Sf,YB,JB,wf,ZB,VB,Mf,KB,XB,j,Pf,zB,HB,Gf,UB,qB,kf,QB,jB,Bf,$B,eC,Cf,tC,rC,Nf,aC,lC,$,If,nC,oC,xf,iC,dC,Of,sC,hC,Ff,cC,gC,Wf,fC,uC,Yf,vC,EC,ee,Jf,TC,_C,Zf,mC,pC,Vf,DC,yC,Kf,AC,RC,Xf,LC,bC,zf,SC,wC,te,Hf,MC,PC,Uf,GC,kC,qf,BC,CC,Qf,NC,IC,jf,xC,OC,$f,FC,WC,re,e1,YC,JC,t1,ZC,VC,r1,KC,XC,a1,zC,HC,l1,UC,qC,n1,QC,jC,ae,o1,$C,eN,i1,tN,rN,d1,aN,lN,s1,nN,oN,h1,iN,dN,c1,sN,hN,le,g1,cN,gN,f1,fN,uN,u1,vN,EN,v1,TN,_N,E1,mN,pN,T1,DN,yN,ne,_1,AN,RN,m1,LN,bN,p1,SN,wN,D1,MN,PN,y1,GN,kN,A1,BN,CN,oe,R1,NN,IN,L1,xN,ON,b1,FN,WN,S1,YN,JN,w1,ZN,VN,M1,KN,XN,ie,P1,zN,HN,G1,UN,qN,k1,QN,jN,B1,$N,eI,C1,tI,rI,N1,aI,lI,de,I1,nI,oI,x1,iI,dI,O1,sI,hI,F1,cI,gI,W1,fI,uI,Y1,vI,EI,se,J1,TI,_I,Z1,mI,pI,V1,DI,yI,K1,AI,RI,X1,LI,bI,z1,SI,wI,he,H1,MI,PI,U1,GI,kI,q1,BI,CI,Q1,NI,II,j1,xI,OI,$1,FI,WI,ce,eu,YI,JI,tu,ZI,VI,ru,KI,XI,au,zI,HI,lu,UI,qI,nu,QI,jI,ge,ou,$I,ex,iu,tx,rx,du,ax,lx,su,nx,ox,hu,ix,dx,cu,sx,hx,fe,gu,cx,gx,fu,fx,ux,uu,vx,Ex,vu,Tx,_x,Eu,mx,px,Tu,Dx,yx,ue,_u,Ax,Rx,mu,Lx,bx,pu,Sx,wx,Du,Mx,Px,yu,Gx,kx,Au,Bx,Cx,ve,Ru,Nx,Ix,Lu,xx,Ox,bu,Fx,Wx,Su,Yx,Jx,wu,Zx,Vx,Mu,Kx,Xx,Ee,Pu,zx,Hx,Gu,Ux,qx,ku,Qx,jx,Bu,$x,eO,Cu,tO,rO,Nu,aO,lO,Te,Iu,nO,oO,xu,iO,dO,Ou,sO,hO,Fu,cO,gO,Wu,fO,uO,Yu,vO,EO,_e,Ju,TO,_O,Zu,mO,pO,Vu,DO,yO,Ku,AO,RO,Xu,LO,bO,zu,SO,wO,me,Hu,MO,PO,Uu,GO,kO,qu,BO,CO,Qu,NO,IO,ju,xO,OO,$u,FO,WO,pe,ev,YO,JO,tv,ZO,VO,rv,KO,XO,av,zO,HO,lv,UO,qO,nv,QO,jO,De,ov,$O,eF,iv,tF,rF,dv,aF,lF,sv,nF,oF,hv,iF,dF,cv,sF,hF,ye,gv,cF,gF,fv,fF,uF,uv,vF,EF,vv,TF,_F,Ev,mF,pF,Tv,DF,yF,Ae,_v,AF,RF,mv,LF,bF,pv,SF,wF,Dv,MF,PF,yv,GF,kF,Av,BF,CF,Re,Rv,NF,IF,Lv,xF,OF,bv,FF,WF,Sv,YF,JF,wv,ZF,VF,Mv,KF,XF,Le,
Pv,zF,HF,Gv,UF,qF,kv,QF,jF,Bv,$F,eW,Cv,tW,rW,Nv,aW,lW,be,Iv,nW,oW,xv,iW,dW,Ov,sW,hW,Fv,cW,gW,Wv,fW,uW,Yv,vW,EW,Se,Jv,TW,_W,Zv,mW,pW,Vv,DW,yW,Kv,AW,RW,Xv,LW,bW,zv,SW,wW,we,Hv,MW,PW,Uv,GW,kW,qv,BW,CW,Qv,NW,IW,jv,xW,OW,$v,FW,WW,Me,eE,YW,JW,tE,ZW,VW,rE,KW,XW,aE,zW,HW,lE,UW,qW,nE,QW,jW,Pe,oE,$W,eY,iE,tY,rY,dE,aY,lY,sE,nY,oY,hE,iY,dY,cE,sY,hY,Ge,gE,cY,gY,fE,fY,uY,uE,vY,EY,vE,TY,_Y,EE,mY,pY,TE,DY,yY,ke,_E,AY,RY,mE,LY,bY,pE,SY,wY,DE,MY,PY,yE,GY,kY,AE,BY,CY,Be,RE,NY,IY,LE,xY,OY,bE,FY,WY,SE,YY,JY,wE,ZY,VY,ME,KY,XY,Ce,PE,zY,HY,GE,UY,qY,kE,QY,jY,BE,$Y,eJ,CE,tJ,rJ,NE,aJ,lJ,Ne,IE,nJ,oJ,xE,iJ,dJ,OE,sJ,hJ,FE,cJ,gJ,WE,fJ,uJ,YE,vJ,EJ,Ie,JE,TJ,_J,ZE,mJ,pJ,VE,DJ,yJ,KE,AJ,RJ,XE,LJ,bJ,zE,SJ,wJ,xe,HE,MJ,PJ,UE,GJ,kJ,qE,BJ,CJ,QE,NJ,IJ,jE,xJ,OJ,$E,FJ,WJ,Oe,e2,YJ,JJ,t2,ZJ,VJ,r2,KJ,XJ,a2,zJ,HJ,l2,UJ,qJ,n2,QJ,jJ,Fe,o2,$J,eZ,i2,tZ,rZ,d2,aZ,lZ,s2,nZ,oZ,h2,iZ,dZ,c2,sZ,hZ,We,g2,cZ,gZ,f2,fZ,uZ,u2,vZ,EZ,v2,TZ,_Z,E2,mZ,pZ,T2,DZ,yZ,Ye,_2,AZ,RZ,m2,LZ,bZ,p2,SZ,wZ,D2,MZ,PZ,y2,GZ,kZ,A2,BZ,CZ,Je,R2,NZ,IZ,L2,xZ,OZ,b2,FZ,WZ,S2,YZ,JZ,w2,ZZ,VZ,M2,KZ,XZ,Ze,P2,zZ,HZ,G2,UZ,qZ,k2,QZ,jZ,B2,$Z,eV,C2,tV,rV,N2,aV,lV,Ve,I2,nV,oV,x2,iV,dV,O2,sV,hV,F2,cV,gV,W2,fV,uV,Y2,vV,EV,Ke,J2,TV,_V,Z2,mV,pV,V2,DV,yV,K2,AV,RV,X2,LV,bV,z2,SV,wV,Xe,H2,MV,PV,U2,GV,kV,q2,BV,CV,Q2,NV,IV,j2,xV,OV,$2,FV,WV,ze,eT,YV,JV,tT,ZV,VV,rT,KV,XV,aT,zV,HV,lT,UV,qV,nT,QV,jV,He,oT,$V,eK,iT,tK,rK,dT,aK,lK,sT,nK,oK,hT,iK,dK,cT,sK,hK,Ue,gT,cK,gK,fT,fK,uK,uT,vK,EK,vT,TK,_K,ET,mK,pK,TT,DK,yK,qe,_T,AK,RK,mT,LK,bK,pT,SK,wK,DT,MK,PK,yT,GK,kK,AT,BK,CK,Qe,RT,NK,IK,LT,xK,OK,bT,FK,WK,ST,YK,JK,wT,ZK,VK,MT,KK,XK,je,PT,zK,HK,GT,UK,qK,kT,QK,jK,BT,$K,eX,CT,tX,rX,NT,aX,lX,$e,IT,nX,oX,xT,iX,dX,OT,sX,hX,FT,cX,gX,WT,fX,uX,YT,vX,EX,et,JT,TX,_X,ZT,mX,pX,VT,DX,yX,KT,AX,RX,XT,LX,bX,zT,SX,wX,tt,HT,MX,PX,UT,GX,kX,qT,BX,CX,QT,NX,IX,jT,xX,OX,$T,FX,WX,rt,e_,YX,JX,t_,ZX,VX,r_,KX,XX,a_,zX,HX,l_,UX,qX,n_,QX,jX,at,o_,$X,ez,i_,tz,rz,d_,az,lz,s_,nz,oz,h_,iz,dz,c_,sz,$m;return hn=new Nm({}),un=new Nm({}),vn=new Nm({}),Tn=new Nm({}),yi=new Nm({}),{c(){$r=r("meta"),Mi=d(),ot=r("h1"),Qr=r("a"),f_=r("span"),Mm(hn.$$.fragment),s0=d(),u_=r("span"),h0=n("\u{1F917} Transformers"),Im=d(),Pi=r("p"),c0=n("State-of-the-art Machine Learning for PyTorch, TensorFlow and JAX."),xm=d(),Gi=r("p"),g0=n("\u{1F917} Transformers provides APIs to easily download and train state-of-the-art pretrained models. Using pretrained models can reduce your compute costs, carbon footprint, and save you time from training a model from scratch. The models can be used across different modalities such as:"),Om=d(),it=r("ul"),v_=r("li"),f0=n("\u{1F4DD} Text: text classification, information extraction, question answering, summarization, translation, and text generation in over 100 languages."),u0=d(),E_=r("li"),v0=n("\u{1F5BC}\uFE0F Images: image classification, object detection, and segmentation."),E0=d(),T_=r("li"),T0=n("\u{1F5E3}\uFE0F Audio: speech recognition and audio classification."),_0=d(),__=r("li"),m0=n("\u{1F419} Multimodal: table question answering, optical character recognition, information extraction from scanned documents, video classification, and visual question answering."),Fm=d(),dt=r("p"),p0=n("Our library supports seamless integration between three of the most popular deep learning libraries: "),cn=r("a"),D0=n("PyTorch"),y0=n(", "),gn=r("a"),A0=n("TensorFlow"),R0=n(" and "),fn=r("a"),L0=n("JAX"),b0=n(". 
Train your model in three lines of code in one framework, and load it for inference with another."),Wm=d(),ki=r("p"),S0=n("Each \u{1F917} Transformers architecture is defined in a standalone Python module so they can be easily customized for research and experiments."),Ym=d(),da=r("h2"),va=r("a"),m_=r("span"),Mm(un.$$.fragment),w0=d(),p_=r("span"),M0=n("If you are looking for custom support from the Hugging Face team"),Jm=d(),sa=r("a"),ea=r("img"),P0=d(),Zm=r("br"),Vm=d(),ha=r("h2"),Ea=r("a"),D_=r("span"),Mm(vn.$$.fragment),G0=d(),y_=r("span"),k0=n("Contents"),Km=d(),Bi=r("p"),B0=n("The documentation is organized in five parts:"),Xm=d(),lt=r("ul"),A_=r("li"),Ci=r("p"),R_=r("strong"),C0=n("GET STARTED"),N0=n(` contains a quick tour, the installation instructions and some useful information about our philosophy and a glossary.`),I0=d(),L_=r("li"),Ni=r("p"),b_=r("strong"),x0=n("USING \u{1F917} TRANSFORMERS"),O0=n(" contains general tutorials on how to use the library."),F0=d(),S_=r("li"),Ii=r("p"),w_=r("strong"),W0=n("ADVANCED GUIDES"),Y0=n(" contains more advanced guides that are more specific to a given script or part of the library."),J0=d(),M_=r("li"),xi=r("p"),P_=r("strong"),Z0=n("RESEARCH"),V0=n(` focuses on tutorials that have less to do with how to use the library but more about general research in transformers model`),K0=d(),En=r("li"),Oi=r("p"),G_=r("strong"),X0=n("API"),z0=n(" contains the documentation of each public class and function, grouped in:"),H0=d(),ca=r("ul"),Fi=r("li"),k_=r("strong"),U0=n("MAIN CLASSES"),q0=n(" for the main classes exposing the important APIs of the library."),Q0=d(),Wi=r("li"),B_=r("strong"),j0=n("MODELS"),$0=n(" for the classes and functions related to each model implemented in the library."),ep=d(),Yi=r("li"),C_=r("strong"),tp=n("INTERNAL HELPERS"),rp=n(" for the classes and functions we use internally."),zm=d(),Ji=r("p"),ap=n(`The library currently contains Jax, PyTorch and Tensorflow implementations, pretrained model weights, usage scripts and conversion utilities for the following models.`),Hm=d(),ga=r("h3"),Ta=r("a"),N_=r("span"),Mm(Tn.$$.fragment),lp=d(),I_=r("span"),np=n("Supported models"),Um=d(),h=r("ol"),_a=r("li"),x_=r("strong"),Zi=r("a"),op=n("ALBERT"),ip=n(" (from Google Research and the Toyota Technological Institute at Chicago) released with the paper "),_n=r("a"),dp=n("ALBERT: A Lite BERT for Self-supervised Learning of Language Representations"),sp=n(", by Zhenzhong Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."),hp=d(),ma=r("li"),O_=r("strong"),Vi=r("a"),cp=n("BART"),gp=n(" (from Facebook) released with the paper "),mn=r("a"),fp=n("BART: Denoising Sequence-to-Sequence Pre-training for Natural Language Generation, Translation, and Comprehension"),up=n(" by Mike Lewis, Yinhan Liu, Naman Goyal, Marjan Ghazvininejad, Abdelrahman Mohamed, Omer Levy, Ves Stoyanov and Luke Zettlemoyer."),vp=d(),pa=r("li"),F_=r("strong"),Ki=r("a"),Ep=n("BARThez"),Tp=n(" (from \xC9cole polytechnique) released with the paper "),pn=r("a"),_p=n("BARThez: a Skilled Pretrained French Sequence-to-Sequence Model"),mp=n(" by Moussa Kamal Eddine, Antoine J.-P. 
Tixier, Michalis Vazirgiannis."),pp=d(),Da=r("li"),W_=r("strong"),Xi=r("a"),Dp=n("BARTpho"),yp=n(" (from VinAI Research) released with the paper "),Dn=r("a"),Ap=n("BARTpho: Pre-trained Sequence-to-Sequence Models for Vietnamese"),Rp=n(" by Nguyen Luong Tran, Duong Minh Le and Dat Quoc Nguyen."),Lp=d(),ya=r("li"),Y_=r("strong"),zi=r("a"),bp=n("BEiT"),Sp=n(" (from Microsoft) released with the paper "),yn=r("a"),wp=n("BEiT: BERT Pre-Training of Image Transformers"),Mp=n(" by Hangbo Bao, Li Dong, Furu Wei."),Pp=d(),Aa=r("li"),J_=r("strong"),Hi=r("a"),Gp=n("BERT"),kp=n(" (from Google) released with the paper "),An=r("a"),Bp=n("BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding"),Cp=n(" by Jacob Devlin, Ming-Wei Chang, Kenton Lee and Kristina Toutanova."),Np=d(),Ra=r("li"),Z_=r("strong"),Ui=r("a"),Ip=n("BERTweet"),xp=n(" (from VinAI Research) released with the paper "),Rn=r("a"),Op=n("BERTweet: A pre-trained language model for English Tweets"),Fp=n(" by Dat Quoc Nguyen, Thanh Vu and Anh Tuan Nguyen."),Wp=d(),La=r("li"),V_=r("strong"),qi=r("a"),Yp=n("BERT For Sequence Generation"),Jp=n(" (from Google) released with the paper "),Ln=r("a"),Zp=n("Leveraging Pre-trained Checkpoints for Sequence Generation Tasks"),Vp=n(" by Sascha Rothe, Shashi Narayan, Aliaksei Severyn."),Kp=d(),ba=r("li"),K_=r("strong"),Qi=r("a"),Xp=n("BigBird-RoBERTa"),zp=n(" (from Google Research) released with the paper "),bn=r("a"),Hp=n("Big Bird: Transformers for Longer Sequences"),Up=n(" by Manzil Zaheer, Guru Guruganesh, Avinava Dubey, Joshua Ainslie, Chris Alberti, Santiago Ontanon, Philip Pham, Anirudh Ravula, Qifan Wang, Li Yang, Amr Ahmed."),qp=d(),Sa=r("li"),X_=r("strong"),ji=r("a"),Qp=n("BigBird-Pegasus"),jp=n(" (from Google Research) released with the paper "),Sn=r("a"),$p=n("Big Bird: Transformers for Longer Sequences"),e6=n(" by Manzil Zaheer, Guru Guruganesh, Avinava Dubey, Joshua Ainslie, Chris Alberti, Santiago Ontanon, Philip Pham, Anirudh Ravula, Qifan Wang, Li Yang, Amr Ahmed."),t6=d(),wa=r("li"),z_=r("strong"),$i=r("a"),r6=n("Blenderbot"),a6=n(" (from Facebook) released with the paper "),wn=r("a"),l6=n("Recipes for building an open-domain chatbot"),n6=n(" by Stephen Roller, Emily Dinan, Naman Goyal, Da Ju, Mary Williamson, Yinhan Liu, Jing Xu, Myle Ott, Kurt Shuster, Eric M. Smith, Y-Lan Boureau, Jason Weston."),o6=d(),Ma=r("li"),H_=r("strong"),ed=r("a"),i6=n("BlenderbotSmall"),d6=n(" (from Facebook) released with the paper "),Mn=r("a"),s6=n("Recipes for building an open-domain chatbot"),h6=n(" by Stephen Roller, Emily Dinan, Naman Goyal, Da Ju, Mary Williamson, Yinhan Liu, Jing Xu, Myle Ott, Kurt Shuster, Eric M. Smith, Y-Lan Boureau, Jason Weston."),c6=d(),Pa=r("li"),U_=r("strong"),td=r("a"),g6=n("BORT"),f6=n(" (from Alexa) released with the paper "),Pn=r("a"),u6=n("Optimal Subarchitecture Extraction For BERT"),v6=n(" by Adrian de Wynter and Daniel J. 
Perry."),E6=d(),Ga=r("li"),q_=r("strong"),rd=r("a"),T6=n("ByT5"),_6=n(" (from Google Research) released with the paper "),Gn=r("a"),m6=n("ByT5: Towards a token-free future with pre-trained byte-to-byte models"),p6=n(" by Linting Xue, Aditya Barua, Noah Constant, Rami Al-Rfou, Sharan Narang, Mihir Kale, Adam Roberts, Colin Raffel."),D6=d(),ta=r("li"),Q_=r("strong"),ad=r("a"),y6=n("CamemBERT"),A6=n(" (from Inria/Facebook/Sorbonne) released with the paper "),kn=r("a"),R6=n("CamemBERT: a Tasty French Language Model"),L6=n(" by Louis Martin"),j_=r("em"),b6=n(", Benjamin Muller"),S6=n(", Pedro Javier Ortiz Su\xE1rez*, Yoann Dupont, Laurent Romary, \xC9ric Villemonte de la Clergerie, Djam\xE9 Seddah and Beno\xEEt Sagot."),w6=d(),ka=r("li"),$_=r("strong"),ld=r("a"),M6=n("CANINE"),P6=n(" (from Google Research) released with the paper "),Bn=r("a"),G6=n("CANINE: Pre-training an Efficient Tokenization-Free Encoder for Language Representation"),k6=n(" by Jonathan H. Clark, Dan Garrette, Iulia Turc, John Wieting."),B6=d(),Ba=r("li"),e3=r("strong"),nd=r("a"),C6=n("ConvNeXT"),N6=n(" (from Facebook AI) released with the paper "),Cn=r("a"),I6=n("A ConvNet for the 2020s"),x6=n(" by Zhuang Liu, Hanzi Mao, Chao-Yuan Wu, Christoph Feichtenhofer, Trevor Darrell, Saining Xie."),O6=d(),Ca=r("li"),t3=r("strong"),od=r("a"),F6=n("CLIP"),W6=n(" (from OpenAI) released with the paper "),Nn=r("a"),Y6=n("Learning Transferable Visual Models From Natural Language Supervision"),J6=n(" by Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, Ilya Sutskever."),Z6=d(),Na=r("li"),r3=r("strong"),id=r("a"),V6=n("ConvBERT"),K6=n(" (from YituTech) released with the paper "),In=r("a"),X6=n("ConvBERT: Improving BERT with Span-based Dynamic Convolution"),z6=n(" by Zihang Jiang, Weihao Yu, Daquan Zhou, Yunpeng Chen, Jiashi Feng, Shuicheng Yan."),H6=d(),Ia=r("li"),a3=r("strong"),dd=r("a"),U6=n("CPM"),q6=n(" (from Tsinghua University) released with the paper "),xn=r("a"),Q6=n("CPM: A Large-scale Generative Chinese Pre-trained Language Model"),j6=n(" by Zhengyan Zhang, Xu Han, Hao Zhou, Pei Ke, Yuxian Gu, Deming Ye, Yujia Qin, Yusheng Su, Haozhe Ji, Jian Guan, Fanchao Qi, Xiaozhi Wang, Yanan Zheng, Guoyang Zeng, Huanqi Cao, Shengqi Chen, Daixuan Li, Zhenbo Sun, Zhiyuan Liu, Minlie Huang, Wentao Han, Jie Tang, Juanzi Li, Xiaoyan Zhu, Maosong Sun."),$6=d(),ra=r("li"),l3=r("strong"),sd=r("a"),e7=n("CTRL"),t7=n(" (from Salesforce) released with the paper "),On=r("a"),r7=n("CTRL: A Conditional Transformer Language Model for Controllable Generation"),a7=n(" by Nitish Shirish Keskar"),n3=r("em"),l7=n(", Bryan McCann"),n7=n(", Lav R. 
Varshney, Caiming Xiong and Richard Socher."),o7=d(),xa=r("li"),o3=r("strong"),hd=r("a"),i7=n("Data2Vec"),d7=n(" (from Facebook) released with the paper "),Fn=r("a"),s7=n("Data2Vec: A General Framework for Self-supervised Learning in Speech, Vision and Language"),h7=n(" by Alexei Baevski, Wei-Ning Hsu, Qiantong Xu, Arun Babu, Jiatao Gu, Michael Auli."),c7=d(),Oa=r("li"),i3=r("strong"),cd=r("a"),g7=n("DeBERTa"),f7=n(" (from Microsoft) released with the paper "),Wn=r("a"),u7=n("DeBERTa: Decoding-enhanced BERT with Disentangled Attention"),v7=n(" by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen."),E7=d(),Fa=r("li"),d3=r("strong"),gd=r("a"),T7=n("DeBERTa-v2"),_7=n(" (from Microsoft) released with the paper "),Yn=r("a"),m7=n("DeBERTa: Decoding-enhanced BERT with Disentangled Attention"),p7=n(" by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen."),D7=d(),Wa=r("li"),s3=r("strong"),fd=r("a"),y7=n("DiT"),A7=n(" (from Microsoft Research) released with the paper "),Jn=r("a"),R7=n("DiT: Self-supervised Pre-training for Document Image Transformer"),L7=n(" by Junlong Li, Yiheng Xu, Tengchao Lv, Lei Cui, Cha Zhang, Furu Wei."),b7=d(),Ya=r("li"),h3=r("strong"),ud=r("a"),S7=n("DeiT"),w7=n(" (from Facebook) released with the paper "),Zn=r("a"),M7=n("Training data-efficient image transformers & distillation through attention"),P7=n(" by Hugo Touvron, Matthieu Cord, Matthijs Douze, Francisco Massa, Alexandre Sablayrolles, Herv\xE9 J\xE9gou."),G7=d(),Ja=r("li"),c3=r("strong"),vd=r("a"),k7=n("DETR"),B7=n(" (from Facebook) released with the paper "),Vn=r("a"),C7=n("End-to-End Object Detection with Transformers"),N7=n(" by Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, Sergey Zagoruyko."),I7=d(),Za=r("li"),g3=r("strong"),Ed=r("a"),x7=n("DialoGPT"),O7=n(" (from Microsoft Research) released with the paper "),Kn=r("a"),F7=n("DialoGPT: Large-Scale Generative Pre-training for Conversational Response Generation"),W7=n(" by Yizhe Zhang, Siqi Sun, Michel Galley, Yen-Chun Chen, Chris Brockett, Xiang Gao, Jianfeng Gao, Jingjing Liu, Bill Dolan."),Y7=d(),nt=r("li"),f3=r("strong"),Td=r("a"),J7=n("DistilBERT"),Z7=n(" (from HuggingFace), released together with the paper "),Xn=r("a"),V7=n("DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter"),K7=n(" by Victor Sanh, Lysandre Debut and Thomas Wolf. The same method has been applied to compress GPT2 into "),zn=r("a"),X7=n("DistilGPT2"),z7=n(", RoBERTa into "),Hn=r("a"),H7=n("DistilRoBERTa"),U7=n(", Multilingual BERT into "),Un=r("a"),q7=n("DistilmBERT"),Q7=n(" and a German version of DistilBERT."),j7=d(),Va=r("li"),u3=r("strong"),_d=r("a"),$7=n("DPR"),e9=n(" (from Facebook) released with the paper "),qn=r("a"),t9=n("Dense Passage Retrieval for Open-Domain Question Answering"),r9=n(" by Vladimir Karpukhin, Barlas O\u011Fuz, Sewon Min, Patrick Lewis, Ledell Wu, Sergey Edunov, Danqi Chen, and Wen-tau Yih."),a9=d(),Ka=r("li"),v3=r("strong"),md=r("a"),l9=n("EncoderDecoder"),n9=n(" (from Google Research) released with the paper "),Qn=r("a"),o9=n("Leveraging Pre-trained Checkpoints for Sequence Generation Tasks"),i9=n(" by Sascha Rothe, Shashi Narayan, Aliaksei Severyn."),d9=d(),Xa=r("li"),E3=r("strong"),pd=r("a"),s9=n("ELECTRA"),h9=n(" (from Google Research/Stanford University) released with the paper "),jn=r("a"),c9=n("ELECTRA: Pre-training text encoders as discriminators rather than generators"),g9=n(" by Kevin Clark, Minh-Thang Luong, Quoc V. Le, Christopher D. 
Manning."),f9=d(),za=r("li"),T3=r("strong"),Dd=r("a"),u9=n("FlauBERT"),v9=n(" (from CNRS) released with the paper "),$n=r("a"),E9=n("FlauBERT: Unsupervised Language Model Pre-training for French"),T9=n(" by Hang Le, Lo\xEFc Vial, Jibril Frej, Vincent Segonne, Maximin Coavoux, Benjamin Lecouteux, Alexandre Allauzen, Beno\xEEt Crabb\xE9, Laurent Besacier, Didier Schwab."),_9=d(),Ha=r("li"),_3=r("strong"),yd=r("a"),m9=n("FNet"),p9=n(" (from Google Research) released with the paper "),eo=r("a"),D9=n("FNet: Mixing Tokens with Fourier Transforms"),y9=n(" by James Lee-Thorp, Joshua Ainslie, Ilya Eckstein, Santiago Ontanon."),A9=d(),Ua=r("li"),m3=r("strong"),Ad=r("a"),R9=n("Funnel Transformer"),L9=n(" (from CMU/Google Brain) released with the paper "),to=r("a"),b9=n("Funnel-Transformer: Filtering out Sequential Redundancy for Efficient Language Processing"),S9=n(" by Zihang Dai, Guokun Lai, Yiming Yang, Quoc V. Le."),w9=d(),qa=r("li"),p3=r("strong"),Rd=r("a"),M9=n("GPT"),P9=n(" (from OpenAI) released with the paper "),ro=r("a"),G9=n("Improving Language Understanding by Generative Pre-Training"),k9=n(" by Alec Radford, Karthik Narasimhan, Tim Salimans and Ilya Sutskever."),B9=d(),st=r("li"),D3=r("strong"),Ld=r("a"),C9=n("GPT-2"),N9=n(" (from OpenAI) released with the paper "),ao=r("a"),I9=n("Language Models are Unsupervised Multitask Learners"),x9=n(" by Alec Radford"),y3=r("em"),O9=n(", Jeffrey Wu"),F9=n(", Rewon Child, David Luan, Dario Amodei"),A3=r("strong"),W9=n("and Ilya Sutskever"),Y9=n("."),J9=d(),Qa=r("li"),R3=r("strong"),bd=r("a"),Z9=n("GPT-J"),V9=n(" (from EleutherAI) released in the repository "),lo=r("a"),K9=n("kingoflolz/mesh-transformer-jax"),X9=n(" by Ben Wang and Aran Komatsuzaki."),z9=d(),ja=r("li"),L3=r("strong"),Sd=r("a"),H9=n("GPT Neo"),U9=n(" (from EleutherAI) released in the repository "),no=r("a"),q9=n("EleutherAI/gpt-neo"),Q9=n(" by Sid Black, Stella Biderman, Leo Gao, Phil Wang and Connor Leahy."),j9=d(),$a=r("li"),b3=r("strong"),wd=r("a"),$9=n("Hubert"),e8=n(" (from Facebook) released with the paper "),oo=r("a"),t8=n("HuBERT: Self-Supervised Speech Representation Learning by Masked Prediction of Hidden Units"),r8=n(" by Wei-Ning Hsu, Benjamin Bolte, Yao-Hung Hubert Tsai, Kushal Lakhotia, Ruslan Salakhutdinov, Abdelrahman Mohamed."),a8=d(),el=r("li"),S3=r("strong"),Md=r("a"),l8=n("I-BERT"),n8=n(" (from Berkeley) released with the paper "),io=r("a"),o8=n("I-BERT: Integer-only BERT Quantization"),i8=n(" by Sehoon Kim, Amir Gholami, Zhewei Yao, Michael W. 
Mahoney, Kurt Keutzer."),d8=d(),tl=r("li"),w3=r("strong"),Pd=r("a"),s8=n("ImageGPT"),h8=n(" (from OpenAI) released with the paper "),so=r("a"),c8=n("Generative Pretraining from Pixels"),g8=n(" by Mark Chen, Alec Radford, Rewon Child, Jeffrey Wu, Heewoo Jun, David Luan, Ilya Sutskever."),f8=d(),rl=r("li"),M3=r("strong"),Gd=r("a"),u8=n("LayoutLM"),v8=n(" (from Microsoft Research Asia) released with the paper "),ho=r("a"),E8=n("LayoutLM: Pre-training of Text and Layout for Document Image Understanding"),T8=n(" by Yiheng Xu, Minghao Li, Lei Cui, Shaohan Huang, Furu Wei, Ming Zhou."),_8=d(),al=r("li"),P3=r("strong"),kd=r("a"),m8=n("LayoutLMv2"),p8=n(" (from Microsoft Research Asia) released with the paper "),co=r("a"),D8=n("LayoutLMv2: Multi-modal Pre-training for Visually-Rich Document Understanding"),y8=n(" by Yang Xu, Yiheng Xu, Tengchao Lv, Lei Cui, Furu Wei, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Wanxiang Che, Min Zhang, Lidong Zhou."),A8=d(),ll=r("li"),G3=r("strong"),Bd=r("a"),R8=n("LayoutXLM"),L8=n(" (from Microsoft Research Asia) released with the paper "),go=r("a"),b8=n("LayoutXLM: Multimodal Pre-training for Multilingual Visually-rich Document Understanding"),S8=n(" by Yiheng Xu, Tengchao Lv, Lei Cui, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Furu Wei."),w8=d(),nl=r("li"),k3=r("strong"),Cd=r("a"),M8=n("LED"),P8=n(" (from AllenAI) released with the paper "),fo=r("a"),G8=n("Longformer: The Long-Document Transformer"),k8=n(" by Iz Beltagy, Matthew E. Peters, Arman Cohan."),B8=d(),ol=r("li"),B3=r("strong"),Nd=r("a"),C8=n("Longformer"),N8=n(" (from AllenAI) released with the paper "),uo=r("a"),I8=n("Longformer: The Long-Document Transformer"),x8=n(" by Iz Beltagy, Matthew E. Peters, Arman Cohan."),O8=d(),il=r("li"),C3=r("strong"),Id=r("a"),F8=n("LUKE"),W8=n(" (from Studio Ousia) released with the paper "),vo=r("a"),Y8=n("LUKE: Deep Contextualized Entity Representations with Entity-aware Self-attention"),J8=n(" by Ikuya Yamada, Akari Asai, Hiroyuki Shindo, Hideaki Takeda, Yuji Matsumoto."),Z8=d(),dl=r("li"),N3=r("strong"),xd=r("a"),V8=n("mLUKE"),K8=n(" (from Studio Ousia) released with the paper "),Eo=r("a"),X8=n("mLUKE: The Power of Entity Representations in Multilingual Pretrained Language Models"),z8=n(" by Ryokan Ri, Ikuya Yamada, and Yoshimasa Tsuruoka."),H8=d(),sl=r("li"),I3=r("strong"),Od=r("a"),U8=n("LXMERT"),q8=n(" (from UNC Chapel Hill) released with the paper "),To=r("a"),Q8=n("LXMERT: Learning Cross-Modality Encoder Representations from Transformers for Open-Domain Question Answering"),j8=n(" by Hao Tan and Mohit Bansal."),$8=d(),hl=r("li"),x3=r("strong"),Fd=r("a"),eD=n("M2M100"),tD=n(" (from Facebook) released with the paper "),_o=r("a"),rD=n("Beyond English-Centric Multilingual Machine Translation"),aD=n(" by Angela Fan, Shruti Bhosale, Holger Schwenk, Zhiyi Ma, Ahmed El-Kishky, Siddharth Goyal, Mandeep Baines, Onur Celebi, Guillaume Wenzek, Vishrav Chaudhary, Naman Goyal, Tom Birch, Vitaliy Liptchinsky, Sergey Edunov, Edouard Grave, Michael Auli, Armand Joulin."),lD=d(),aa=r("li"),O3=r("strong"),Wd=r("a"),nD=n("MarianMT"),oD=n(" Machine translation models trained using "),mo=r("a"),iD=n("OPUS"),dD=n(" data by J\xF6rg Tiedemann. 
The "),po=r("a"),sD=n("Marian Framework"),hD=n(" is being developed by the Microsoft Translator Team."),cD=d(),cl=r("li"),F3=r("strong"),Yd=r("a"),gD=n("MaskFormer"),fD=n(" (from Meta and UIUC) released with the paper "),Do=r("a"),uD=n("Per-Pixel Classification is Not All You Need for Semantic Segmentation"),vD=n(" by Bowen Cheng, Alexander G. Schwing, Alexander Kirillov."),ED=d(),gl=r("li"),W3=r("strong"),Jd=r("a"),TD=n("MBart"),_D=n(" (from Facebook) released with the paper "),yo=r("a"),mD=n("Multilingual Denoising Pre-training for Neural Machine Translation"),pD=n(" by Yinhan Liu, Jiatao Gu, Naman Goyal, Xian Li, Sergey Edunov, Marjan Ghazvininejad, Mike Lewis, Luke Zettlemoyer."),DD=d(),fl=r("li"),Y3=r("strong"),Zd=r("a"),yD=n("MBart-50"),AD=n(" (from Facebook) released with the paper "),Ao=r("a"),RD=n("Multilingual Translation with Extensible Multilingual Pretraining and Finetuning"),LD=n(" by Yuqing Tang, Chau Tran, Xian Li, Peng-Jen Chen, Naman Goyal, Vishrav Chaudhary, Jiatao Gu, Angela Fan."),bD=d(),ul=r("li"),J3=r("strong"),Vd=r("a"),SD=n("Megatron-BERT"),wD=n(" (from NVIDIA) released with the paper "),Ro=r("a"),MD=n("Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism"),PD=n(" by Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper and Bryan Catanzaro."),GD=d(),vl=r("li"),Z3=r("strong"),Kd=r("a"),kD=n("Megatron-GPT2"),BD=n(" (from NVIDIA) released with the paper "),Lo=r("a"),CD=n("Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism"),ND=n(" by Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper and Bryan Catanzaro."),ID=d(),El=r("li"),V3=r("strong"),Xd=r("a"),xD=n("MPNet"),OD=n(" (from Microsoft Research) released with the paper "),bo=r("a"),FD=n("MPNet: Masked and Permuted Pre-training for Language Understanding"),WD=n(" by Kaitao Song, Xu Tan, Tao Qin, Jianfeng Lu, Tie-Yan Liu."),YD=d(),Tl=r("li"),K3=r("strong"),zd=r("a"),JD=n("MT5"),ZD=n(" (from Google AI) released with the paper "),So=r("a"),VD=n("mT5: A massively multilingual pre-trained text-to-text transformer"),KD=n(" by Linting Xue, Noah Constant, Adam Roberts, Mihir Kale, Rami Al-Rfou, Aditya Siddhant, Aditya Barua, Colin Raffel."),XD=d(),_l=r("li"),X3=r("strong"),Hd=r("a"),zD=n("Nystr\xF6mformer"),HD=n(" (from the University of Wisconsin - Madison) released with the paper "),wo=r("a"),UD=n("Nystr\xF6mformer: A Nystr\xF6m-Based Algorithm for Approximating Self-Attention"),qD=n(" by Yunyang Xiong, Zhanpeng Zeng, Rudrasis Chakraborty, Mingxing Tan, Glenn Fung, Yin Li, Vikas Singh."),QD=d(),ml=r("li"),z3=r("strong"),Ud=r("a"),jD=n("Pegasus"),$D=n(" (from Google) released with the paper "),Mo=r("a"),ey=n("PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive Summarization"),ty=n(" by Jingqing Zhang, Yao Zhao, Mohammad Saleh and Peter J. Liu."),ry=d(),pl=r("li"),H3=r("strong"),qd=r("a"),ay=n("Perceiver IO"),ly=n(" (from Deepmind) released with the paper "),Po=r("a"),ny=n("Perceiver IO: A General Architecture for Structured Inputs & Outputs"),oy=n(" by Andrew Jaegle, Sebastian Borgeaud, Jean-Baptiste Alayrac, Carl Doersch, Catalin Ionescu, David Ding, Skanda Koppula, Daniel Zoran, Andrew Brock, Evan Shelhamer, Olivier H\xE9naff, Matthew M. 
Botvinick, Andrew Zisserman, Oriol Vinyals, Jo\xE3o Carreira."),iy=d(),Dl=r("li"),U3=r("strong"),Qd=r("a"),dy=n("PhoBERT"),sy=n(" (from VinAI Research) released with the paper "),Go=r("a"),hy=n("PhoBERT: Pre-trained language models for Vietnamese"),cy=n(" by Dat Quoc Nguyen and Anh Tuan Nguyen."),gy=d(),yl=r("li"),q3=r("strong"),jd=r("a"),fy=n("PLBart"),uy=n(" (from UCLA NLP) released with the paper "),ko=r("a"),vy=n("Unified Pre-training for Program Understanding and Generation"),Ey=n(" by Wasi Uddin Ahmad, Saikat Chakraborty, Baishakhi Ray, Kai-Wei Chang."),Ty=d(),Al=r("li"),Q3=r("strong"),$d=r("a"),_y=n("PoolFormer"),my=n(" (from Sea AI Labs) released with the paper "),Bo=r("a"),py=n("MetaFormer is Actually What You Need for Vision"),Dy=n(" by Yu, Weihao and Luo, Mi and Zhou, Pan and Si, Chenyang and Zhou, Yichen and Wang, Xinchao and Feng, Jiashi and Yan, Shuicheng."),yy=d(),Rl=r("li"),j3=r("strong"),es=r("a"),Ay=n("ProphetNet"),Ry=n(" (from Microsoft Research) released with the paper "),Co=r("a"),Ly=n("ProphetNet: Predicting Future N-gram for Sequence-to-Sequence Pre-training"),by=n(" by Yu Yan, Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang and Ming Zhou."),Sy=d(),Ll=r("li"),$3=r("strong"),ts=r("a"),wy=n("QDQBert"),My=n(" (from NVIDIA) released with the paper "),No=r("a"),Py=n("Integer Quantization for Deep Learning Inference: Principles and Empirical Evaluation"),Gy=n(" by Hao Wu, Patrick Judd, Xiaojie Zhang, Mikhail Isaev and Paulius Micikevicius."),ky=d(),bl=r("li"),e4=r("strong"),Io=r("a"),By=n("REALM"),Cy=n(" (from Google Research) released with the paper "),xo=r("a"),Ny=n("REALM: Retrieval-Augmented Language Model Pre-Training"),Iy=n(" by Kelvin Guu, Kenton Lee, Zora Tung, Panupong Pasupat and Ming-Wei Chang."),xy=d(),Sl=r("li"),t4=r("strong"),rs=r("a"),Oy=n("Reformer"),Fy=n(" (from Google Research) released with the paper "),Oo=r("a"),Wy=n("Reformer: The Efficient Transformer"),Yy=n(" by Nikita Kitaev, \u0141ukasz Kaiser, Anselm Levskaya."),Jy=d(),wl=r("li"),r4=r("strong"),as=r("a"),Zy=n("RemBERT"),Vy=n(" (from Google Research) released with the paper "),Fo=r("a"),Ky=n("Rethinking embedding coupling in pre-trained language models"),Xy=n(" by Hyung Won Chung, Thibault F\xE9vry, Henry Tsai, M. Johnson, Sebastian Ruder."),zy=d(),Ml=r("li"),a4=r("strong"),ls=r("a"),Hy=n("RoBERTa"),Uy=n(" (from Facebook), released together with the paper "),Wo=r("a"),qy=n("RoBERTa: A Robustly Optimized BERT Pretraining Approach"),Qy=n(" by Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, Veselin Stoyanov."),jy=d(),Pl=r("li"),l4=r("strong"),ns=r("a"),$y=n("RoFormer"),eA=n(" (from ZhuiyiTechnology), released together with the paper "),Yo=r("a"),tA=n("RoFormer: Enhanced Transformer with Rotary Position Embedding"),rA=n(" by Jianlin Su and Yu Lu and Shengfeng Pan and Bo Wen and Yunfeng Liu."),aA=d(),Gl=r("li"),n4=r("strong"),os=r("a"),lA=n("SegFormer"),nA=n(" (from NVIDIA) released with the paper "),Jo=r("a"),oA=n("SegFormer: Simple and Efficient Design for Semantic Segmentation with Transformers"),iA=n(" by Enze Xie, Wenhai Wang, Zhiding Yu, Anima Anandkumar, Jose M. Alvarez, Ping Luo."),dA=d(),kl=r("li"),o4=r("strong"),is=r("a"),sA=n("SEW"),hA=n(" (from ASAPP) released with the paper "),Zo=r("a"),cA=n("Performance-Efficiency Trade-offs in Unsupervised Pre-training for Speech Recognition"),gA=n(" by Felix Wu, Kwangyoun Kim, Jing Pan, Kyu Han, Kilian Q. 
Weinberger, Yoav Artzi."),fA=d(),Bl=r("li"),i4=r("strong"),ds=r("a"),uA=n("SEW-D"),vA=n(" (from ASAPP) released with the paper "),Vo=r("a"),EA=n("Performance-Efficiency Trade-offs in Unsupervised Pre-training for Speech Recognition"),TA=n(" by Felix Wu, Kwangyoun Kim, Jing Pan, Kyu Han, Kilian Q. Weinberger, Yoav Artzi."),_A=d(),Cl=r("li"),d4=r("strong"),ss=r("a"),mA=n("SpeechToTextTransformer"),pA=n(" (from Facebook), released together with the paper "),Ko=r("a"),DA=n("fairseq S2T: Fast Speech-to-Text Modeling with fairseq"),yA=n(" by Changhan Wang, Yun Tang, Xutai Ma, Anne Wu, Dmytro Okhonko, Juan Pino."),AA=d(),Nl=r("li"),s4=r("strong"),hs=r("a"),RA=n("SpeechToTextTransformer2"),LA=n(" (from Facebook), released together with the paper "),Xo=r("a"),bA=n("Large-Scale Self- and Semi-Supervised Learning for Speech Translation"),SA=n(" by Changhan Wang, Anne Wu, Juan Pino, Alexei Baevski, Michael Auli, Alexis Conneau."),wA=d(),Il=r("li"),h4=r("strong"),cs=r("a"),MA=n("Splinter"),PA=n(" (from Tel Aviv University), released together with the paper "),zo=r("a"),GA=n("Few-Shot Question Answering by Pretraining Span Selection"),kA=n(" by Ori Ram, Yuval Kirstain, Jonathan Berant, Amir Globerson, Omer Levy."),BA=d(),xl=r("li"),c4=r("strong"),gs=r("a"),CA=n("SqueezeBert"),NA=n(" (from Berkeley) released with the paper "),Ho=r("a"),IA=n("SqueezeBERT: What can computer vision teach NLP about efficient neural networks?"),xA=n(" by Forrest N. Iandola, Albert E. Shaw, Ravi Krishna, and Kurt W. Keutzer."),OA=d(),Ol=r("li"),g4=r("strong"),fs=r("a"),FA=n("Swin Transformer"),WA=n(" (from Microsoft) released with the paper "),Uo=r("a"),YA=n("Swin Transformer: Hierarchical Vision Transformer using Shifted Windows"),JA=n(" by Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, Baining Guo."),ZA=d(),Fl=r("li"),f4=r("strong"),us=r("a"),VA=n("T5"),KA=n(" (from Google AI) released with the paper "),qo=r("a"),XA=n("Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer"),zA=n(" by Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu."),HA=d(),Wl=r("li"),u4=r("strong"),vs=r("a"),UA=n("T5v1.1"),qA=n(" (from Google AI) released in the repository "),Qo=r("a"),QA=n("google-research/text-to-text-transfer-transformer"),jA=n(" by Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu."),$A=d(),Yl=r("li"),v4=r("strong"),Es=r("a"),eR=n("TAPAS"),tR=n(" (from Google AI) released with the paper "),jo=r("a"),rR=n("TAPAS: Weakly Supervised Table Parsing via Pre-training"),aR=n(" by Jonathan Herzig, Pawe\u0142 Krzysztof Nowak, Thomas M\xFCller, Francesco Piccinno and Julian Martin Eisenschlos."),lR=d(),la=r("li"),E4=r("strong"),Ts=r("a"),nR=n("Transformer-XL"),oR=n(" (from Google/CMU) released with the paper "),$o=r("a"),iR=n("Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context"),dR=n(" by Zihang Dai"),T4=r("em"),sR=n(", Zhilin Yang"),hR=n(", Yiming Yang, Jaime Carbonell, Quoc V. 
Le, Ruslan Salakhutdinov."),cR=d(),Jl=r("li"),_4=r("strong"),_s=r("a"),gR=n("TrOCR"),fR=n(" (from Microsoft), released together with the paper "),ei=r("a"),uR=n("TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models"),vR=n(" by Minghao Li, Tengchao Lv, Lei Cui, Yijuan Lu, Dinei Florencio, Cha Zhang, Zhoujun Li, Furu Wei."),ER=d(),Zl=r("li"),m4=r("strong"),ms=r("a"),TR=n("UniSpeech"),_R=n(" (from Microsoft Research) released with the paper "),ti=r("a"),mR=n("UniSpeech: Unified Speech Representation Learning with Labeled and Unlabeled Data"),pR=n(" by Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang."),DR=d(),Vl=r("li"),p4=r("strong"),ps=r("a"),yR=n("UniSpeechSat"),AR=n(" (from Microsoft Research) released with the paper "),ri=r("a"),RR=n("UNISPEECH-SAT: UNIVERSAL SPEECH REPRESENTATION LEARNING WITH SPEAKER AWARE PRE-TRAINING"),LR=n(" by Sanyuan Chen, Yu Wu, Chengyi Wang, Zhengyang Chen, Zhuo Chen, Shujie Liu, Jian Wu, Yao Qian, Furu Wei, Jinyu Li, Xiangzhan Yu."),bR=d(),Kl=r("li"),D4=r("strong"),Ds=r("a"),SR=n("ViLT"),wR=n(" (from NAVER AI Lab/Kakao Enterprise/Kakao Brain) released with the paper "),ai=r("a"),MR=n("ViLT: Vision-and-Language Transformer Without Convolution or Region Supervision"),PR=n(" by Wonjae Kim, Bokyung Son, Ildoo Kim."),GR=d(),Xl=r("li"),y4=r("strong"),ys=r("a"),kR=n("Vision Transformer (ViT)"),BR=n(" (from Google AI) released with the paper "),li=r("a"),CR=n("An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale"),NR=n(" by Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, Neil Houlsby."),IR=d(),zl=r("li"),A4=r("strong"),As=r("a"),xR=n("ViTMAE"),OR=n(" (from Meta AI) released with the paper "),ni=r("a"),FR=n("Masked Autoencoders Are Scalable Vision Learners"),WR=n(" by Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Doll\xE1r, Ross Girshick."),YR=d(),Hl=r("li"),R4=r("strong"),Rs=r("a"),JR=n("VisualBERT"),ZR=n(" (from UCLA NLP) released with the paper "),oi=r("a"),VR=n("VisualBERT: A Simple and Performant Baseline for Vision and Language"),KR=n(" by Liunian Harold Li, Mark Yatskar, Da Yin, Cho-Jui Hsieh, Kai-Wei Chang."),XR=d(),Ul=r("li"),L4=r("strong"),Ls=r("a"),zR=n("WavLM"),HR=n(" (from Microsoft Research) released with the paper "),ii=r("a"),UR=n("WavLM: Large-Scale Self-Supervised Pre-Training for Full Stack Speech Processing"),qR=n(" by Sanyuan Chen, Chengyi Wang, Zhengyang Chen, Yu Wu, Shujie Liu, Zhuo Chen, Jinyu Li, Naoyuki Kanda, Takuya Yoshioka, Xiong Xiao, Jian Wu, Long Zhou, Shuo Ren, Yanmin Qian, Yao Qian, Jian Wu, Michael Zeng, Furu Wei."),QR=d(),ql=r("li"),b4=r("strong"),bs=r("a"),jR=n("Wav2Vec2"),$R=n(" (from Facebook AI) released with the paper "),di=r("a"),eL=n("wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations"),tL=n(" by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli."),rL=d(),Ql=r("li"),S4=r("strong"),si=r("a"),aL=n("Wav2Vec2Phoneme"),lL=n(" (from Facebook AI) released with the paper "),hi=r("a"),nL=n("Simple and Effective Zero-shot Cross-lingual Phoneme Recognition"),oL=n(" by Qiantong Xu, Alexei Baevski, Michael Auli."),iL=d(),jl=r("li"),w4=r("strong"),ci=r("a"),dL=n("XGLM"),sL=n(" (From Facebook AI) released with the paper "),gi=r("a"),hL=n("Few-shot Learning with Multilingual Language Models"),cL=n(" by Xi Victoria Lin, Todor Mihaylov, Mikel Artetxe, Tianlu 
Wang, Shuohui Chen, Daniel Simig, Myle Ott, Naman Goyal, Shruti Bhosale, Jingfei Du, Ramakanth Pasunuru, Sam Shleifer, Punit Singh Koura, Vishrav Chaudhary, Brian O\u2019Horo, Jeff Wang, Luke Zettlemoyer, Zornitsa Kozareva, Mona Diab, Veselin Stoyanov, Xian Li."),gL=d(),$l=r("li"),M4=r("strong"),Ss=r("a"),fL=n("XLM"),uL=n(" (from Facebook) released together with the paper "),fi=r("a"),vL=n("Cross-lingual Language Model Pretraining"),EL=n(" by Guillaume Lample and Alexis Conneau."),TL=d(),en=r("li"),P4=r("strong"),ws=r("a"),_L=n("XLM-ProphetNet"),mL=n(" (from Microsoft Research) released with the paper "),ui=r("a"),pL=n("ProphetNet: Predicting Future N-gram for Sequence-to-Sequence Pre-training"),DL=n(" by Yu Yan, Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang and Ming Zhou."),yL=d(),na=r("li"),G4=r("strong"),Ms=r("a"),AL=n("XLM-RoBERTa"),RL=n(" (from Facebook AI), released together with the paper "),vi=r("a"),LL=n("Unsupervised Cross-lingual Representation Learning at Scale"),bL=n(" by Alexis Conneau"),k4=r("em"),SL=n(", Kartikay Khandelwal"),wL=n(", Naman Goyal, Vishrav Chaudhary, Guillaume Wenzek, Francisco Guzm\xE1n, Edouard Grave, Myle Ott, Luke Zettlemoyer and Veselin Stoyanov."),ML=d(),tn=r("li"),B4=r("strong"),Ps=r("a"),PL=n("XLM-RoBERTa-XL"),GL=n(" (from Facebook AI), released together with the paper "),Ei=r("a"),kL=n("Larger-Scale Transformers for Multilingual Masked Language Modeling"),BL=n(" by Naman Goyal, Jingfei Du, Myle Ott, Giri Anantharaman, Alexis Conneau."),CL=d(),oa=r("li"),C4=r("strong"),Gs=r("a"),NL=n("XLNet"),IL=n(" (from Google/CMU) released with the paper "),Ti=r("a"),xL=n("\u200BXLNet: Generalized Autoregressive Pretraining for Language Understanding"),OL=n(" by Zhilin Yang"),N4=r("em"),FL=n(", Zihang Dai"),WL=n(", Yiming Yang, Jaime Carbonell, Ruslan Salakhutdinov, Quoc V. Le."),YL=d(),rn=r("li"),I4=r("strong"),ks=r("a"),JL=n("XLSR-Wav2Vec2"),ZL=n(" (from Facebook AI) released with the paper "),_i=r("a"),VL=n("Unsupervised Cross-Lingual Representation Learning For Speech Recognition"),KL=n(" by Alexis Conneau, Alexei Baevski, Ronan Collobert, Abdelrahman Mohamed, Michael Auli."),XL=d(),an=r("li"),x4=r("strong"),mi=r("a"),zL=n("XLS-R"),HL=n(" (from Facebook AI) released with the paper "),pi=r("a"),UL=n("XLS-R: Self-supervised Cross-lingual Speech Representation Learning at Scale"),qL=n(" by Arun Babu, Changhan Wang, Andros Tjandra, Kushal Lakhotia, Qiantong Xu, Naman Goyal, Kritika Singh, Patrick von Platen, Yatharth Saraf, Juan Pino, Alexei Baevski, Alexis Conneau, Michael Auli."),QL=d(),ln=r("li"),O4=r("strong"),Bs=r("a"),jL=n("YOSO"),$L=n(" (from the University of Wisconsin - Madison) released with the paper "),Di=r("a"),eb=n("You Only Sample (Almost) Once: Linear Cost Self-Attention Via Bernoulli Sampling"),tb=n(" by Zhanpeng Zeng, Yunyang Xiong, Sathya N. Ravi, Shailesh Acharya, Glenn Fung, Vikas Singh."),qm=d(),fa=r("h3"),nn=r("a"),F4=r("span"),Mm(yi.$$.fragment),rb=d(),W4=r("span"),ab=n("Supported frameworks"),Qm=d(),Cs=r("p"),lb=n(`The table below represents the current support in the library for each of those models, whether they have a Python tokenizer (called \u201Cslow\u201D). 
A \u201Cfast\u201D tokenizer backed by the \u{1F917} Tokenizers library, whether they have support in Jax (via Flax), PyTorch, and/or TensorFlow.`),jm=d(),on=r("table"),Y4=r("thead"),T=r("tr"),Ns=r("th"),nb=n("Model"),ob=d(),Is=r("th"),ib=n("Tokenizer slow"),db=d(),xs=r("th"),sb=n("Tokenizer fast"),hb=d(),Os=r("th"),cb=n("PyTorch support"),gb=d(),Fs=r("th"),fb=n("TensorFlow support"),ub=d(),Ws=r("th"),vb=n("Flax Support"),Eb=d(),g=r("tbody"),_=r("tr"),Ys=r("td"),Tb=n("ALBERT"),_b=d(),Js=r("td"),mb=n("\u2705"),pb=d(),Zs=r("td"),Db=n("\u2705"),yb=d(),Vs=r("td"),Ab=n("\u2705"),Rb=d(),Ks=r("td"),Lb=n("\u2705"),bb=d(),Xs=r("td"),Sb=n("\u2705"),wb=d(),m=r("tr"),zs=r("td"),Mb=n("BART"),Pb=d(),Hs=r("td"),Gb=n("\u2705"),kb=d(),Us=r("td"),Bb=n("\u2705"),Cb=d(),qs=r("td"),Nb=n("\u2705"),Ib=d(),Qs=r("td"),xb=n("\u2705"),Ob=d(),js=r("td"),Fb=n("\u2705"),Wb=d(),p=r("tr"),$s=r("td"),Yb=n("BEiT"),Jb=d(),eh=r("td"),Zb=n("\u274C"),Vb=d(),th=r("td"),Kb=n("\u274C"),Xb=d(),rh=r("td"),zb=n("\u2705"),Hb=d(),ah=r("td"),Ub=n("\u274C"),qb=d(),lh=r("td"),Qb=n("\u2705"),jb=d(),D=r("tr"),nh=r("td"),$b=n("BERT"),eS=d(),oh=r("td"),tS=n("\u2705"),rS=d(),ih=r("td"),aS=n("\u2705"),lS=d(),dh=r("td"),nS=n("\u2705"),oS=d(),sh=r("td"),iS=n("\u2705"),dS=d(),hh=r("td"),sS=n("\u2705"),hS=d(),y=r("tr"),ch=r("td"),cS=n("Bert Generation"),gS=d(),gh=r("td"),fS=n("\u2705"),uS=d(),fh=r("td"),vS=n("\u274C"),ES=d(),uh=r("td"),TS=n("\u2705"),_S=d(),vh=r("td"),mS=n("\u274C"),pS=d(),Eh=r("td"),DS=n("\u274C"),yS=d(),A=r("tr"),Th=r("td"),AS=n("BigBird"),RS=d(),_h=r("td"),LS=n("\u2705"),bS=d(),mh=r("td"),SS=n("\u2705"),wS=d(),ph=r("td"),MS=n("\u2705"),PS=d(),Dh=r("td"),GS=n("\u274C"),kS=d(),yh=r("td"),BS=n("\u2705"),CS=d(),R=r("tr"),Ah=r("td"),NS=n("BigBirdPegasus"),IS=d(),Rh=r("td"),xS=n("\u274C"),OS=d(),Lh=r("td"),FS=n("\u274C"),WS=d(),bh=r("td"),YS=n("\u2705"),JS=d(),Sh=r("td"),ZS=n("\u274C"),VS=d(),wh=r("td"),KS=n("\u274C"),XS=d(),L=r("tr"),Mh=r("td"),zS=n("Blenderbot"),HS=d(),Ph=r("td"),US=n("\u2705"),qS=d(),Gh=r("td"),QS=n("\u2705"),jS=d(),kh=r("td"),$S=n("\u2705"),ew=d(),Bh=r("td"),tw=n("\u2705"),rw=d(),Ch=r("td"),aw=n("\u2705"),lw=d(),b=r("tr"),Nh=r("td"),nw=n("BlenderbotSmall"),ow=d(),Ih=r("td"),iw=n("\u2705"),dw=d(),xh=r("td"),sw=n("\u2705"),hw=d(),Oh=r("td"),cw=n("\u2705"),gw=d(),Fh=r("td"),fw=n("\u2705"),uw=d(),Wh=r("td"),vw=n("\u2705"),Ew=d(),S=r("tr"),Yh=r("td"),Tw=n("CamemBERT"),_w=d(),Jh=r("td"),mw=n("\u2705"),pw=d(),Zh=r("td"),Dw=n("\u2705"),yw=d(),Vh=r("td"),Aw=n("\u2705"),Rw=d(),Kh=r("td"),Lw=n("\u2705"),bw=d(),Xh=r("td"),Sw=n("\u274C"),ww=d(),w=r("tr"),zh=r("td"),Mw=n("Canine"),Pw=d(),Hh=r("td"),Gw=n("\u2705"),kw=d(),Uh=r("td"),Bw=n("\u274C"),Cw=d(),qh=r("td"),Nw=n("\u2705"),Iw=d(),Qh=r("td"),xw=n("\u274C"),Ow=d(),jh=r("td"),Fw=n("\u274C"),Ww=d(),M=r("tr"),$h=r("td"),Yw=n("CLIP"),Jw=d(),ec=r("td"),Zw=n("\u2705"),Vw=d(),tc=r("td"),Kw=n("\u2705"),Xw=d(),rc=r("td"),zw=n("\u2705"),Hw=d(),ac=r("td"),Uw=n("\u2705"),qw=d(),lc=r("td"),Qw=n("\u2705"),jw=d(),P=r("tr"),nc=r("td"),$w=n("ConvBERT"),eM=d(),oc=r("td"),tM=n("\u2705"),rM=d(),ic=r("td"),aM=n("\u2705"),lM=d(),dc=r("td"),nM=n("\u2705"),oM=d(),sc=r("td"),iM=n("\u2705"),dM=d(),hc=r("td"),sM=n("\u274C"),hM=d(),G=r("tr"),cc=r("td"),cM=n("ConvNext"),gM=d(),gc=r("td"),fM=n("\u274C"),uM=d(),fc=r("td"),vM=n("\u274C"),EM=d(),uc=r("td"),TM=n("\u2705"),_M=d(),vc=r("td"),mM=n("\u2705"),pM=d(),Ec=r("td"),DM=n("\u274C"),yM=d(),k=r("tr"),Tc=r("td"),AM=n("CTRL"),RM=d(),_c=r("td"),LM=n("\u2705"),bM=d(),mc=r("td"),SM=n("\u274C"),wM=d(),pc=r("td"),MM=n("\u2705"),PM=d(),Dc=r("td"),GM=n("\u2705"),k
M=d(),yc=r("td"),BM=n("\u274C"),CM=d(),B=r("tr"),Ac=r("td"),NM=n("Data2VecAudio"),IM=d(),Rc=r("td"),xM=n("\u274C"),OM=d(),Lc=r("td"),FM=n("\u274C"),WM=d(),bc=r("td"),YM=n("\u2705"),JM=d(),Sc=r("td"),ZM=n("\u274C"),VM=d(),wc=r("td"),KM=n("\u274C"),XM=d(),C=r("tr"),Mc=r("td"),zM=n("Data2VecText"),HM=d(),Pc=r("td"),UM=n("\u274C"),qM=d(),Gc=r("td"),QM=n("\u274C"),jM=d(),kc=r("td"),$M=n("\u2705"),eP=d(),Bc=r("td"),tP=n("\u274C"),rP=d(),Cc=r("td"),aP=n("\u274C"),lP=d(),N=r("tr"),Nc=r("td"),nP=n("DeBERTa"),oP=d(),Ic=r("td"),iP=n("\u2705"),dP=d(),xc=r("td"),sP=n("\u2705"),hP=d(),Oc=r("td"),cP=n("\u2705"),gP=d(),Fc=r("td"),fP=n("\u2705"),uP=d(),Wc=r("td"),vP=n("\u274C"),EP=d(),I=r("tr"),Yc=r("td"),TP=n("DeBERTa-v2"),_P=d(),Jc=r("td"),mP=n("\u2705"),pP=d(),Zc=r("td"),DP=n("\u274C"),yP=d(),Vc=r("td"),AP=n("\u2705"),RP=d(),Kc=r("td"),LP=n("\u2705"),bP=d(),Xc=r("td"),SP=n("\u274C"),wP=d(),x=r("tr"),zc=r("td"),MP=n("DeiT"),PP=d(),Hc=r("td"),GP=n("\u274C"),kP=d(),Uc=r("td"),BP=n("\u274C"),CP=d(),qc=r("td"),NP=n("\u2705"),IP=d(),Qc=r("td"),xP=n("\u274C"),OP=d(),jc=r("td"),FP=n("\u274C"),WP=d(),O=r("tr"),$c=r("td"),YP=n("DETR"),JP=d(),eg=r("td"),ZP=n("\u274C"),VP=d(),tg=r("td"),KP=n("\u274C"),XP=d(),rg=r("td"),zP=n("\u2705"),HP=d(),ag=r("td"),UP=n("\u274C"),qP=d(),lg=r("td"),QP=n("\u274C"),jP=d(),F=r("tr"),ng=r("td"),$P=n("DistilBERT"),eG=d(),og=r("td"),tG=n("\u2705"),rG=d(),ig=r("td"),aG=n("\u2705"),lG=d(),dg=r("td"),nG=n("\u2705"),oG=d(),sg=r("td"),iG=n("\u2705"),dG=d(),hg=r("td"),sG=n("\u2705"),hG=d(),W=r("tr"),cg=r("td"),cG=n("DPR"),gG=d(),gg=r("td"),fG=n("\u2705"),uG=d(),fg=r("td"),vG=n("\u2705"),EG=d(),ug=r("td"),TG=n("\u2705"),_G=d(),vg=r("td"),mG=n("\u2705"),pG=d(),Eg=r("td"),DG=n("\u274C"),yG=d(),Y=r("tr"),Tg=r("td"),AG=n("ELECTRA"),RG=d(),_g=r("td"),LG=n("\u2705"),bG=d(),mg=r("td"),SG=n("\u2705"),wG=d(),pg=r("td"),MG=n("\u2705"),PG=d(),Dg=r("td"),GG=n("\u2705"),kG=d(),yg=r("td"),BG=n("\u2705"),CG=d(),J=r("tr"),Ag=r("td"),NG=n("Encoder decoder"),IG=d(),Rg=r("td"),xG=n("\u274C"),OG=d(),Lg=r("td"),FG=n("\u274C"),WG=d(),bg=r("td"),YG=n("\u2705"),JG=d(),Sg=r("td"),ZG=n("\u2705"),VG=d(),wg=r("td"),KG=n("\u2705"),XG=d(),Z=r("tr"),Mg=r("td"),zG=n("FairSeq Machine-Translation"),HG=d(),Pg=r("td"),UG=n("\u2705"),qG=d(),Gg=r("td"),QG=n("\u274C"),jG=d(),kg=r("td"),$G=n("\u2705"),ek=d(),Bg=r("td"),tk=n("\u274C"),rk=d(),Cg=r("td"),ak=n("\u274C"),lk=d(),V=r("tr"),Ng=r("td"),nk=n("FlauBERT"),ok=d(),Ig=r("td"),ik=n("\u2705"),dk=d(),xg=r("td"),sk=n("\u274C"),hk=d(),Og=r("td"),ck=n("\u2705"),gk=d(),Fg=r("td"),fk=n("\u2705"),uk=d(),Wg=r("td"),vk=n("\u274C"),Ek=d(),K=r("tr"),Yg=r("td"),Tk=n("FNet"),_k=d(),Jg=r("td"),mk=n("\u2705"),pk=d(),Zg=r("td"),Dk=n("\u2705"),yk=d(),Vg=r("td"),Ak=n("\u2705"),Rk=d(),Kg=r("td"),Lk=n("\u274C"),bk=d(),Xg=r("td"),Sk=n("\u274C"),wk=d(),X=r("tr"),zg=r("td"),Mk=n("Funnel Transformer"),Pk=d(),Hg=r("td"),Gk=n("\u2705"),kk=d(),Ug=r("td"),Bk=n("\u2705"),Ck=d(),qg=r("td"),Nk=n("\u2705"),Ik=d(),Qg=r("td"),xk=n("\u2705"),Ok=d(),jg=r("td"),Fk=n("\u274C"),Wk=d(),z=r("tr"),$g=r("td"),Yk=n("GPT 
Neo"),Jk=d(),ef=r("td"),Zk=n("\u274C"),Vk=d(),tf=r("td"),Kk=n("\u274C"),Xk=d(),rf=r("td"),zk=n("\u2705"),Hk=d(),af=r("td"),Uk=n("\u274C"),qk=d(),lf=r("td"),Qk=n("\u2705"),jk=d(),H=r("tr"),nf=r("td"),$k=n("GPT-J"),eB=d(),of=r("td"),tB=n("\u274C"),rB=d(),df=r("td"),aB=n("\u274C"),lB=d(),sf=r("td"),nB=n("\u2705"),oB=d(),hf=r("td"),iB=n("\u274C"),dB=d(),cf=r("td"),sB=n("\u2705"),hB=d(),U=r("tr"),gf=r("td"),cB=n("Hubert"),gB=d(),ff=r("td"),fB=n("\u274C"),uB=d(),uf=r("td"),vB=n("\u274C"),EB=d(),vf=r("td"),TB=n("\u2705"),_B=d(),Ef=r("td"),mB=n("\u2705"),pB=d(),Tf=r("td"),DB=n("\u274C"),yB=d(),q=r("tr"),_f=r("td"),AB=n("I-BERT"),RB=d(),mf=r("td"),LB=n("\u274C"),bB=d(),pf=r("td"),SB=n("\u274C"),wB=d(),Df=r("td"),MB=n("\u2705"),PB=d(),yf=r("td"),GB=n("\u274C"),kB=d(),Af=r("td"),BB=n("\u274C"),CB=d(),Q=r("tr"),Rf=r("td"),NB=n("ImageGPT"),IB=d(),Lf=r("td"),xB=n("\u274C"),OB=d(),bf=r("td"),FB=n("\u274C"),WB=d(),Sf=r("td"),YB=n("\u2705"),JB=d(),wf=r("td"),ZB=n("\u274C"),VB=d(),Mf=r("td"),KB=n("\u274C"),XB=d(),j=r("tr"),Pf=r("td"),zB=n("LayoutLM"),HB=d(),Gf=r("td"),UB=n("\u2705"),qB=d(),kf=r("td"),QB=n("\u2705"),jB=d(),Bf=r("td"),$B=n("\u2705"),eC=d(),Cf=r("td"),tC=n("\u2705"),rC=d(),Nf=r("td"),aC=n("\u274C"),lC=d(),$=r("tr"),If=r("td"),nC=n("LayoutLMv2"),oC=d(),xf=r("td"),iC=n("\u2705"),dC=d(),Of=r("td"),sC=n("\u2705"),hC=d(),Ff=r("td"),cC=n("\u2705"),gC=d(),Wf=r("td"),fC=n("\u274C"),uC=d(),Yf=r("td"),vC=n("\u274C"),EC=d(),ee=r("tr"),Jf=r("td"),TC=n("LED"),_C=d(),Zf=r("td"),mC=n("\u2705"),pC=d(),Vf=r("td"),DC=n("\u2705"),yC=d(),Kf=r("td"),AC=n("\u2705"),RC=d(),Xf=r("td"),LC=n("\u2705"),bC=d(),zf=r("td"),SC=n("\u274C"),wC=d(),te=r("tr"),Hf=r("td"),MC=n("Longformer"),PC=d(),Uf=r("td"),GC=n("\u2705"),kC=d(),qf=r("td"),BC=n("\u2705"),CC=d(),Qf=r("td"),NC=n("\u2705"),IC=d(),jf=r("td"),xC=n("\u2705"),OC=d(),$f=r("td"),FC=n("\u274C"),WC=d(),re=r("tr"),e1=r("td"),YC=n("LUKE"),JC=d(),t1=r("td"),ZC=n("\u2705"),VC=d(),r1=r("td"),KC=n("\u274C"),XC=d(),a1=r("td"),zC=n("\u2705"),HC=d(),l1=r("td"),UC=n("\u274C"),qC=d(),n1=r("td"),QC=n("\u274C"),jC=d(),ae=r("tr"),o1=r("td"),$C=n("LXMERT"),eN=d(),i1=r("td"),tN=n("\u2705"),rN=d(),d1=r("td"),aN=n("\u2705"),lN=d(),s1=r("td"),nN=n("\u2705"),oN=d(),h1=r("td"),iN=n("\u2705"),dN=d(),c1=r("td"),sN=n("\u274C"),hN=d(),le=r("tr"),g1=r("td"),cN=n("M2M100"),gN=d(),f1=r("td"),fN=n("\u2705"),uN=d(),u1=r("td"),vN=n("\u274C"),EN=d(),v1=r("td"),TN=n("\u2705"),_N=d(),E1=r("td"),mN=n("\u274C"),pN=d(),T1=r("td"),DN=n("\u274C"),yN=d(),ne=r("tr"),_1=r("td"),AN=n("Marian"),RN=d(),m1=r("td"),LN=n("\u2705"),bN=d(),p1=r("td"),SN=n("\u274C"),wN=d(),D1=r("td"),MN=n("\u2705"),PN=d(),y1=r("td"),GN=n("\u2705"),kN=d(),A1=r("td"),BN=n("\u2705"),CN=d(),oe=r("tr"),R1=r("td"),NN=n("MaskFormer"),IN=d(),L1=r("td"),xN=n("\u274C"),ON=d(),b1=r("td"),FN=n("\u274C"),WN=d(),S1=r("td"),YN=n("\u2705"),JN=d(),w1=r("td"),ZN=n("\u274C"),VN=d(),M1=r("td"),KN=n("\u274C"),XN=d(),ie=r("tr"),P1=r("td"),zN=n("mBART"),HN=d(),G1=r("td"),UN=n("\u2705"),qN=d(),k1=r("td"),QN=n("\u2705"),jN=d(),B1=r("td"),$N=n("\u2705"),eI=d(),C1=r("td"),tI=n("\u2705"),rI=d(),N1=r("td"),aI=n("\u2705"),lI=d(),de=r("tr"),I1=r("td"),nI=n("MegatronBert"),oI=d(),x1=r("td"),iI=n("\u274C"),dI=d(),O1=r("td"),sI=n("\u274C"),hI=d(),F1=r("td"),cI=n("\u2705"),gI=d(),W1=r("td"),fI=n("\u274C"),uI=d(),Y1=r("td"),vI=n("\u274C"),EI=d(),se=r("tr"),J1=r("td"),TI=n("MobileBERT"),_I=d(),Z1=r("td"),mI=n("\u2705"),pI=d(),V1=r("td"),DI=n("\u2705"),yI=d(),K1=r("td"),AI=n("\u2705"),RI=d(),X1=r("td"),LI=n("\u2705"),bI=d(),z1=r("td"),SI=n("\u274C"),wI=d(),he=r("tr"),H1=r("td"
),MI=n("MPNet"),PI=d(),U1=r("td"),GI=n("\u2705"),kI=d(),q1=r("td"),BI=n("\u2705"),CI=d(),Q1=r("td"),NI=n("\u2705"),II=d(),j1=r("td"),xI=n("\u2705"),OI=d(),$1=r("td"),FI=n("\u274C"),WI=d(),ce=r("tr"),eu=r("td"),YI=n("mT5"),JI=d(),tu=r("td"),ZI=n("\u2705"),VI=d(),ru=r("td"),KI=n("\u2705"),XI=d(),au=r("td"),zI=n("\u2705"),HI=d(),lu=r("td"),UI=n("\u2705"),qI=d(),nu=r("td"),QI=n("\u2705"),jI=d(),ge=r("tr"),ou=r("td"),$I=n("Nystromformer"),ex=d(),iu=r("td"),tx=n("\u274C"),rx=d(),du=r("td"),ax=n("\u274C"),lx=d(),su=r("td"),nx=n("\u2705"),ox=d(),hu=r("td"),ix=n("\u274C"),dx=d(),cu=r("td"),sx=n("\u274C"),hx=d(),fe=r("tr"),gu=r("td"),cx=n("OpenAI GPT"),gx=d(),fu=r("td"),fx=n("\u2705"),ux=d(),uu=r("td"),vx=n("\u2705"),Ex=d(),vu=r("td"),Tx=n("\u2705"),_x=d(),Eu=r("td"),mx=n("\u2705"),px=d(),Tu=r("td"),Dx=n("\u274C"),yx=d(),ue=r("tr"),_u=r("td"),Ax=n("OpenAI GPT-2"),Rx=d(),mu=r("td"),Lx=n("\u2705"),bx=d(),pu=r("td"),Sx=n("\u2705"),wx=d(),Du=r("td"),Mx=n("\u2705"),Px=d(),yu=r("td"),Gx=n("\u2705"),kx=d(),Au=r("td"),Bx=n("\u2705"),Cx=d(),ve=r("tr"),Ru=r("td"),Nx=n("Pegasus"),Ix=d(),Lu=r("td"),xx=n("\u2705"),Ox=d(),bu=r("td"),Fx=n("\u2705"),Wx=d(),Su=r("td"),Yx=n("\u2705"),Jx=d(),wu=r("td"),Zx=n("\u2705"),Vx=d(),Mu=r("td"),Kx=n("\u2705"),Xx=d(),Ee=r("tr"),Pu=r("td"),zx=n("Perceiver"),Hx=d(),Gu=r("td"),Ux=n("\u2705"),qx=d(),ku=r("td"),Qx=n("\u274C"),jx=d(),Bu=r("td"),$x=n("\u2705"),eO=d(),Cu=r("td"),tO=n("\u274C"),rO=d(),Nu=r("td"),aO=n("\u274C"),lO=d(),Te=r("tr"),Iu=r("td"),nO=n("PLBart"),oO=d(),xu=r("td"),iO=n("\u2705"),dO=d(),Ou=r("td"),sO=n("\u274C"),hO=d(),Fu=r("td"),cO=n("\u2705"),gO=d(),Wu=r("td"),fO=n("\u274C"),uO=d(),Yu=r("td"),vO=n("\u274C"),EO=d(),_e=r("tr"),Ju=r("td"),TO=n("PoolFormer"),_O=d(),Zu=r("td"),mO=n("\u274C"),pO=d(),Vu=r("td"),DO=n("\u274C"),yO=d(),Ku=r("td"),AO=n("\u2705"),RO=d(),Xu=r("td"),LO=n("\u274C"),bO=d(),zu=r("td"),SO=n("\u274C"),wO=d(),me=r("tr"),Hu=r("td"),MO=n("ProphetNet"),PO=d(),Uu=r("td"),GO=n("\u2705"),kO=d(),qu=r("td"),BO=n("\u274C"),CO=d(),Qu=r("td"),NO=n("\u2705"),IO=d(),ju=r("td"),xO=n("\u274C"),OO=d(),$u=r("td"),FO=n("\u274C"),WO=d(),pe=r("tr"),ev=r("td"),YO=n("QDQBert"),JO=d(),tv=r("td"),ZO=n("\u274C"),VO=d(),rv=r("td"),KO=n("\u274C"),XO=d(),av=r("td"),zO=n("\u2705"),HO=d(),lv=r("td"),UO=n("\u274C"),qO=d(),nv=r("td"),QO=n("\u274C"),jO=d(),De=r("tr"),ov=r("td"),$O=n("RAG"),eF=d(),iv=r("td"),tF=n("\u2705"),rF=d(),dv=r("td"),aF=n("\u274C"),lF=d(),sv=r("td"),nF=n("\u2705"),oF=d(),hv=r("td"),iF=n("\u2705"),dF=d(),cv=r("td"),sF=n("\u274C"),hF=d(),ye=r("tr"),gv=r("td"),cF=n("Realm"),gF=d(),fv=r("td"),fF=n("\u2705"),uF=d(),uv=r("td"),vF=n("\u2705"),EF=d(),vv=r("td"),TF=n("\u2705"),_F=d(),Ev=r("td"),mF=n("\u274C"),pF=d(),Tv=r("td"),DF=n("\u274C"),yF=d(),Ae=r("tr"),_v=r("td"),AF=n("Reformer"),RF=d(),mv=r("td"),LF=n("\u2705"),bF=d(),pv=r("td"),SF=n("\u2705"),wF=d(),Dv=r("td"),MF=n("\u2705"),PF=d(),yv=r("td"),GF=n("\u274C"),kF=d(),Av=r("td"),BF=n("\u274C"),CF=d(),Re=r("tr"),Rv=r("td"),NF=n("RemBERT"),IF=d(),Lv=r("td"),xF=n("\u2705"),OF=d(),bv=r("td"),FF=n("\u2705"),WF=d(),Sv=r("td"),YF=n("\u2705"),JF=d(),wv=r("td"),ZF=n("\u2705"),VF=d(),Mv=r("td"),KF=n("\u274C"),XF=d(),Le=r("tr"),Pv=r("td"),zF=n("RetriBERT"),HF=d(),Gv=r("td"),UF=n("\u2705"),qF=d(),kv=r("td"),QF=n("\u2705"),jF=d(),Bv=r("td"),$F=n("\u2705"),eW=d(),Cv=r("td"),tW=n("\u274C"),rW=d(),Nv=r("td"),aW=n("\u274C"),lW=d(),be=r("tr"),Iv=r("td"),nW=n("RoBERTa"),oW=d(),xv=r("td"),iW=n("\u2705"),dW=d(),Ov=r("td"),sW=n("\u2705"),hW=d(),Fv=r("td"),cW=n("\u2705"),gW=d(),Wv=r("td"),fW=n("\u2705"),uW=d(),Yv=r("td"),vW=n("\u2705"),
EW=d(),Se=r("tr"),Jv=r("td"),TW=n("RoFormer"),_W=d(),Zv=r("td"),mW=n("\u2705"),pW=d(),Vv=r("td"),DW=n("\u2705"),yW=d(),Kv=r("td"),AW=n("\u2705"),RW=d(),Xv=r("td"),LW=n("\u2705"),bW=d(),zv=r("td"),SW=n("\u2705"),wW=d(),we=r("tr"),Hv=r("td"),MW=n("SegFormer"),PW=d(),Uv=r("td"),GW=n("\u274C"),kW=d(),qv=r("td"),BW=n("\u274C"),CW=d(),Qv=r("td"),NW=n("\u2705"),IW=d(),jv=r("td"),xW=n("\u274C"),OW=d(),$v=r("td"),FW=n("\u274C"),WW=d(),Me=r("tr"),eE=r("td"),YW=n("SEW"),JW=d(),tE=r("td"),ZW=n("\u274C"),VW=d(),rE=r("td"),KW=n("\u274C"),XW=d(),aE=r("td"),zW=n("\u2705"),HW=d(),lE=r("td"),UW=n("\u274C"),qW=d(),nE=r("td"),QW=n("\u274C"),jW=d(),Pe=r("tr"),oE=r("td"),$W=n("SEW-D"),eY=d(),iE=r("td"),tY=n("\u274C"),rY=d(),dE=r("td"),aY=n("\u274C"),lY=d(),sE=r("td"),nY=n("\u2705"),oY=d(),hE=r("td"),iY=n("\u274C"),dY=d(),cE=r("td"),sY=n("\u274C"),hY=d(),Ge=r("tr"),gE=r("td"),cY=n("Speech Encoder decoder"),gY=d(),fE=r("td"),fY=n("\u274C"),uY=d(),uE=r("td"),vY=n("\u274C"),EY=d(),vE=r("td"),TY=n("\u2705"),_Y=d(),EE=r("td"),mY=n("\u274C"),pY=d(),TE=r("td"),DY=n("\u2705"),yY=d(),ke=r("tr"),_E=r("td"),AY=n("Speech2Text"),RY=d(),mE=r("td"),LY=n("\u2705"),bY=d(),pE=r("td"),SY=n("\u274C"),wY=d(),DE=r("td"),MY=n("\u2705"),PY=d(),yE=r("td"),GY=n("\u2705"),kY=d(),AE=r("td"),BY=n("\u274C"),CY=d(),Be=r("tr"),RE=r("td"),NY=n("Speech2Text2"),IY=d(),LE=r("td"),xY=n("\u2705"),OY=d(),bE=r("td"),FY=n("\u274C"),WY=d(),SE=r("td"),YY=n("\u274C"),JY=d(),wE=r("td"),ZY=n("\u274C"),VY=d(),ME=r("td"),KY=n("\u274C"),XY=d(),Ce=r("tr"),PE=r("td"),zY=n("Splinter"),HY=d(),GE=r("td"),UY=n("\u2705"),qY=d(),kE=r("td"),QY=n("\u2705"),jY=d(),BE=r("td"),$Y=n("\u2705"),eJ=d(),CE=r("td"),tJ=n("\u274C"),rJ=d(),NE=r("td"),aJ=n("\u274C"),lJ=d(),Ne=r("tr"),IE=r("td"),nJ=n("SqueezeBERT"),oJ=d(),xE=r("td"),iJ=n("\u2705"),dJ=d(),OE=r("td"),sJ=n("\u2705"),hJ=d(),FE=r("td"),cJ=n("\u2705"),gJ=d(),WE=r("td"),fJ=n("\u274C"),uJ=d(),YE=r("td"),vJ=n("\u274C"),EJ=d(),Ie=r("tr"),JE=r("td"),TJ=n("Swin"),_J=d(),ZE=r("td"),mJ=n("\u274C"),pJ=d(),VE=r("td"),DJ=n("\u274C"),yJ=d(),KE=r("td"),AJ=n("\u2705"),RJ=d(),XE=r("td"),LJ=n("\u274C"),bJ=d(),zE=r("td"),SJ=n("\u274C"),wJ=d(),xe=r("tr"),HE=r("td"),MJ=n("T5"),PJ=d(),UE=r("td"),GJ=n("\u2705"),kJ=d(),qE=r("td"),BJ=n("\u2705"),CJ=d(),QE=r("td"),NJ=n("\u2705"),IJ=d(),jE=r("td"),xJ=n("\u2705"),OJ=d(),$E=r("td"),FJ=n("\u2705"),WJ=d(),Oe=r("tr"),e2=r("td"),YJ=n("TAPAS"),JJ=d(),t2=r("td"),ZJ=n("\u2705"),VJ=d(),r2=r("td"),KJ=n("\u274C"),XJ=d(),a2=r("td"),zJ=n("\u2705"),HJ=d(),l2=r("td"),UJ=n("\u2705"),qJ=d(),n2=r("td"),QJ=n("\u274C"),jJ=d(),Fe=r("tr"),o2=r("td"),$J=n("Transformer-XL"),eZ=d(),i2=r("td"),tZ=n("\u2705"),rZ=d(),d2=r("td"),aZ=n("\u274C"),lZ=d(),s2=r("td"),nZ=n("\u2705"),oZ=d(),h2=r("td"),iZ=n("\u2705"),dZ=d(),c2=r("td"),sZ=n("\u274C"),hZ=d(),We=r("tr"),g2=r("td"),cZ=n("TrOCR"),gZ=d(),f2=r("td"),fZ=n("\u274C"),uZ=d(),u2=r("td"),vZ=n("\u274C"),EZ=d(),v2=r("td"),TZ=n("\u2705"),_Z=d(),E2=r("td"),mZ=n("\u274C"),pZ=d(),T2=r("td"),DZ=n("\u274C"),yZ=d(),Ye=r("tr"),_2=r("td"),AZ=n("UniSpeech"),RZ=d(),m2=r("td"),LZ=n("\u274C"),bZ=d(),p2=r("td"),SZ=n("\u274C"),wZ=d(),D2=r("td"),MZ=n("\u2705"),PZ=d(),y2=r("td"),GZ=n("\u274C"),kZ=d(),A2=r("td"),BZ=n("\u274C"),CZ=d(),Je=r("tr"),R2=r("td"),NZ=n("UniSpeechSat"),IZ=d(),L2=r("td"),xZ=n("\u274C"),OZ=d(),b2=r("td"),FZ=n("\u274C"),WZ=d(),S2=r("td"),YZ=n("\u2705"),JZ=d(),w2=r("td"),ZZ=n("\u274C"),VZ=d(),M2=r("td"),KZ=n("\u274C"),XZ=d(),Ze=r("tr"),P2=r("td"),zZ=n("ViLT"),HZ=d(),G2=r("td"),UZ=n("\u274C"),qZ=d(),k2=r("td"),QZ=n("\u274C"),jZ=d(),B2=r("td"),$Z=n("\u2705"),eV=d(),C2=r("td"),tV=n("\u
274C"),rV=d(),N2=r("td"),aV=n("\u274C"),lV=d(),Ve=r("tr"),I2=r("td"),nV=n("Vision Encoder decoder"),oV=d(),x2=r("td"),iV=n("\u274C"),dV=d(),O2=r("td"),sV=n("\u274C"),hV=d(),F2=r("td"),cV=n("\u2705"),gV=d(),W2=r("td"),fV=n("\u2705"),uV=d(),Y2=r("td"),vV=n("\u2705"),EV=d(),Ke=r("tr"),J2=r("td"),TV=n("VisionTextDualEncoder"),_V=d(),Z2=r("td"),mV=n("\u274C"),pV=d(),V2=r("td"),DV=n("\u274C"),yV=d(),K2=r("td"),AV=n("\u2705"),RV=d(),X2=r("td"),LV=n("\u274C"),bV=d(),z2=r("td"),SV=n("\u2705"),wV=d(),Xe=r("tr"),H2=r("td"),MV=n("VisualBert"),PV=d(),U2=r("td"),GV=n("\u274C"),kV=d(),q2=r("td"),BV=n("\u274C"),CV=d(),Q2=r("td"),NV=n("\u2705"),IV=d(),j2=r("td"),xV=n("\u274C"),OV=d(),$2=r("td"),FV=n("\u274C"),WV=d(),ze=r("tr"),eT=r("td"),YV=n("ViT"),JV=d(),tT=r("td"),ZV=n("\u274C"),VV=d(),rT=r("td"),KV=n("\u274C"),XV=d(),aT=r("td"),zV=n("\u2705"),HV=d(),lT=r("td"),UV=n("\u2705"),qV=d(),nT=r("td"),QV=n("\u2705"),jV=d(),He=r("tr"),oT=r("td"),$V=n("ViTMAE"),eK=d(),iT=r("td"),tK=n("\u274C"),rK=d(),dT=r("td"),aK=n("\u274C"),lK=d(),sT=r("td"),nK=n("\u2705"),oK=d(),hT=r("td"),iK=n("\u274C"),dK=d(),cT=r("td"),sK=n("\u274C"),hK=d(),Ue=r("tr"),gT=r("td"),cK=n("Wav2Vec2"),gK=d(),fT=r("td"),fK=n("\u2705"),uK=d(),uT=r("td"),vK=n("\u274C"),EK=d(),vT=r("td"),TK=n("\u2705"),_K=d(),ET=r("td"),mK=n("\u2705"),pK=d(),TT=r("td"),DK=n("\u2705"),yK=d(),qe=r("tr"),_T=r("td"),AK=n("WavLM"),RK=d(),mT=r("td"),LK=n("\u274C"),bK=d(),pT=r("td"),SK=n("\u274C"),wK=d(),DT=r("td"),MK=n("\u2705"),PK=d(),yT=r("td"),GK=n("\u274C"),kK=d(),AT=r("td"),BK=n("\u274C"),CK=d(),Qe=r("tr"),RT=r("td"),NK=n("XGLM"),IK=d(),LT=r("td"),xK=n("\u2705"),OK=d(),bT=r("td"),FK=n("\u2705"),WK=d(),ST=r("td"),YK=n("\u2705"),JK=d(),wT=r("td"),ZK=n("\u274C"),VK=d(),MT=r("td"),KK=n("\u2705"),XK=d(),je=r("tr"),PT=r("td"),zK=n("XLM"),HK=d(),GT=r("td"),UK=n("\u2705"),qK=d(),kT=r("td"),QK=n("\u274C"),jK=d(),BT=r("td"),$K=n("\u2705"),eX=d(),CT=r("td"),tX=n("\u2705"),rX=d(),NT=r("td"),aX=n("\u274C"),lX=d(),$e=r("tr"),IT=r("td"),nX=n("XLM-RoBERTa"),oX=d(),xT=r("td"),iX=n("\u2705"),dX=d(),OT=r("td"),sX=n("\u2705"),hX=d(),FT=r("td"),cX=n("\u2705"),gX=d(),WT=r("td"),fX=n("\u2705"),uX=d(),YT=r("td"),vX=n("\u2705"),EX=d(),et=r("tr"),JT=r("td"),TX=n("XLM-RoBERTa-XL"),_X=d(),ZT=r("td"),mX=n("\u274C"),pX=d(),VT=r("td"),DX=n("\u274C"),yX=d(),KT=r("td"),AX=n("\u2705"),RX=d(),XT=r("td"),LX=n("\u274C"),bX=d(),zT=r("td"),SX=n("\u274C"),wX=d(),tt=r("tr"),HT=r("td"),MX=n("XLMProphetNet"),PX=d(),UT=r("td"),GX=n("\u2705"),kX=d(),qT=r("td"),BX=n("\u274C"),CX=d(),QT=r("td"),NX=n("\u2705"),IX=d(),jT=r("td"),xX=n("\u274C"),OX=d(),$T=r("td"),FX=n("\u274C"),WX=d(),rt=r("tr"),e_=r("td"),YX=n("XLNet"),JX=d(),t_=r("td"),ZX=n("\u2705"),VX=d(),r_=r("td"),KX=n("\u2705"),XX=d(),a_=r("td"),zX=n("\u2705"),HX=d(),l_=r("td"),UX=n("\u2705"),qX=d(),n_=r("td"),QX=n("\u274C"),jX=d(),at=r("tr"),o_=r("td"),$X=n("YOSO"),ez=d(),i_=r("td"),tz=n("\u274C"),rz=d(),d_=r("td"),az=n("\u274C"),lz=d(),s_=r("td"),nz=n("\u2705"),oz=d(),h_=r("td"),iz=n("\u274C"),dz=d(),c_=r("td"),sz=n("\u274C"),this.h()},l(u){const v=Ihe('[data-svelte="svelte-1phssyn"]',document.head);$r=a(v,"META",{name:!0,content:!0}),v.forEach(t),Mi=s(u),ot=a(u,"H1",{class:!0});var e0=l(ot);Qr=a(e0,"A",{id:!0,class:!0,href:!0});var pz=l(Qr);f_=a(pz,"SPAN",{});var Dz=l(f_);Pm(hn.$$.fragment,Dz),Dz.forEach(t),pz.forEach(t),s0=s(e0),u_=a(e0,"SPAN",{});var yz=l(u_);h0=o(yz,"\u{1F917} Transformers"),yz.forEach(t),e0.forEach(t),Im=s(u),Pi=a(u,"P",{});var Az=l(Pi);c0=o(Az,"State-of-the-art Machine Learning for PyTorch, TensorFlow and 
JAX."),Az.forEach(t),xm=s(u),Gi=a(u,"P",{});var Rz=l(Gi);g0=o(Rz,"\u{1F917} Transformers provides APIs to easily download and train state-of-the-art pretrained models. Using pretrained models can reduce your compute costs, carbon footprint, and save you time from training a model from scratch. The models can be used across different modalities such as:"),Rz.forEach(t),Om=s(u),it=a(u,"UL",{});var dn=l(it);v_=a(dn,"LI",{});var Lz=l(v_);f0=o(Lz,"\u{1F4DD} Text: text classification, information extraction, question answering, summarization, translation, and text generation in over 100 languages."),Lz.forEach(t),u0=s(dn),E_=a(dn,"LI",{});var bz=l(E_);v0=o(bz,"\u{1F5BC}\uFE0F Images: image classification, object detection, and segmentation."),bz.forEach(t),E0=s(dn),T_=a(dn,"LI",{});var Sz=l(T_);T0=o(Sz,"\u{1F5E3}\uFE0F Audio: speech recognition and audio classification."),Sz.forEach(t),_0=s(dn),__=a(dn,"LI",{});var wz=l(__);m0=o(wz,"\u{1F419} Multimodal: table question answering, optical character recognition, information extraction from scanned documents, video classification, and visual question answering."),wz.forEach(t),dn.forEach(t),Fm=s(u),dt=a(u,"P",{});var sn=l(dt);p0=o(sn,"Our library supports seamless integration between three of the most popular deep learning libraries: "),cn=a(sn,"A",{href:!0,rel:!0});var Mz=l(cn);D0=o(Mz,"PyTorch"),Mz.forEach(t),y0=o(sn,", "),gn=a(sn,"A",{href:!0,rel:!0});var Pz=l(gn);A0=o(Pz,"TensorFlow"),Pz.forEach(t),R0=o(sn," and "),fn=a(sn,"A",{href:!0,rel:!0});var Gz=l(fn);L0=o(Gz,"JAX"),Gz.forEach(t),b0=o(sn,". Train your model in three lines of code in one framework, and load it for inference with another."),sn.forEach(t),Wm=s(u),ki=a(u,"P",{});var kz=l(ki);S0=o(kz,"Each \u{1F917} Transformers architecture is defined in a standalone Python module so they can be easily customized for research and experiments."),kz.forEach(t),Ym=s(u),da=a(u,"H2",{class:!0});var t0=l(da);va=a(t0,"A",{id:!0,class:!0,href:!0});var Bz=l(va);m_=a(Bz,"SPAN",{});var Cz=l(m_);Pm(un.$$.fragment,Cz),Cz.forEach(t),Bz.forEach(t),w0=s(t0),p_=a(t0,"SPAN",{});var Nz=l(p_);M0=o(Nz,"If you are looking for custom support from the Hugging Face team"),Nz.forEach(t),t0.forEach(t),Jm=s(u),sa=a(u,"A",{target:!0,href:!0});var hz=l(sa);ea=a(hz,"IMG",{alt:!0,src:!0,style:!0}),P0=s(hz),hz.forEach(t),Zm=a(u,"BR",{}),Vm=s(u),ha=a(u,"H2",{class:!0});var r0=l(ha);Ea=a(r0,"A",{id:!0,class:!0,href:!0});var Iz=l(Ea);D_=a(Iz,"SPAN",{});var xz=l(D_);Pm(vn.$$.fragment,xz),xz.forEach(t),Iz.forEach(t),G0=s(r0),y_=a(r0,"SPAN",{});var Oz=l(y_);k0=o(Oz,"Contents"),Oz.forEach(t),r0.forEach(t),Km=s(u),Bi=a(u,"P",{});var Fz=l(Bi);B0=o(Fz,"The documentation is organized in five parts:"),Fz.forEach(t),Xm=s(u),lt=a(u,"UL",{});var ia=l(lt);A_=a(ia,"LI",{});var Wz=l(A_);Ci=a(Wz,"P",{});var cz=l(Ci);R_=a(cz,"STRONG",{});var Yz=l(R_);C0=o(Yz,"GET STARTED"),Yz.forEach(t),N0=o(cz,` contains a quick tour, the installation instructions and some useful information about our philosophy and a glossary.`),cz.forEach(t),Wz.forEach(t),I0=s(ia),L_=a(ia,"LI",{});var Jz=l(L_);Ni=a(Jz,"P",{});var gz=l(Ni);b_=a(gz,"STRONG",{});var Zz=l(b_);x0=o(Zz,"USING \u{1F917} TRANSFORMERS"),Zz.forEach(t),O0=o(gz," contains general tutorials on how to use the library."),gz.forEach(t),Jz.forEach(t),F0=s(ia),S_=a(ia,"LI",{});var Vz=l(S_);Ii=a(Vz,"P",{});var fz=l(Ii);w_=a(fz,"STRONG",{});var Kz=l(w_);W0=o(Kz,"ADVANCED GUIDES"),Kz.forEach(t),Y0=o(fz," contains more advanced guides that are more specific to a given script or part of the 
library."),fz.forEach(t),Vz.forEach(t),J0=s(ia),M_=a(ia,"LI",{});var Xz=l(M_);xi=a(Xz,"P",{});var uz=l(xi);P_=a(uz,"STRONG",{});var zz=l(P_);Z0=o(zz,"RESEARCH"),zz.forEach(t),V0=o(uz,` focuses on tutorials that have less to do with how to use the library but more about general research in transformers model`),uz.forEach(t),Xz.forEach(t),K0=s(ia),En=a(ia,"LI",{});var a0=l(En);Oi=a(a0,"P",{});var vz=l(Oi);G_=a(vz,"STRONG",{});var Hz=l(G_);X0=o(Hz,"API"),Hz.forEach(t),z0=o(vz," contains the documentation of each public class and function, grouped in:"),vz.forEach(t),H0=s(a0),ca=a(a0,"UL",{});var g_=l(ca);Fi=a(g_,"LI",{});var Ez=l(Fi);k_=a(Ez,"STRONG",{});var Uz=l(k_);U0=o(Uz,"MAIN CLASSES"),Uz.forEach(t),q0=o(Ez," for the main classes exposing the important APIs of the library."),Ez.forEach(t),Q0=s(g_),Wi=a(g_,"LI",{});var Tz=l(Wi);B_=a(Tz,"STRONG",{});var qz=l(B_);j0=o(qz,"MODELS"),qz.forEach(t),$0=o(Tz," for the classes and functions related to each model implemented in the library."),Tz.forEach(t),ep=s(g_),Yi=a(g_,"LI",{});var _z=l(Yi);C_=a(_z,"STRONG",{});var Qz=l(C_);tp=o(Qz,"INTERNAL HELPERS"),Qz.forEach(t),rp=o(_z," for the classes and functions we use internally."),_z.forEach(t),g_.forEach(t),a0.forEach(t),ia.forEach(t),zm=s(u),Ji=a(u,"P",{});var jz=l(Ji);ap=o(jz,`The library currently contains Jax, PyTorch and Tensorflow implementations, pretrained model weights, usage scripts and conversion utilities for the following models.`),jz.forEach(t),Hm=s(u),ga=a(u,"H3",{class:!0});var l0=l(ga);Ta=a(l0,"A",{id:!0,class:!0,href:!0});var $z=l(Ta);N_=a($z,"SPAN",{});var eH=l(N_);Pm(Tn.$$.fragment,eH),eH.forEach(t),$z.forEach(t),lp=s(l0),I_=a(l0,"SPAN",{});var tH=l(I_);np=o(tH,"Supported models"),tH.forEach(t),l0.forEach(t),Um=s(u),h=a(u,"OL",{});var c=l(h);_a=a(c,"LI",{});var J4=l(_a);x_=a(J4,"STRONG",{});var rH=l(x_);Zi=a(rH,"A",{href:!0});var aH=l(Zi);op=o(aH,"ALBERT"),aH.forEach(t),rH.forEach(t),ip=o(J4," (from Google Research and the Toyota Technological Institute at Chicago) released with the paper "),_n=a(J4,"A",{href:!0,rel:!0});var lH=l(_n);dp=o(lH,"ALBERT: A Lite BERT for Self-supervised Learning of Language Representations"),lH.forEach(t),sp=o(J4,", by Zhenzhong Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."),J4.forEach(t),hp=s(c),ma=a(c,"LI",{});var Z4=l(ma);O_=a(Z4,"STRONG",{});var nH=l(O_);Vi=a(nH,"A",{href:!0});var oH=l(Vi);cp=o(oH,"BART"),oH.forEach(t),nH.forEach(t),gp=o(Z4," (from Facebook) released with the paper "),mn=a(Z4,"A",{href:!0,rel:!0});var iH=l(mn);fp=o(iH,"BART: Denoising Sequence-to-Sequence Pre-training for Natural Language Generation, Translation, and Comprehension"),iH.forEach(t),up=o(Z4," by Mike Lewis, Yinhan Liu, Naman Goyal, Marjan Ghazvininejad, Abdelrahman Mohamed, Omer Levy, Ves Stoyanov and Luke Zettlemoyer."),Z4.forEach(t),vp=s(c),pa=a(c,"LI",{});var V4=l(pa);F_=a(V4,"STRONG",{});var dH=l(F_);Ki=a(dH,"A",{href:!0});var sH=l(Ki);Ep=o(sH,"BARThez"),sH.forEach(t),dH.forEach(t),Tp=o(V4," (from \xC9cole polytechnique) released with the paper "),pn=a(V4,"A",{href:!0,rel:!0});var hH=l(pn);_p=o(hH,"BARThez: a Skilled Pretrained French Sequence-to-Sequence Model"),hH.forEach(t),mp=o(V4," by Moussa Kamal Eddine, Antoine J.-P. 
Tixier, Michalis Vazirgiannis."),V4.forEach(t),pp=s(c),Da=a(c,"LI",{});var K4=l(Da);W_=a(K4,"STRONG",{});var cH=l(W_);Xi=a(cH,"A",{href:!0});var gH=l(Xi);Dp=o(gH,"BARTpho"),gH.forEach(t),cH.forEach(t),yp=o(K4," (from VinAI Research) released with the paper "),Dn=a(K4,"A",{href:!0,rel:!0});var fH=l(Dn);Ap=o(fH,"BARTpho: Pre-trained Sequence-to-Sequence Models for Vietnamese"),fH.forEach(t),Rp=o(K4," by Nguyen Luong Tran, Duong Minh Le and Dat Quoc Nguyen."),K4.forEach(t),Lp=s(c),ya=a(c,"LI",{});var X4=l(ya);Y_=a(X4,"STRONG",{});var uH=l(Y_);zi=a(uH,"A",{href:!0});var vH=l(zi);bp=o(vH,"BEiT"),vH.forEach(t),uH.forEach(t),Sp=o(X4," (from Microsoft) released with the paper "),yn=a(X4,"A",{href:!0,rel:!0});var EH=l(yn);wp=o(EH,"BEiT: BERT Pre-Training of Image Transformers"),EH.forEach(t),Mp=o(X4," by Hangbo Bao, Li Dong, Furu Wei."),X4.forEach(t),Pp=s(c),Aa=a(c,"LI",{});var z4=l(Aa);J_=a(z4,"STRONG",{});var TH=l(J_);Hi=a(TH,"A",{href:!0});var _H=l(Hi);Gp=o(_H,"BERT"),_H.forEach(t),TH.forEach(t),kp=o(z4," (from Google) released with the paper "),An=a(z4,"A",{href:!0,rel:!0});var mH=l(An);Bp=o(mH,"BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding"),mH.forEach(t),Cp=o(z4," by Jacob Devlin, Ming-Wei Chang, Kenton Lee and Kristina Toutanova."),z4.forEach(t),Np=s(c),Ra=a(c,"LI",{});var H4=l(Ra);Z_=a(H4,"STRONG",{});var pH=l(Z_);Ui=a(pH,"A",{href:!0});var DH=l(Ui);Ip=o(DH,"BERTweet"),DH.forEach(t),pH.forEach(t),xp=o(H4," (from VinAI Research) released with the paper "),Rn=a(H4,"A",{href:!0,rel:!0});var yH=l(Rn);Op=o(yH,"BERTweet: A pre-trained language model for English Tweets"),yH.forEach(t),Fp=o(H4," by Dat Quoc Nguyen, Thanh Vu and Anh Tuan Nguyen."),H4.forEach(t),Wp=s(c),La=a(c,"LI",{});var U4=l(La);V_=a(U4,"STRONG",{});var AH=l(V_);qi=a(AH,"A",{href:!0});var RH=l(qi);Yp=o(RH,"BERT For Sequence Generation"),RH.forEach(t),AH.forEach(t),Jp=o(U4," (from Google) released with the paper "),Ln=a(U4,"A",{href:!0,rel:!0});var LH=l(Ln);Zp=o(LH,"Leveraging Pre-trained Checkpoints for Sequence Generation Tasks"),LH.forEach(t),Vp=o(U4," by Sascha Rothe, Shashi Narayan, Aliaksei Severyn."),U4.forEach(t),Kp=s(c),ba=a(c,"LI",{});var q4=l(ba);K_=a(q4,"STRONG",{});var bH=l(K_);Qi=a(bH,"A",{href:!0});var SH=l(Qi);Xp=o(SH,"BigBird-RoBERTa"),SH.forEach(t),bH.forEach(t),zp=o(q4," (from Google Research) released with the paper "),bn=a(q4,"A",{href:!0,rel:!0});var wH=l(bn);Hp=o(wH,"Big Bird: Transformers for Longer Sequences"),wH.forEach(t),Up=o(q4," by Manzil Zaheer, Guru Guruganesh, Avinava Dubey, Joshua Ainslie, Chris Alberti, Santiago Ontanon, Philip Pham, Anirudh Ravula, Qifan Wang, Li Yang, Amr Ahmed."),q4.forEach(t),qp=s(c),Sa=a(c,"LI",{});var Q4=l(Sa);X_=a(Q4,"STRONG",{});var MH=l(X_);ji=a(MH,"A",{href:!0});var PH=l(ji);Qp=o(PH,"BigBird-Pegasus"),PH.forEach(t),MH.forEach(t),jp=o(Q4," (from Google Research) released with the paper "),Sn=a(Q4,"A",{href:!0,rel:!0});var GH=l(Sn);$p=o(GH,"Big Bird: Transformers for Longer Sequences"),GH.forEach(t),e6=o(Q4," by Manzil Zaheer, Guru Guruganesh, Avinava Dubey, Joshua Ainslie, Chris Alberti, Santiago Ontanon, Philip Pham, Anirudh Ravula, Qifan Wang, Li Yang, Amr Ahmed."),Q4.forEach(t),t6=s(c),wa=a(c,"LI",{});var j4=l(wa);z_=a(j4,"STRONG",{});var kH=l(z_);$i=a(kH,"A",{href:!0});var BH=l($i);r6=o(BH,"Blenderbot"),BH.forEach(t),kH.forEach(t),a6=o(j4," (from Facebook) released with the paper "),wn=a(j4,"A",{href:!0,rel:!0});var CH=l(wn);l6=o(CH,"Recipes for building an open-domain chatbot"),CH.forEach(t),n6=o(j4," by Stephen Roller, Emily Dinan, 
Naman Goyal, Da Ju, Mary Williamson, Yinhan Liu, Jing Xu, Myle Ott, Kurt Shuster, Eric M. Smith, Y-Lan Boureau, Jason Weston."),j4.forEach(t),o6=s(c),Ma=a(c,"LI",{});var $4=l(Ma);H_=a($4,"STRONG",{});var NH=l(H_);ed=a(NH,"A",{href:!0});var IH=l(ed);i6=o(IH,"BlenderbotSmall"),IH.forEach(t),NH.forEach(t),d6=o($4," (from Facebook) released with the paper "),Mn=a($4,"A",{href:!0,rel:!0});var xH=l(Mn);s6=o(xH,"Recipes for building an open-domain chatbot"),xH.forEach(t),h6=o($4," by Stephen Roller, Emily Dinan, Naman Goyal, Da Ju, Mary Williamson, Yinhan Liu, Jing Xu, Myle Ott, Kurt Shuster, Eric M. Smith, Y-Lan Boureau, Jason Weston."),$4.forEach(t),c6=s(c),Pa=a(c,"LI",{});var e5=l(Pa);U_=a(e5,"STRONG",{});var OH=l(U_);td=a(OH,"A",{href:!0});var FH=l(td);g6=o(FH,"BORT"),FH.forEach(t),OH.forEach(t),f6=o(e5," (from Alexa) released with the paper "),Pn=a(e5,"A",{href:!0,rel:!0});var WH=l(Pn);u6=o(WH,"Optimal Subarchitecture Extraction For BERT"),WH.forEach(t),v6=o(e5," by Adrian de Wynter and Daniel J. Perry."),e5.forEach(t),E6=s(c),Ga=a(c,"LI",{});var t5=l(Ga);q_=a(t5,"STRONG",{});var YH=l(q_);rd=a(YH,"A",{href:!0});var JH=l(rd);T6=o(JH,"ByT5"),JH.forEach(t),YH.forEach(t),_6=o(t5," (from Google Research) released with the paper "),Gn=a(t5,"A",{href:!0,rel:!0});var ZH=l(Gn);m6=o(ZH,"ByT5: Towards a token-free future with pre-trained byte-to-byte models"),ZH.forEach(t),p6=o(t5," by Linting Xue, Aditya Barua, Noah Constant, Rami Al-Rfou, Sharan Narang, Mihir Kale, Adam Roberts, Colin Raffel."),t5.forEach(t),D6=s(c),ta=a(c,"LI",{});var Ai=l(ta);Q_=a(Ai,"STRONG",{});var VH=l(Q_);ad=a(VH,"A",{href:!0});var KH=l(ad);y6=o(KH,"CamemBERT"),KH.forEach(t),VH.forEach(t),A6=o(Ai," (from Inria/Facebook/Sorbonne) released with the paper "),kn=a(Ai,"A",{href:!0,rel:!0});var XH=l(kn);R6=o(XH,"CamemBERT: a Tasty French Language Model"),XH.forEach(t),L6=o(Ai," by Louis Martin"),j_=a(Ai,"EM",{});var zH=l(j_);b6=o(zH,", Benjamin Muller"),zH.forEach(t),S6=o(Ai,", Pedro Javier Ortiz Su\xE1rez*, Yoann Dupont, Laurent Romary, \xC9ric Villemonte de la Clergerie, Djam\xE9 Seddah and Beno\xEEt Sagot."),Ai.forEach(t),w6=s(c),ka=a(c,"LI",{});var r5=l(ka);$_=a(r5,"STRONG",{});var HH=l($_);ld=a(HH,"A",{href:!0});var UH=l(ld);M6=o(UH,"CANINE"),UH.forEach(t),HH.forEach(t),P6=o(r5," (from Google Research) released with the paper "),Bn=a(r5,"A",{href:!0,rel:!0});var qH=l(Bn);G6=o(qH,"CANINE: Pre-training an Efficient Tokenization-Free Encoder for Language Representation"),qH.forEach(t),k6=o(r5," by Jonathan H. 
Clark, Dan Garrette, Iulia Turc, John Wieting."),r5.forEach(t),B6=s(c),Ba=a(c,"LI",{});var a5=l(Ba);e3=a(a5,"STRONG",{});var QH=l(e3);nd=a(QH,"A",{href:!0});var jH=l(nd);C6=o(jH,"ConvNeXT"),jH.forEach(t),QH.forEach(t),N6=o(a5," (from Facebook AI) released with the paper "),Cn=a(a5,"A",{href:!0,rel:!0});var $H=l(Cn);I6=o($H,"A ConvNet for the 2020s"),$H.forEach(t),x6=o(a5," by Zhuang Liu, Hanzi Mao, Chao-Yuan Wu, Christoph Feichtenhofer, Trevor Darrell, Saining Xie."),a5.forEach(t),O6=s(c),Ca=a(c,"LI",{});var l5=l(Ca);t3=a(l5,"STRONG",{});var eU=l(t3);od=a(eU,"A",{href:!0});var tU=l(od);F6=o(tU,"CLIP"),tU.forEach(t),eU.forEach(t),W6=o(l5," (from OpenAI) released with the paper "),Nn=a(l5,"A",{href:!0,rel:!0});var rU=l(Nn);Y6=o(rU,"Learning Transferable Visual Models From Natural Language Supervision"),rU.forEach(t),J6=o(l5," by Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, Ilya Sutskever."),l5.forEach(t),Z6=s(c),Na=a(c,"LI",{});var n5=l(Na);r3=a(n5,"STRONG",{});var aU=l(r3);id=a(aU,"A",{href:!0});var lU=l(id);V6=o(lU,"ConvBERT"),lU.forEach(t),aU.forEach(t),K6=o(n5," (from YituTech) released with the paper "),In=a(n5,"A",{href:!0,rel:!0});var nU=l(In);X6=o(nU,"ConvBERT: Improving BERT with Span-based Dynamic Convolution"),nU.forEach(t),z6=o(n5," by Zihang Jiang, Weihao Yu, Daquan Zhou, Yunpeng Chen, Jiashi Feng, Shuicheng Yan."),n5.forEach(t),H6=s(c),Ia=a(c,"LI",{});var o5=l(Ia);a3=a(o5,"STRONG",{});var oU=l(a3);dd=a(oU,"A",{href:!0});var iU=l(dd);U6=o(iU,"CPM"),iU.forEach(t),oU.forEach(t),q6=o(o5," (from Tsinghua University) released with the paper "),xn=a(o5,"A",{href:!0,rel:!0});var dU=l(xn);Q6=o(dU,"CPM: A Large-scale Generative Chinese Pre-trained Language Model"),dU.forEach(t),j6=o(o5," by Zhengyan Zhang, Xu Han, Hao Zhou, Pei Ke, Yuxian Gu, Deming Ye, Yujia Qin, Yusheng Su, Haozhe Ji, Jian Guan, Fanchao Qi, Xiaozhi Wang, Yanan Zheng, Guoyang Zeng, Huanqi Cao, Shengqi Chen, Daixuan Li, Zhenbo Sun, Zhiyuan Liu, Minlie Huang, Wentao Han, Jie Tang, Juanzi Li, Xiaoyan Zhu, Maosong Sun."),o5.forEach(t),$6=s(c),ra=a(c,"LI",{});var Ri=l(ra);l3=a(Ri,"STRONG",{});var sU=l(l3);sd=a(sU,"A",{href:!0});var hU=l(sd);e7=o(hU,"CTRL"),hU.forEach(t),sU.forEach(t),t7=o(Ri," (from Salesforce) released with the paper "),On=a(Ri,"A",{href:!0,rel:!0});var cU=l(On);r7=o(cU,"CTRL: A Conditional Transformer Language Model for Controllable Generation"),cU.forEach(t),a7=o(Ri," by Nitish Shirish Keskar"),n3=a(Ri,"EM",{});var gU=l(n3);l7=o(gU,", Bryan McCann"),gU.forEach(t),n7=o(Ri,", Lav R. 
Varshney, Caiming Xiong and Richard Socher."),Ri.forEach(t),o7=s(c),xa=a(c,"LI",{});var i5=l(xa);o3=a(i5,"STRONG",{});var fU=l(o3);hd=a(fU,"A",{href:!0});var uU=l(hd);i7=o(uU,"Data2Vec"),uU.forEach(t),fU.forEach(t),d7=o(i5," (from Facebook) released with the paper "),Fn=a(i5,"A",{href:!0,rel:!0});var vU=l(Fn);s7=o(vU,"Data2Vec: A General Framework for Self-supervised Learning in Speech, Vision and Language"),vU.forEach(t),h7=o(i5," by Alexei Baevski, Wei-Ning Hsu, Qiantong Xu, Arun Babu, Jiatao Gu, Michael Auli."),i5.forEach(t),c7=s(c),Oa=a(c,"LI",{});var d5=l(Oa);i3=a(d5,"STRONG",{});var EU=l(i3);cd=a(EU,"A",{href:!0});var TU=l(cd);g7=o(TU,"DeBERTa"),TU.forEach(t),EU.forEach(t),f7=o(d5," (from Microsoft) released with the paper "),Wn=a(d5,"A",{href:!0,rel:!0});var _U=l(Wn);u7=o(_U,"DeBERTa: Decoding-enhanced BERT with Disentangled Attention"),_U.forEach(t),v7=o(d5," by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen."),d5.forEach(t),E7=s(c),Fa=a(c,"LI",{});var s5=l(Fa);d3=a(s5,"STRONG",{});var mU=l(d3);gd=a(mU,"A",{href:!0});var pU=l(gd);T7=o(pU,"DeBERTa-v2"),pU.forEach(t),mU.forEach(t),_7=o(s5," (from Microsoft) released with the paper "),Yn=a(s5,"A",{href:!0,rel:!0});var DU=l(Yn);m7=o(DU,"DeBERTa: Decoding-enhanced BERT with Disentangled Attention"),DU.forEach(t),p7=o(s5," by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen."),s5.forEach(t),D7=s(c),Wa=a(c,"LI",{});var h5=l(Wa);s3=a(h5,"STRONG",{});var yU=l(s3);fd=a(yU,"A",{href:!0});var AU=l(fd);y7=o(AU,"DiT"),AU.forEach(t),yU.forEach(t),A7=o(h5," (from Microsoft Research) released with the paper "),Jn=a(h5,"A",{href:!0,rel:!0});var RU=l(Jn);R7=o(RU,"DiT: Self-supervised Pre-training for Document Image Transformer"),RU.forEach(t),L7=o(h5," by Junlong Li, Yiheng Xu, Tengchao Lv, Lei Cui, Cha Zhang, Furu Wei."),h5.forEach(t),b7=s(c),Ya=a(c,"LI",{});var c5=l(Ya);h3=a(c5,"STRONG",{});var LU=l(h3);ud=a(LU,"A",{href:!0});var bU=l(ud);S7=o(bU,"DeiT"),bU.forEach(t),LU.forEach(t),w7=o(c5," (from Facebook) released with the paper "),Zn=a(c5,"A",{href:!0,rel:!0});var SU=l(Zn);M7=o(SU,"Training data-efficient image transformers & distillation through attention"),SU.forEach(t),P7=o(c5," by Hugo Touvron, Matthieu Cord, Matthijs Douze, Francisco Massa, Alexandre Sablayrolles, Herv\xE9 J\xE9gou."),c5.forEach(t),G7=s(c),Ja=a(c,"LI",{});var g5=l(Ja);c3=a(g5,"STRONG",{});var wU=l(c3);vd=a(wU,"A",{href:!0});var MU=l(vd);k7=o(MU,"DETR"),MU.forEach(t),wU.forEach(t),B7=o(g5," (from Facebook) released with the paper "),Vn=a(g5,"A",{href:!0,rel:!0});var PU=l(Vn);C7=o(PU,"End-to-End Object Detection with Transformers"),PU.forEach(t),N7=o(g5," by Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, Sergey Zagoruyko."),g5.forEach(t),I7=s(c),Za=a(c,"LI",{});var f5=l(Za);g3=a(f5,"STRONG",{});var GU=l(g3);Ed=a(GU,"A",{href:!0});var kU=l(Ed);x7=o(kU,"DialoGPT"),kU.forEach(t),GU.forEach(t),O7=o(f5," (from Microsoft Research) released with the paper "),Kn=a(f5,"A",{href:!0,rel:!0});var BU=l(Kn);F7=o(BU,"DialoGPT: Large-Scale Generative Pre-training for Conversational Response Generation"),BU.forEach(t),W7=o(f5," by Yizhe Zhang, Siqi Sun, Michel Galley, Yen-Chun Chen, Chris Brockett, Xiang Gao, Jianfeng Gao, Jingjing Liu, Bill Dolan."),f5.forEach(t),Y7=s(c),nt=a(c,"LI",{});var jr=l(nt);f3=a(jr,"STRONG",{});var CU=l(f3);Td=a(CU,"A",{href:!0});var NU=l(Td);J7=o(NU,"DistilBERT"),NU.forEach(t),CU.forEach(t),Z7=o(jr," (from HuggingFace), released together with the paper "),Xn=a(jr,"A",{href:!0,rel:!0});var 
IU=l(Xn);V7=o(IU,"DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter"),IU.forEach(t),K7=o(jr," by Victor Sanh, Lysandre Debut and Thomas Wolf. The same method has been applied to compress GPT2 into "),zn=a(jr,"A",{href:!0,rel:!0});var xU=l(zn);X7=o(xU,"DistilGPT2"),xU.forEach(t),z7=o(jr,", RoBERTa into "),Hn=a(jr,"A",{href:!0,rel:!0});var OU=l(Hn);H7=o(OU,"DistilRoBERTa"),OU.forEach(t),U7=o(jr,", Multilingual BERT into "),Un=a(jr,"A",{href:!0,rel:!0});var FU=l(Un);q7=o(FU,"DistilmBERT"),FU.forEach(t),Q7=o(jr," and a German version of DistilBERT."),jr.forEach(t),j7=s(c),Va=a(c,"LI",{});var u5=l(Va);u3=a(u5,"STRONG",{});var WU=l(u3);_d=a(WU,"A",{href:!0});var YU=l(_d);$7=o(YU,"DPR"),YU.forEach(t),WU.forEach(t),e9=o(u5," (from Facebook) released with the paper "),qn=a(u5,"A",{href:!0,rel:!0});var JU=l(qn);t9=o(JU,"Dense Passage Retrieval for Open-Domain Question Answering"),JU.forEach(t),r9=o(u5," by Vladimir Karpukhin, Barlas O\u011Fuz, Sewon Min, Patrick Lewis, Ledell Wu, Sergey Edunov, Danqi Chen, and Wen-tau Yih."),u5.forEach(t),a9=s(c),Ka=a(c,"LI",{});var v5=l(Ka);v3=a(v5,"STRONG",{});var ZU=l(v3);md=a(ZU,"A",{href:!0});var VU=l(md);l9=o(VU,"EncoderDecoder"),VU.forEach(t),ZU.forEach(t),n9=o(v5," (from Google Research) released with the paper "),Qn=a(v5,"A",{href:!0,rel:!0});var KU=l(Qn);o9=o(KU,"Leveraging Pre-trained Checkpoints for Sequence Generation Tasks"),KU.forEach(t),i9=o(v5," by Sascha Rothe, Shashi Narayan, Aliaksei Severyn."),v5.forEach(t),d9=s(c),Xa=a(c,"LI",{});var E5=l(Xa);E3=a(E5,"STRONG",{});var XU=l(E3);pd=a(XU,"A",{href:!0});var zU=l(pd);s9=o(zU,"ELECTRA"),zU.forEach(t),XU.forEach(t),h9=o(E5," (from Google Research/Stanford University) released with the paper "),jn=a(E5,"A",{href:!0,rel:!0});var HU=l(jn);c9=o(HU,"ELECTRA: Pre-training text encoders as discriminators rather than generators"),HU.forEach(t),g9=o(E5," by Kevin Clark, Minh-Thang Luong, Quoc V. Le, Christopher D. Manning."),E5.forEach(t),f9=s(c),za=a(c,"LI",{});var T5=l(za);T3=a(T5,"STRONG",{});var UU=l(T3);Dd=a(UU,"A",{href:!0});var qU=l(Dd);u9=o(qU,"FlauBERT"),qU.forEach(t),UU.forEach(t),v9=o(T5," (from CNRS) released with the paper "),$n=a(T5,"A",{href:!0,rel:!0});var QU=l($n);E9=o(QU,"FlauBERT: Unsupervised Language Model Pre-training for French"),QU.forEach(t),T9=o(T5," by Hang Le, Lo\xEFc Vial, Jibril Frej, Vincent Segonne, Maximin Coavoux, Benjamin Lecouteux, Alexandre Allauzen, Beno\xEEt Crabb\xE9, Laurent Besacier, Didier Schwab."),T5.forEach(t),_9=s(c),Ha=a(c,"LI",{});var _5=l(Ha);_3=a(_5,"STRONG",{});var jU=l(_3);yd=a(jU,"A",{href:!0});var $U=l(yd);m9=o($U,"FNet"),$U.forEach(t),jU.forEach(t),p9=o(_5," (from Google Research) released with the paper "),eo=a(_5,"A",{href:!0,rel:!0});var eq=l(eo);D9=o(eq,"FNet: Mixing Tokens with Fourier Transforms"),eq.forEach(t),y9=o(_5," by James Lee-Thorp, Joshua Ainslie, Ilya Eckstein, Santiago Ontanon."),_5.forEach(t),A9=s(c),Ua=a(c,"LI",{});var m5=l(Ua);m3=a(m5,"STRONG",{});var tq=l(m3);Ad=a(tq,"A",{href:!0});var rq=l(Ad);R9=o(rq,"Funnel Transformer"),rq.forEach(t),tq.forEach(t),L9=o(m5," (from CMU/Google Brain) released with the paper "),to=a(m5,"A",{href:!0,rel:!0});var aq=l(to);b9=o(aq,"Funnel-Transformer: Filtering out Sequential Redundancy for Efficient Language Processing"),aq.forEach(t),S9=o(m5," by Zihang Dai, Guokun Lai, Yiming Yang, Quoc V. 
Le."),m5.forEach(t),w9=s(c),qa=a(c,"LI",{});var p5=l(qa);p3=a(p5,"STRONG",{});var lq=l(p3);Rd=a(lq,"A",{href:!0});var nq=l(Rd);M9=o(nq,"GPT"),nq.forEach(t),lq.forEach(t),P9=o(p5," (from OpenAI) released with the paper "),ro=a(p5,"A",{href:!0,rel:!0});var oq=l(ro);G9=o(oq,"Improving Language Understanding by Generative Pre-Training"),oq.forEach(t),k9=o(p5," by Alec Radford, Karthik Narasimhan, Tim Salimans and Ilya Sutskever."),p5.forEach(t),B9=s(c),st=a(c,"LI",{});var ua=l(st);D3=a(ua,"STRONG",{});var iq=l(D3);Ld=a(iq,"A",{href:!0});var dq=l(Ld);C9=o(dq,"GPT-2"),dq.forEach(t),iq.forEach(t),N9=o(ua," (from OpenAI) released with the paper "),ao=a(ua,"A",{href:!0,rel:!0});var sq=l(ao);I9=o(sq,"Language Models are Unsupervised Multitask Learners"),sq.forEach(t),x9=o(ua," by Alec Radford"),y3=a(ua,"EM",{});var hq=l(y3);O9=o(hq,", Jeffrey Wu"),hq.forEach(t),F9=o(ua,", Rewon Child, David Luan, Dario Amodei"),A3=a(ua,"STRONG",{});var cq=l(A3);W9=o(cq,"and Ilya Sutskever"),cq.forEach(t),Y9=o(ua,"."),ua.forEach(t),J9=s(c),Qa=a(c,"LI",{});var D5=l(Qa);R3=a(D5,"STRONG",{});var gq=l(R3);bd=a(gq,"A",{href:!0});var fq=l(bd);Z9=o(fq,"GPT-J"),fq.forEach(t),gq.forEach(t),V9=o(D5," (from EleutherAI) released in the repository "),lo=a(D5,"A",{href:!0,rel:!0});var uq=l(lo);K9=o(uq,"kingoflolz/mesh-transformer-jax"),uq.forEach(t),X9=o(D5," by Ben Wang and Aran Komatsuzaki."),D5.forEach(t),z9=s(c),ja=a(c,"LI",{});var y5=l(ja);L3=a(y5,"STRONG",{});var vq=l(L3);Sd=a(vq,"A",{href:!0});var Eq=l(Sd);H9=o(Eq,"GPT Neo"),Eq.forEach(t),vq.forEach(t),U9=o(y5," (from EleutherAI) released in the repository "),no=a(y5,"A",{href:!0,rel:!0});var Tq=l(no);q9=o(Tq,"EleutherAI/gpt-neo"),Tq.forEach(t),Q9=o(y5," by Sid Black, Stella Biderman, Leo Gao, Phil Wang and Connor Leahy."),y5.forEach(t),j9=s(c),$a=a(c,"LI",{});var A5=l($a);b3=a(A5,"STRONG",{});var _q=l(b3);wd=a(_q,"A",{href:!0});var mq=l(wd);$9=o(mq,"Hubert"),mq.forEach(t),_q.forEach(t),e8=o(A5," (from Facebook) released with the paper "),oo=a(A5,"A",{href:!0,rel:!0});var pq=l(oo);t8=o(pq,"HuBERT: Self-Supervised Speech Representation Learning by Masked Prediction of Hidden Units"),pq.forEach(t),r8=o(A5," by Wei-Ning Hsu, Benjamin Bolte, Yao-Hung Hubert Tsai, Kushal Lakhotia, Ruslan Salakhutdinov, Abdelrahman Mohamed."),A5.forEach(t),a8=s(c),el=a(c,"LI",{});var R5=l(el);S3=a(R5,"STRONG",{});var Dq=l(S3);Md=a(Dq,"A",{href:!0});var yq=l(Md);l8=o(yq,"I-BERT"),yq.forEach(t),Dq.forEach(t),n8=o(R5," (from Berkeley) released with the paper "),io=a(R5,"A",{href:!0,rel:!0});var Aq=l(io);o8=o(Aq,"I-BERT: Integer-only BERT Quantization"),Aq.forEach(t),i8=o(R5," by Sehoon Kim, Amir Gholami, Zhewei Yao, Michael W. 
Mahoney, Kurt Keutzer."),R5.forEach(t),d8=s(c),tl=a(c,"LI",{});var L5=l(tl);w3=a(L5,"STRONG",{});var Rq=l(w3);Pd=a(Rq,"A",{href:!0});var Lq=l(Pd);s8=o(Lq,"ImageGPT"),Lq.forEach(t),Rq.forEach(t),h8=o(L5," (from OpenAI) released with the paper "),so=a(L5,"A",{href:!0,rel:!0});var bq=l(so);c8=o(bq,"Generative Pretraining from Pixels"),bq.forEach(t),g8=o(L5," by Mark Chen, Alec Radford, Rewon Child, Jeffrey Wu, Heewoo Jun, David Luan, Ilya Sutskever."),L5.forEach(t),f8=s(c),rl=a(c,"LI",{});var b5=l(rl);M3=a(b5,"STRONG",{});var Sq=l(M3);Gd=a(Sq,"A",{href:!0});var wq=l(Gd);u8=o(wq,"LayoutLM"),wq.forEach(t),Sq.forEach(t),v8=o(b5," (from Microsoft Research Asia) released with the paper "),ho=a(b5,"A",{href:!0,rel:!0});var Mq=l(ho);E8=o(Mq,"LayoutLM: Pre-training of Text and Layout for Document Image Understanding"),Mq.forEach(t),T8=o(b5," by Yiheng Xu, Minghao Li, Lei Cui, Shaohan Huang, Furu Wei, Ming Zhou."),b5.forEach(t),_8=s(c),al=a(c,"LI",{});var S5=l(al);P3=a(S5,"STRONG",{});var Pq=l(P3);kd=a(Pq,"A",{href:!0});var Gq=l(kd);m8=o(Gq,"LayoutLMv2"),Gq.forEach(t),Pq.forEach(t),p8=o(S5," (from Microsoft Research Asia) released with the paper "),co=a(S5,"A",{href:!0,rel:!0});var kq=l(co);D8=o(kq,"LayoutLMv2: Multi-modal Pre-training for Visually-Rich Document Understanding"),kq.forEach(t),y8=o(S5," by Yang Xu, Yiheng Xu, Tengchao Lv, Lei Cui, Furu Wei, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Wanxiang Che, Min Zhang, Lidong Zhou."),S5.forEach(t),A8=s(c),ll=a(c,"LI",{});var w5=l(ll);G3=a(w5,"STRONG",{});var Bq=l(G3);Bd=a(Bq,"A",{href:!0});var Cq=l(Bd);R8=o(Cq,"LayoutXLM"),Cq.forEach(t),Bq.forEach(t),L8=o(w5," (from Microsoft Research Asia) released with the paper "),go=a(w5,"A",{href:!0,rel:!0});var Nq=l(go);b8=o(Nq,"LayoutXLM: Multimodal Pre-training for Multilingual Visually-rich Document Understanding"),Nq.forEach(t),S8=o(w5," by Yiheng Xu, Tengchao Lv, Lei Cui, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Furu Wei."),w5.forEach(t),w8=s(c),nl=a(c,"LI",{});var M5=l(nl);k3=a(M5,"STRONG",{});var Iq=l(k3);Cd=a(Iq,"A",{href:!0});var xq=l(Cd);M8=o(xq,"LED"),xq.forEach(t),Iq.forEach(t),P8=o(M5," (from AllenAI) released with the paper "),fo=a(M5,"A",{href:!0,rel:!0});var Oq=l(fo);G8=o(Oq,"Longformer: The Long-Document Transformer"),Oq.forEach(t),k8=o(M5," by Iz Beltagy, Matthew E. Peters, Arman Cohan."),M5.forEach(t),B8=s(c),ol=a(c,"LI",{});var P5=l(ol);B3=a(P5,"STRONG",{});var Fq=l(B3);Nd=a(Fq,"A",{href:!0});var Wq=l(Nd);C8=o(Wq,"Longformer"),Wq.forEach(t),Fq.forEach(t),N8=o(P5," (from AllenAI) released with the paper "),uo=a(P5,"A",{href:!0,rel:!0});var Yq=l(uo);I8=o(Yq,"Longformer: The Long-Document Transformer"),Yq.forEach(t),x8=o(P5," by Iz Beltagy, Matthew E. 
Peters, Arman Cohan."),P5.forEach(t),O8=s(c),il=a(c,"LI",{});var G5=l(il);C3=a(G5,"STRONG",{});var Jq=l(C3);Id=a(Jq,"A",{href:!0});var Zq=l(Id);F8=o(Zq,"LUKE"),Zq.forEach(t),Jq.forEach(t),W8=o(G5," (from Studio Ousia) released with the paper "),vo=a(G5,"A",{href:!0,rel:!0});var Vq=l(vo);Y8=o(Vq,"LUKE: Deep Contextualized Entity Representations with Entity-aware Self-attention"),Vq.forEach(t),J8=o(G5," by Ikuya Yamada, Akari Asai, Hiroyuki Shindo, Hideaki Takeda, Yuji Matsumoto."),G5.forEach(t),Z8=s(c),dl=a(c,"LI",{});var k5=l(dl);N3=a(k5,"STRONG",{});var Kq=l(N3);xd=a(Kq,"A",{href:!0});var Xq=l(xd);V8=o(Xq,"mLUKE"),Xq.forEach(t),Kq.forEach(t),K8=o(k5," (from Studio Ousia) released with the paper "),Eo=a(k5,"A",{href:!0,rel:!0});var zq=l(Eo);X8=o(zq,"mLUKE: The Power of Entity Representations in Multilingual Pretrained Language Models"),zq.forEach(t),z8=o(k5," by Ryokan Ri, Ikuya Yamada, and Yoshimasa Tsuruoka."),k5.forEach(t),H8=s(c),sl=a(c,"LI",{});var B5=l(sl);I3=a(B5,"STRONG",{});var Hq=l(I3);Od=a(Hq,"A",{href:!0});var Uq=l(Od);U8=o(Uq,"LXMERT"),Uq.forEach(t),Hq.forEach(t),q8=o(B5," (from UNC Chapel Hill) released with the paper "),To=a(B5,"A",{href:!0,rel:!0});var qq=l(To);Q8=o(qq,"LXMERT: Learning Cross-Modality Encoder Representations from Transformers for Open-Domain Question Answering"),qq.forEach(t),j8=o(B5," by Hao Tan and Mohit Bansal."),B5.forEach(t),$8=s(c),hl=a(c,"LI",{});var C5=l(hl);x3=a(C5,"STRONG",{});var Qq=l(x3);Fd=a(Qq,"A",{href:!0});var jq=l(Fd);eD=o(jq,"M2M100"),jq.forEach(t),Qq.forEach(t),tD=o(C5," (from Facebook) released with the paper "),_o=a(C5,"A",{href:!0,rel:!0});var $q=l(_o);rD=o($q,"Beyond English-Centric Multilingual Machine Translation"),$q.forEach(t),aD=o(C5," by Angela Fan, Shruti Bhosale, Holger Schwenk, Zhiyi Ma, Ahmed El-Kishky, Siddharth Goyal, Mandeep Baines, Onur Celebi, Guillaume Wenzek, Vishrav Chaudhary, Naman Goyal, Tom Birch, Vitaliy Liptchinsky, Sergey Edunov, Edouard Grave, Michael Auli, Armand Joulin."),C5.forEach(t),lD=s(c),aa=a(c,"LI",{});var Li=l(aa);O3=a(Li,"STRONG",{});var eQ=l(O3);Wd=a(eQ,"A",{href:!0});var tQ=l(Wd);nD=o(tQ,"MarianMT"),tQ.forEach(t),eQ.forEach(t),oD=o(Li," Machine translation models trained using "),mo=a(Li,"A",{href:!0,rel:!0});var rQ=l(mo);iD=o(rQ,"OPUS"),rQ.forEach(t),dD=o(Li," data by J\xF6rg Tiedemann. The "),po=a(Li,"A",{href:!0,rel:!0});var aQ=l(po);sD=o(aQ,"Marian Framework"),aQ.forEach(t),hD=o(Li," is being developed by the Microsoft Translator Team."),Li.forEach(t),cD=s(c),cl=a(c,"LI",{});var N5=l(cl);F3=a(N5,"STRONG",{});var lQ=l(F3);Yd=a(lQ,"A",{href:!0});var nQ=l(Yd);gD=o(nQ,"MaskFormer"),nQ.forEach(t),lQ.forEach(t),fD=o(N5," (from Meta and UIUC) released with the paper "),Do=a(N5,"A",{href:!0,rel:!0});var oQ=l(Do);uD=o(oQ,"Per-Pixel Classification is Not All You Need for Semantic Segmentation"),oQ.forEach(t),vD=o(N5," by Bowen Cheng, Alexander G. 
Schwing, Alexander Kirillov."),N5.forEach(t),ED=s(c),gl=a(c,"LI",{});var I5=l(gl);W3=a(I5,"STRONG",{});var iQ=l(W3);Jd=a(iQ,"A",{href:!0});var dQ=l(Jd);TD=o(dQ,"MBart"),dQ.forEach(t),iQ.forEach(t),_D=o(I5," (from Facebook) released with the paper "),yo=a(I5,"A",{href:!0,rel:!0});var sQ=l(yo);mD=o(sQ,"Multilingual Denoising Pre-training for Neural Machine Translation"),sQ.forEach(t),pD=o(I5," by Yinhan Liu, Jiatao Gu, Naman Goyal, Xian Li, Sergey Edunov, Marjan Ghazvininejad, Mike Lewis, Luke Zettlemoyer."),I5.forEach(t),DD=s(c),fl=a(c,"LI",{});var x5=l(fl);Y3=a(x5,"STRONG",{});var hQ=l(Y3);Zd=a(hQ,"A",{href:!0});var cQ=l(Zd);yD=o(cQ,"MBart-50"),cQ.forEach(t),hQ.forEach(t),AD=o(x5," (from Facebook) released with the paper "),Ao=a(x5,"A",{href:!0,rel:!0});var gQ=l(Ao);RD=o(gQ,"Multilingual Translation with Extensible Multilingual Pretraining and Finetuning"),gQ.forEach(t),LD=o(x5," by Yuqing Tang, Chau Tran, Xian Li, Peng-Jen Chen, Naman Goyal, Vishrav Chaudhary, Jiatao Gu, Angela Fan."),x5.forEach(t),bD=s(c),ul=a(c,"LI",{});var O5=l(ul);J3=a(O5,"STRONG",{});var fQ=l(J3);Vd=a(fQ,"A",{href:!0});var uQ=l(Vd);SD=o(uQ,"Megatron-BERT"),uQ.forEach(t),fQ.forEach(t),wD=o(O5," (from NVIDIA) released with the paper "),Ro=a(O5,"A",{href:!0,rel:!0});var vQ=l(Ro);MD=o(vQ,"Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism"),vQ.forEach(t),PD=o(O5," by Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper and Bryan Catanzaro."),O5.forEach(t),GD=s(c),vl=a(c,"LI",{});var F5=l(vl);Z3=a(F5,"STRONG",{});var EQ=l(Z3);Kd=a(EQ,"A",{href:!0});var TQ=l(Kd);kD=o(TQ,"Megatron-GPT2"),TQ.forEach(t),EQ.forEach(t),BD=o(F5," (from NVIDIA) released with the paper "),Lo=a(F5,"A",{href:!0,rel:!0});var _Q=l(Lo);CD=o(_Q,"Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism"),_Q.forEach(t),ND=o(F5," by Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper and Bryan Catanzaro."),F5.forEach(t),ID=s(c),El=a(c,"LI",{});var W5=l(El);V3=a(W5,"STRONG",{});var mQ=l(V3);Xd=a(mQ,"A",{href:!0});var pQ=l(Xd);xD=o(pQ,"MPNet"),pQ.forEach(t),mQ.forEach(t),OD=o(W5," (from Microsoft Research) released with the paper "),bo=a(W5,"A",{href:!0,rel:!0});var DQ=l(bo);FD=o(DQ,"MPNet: Masked and Permuted Pre-training for Language Understanding"),DQ.forEach(t),WD=o(W5," by Kaitao Song, Xu Tan, Tao Qin, Jianfeng Lu, Tie-Yan Liu."),W5.forEach(t),YD=s(c),Tl=a(c,"LI",{});var Y5=l(Tl);K3=a(Y5,"STRONG",{});var yQ=l(K3);zd=a(yQ,"A",{href:!0});var AQ=l(zd);JD=o(AQ,"MT5"),AQ.forEach(t),yQ.forEach(t),ZD=o(Y5," (from Google AI) released with the paper "),So=a(Y5,"A",{href:!0,rel:!0});var RQ=l(So);VD=o(RQ,"mT5: A massively multilingual pre-trained text-to-text transformer"),RQ.forEach(t),KD=o(Y5," by Linting Xue, Noah Constant, Adam Roberts, Mihir Kale, Rami Al-Rfou, Aditya Siddhant, Aditya Barua, Colin Raffel."),Y5.forEach(t),XD=s(c),_l=a(c,"LI",{});var J5=l(_l);X3=a(J5,"STRONG",{});var LQ=l(X3);Hd=a(LQ,"A",{href:!0});var bQ=l(Hd);zD=o(bQ,"Nystr\xF6mformer"),bQ.forEach(t),LQ.forEach(t),HD=o(J5," (from the University of Wisconsin - Madison) released with the paper "),wo=a(J5,"A",{href:!0,rel:!0});var SQ=l(wo);UD=o(SQ,"Nystr\xF6mformer: A Nystr\xF6m-Based Algorithm for Approximating Self-Attention"),SQ.forEach(t),qD=o(J5," by Yunyang Xiong, Zhanpeng Zeng, Rudrasis Chakraborty, Mingxing Tan, Glenn Fung, Yin Li, Vikas Singh."),J5.forEach(t),QD=s(c),ml=a(c,"LI",{});var Z5=l(ml);z3=a(Z5,"STRONG",{});var wQ=l(z3);Ud=a(wQ,"A",{href:!0});var 
MQ=l(Ud);jD=o(MQ,"Pegasus"),MQ.forEach(t),wQ.forEach(t),$D=o(Z5," (from Google) released with the paper "),Mo=a(Z5,"A",{href:!0,rel:!0});var PQ=l(Mo);ey=o(PQ,"PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive Summarization"),PQ.forEach(t),ty=o(Z5," by Jingqing Zhang, Yao Zhao, Mohammad Saleh and Peter J. Liu."),Z5.forEach(t),ry=s(c),pl=a(c,"LI",{});var V5=l(pl);H3=a(V5,"STRONG",{});var GQ=l(H3);qd=a(GQ,"A",{href:!0});var kQ=l(qd);ay=o(kQ,"Perceiver IO"),kQ.forEach(t),GQ.forEach(t),ly=o(V5," (from Deepmind) released with the paper "),Po=a(V5,"A",{href:!0,rel:!0});var BQ=l(Po);ny=o(BQ,"Perceiver IO: A General Architecture for Structured Inputs & Outputs"),BQ.forEach(t),oy=o(V5," by Andrew Jaegle, Sebastian Borgeaud, Jean-Baptiste Alayrac, Carl Doersch, Catalin Ionescu, David Ding, Skanda Koppula, Daniel Zoran, Andrew Brock, Evan Shelhamer, Olivier H\xE9naff, Matthew M. Botvinick, Andrew Zisserman, Oriol Vinyals, Jo\xE3o Carreira."),V5.forEach(t),iy=s(c),Dl=a(c,"LI",{});var K5=l(Dl);U3=a(K5,"STRONG",{});var CQ=l(U3);Qd=a(CQ,"A",{href:!0});var NQ=l(Qd);dy=o(NQ,"PhoBERT"),NQ.forEach(t),CQ.forEach(t),sy=o(K5," (from VinAI Research) released with the paper "),Go=a(K5,"A",{href:!0,rel:!0});var IQ=l(Go);hy=o(IQ,"PhoBERT: Pre-trained language models for Vietnamese"),IQ.forEach(t),cy=o(K5," by Dat Quoc Nguyen and Anh Tuan Nguyen."),K5.forEach(t),gy=s(c),yl=a(c,"LI",{});var X5=l(yl);q3=a(X5,"STRONG",{});var xQ=l(q3);jd=a(xQ,"A",{href:!0});var OQ=l(jd);fy=o(OQ,"PLBart"),OQ.forEach(t),xQ.forEach(t),uy=o(X5," (from UCLA NLP) released with the paper "),ko=a(X5,"A",{href:!0,rel:!0});var FQ=l(ko);vy=o(FQ,"Unified Pre-training for Program Understanding and Generation"),FQ.forEach(t),Ey=o(X5," by Wasi Uddin Ahmad, Saikat Chakraborty, Baishakhi Ray, Kai-Wei Chang."),X5.forEach(t),Ty=s(c),Al=a(c,"LI",{});var z5=l(Al);Q3=a(z5,"STRONG",{});var WQ=l(Q3);$d=a(WQ,"A",{href:!0});var YQ=l($d);_y=o(YQ,"PoolFormer"),YQ.forEach(t),WQ.forEach(t),my=o(z5," (from Sea AI Labs) released with the paper "),Bo=a(z5,"A",{href:!0,rel:!0});var JQ=l(Bo);py=o(JQ,"MetaFormer is Actually What You Need for Vision"),JQ.forEach(t),Dy=o(z5," by Yu, Weihao and Luo, Mi and Zhou, Pan and Si, Chenyang and Zhou, Yichen and Wang, Xinchao and Feng, Jiashi and Yan, Shuicheng."),z5.forEach(t),yy=s(c),Rl=a(c,"LI",{});var H5=l(Rl);j3=a(H5,"STRONG",{});var ZQ=l(j3);es=a(ZQ,"A",{href:!0});var VQ=l(es);Ay=o(VQ,"ProphetNet"),VQ.forEach(t),ZQ.forEach(t),Ry=o(H5," (from Microsoft Research) released with the paper "),Co=a(H5,"A",{href:!0,rel:!0});var KQ=l(Co);Ly=o(KQ,"ProphetNet: Predicting Future N-gram for Sequence-to-Sequence Pre-training"),KQ.forEach(t),by=o(H5," by Yu Yan, Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang and Ming Zhou."),H5.forEach(t),Sy=s(c),Ll=a(c,"LI",{});var U5=l(Ll);$3=a(U5,"STRONG",{});var XQ=l($3);ts=a(XQ,"A",{href:!0});var zQ=l(ts);wy=o(zQ,"QDQBert"),zQ.forEach(t),XQ.forEach(t),My=o(U5," (from NVIDIA) released with the paper "),No=a(U5,"A",{href:!0,rel:!0});var HQ=l(No);Py=o(HQ,"Integer Quantization for Deep Learning Inference: Principles and Empirical Evaluation"),HQ.forEach(t),Gy=o(U5," by Hao Wu, Patrick Judd, Xiaojie Zhang, Mikhail Isaev and Paulius Micikevicius."),U5.forEach(t),ky=s(c),bl=a(c,"LI",{});var q5=l(bl);e4=a(q5,"STRONG",{});var UQ=l(e4);Io=a(UQ,"A",{href:!0,rel:!0});var qQ=l(Io);By=o(qQ,"REALM"),qQ.forEach(t),UQ.forEach(t),Cy=o(q5," (from Google Research) released with the paper "),xo=a(q5,"A",{href:!0,rel:!0});var QQ=l(xo);Ny=o(QQ,"REALM: Retrieval-Augmented Language 
Model Pre-Training"),QQ.forEach(t),Iy=o(q5," by Kelvin Guu, Kenton Lee, Zora Tung, Panupong Pasupat and Ming-Wei Chang."),q5.forEach(t),xy=s(c),Sl=a(c,"LI",{});var Q5=l(Sl);t4=a(Q5,"STRONG",{});var jQ=l(t4);rs=a(jQ,"A",{href:!0});var $Q=l(rs);Oy=o($Q,"Reformer"),$Q.forEach(t),jQ.forEach(t),Fy=o(Q5," (from Google Research) released with the paper "),Oo=a(Q5,"A",{href:!0,rel:!0});var ej=l(Oo);Wy=o(ej,"Reformer: The Efficient Transformer"),ej.forEach(t),Yy=o(Q5," by Nikita Kitaev, \u0141ukasz Kaiser, Anselm Levskaya."),Q5.forEach(t),Jy=s(c),wl=a(c,"LI",{});var j5=l(wl);r4=a(j5,"STRONG",{});var tj=l(r4);as=a(tj,"A",{href:!0});var rj=l(as);Zy=o(rj,"RemBERT"),rj.forEach(t),tj.forEach(t),Vy=o(j5," (from Google Research) released with the paper "),Fo=a(j5,"A",{href:!0,rel:!0});var aj=l(Fo);Ky=o(aj,"Rethinking embedding coupling in pre-trained language models"),aj.forEach(t),Xy=o(j5," by Hyung Won Chung, Thibault F\xE9vry, Henry Tsai, M. Johnson, Sebastian Ruder."),j5.forEach(t),zy=s(c),Ml=a(c,"LI",{});var $5=l(Ml);a4=a($5,"STRONG",{});var lj=l(a4);ls=a(lj,"A",{href:!0});var nj=l(ls);Hy=o(nj,"RoBERTa"),nj.forEach(t),lj.forEach(t),Uy=o($5," (from Facebook), released together with the paper "),Wo=a($5,"A",{href:!0,rel:!0});var oj=l(Wo);qy=o(oj,"RoBERTa: A Robustly Optimized BERT Pretraining Approach"),oj.forEach(t),Qy=o($5," by Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, Veselin Stoyanov."),$5.forEach(t),jy=s(c),Pl=a(c,"LI",{});var em=l(Pl);l4=a(em,"STRONG",{});var ij=l(l4);ns=a(ij,"A",{href:!0});var dj=l(ns);$y=o(dj,"RoFormer"),dj.forEach(t),ij.forEach(t),eA=o(em," (from ZhuiyiTechnology), released together with the paper "),Yo=a(em,"A",{href:!0,rel:!0});var sj=l(Yo);tA=o(sj,"RoFormer: Enhanced Transformer with Rotary Position Embedding"),sj.forEach(t),rA=o(em," by Jianlin Su and Yu Lu and Shengfeng Pan and Bo Wen and Yunfeng Liu."),em.forEach(t),aA=s(c),Gl=a(c,"LI",{});var tm=l(Gl);n4=a(tm,"STRONG",{});var hj=l(n4);os=a(hj,"A",{href:!0});var cj=l(os);lA=o(cj,"SegFormer"),cj.forEach(t),hj.forEach(t),nA=o(tm," (from NVIDIA) released with the paper "),Jo=a(tm,"A",{href:!0,rel:!0});var gj=l(Jo);oA=o(gj,"SegFormer: Simple and Efficient Design for Semantic Segmentation with Transformers"),gj.forEach(t),iA=o(tm," by Enze Xie, Wenhai Wang, Zhiding Yu, Anima Anandkumar, Jose M. Alvarez, Ping Luo."),tm.forEach(t),dA=s(c),kl=a(c,"LI",{});var rm=l(kl);o4=a(rm,"STRONG",{});var fj=l(o4);is=a(fj,"A",{href:!0});var uj=l(is);sA=o(uj,"SEW"),uj.forEach(t),fj.forEach(t),hA=o(rm," (from ASAPP) released with the paper "),Zo=a(rm,"A",{href:!0,rel:!0});var vj=l(Zo);cA=o(vj,"Performance-Efficiency Trade-offs in Unsupervised Pre-training for Speech Recognition"),vj.forEach(t),gA=o(rm," by Felix Wu, Kwangyoun Kim, Jing Pan, Kyu Han, Kilian Q. Weinberger, Yoav Artzi."),rm.forEach(t),fA=s(c),Bl=a(c,"LI",{});var am=l(Bl);i4=a(am,"STRONG",{});var Ej=l(i4);ds=a(Ej,"A",{href:!0});var Tj=l(ds);uA=o(Tj,"SEW-D"),Tj.forEach(t),Ej.forEach(t),vA=o(am," (from ASAPP) released with the paper "),Vo=a(am,"A",{href:!0,rel:!0});var _j=l(Vo);EA=o(_j,"Performance-Efficiency Trade-offs in Unsupervised Pre-training for Speech Recognition"),_j.forEach(t),TA=o(am," by Felix Wu, Kwangyoun Kim, Jing Pan, Kyu Han, Kilian Q. 
Weinberger, Yoav Artzi."),am.forEach(t),_A=s(c),Cl=a(c,"LI",{});var lm=l(Cl);d4=a(lm,"STRONG",{});var mj=l(d4);ss=a(mj,"A",{href:!0});var pj=l(ss);mA=o(pj,"SpeechToTextTransformer"),pj.forEach(t),mj.forEach(t),pA=o(lm," (from Facebook), released together with the paper "),Ko=a(lm,"A",{href:!0,rel:!0});var Dj=l(Ko);DA=o(Dj,"fairseq S2T: Fast Speech-to-Text Modeling with fairseq"),Dj.forEach(t),yA=o(lm," by Changhan Wang, Yun Tang, Xutai Ma, Anne Wu, Dmytro Okhonko, Juan Pino."),lm.forEach(t),AA=s(c),Nl=a(c,"LI",{});var nm=l(Nl);s4=a(nm,"STRONG",{});var yj=l(s4);hs=a(yj,"A",{href:!0});var Aj=l(hs);RA=o(Aj,"SpeechToTextTransformer2"),Aj.forEach(t),yj.forEach(t),LA=o(nm," (from Facebook), released together with the paper "),Xo=a(nm,"A",{href:!0,rel:!0});var Rj=l(Xo);bA=o(Rj,"Large-Scale Self- and Semi-Supervised Learning for Speech Translation"),Rj.forEach(t),SA=o(nm," by Changhan Wang, Anne Wu, Juan Pino, Alexei Baevski, Michael Auli, Alexis Conneau."),nm.forEach(t),wA=s(c),Il=a(c,"LI",{});var om=l(Il);h4=a(om,"STRONG",{});var Lj=l(h4);cs=a(Lj,"A",{href:!0});var bj=l(cs);MA=o(bj,"Splinter"),bj.forEach(t),Lj.forEach(t),PA=o(om," (from Tel Aviv University), released together with the paper "),zo=a(om,"A",{href:!0,rel:!0});var Sj=l(zo);GA=o(Sj,"Few-Shot Question Answering by Pretraining Span Selection"),Sj.forEach(t),kA=o(om," by Ori Ram, Yuval Kirstain, Jonathan Berant, Amir Globerson, Omer Levy."),om.forEach(t),BA=s(c),xl=a(c,"LI",{});var im=l(xl);c4=a(im,"STRONG",{});var wj=l(c4);gs=a(wj,"A",{href:!0});var Mj=l(gs);CA=o(Mj,"SqueezeBert"),Mj.forEach(t),wj.forEach(t),NA=o(im," (from Berkeley) released with the paper "),Ho=a(im,"A",{href:!0,rel:!0});var Pj=l(Ho);IA=o(Pj,"SqueezeBERT: What can computer vision teach NLP about efficient neural networks?"),Pj.forEach(t),xA=o(im," by Forrest N. Iandola, Albert E. Shaw, Ravi Krishna, and Kurt W. Keutzer."),im.forEach(t),OA=s(c),Ol=a(c,"LI",{});var dm=l(Ol);g4=a(dm,"STRONG",{});var Gj=l(g4);fs=a(Gj,"A",{href:!0});var kj=l(fs);FA=o(kj,"Swin Transformer"),kj.forEach(t),Gj.forEach(t),WA=o(dm," (from Microsoft) released with the paper "),Uo=a(dm,"A",{href:!0,rel:!0});var Bj=l(Uo);YA=o(Bj,"Swin Transformer: Hierarchical Vision Transformer using Shifted Windows"),Bj.forEach(t),JA=o(dm," by Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, Baining Guo."),dm.forEach(t),ZA=s(c),Fl=a(c,"LI",{});var sm=l(Fl);f4=a(sm,"STRONG",{});var Cj=l(f4);us=a(Cj,"A",{href:!0});var Nj=l(us);VA=o(Nj,"T5"),Nj.forEach(t),Cj.forEach(t),KA=o(sm," (from Google AI) released with the paper "),qo=a(sm,"A",{href:!0,rel:!0});var Ij=l(qo);XA=o(Ij,"Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer"),Ij.forEach(t),zA=o(sm," by Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu."),sm.forEach(t),HA=s(c),Wl=a(c,"LI",{});var hm=l(Wl);u4=a(hm,"STRONG",{});var xj=l(u4);vs=a(xj,"A",{href:!0});var Oj=l(vs);UA=o(Oj,"T5v1.1"),Oj.forEach(t),xj.forEach(t),qA=o(hm," (from Google AI) released in the repository "),Qo=a(hm,"A",{href:!0,rel:!0});var Fj=l(Qo);QA=o(Fj,"google-research/text-to-text-transfer-transformer"),Fj.forEach(t),jA=o(hm," by Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. 
Liu."),hm.forEach(t),$A=s(c),Yl=a(c,"LI",{});var cm=l(Yl);v4=a(cm,"STRONG",{});var Wj=l(v4);Es=a(Wj,"A",{href:!0});var Yj=l(Es);eR=o(Yj,"TAPAS"),Yj.forEach(t),Wj.forEach(t),tR=o(cm," (from Google AI) released with the paper "),jo=a(cm,"A",{href:!0,rel:!0});var Jj=l(jo);rR=o(Jj,"TAPAS: Weakly Supervised Table Parsing via Pre-training"),Jj.forEach(t),aR=o(cm," by Jonathan Herzig, Pawe\u0142 Krzysztof Nowak, Thomas M\xFCller, Francesco Piccinno and Julian Martin Eisenschlos."),cm.forEach(t),lR=s(c),la=a(c,"LI",{});var bi=l(la);E4=a(bi,"STRONG",{});var Zj=l(E4);Ts=a(Zj,"A",{href:!0});var Vj=l(Ts);nR=o(Vj,"Transformer-XL"),Vj.forEach(t),Zj.forEach(t),oR=o(bi," (from Google/CMU) released with the paper "),$o=a(bi,"A",{href:!0,rel:!0});var Kj=l($o);iR=o(Kj,"Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context"),Kj.forEach(t),dR=o(bi," by Zihang Dai"),T4=a(bi,"EM",{});var Xj=l(T4);sR=o(Xj,", Zhilin Yang"),Xj.forEach(t),hR=o(bi,", Yiming Yang, Jaime Carbonell, Quoc V. Le, Ruslan Salakhutdinov."),bi.forEach(t),cR=s(c),Jl=a(c,"LI",{});var gm=l(Jl);_4=a(gm,"STRONG",{});var zj=l(_4);_s=a(zj,"A",{href:!0});var Hj=l(_s);gR=o(Hj,"TrOCR"),Hj.forEach(t),zj.forEach(t),fR=o(gm," (from Microsoft), released together with the paper "),ei=a(gm,"A",{href:!0,rel:!0});var Uj=l(ei);uR=o(Uj,"TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models"),Uj.forEach(t),vR=o(gm," by Minghao Li, Tengchao Lv, Lei Cui, Yijuan Lu, Dinei Florencio, Cha Zhang, Zhoujun Li, Furu Wei."),gm.forEach(t),ER=s(c),Zl=a(c,"LI",{});var fm=l(Zl);m4=a(fm,"STRONG",{});var qj=l(m4);ms=a(qj,"A",{href:!0});var Qj=l(ms);TR=o(Qj,"UniSpeech"),Qj.forEach(t),qj.forEach(t),_R=o(fm," (from Microsoft Research) released with the paper "),ti=a(fm,"A",{href:!0,rel:!0});var jj=l(ti);mR=o(jj,"UniSpeech: Unified Speech Representation Learning with Labeled and Unlabeled Data"),jj.forEach(t),pR=o(fm," by Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang."),fm.forEach(t),DR=s(c),Vl=a(c,"LI",{});var um=l(Vl);p4=a(um,"STRONG",{});var $j=l(p4);ps=a($j,"A",{href:!0});var e$=l(ps);yR=o(e$,"UniSpeechSat"),e$.forEach(t),$j.forEach(t),AR=o(um," (from Microsoft Research) released with the paper "),ri=a(um,"A",{href:!0,rel:!0});var t$=l(ri);RR=o(t$,"UNISPEECH-SAT: UNIVERSAL SPEECH REPRESENTATION LEARNING WITH SPEAKER AWARE PRE-TRAINING"),t$.forEach(t),LR=o(um," by Sanyuan Chen, Yu Wu, Chengyi Wang, Zhengyang Chen, Zhuo Chen, Shujie Liu, Jian Wu, Yao Qian, Furu Wei, Jinyu Li, Xiangzhan Yu."),um.forEach(t),bR=s(c),Kl=a(c,"LI",{});var vm=l(Kl);D4=a(vm,"STRONG",{});var r$=l(D4);Ds=a(r$,"A",{href:!0});var a$=l(Ds);SR=o(a$,"ViLT"),a$.forEach(t),r$.forEach(t),wR=o(vm," (from NAVER AI Lab/Kakao Enterprise/Kakao Brain) released with the paper "),ai=a(vm,"A",{href:!0,rel:!0});var l$=l(ai);MR=o(l$,"ViLT: Vision-and-Language Transformer Without Convolution or Region Supervision"),l$.forEach(t),PR=o(vm," by Wonjae Kim, Bokyung Son, Ildoo Kim."),vm.forEach(t),GR=s(c),Xl=a(c,"LI",{});var Em=l(Xl);y4=a(Em,"STRONG",{});var n$=l(y4);ys=a(n$,"A",{href:!0});var o$=l(ys);kR=o(o$,"Vision Transformer (ViT)"),o$.forEach(t),n$.forEach(t),BR=o(Em," (from Google AI) released with the paper "),li=a(Em,"A",{href:!0,rel:!0});var i$=l(li);CR=o(i$,"An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale"),i$.forEach(t),NR=o(Em," by Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain 
Gelly, Jakob Uszkoreit, Neil Houlsby."),Em.forEach(t),IR=s(c),zl=a(c,"LI",{});var Tm=l(zl);A4=a(Tm,"STRONG",{});var d$=l(A4);As=a(d$,"A",{href:!0});var s$=l(As);xR=o(s$,"ViTMAE"),s$.forEach(t),d$.forEach(t),OR=o(Tm," (from Meta AI) released with the paper "),ni=a(Tm,"A",{href:!0,rel:!0});var h$=l(ni);FR=o(h$,"Masked Autoencoders Are Scalable Vision Learners"),h$.forEach(t),WR=o(Tm," by Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Doll\xE1r, Ross Girshick."),Tm.forEach(t),YR=s(c),Hl=a(c,"LI",{});var _m=l(Hl);R4=a(_m,"STRONG",{});var c$=l(R4);Rs=a(c$,"A",{href:!0});var g$=l(Rs);JR=o(g$,"VisualBERT"),g$.forEach(t),c$.forEach(t),ZR=o(_m," (from UCLA NLP) released with the paper "),oi=a(_m,"A",{href:!0,rel:!0});var f$=l(oi);VR=o(f$,"VisualBERT: A Simple and Performant Baseline for Vision and Language"),f$.forEach(t),KR=o(_m," by Liunian Harold Li, Mark Yatskar, Da Yin, Cho-Jui Hsieh, Kai-Wei Chang."),_m.forEach(t),XR=s(c),Ul=a(c,"LI",{});var mm=l(Ul);L4=a(mm,"STRONG",{});var u$=l(L4);Ls=a(u$,"A",{href:!0});var v$=l(Ls);zR=o(v$,"WavLM"),v$.forEach(t),u$.forEach(t),HR=o(mm," (from Microsoft Research) released with the paper "),ii=a(mm,"A",{href:!0,rel:!0});var E$=l(ii);UR=o(E$,"WavLM: Large-Scale Self-Supervised Pre-Training for Full Stack Speech Processing"),E$.forEach(t),qR=o(mm," by Sanyuan Chen, Chengyi Wang, Zhengyang Chen, Yu Wu, Shujie Liu, Zhuo Chen, Jinyu Li, Naoyuki Kanda, Takuya Yoshioka, Xiong Xiao, Jian Wu, Long Zhou, Shuo Ren, Yanmin Qian, Yao Qian, Jian Wu, Michael Zeng, Furu Wei."),mm.forEach(t),QR=s(c),ql=a(c,"LI",{});var pm=l(ql);b4=a(pm,"STRONG",{});var T$=l(b4);bs=a(T$,"A",{href:!0});var _$=l(bs);jR=o(_$,"Wav2Vec2"),_$.forEach(t),T$.forEach(t),$R=o(pm," (from Facebook AI) released with the paper "),di=a(pm,"A",{href:!0,rel:!0});var m$=l(di);eL=o(m$,"wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations"),m$.forEach(t),tL=o(pm," by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli."),pm.forEach(t),rL=s(c),Ql=a(c,"LI",{});var Dm=l(Ql);S4=a(Dm,"STRONG",{});var p$=l(S4);si=a(p$,"A",{href:!0,rel:!0});var D$=l(si);aL=o(D$,"Wav2Vec2Phoneme"),D$.forEach(t),p$.forEach(t),lL=o(Dm," (from Facebook AI) released with the paper "),hi=a(Dm,"A",{href:!0,rel:!0});var y$=l(hi);nL=o(y$,"Simple and Effective Zero-shot Cross-lingual Phoneme Recognition"),y$.forEach(t),oL=o(Dm," by Qiantong Xu, Alexei Baevski, Michael Auli."),Dm.forEach(t),iL=s(c),jl=a(c,"LI",{});var ym=l(jl);w4=a(ym,"STRONG",{});var A$=l(w4);ci=a(A$,"A",{href:!0,rel:!0});var R$=l(ci);dL=o(R$,"XGLM"),R$.forEach(t),A$.forEach(t),sL=o(ym," (From Facebook AI) released with the paper "),gi=a(ym,"A",{href:!0,rel:!0});var L$=l(gi);hL=o(L$,"Few-shot Learning with Multilingual Language Models"),L$.forEach(t),cL=o(ym," by Xi Victoria Lin, Todor Mihaylov, Mikel Artetxe, Tianlu Wang, Shuohui Chen, Daniel Simig, Myle Ott, Naman Goyal, Shruti Bhosale, Jingfei Du, Ramakanth Pasunuru, Sam Shleifer, Punit Singh Koura, Vishrav Chaudhary, Brian O\u2019Horo, Jeff Wang, Luke Zettlemoyer, Zornitsa Kozareva, Mona Diab, Veselin Stoyanov, Xian Li."),ym.forEach(t),gL=s(c),$l=a(c,"LI",{});var Am=l($l);M4=a(Am,"STRONG",{});var b$=l(M4);Ss=a(b$,"A",{href:!0});var S$=l(Ss);fL=o(S$,"XLM"),S$.forEach(t),b$.forEach(t),uL=o(Am," (from Facebook) released together with the paper "),fi=a(Am,"A",{href:!0,rel:!0});var w$=l(fi);vL=o(w$,"Cross-lingual Language Model Pretraining"),w$.forEach(t),EL=o(Am," by Guillaume Lample and Alexis Conneau."),Am.forEach(t),TL=s(c),en=a(c,"LI",{});var Rm=l(en);P4=a(Rm,"STRONG",{});var 
M$=l(P4);ws=a(M$,"A",{href:!0});var P$=l(ws);_L=o(P$,"XLM-ProphetNet"),P$.forEach(t),M$.forEach(t),mL=o(Rm," (from Microsoft Research) released with the paper "),ui=a(Rm,"A",{href:!0,rel:!0});var G$=l(ui);pL=o(G$,"ProphetNet: Predicting Future N-gram for Sequence-to-Sequence Pre-training"),G$.forEach(t),DL=o(Rm," by Yu Yan, Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang and Ming Zhou."),Rm.forEach(t),yL=s(c),na=a(c,"LI",{});var Si=l(na);G4=a(Si,"STRONG",{});var k$=l(G4);Ms=a(k$,"A",{href:!0});var B$=l(Ms);AL=o(B$,"XLM-RoBERTa"),B$.forEach(t),k$.forEach(t),RL=o(Si," (from Facebook AI), released together with the paper "),vi=a(Si,"A",{href:!0,rel:!0});var C$=l(vi);LL=o(C$,"Unsupervised Cross-lingual Representation Learning at Scale"),C$.forEach(t),bL=o(Si," by Alexis Conneau"),k4=a(Si,"EM",{});var N$=l(k4);SL=o(N$,", Kartikay Khandelwal"),N$.forEach(t),wL=o(Si,", Naman Goyal, Vishrav Chaudhary, Guillaume Wenzek, Francisco Guzm\xE1n, Edouard Grave, Myle Ott, Luke Zettlemoyer and Veselin Stoyanov."),Si.forEach(t),ML=s(c),tn=a(c,"LI",{});var Lm=l(tn);B4=a(Lm,"STRONG",{});var I$=l(B4);Ps=a(I$,"A",{href:!0});var x$=l(Ps);PL=o(x$,"XLM-RoBERTa-XL"),x$.forEach(t),I$.forEach(t),GL=o(Lm," (from Facebook AI), released together with the paper "),Ei=a(Lm,"A",{href:!0,rel:!0});var O$=l(Ei);kL=o(O$,"Larger-Scale Transformers for Multilingual Masked Language Modeling"),O$.forEach(t),BL=o(Lm," by Naman Goyal, Jingfei Du, Myle Ott, Giri Anantharaman, Alexis Conneau."),Lm.forEach(t),CL=s(c),oa=a(c,"LI",{});var wi=l(oa);C4=a(wi,"STRONG",{});var F$=l(C4);Gs=a(F$,"A",{href:!0});var W$=l(Gs);NL=o(W$,"XLNet"),W$.forEach(t),F$.forEach(t),IL=o(wi," (from Google/CMU) released with the paper "),Ti=a(wi,"A",{href:!0,rel:!0});var Y$=l(Ti);xL=o(Y$,"\u200BXLNet: Generalized Autoregressive Pretraining for Language Understanding"),Y$.forEach(t),OL=o(wi," by Zhilin Yang"),N4=a(wi,"EM",{});var J$=l(N4);FL=o(J$,", Zihang Dai"),J$.forEach(t),WL=o(wi,", Yiming Yang, Jaime Carbonell, Ruslan Salakhutdinov, Quoc V. Le."),wi.forEach(t),YL=s(c),rn=a(c,"LI",{});var bm=l(rn);I4=a(bm,"STRONG",{});var Z$=l(I4);ks=a(Z$,"A",{href:!0});var V$=l(ks);JL=o(V$,"XLSR-Wav2Vec2"),V$.forEach(t),Z$.forEach(t),ZL=o(bm," (from Facebook AI) released with the paper "),_i=a(bm,"A",{href:!0,rel:!0});var K$=l(_i);VL=o(K$,"Unsupervised Cross-Lingual Representation Learning For Speech Recognition"),K$.forEach(t),KL=o(bm," by Alexis Conneau, Alexei Baevski, Ronan Collobert, Abdelrahman Mohamed, Michael Auli."),bm.forEach(t),XL=s(c),an=a(c,"LI",{});var Sm=l(an);x4=a(Sm,"STRONG",{});var X$=l(x4);mi=a(X$,"A",{href:!0,rel:!0});var z$=l(mi);zL=o(z$,"XLS-R"),z$.forEach(t),X$.forEach(t),HL=o(Sm," (from Facebook AI) released with the paper "),pi=a(Sm,"A",{href:!0,rel:!0});var H$=l(pi);UL=o(H$,"XLS-R: Self-supervised Cross-lingual Speech Representation Learning at Scale"),H$.forEach(t),qL=o(Sm," by Arun Babu, Changhan Wang, Andros Tjandra, Kushal Lakhotia, Qiantong Xu, Naman Goyal, Kritika Singh, Patrick von Platen, Yatharth Saraf, Juan Pino, Alexei Baevski, Alexis Conneau, Michael Auli."),Sm.forEach(t),QL=s(c),ln=a(c,"LI",{});var wm=l(ln);O4=a(wm,"STRONG",{});var U$=l(O4);Bs=a(U$,"A",{href:!0});var q$=l(Bs);jL=o(q$,"YOSO"),q$.forEach(t),U$.forEach(t),$L=o(wm," (from the University of Wisconsin - Madison) released with the paper "),Di=a(wm,"A",{href:!0,rel:!0});var Q$=l(Di);eb=o(Q$,"You Only Sample (Almost) Once: Linear Cost Self-Attention Via Bernoulli Sampling"),Q$.forEach(t),tb=o(wm," by Zhanpeng Zeng, Yunyang Xiong, Sathya N. 
Ravi, Shailesh Acharya, Glenn Fung, Vikas Singh."),wm.forEach(t),c.forEach(t),qm=s(u),fa=a(u,"H3",{class:!0});var n0=l(fa);nn=a(n0,"A",{id:!0,class:!0,href:!0});var j$=l(nn);F4=a(j$,"SPAN",{});var $$=l(F4);Pm(yi.$$.fragment,$$),$$.forEach(t),j$.forEach(t),rb=s(n0),W4=a(n0,"SPAN",{});var eee=l(W4);ab=o(eee,"Supported frameworks"),eee.forEach(t),n0.forEach(t),Qm=s(u),Cs=a(u,"P",{});var tee=l(Cs);lb=o(tee,`The table below represents the current support in the library for each of those models, whether they have a Python tokenizer (called \u201Cslow\u201D), a \u201Cfast\u201D tokenizer backed by the \u{1F917} Tokenizers library, and whether they have support in Jax (via Flax), PyTorch, and/or TensorFlow.`),tee.forEach(t),jm=s(u),on=a(u,"TABLE",{});var o0=l(on);Y4=a(o0,"THEAD",{});var ree=l(Y4);T=a(ree,"TR",{});var ht=l(T);Ns=a(ht,"TH",{align:!0});var aee=l(Ns);nb=o(aee,"Model"),aee.forEach(t),ob=s(ht),Is=a(ht,"TH",{align:!0});var lee=l(Is);ib=o(lee,"Tokenizer slow"),lee.forEach(t),db=s(ht),xs=a(ht,"TH",{align:!0});var nee=l(xs);sb=o(nee,"Tokenizer fast"),nee.forEach(t),hb=s(ht),Os=a(ht,"TH",{align:!0});var oee=l(Os);cb=o(oee,"PyTorch support"),oee.forEach(t),gb=s(ht),Fs=a(ht,"TH",{align:!0});var iee=l(Fs);fb=o(iee,"TensorFlow support"),iee.forEach(t),ub=s(ht),Ws=a(ht,"TH",{align:!0});var dee=l(Ws);vb=o(dee,"Flax Support"),dee.forEach(t),ht.forEach(t),ree.forEach(t),Eb=s(o0),g=a(o0,"TBODY",{});var f=l(g);_=a(f,"TR",{});var ct=l(_);Ys=a(ct,"TD",{align:!0});var see=l(Ys);Tb=o(see,"ALBERT"),see.forEach(t),_b=s(ct),Js=a(ct,"TD",{align:!0});var hee=l(Js);mb=o(hee,"\u2705"),hee.forEach(t),pb=s(ct),Zs=a(ct,"TD",{align:!0});var cee=l(Zs);Db=o(cee,"\u2705"),cee.forEach(t),yb=s(ct),Vs=a(ct,"TD",{align:!0});var gee=l(Vs);Ab=o(gee,"\u2705"),gee.forEach(t),Rb=s(ct),Ks=a(ct,"TD",{align:!0});var fee=l(Ks);Lb=o(fee,"\u2705"),fee.forEach(t),bb=s(ct),Xs=a(ct,"TD",{align:!0});var uee=l(Xs);Sb=o(uee,"\u2705"),uee.forEach(t),ct.forEach(t),wb=s(f),m=a(f,"TR",{});var gt=l(m);zs=a(gt,"TD",{align:!0});var vee=l(zs);Mb=o(vee,"BART"),vee.forEach(t),Pb=s(gt),Hs=a(gt,"TD",{align:!0});var Eee=l(Hs);Gb=o(Eee,"\u2705"),Eee.forEach(t),kb=s(gt),Us=a(gt,"TD",{align:!0});var Tee=l(Us);Bb=o(Tee,"\u2705"),Tee.forEach(t),Cb=s(gt),qs=a(gt,"TD",{align:!0});var _ee=l(qs);Nb=o(_ee,"\u2705"),_ee.forEach(t),Ib=s(gt),Qs=a(gt,"TD",{align:!0});var mee=l(Qs);xb=o(mee,"\u2705"),mee.forEach(t),Ob=s(gt),js=a(gt,"TD",{align:!0});var pee=l(js);Fb=o(pee,"\u2705"),pee.forEach(t),gt.forEach(t),Wb=s(f),p=a(f,"TR",{});var ft=l(p);$s=a(ft,"TD",{align:!0});var Dee=l($s);Yb=o(Dee,"BEiT"),Dee.forEach(t),Jb=s(ft),eh=a(ft,"TD",{align:!0});var yee=l(eh);Zb=o(yee,"\u274C"),yee.forEach(t),Vb=s(ft),th=a(ft,"TD",{align:!0});var Aee=l(th);Kb=o(Aee,"\u274C"),Aee.forEach(t),Xb=s(ft),rh=a(ft,"TD",{align:!0});var Ree=l(rh);zb=o(Ree,"\u2705"),Ree.forEach(t),Hb=s(ft),ah=a(ft,"TD",{align:!0});var Lee=l(ah);Ub=o(Lee,"\u274C"),Lee.forEach(t),qb=s(ft),lh=a(ft,"TD",{align:!0});var bee=l(lh);Qb=o(bee,"\u2705"),bee.forEach(t),ft.forEach(t),jb=s(f),D=a(f,"TR",{});var ut=l(D);nh=a(ut,"TD",{align:!0});var See=l(nh);$b=o(See,"BERT"),See.forEach(t),eS=s(ut),oh=a(ut,"TD",{align:!0});var wee=l(oh);tS=o(wee,"\u2705"),wee.forEach(t),rS=s(ut),ih=a(ut,"TD",{align:!0});var Mee=l(ih);aS=o(Mee,"\u2705"),Mee.forEach(t),lS=s(ut),dh=a(ut,"TD",{align:!0});var Pee=l(dh);nS=o(Pee,"\u2705"),Pee.forEach(t),oS=s(ut),sh=a(ut,"TD",{align:!0});var Gee=l(sh);iS=o(Gee,"\u2705"),Gee.forEach(t),dS=s(ut),hh=a(ut,"TD",{align:!0});var 
kee=l(hh);sS=o(kee,"\u2705"),kee.forEach(t),ut.forEach(t),hS=s(f),y=a(f,"TR",{});var vt=l(y);ch=a(vt,"TD",{align:!0});var Bee=l(ch);cS=o(Bee,"Bert Generation"),Bee.forEach(t),gS=s(vt),gh=a(vt,"TD",{align:!0});var Cee=l(gh);fS=o(Cee,"\u2705"),Cee.forEach(t),uS=s(vt),fh=a(vt,"TD",{align:!0});var Nee=l(fh);vS=o(Nee,"\u274C"),Nee.forEach(t),ES=s(vt),uh=a(vt,"TD",{align:!0});var Iee=l(uh);TS=o(Iee,"\u2705"),Iee.forEach(t),_S=s(vt),vh=a(vt,"TD",{align:!0});var xee=l(vh);mS=o(xee,"\u274C"),xee.forEach(t),pS=s(vt),Eh=a(vt,"TD",{align:!0});var Oee=l(Eh);DS=o(Oee,"\u274C"),Oee.forEach(t),vt.forEach(t),yS=s(f),A=a(f,"TR",{});var Et=l(A);Th=a(Et,"TD",{align:!0});var Fee=l(Th);AS=o(Fee,"BigBird"),Fee.forEach(t),RS=s(Et),_h=a(Et,"TD",{align:!0});var Wee=l(_h);LS=o(Wee,"\u2705"),Wee.forEach(t),bS=s(Et),mh=a(Et,"TD",{align:!0});var Yee=l(mh);SS=o(Yee,"\u2705"),Yee.forEach(t),wS=s(Et),ph=a(Et,"TD",{align:!0});var Jee=l(ph);MS=o(Jee,"\u2705"),Jee.forEach(t),PS=s(Et),Dh=a(Et,"TD",{align:!0});var Zee=l(Dh);GS=o(Zee,"\u274C"),Zee.forEach(t),kS=s(Et),yh=a(Et,"TD",{align:!0});var Vee=l(yh);BS=o(Vee,"\u2705"),Vee.forEach(t),Et.forEach(t),CS=s(f),R=a(f,"TR",{});var Tt=l(R);Ah=a(Tt,"TD",{align:!0});var Kee=l(Ah);NS=o(Kee,"BigBirdPegasus"),Kee.forEach(t),IS=s(Tt),Rh=a(Tt,"TD",{align:!0});var Xee=l(Rh);xS=o(Xee,"\u274C"),Xee.forEach(t),OS=s(Tt),Lh=a(Tt,"TD",{align:!0});var zee=l(Lh);FS=o(zee,"\u274C"),zee.forEach(t),WS=s(Tt),bh=a(Tt,"TD",{align:!0});var Hee=l(bh);YS=o(Hee,"\u2705"),Hee.forEach(t),JS=s(Tt),Sh=a(Tt,"TD",{align:!0});var Uee=l(Sh);ZS=o(Uee,"\u274C"),Uee.forEach(t),VS=s(Tt),wh=a(Tt,"TD",{align:!0});var qee=l(wh);KS=o(qee,"\u274C"),qee.forEach(t),Tt.forEach(t),XS=s(f),L=a(f,"TR",{});var _t=l(L);Mh=a(_t,"TD",{align:!0});var Qee=l(Mh);zS=o(Qee,"Blenderbot"),Qee.forEach(t),HS=s(_t),Ph=a(_t,"TD",{align:!0});var jee=l(Ph);US=o(jee,"\u2705"),jee.forEach(t),qS=s(_t),Gh=a(_t,"TD",{align:!0});var $ee=l(Gh);QS=o($ee,"\u2705"),$ee.forEach(t),jS=s(_t),kh=a(_t,"TD",{align:!0});var ete=l(kh);$S=o(ete,"\u2705"),ete.forEach(t),ew=s(_t),Bh=a(_t,"TD",{align:!0});var tte=l(Bh);tw=o(tte,"\u2705"),tte.forEach(t),rw=s(_t),Ch=a(_t,"TD",{align:!0});var rte=l(Ch);aw=o(rte,"\u2705"),rte.forEach(t),_t.forEach(t),lw=s(f),b=a(f,"TR",{});var mt=l(b);Nh=a(mt,"TD",{align:!0});var ate=l(Nh);nw=o(ate,"BlenderbotSmall"),ate.forEach(t),ow=s(mt),Ih=a(mt,"TD",{align:!0});var lte=l(Ih);iw=o(lte,"\u2705"),lte.forEach(t),dw=s(mt),xh=a(mt,"TD",{align:!0});var nte=l(xh);sw=o(nte,"\u2705"),nte.forEach(t),hw=s(mt),Oh=a(mt,"TD",{align:!0});var ote=l(Oh);cw=o(ote,"\u2705"),ote.forEach(t),gw=s(mt),Fh=a(mt,"TD",{align:!0});var ite=l(Fh);fw=o(ite,"\u2705"),ite.forEach(t),uw=s(mt),Wh=a(mt,"TD",{align:!0});var dte=l(Wh);vw=o(dte,"\u2705"),dte.forEach(t),mt.forEach(t),Ew=s(f),S=a(f,"TR",{});var pt=l(S);Yh=a(pt,"TD",{align:!0});var ste=l(Yh);Tw=o(ste,"CamemBERT"),ste.forEach(t),_w=s(pt),Jh=a(pt,"TD",{align:!0});var hte=l(Jh);mw=o(hte,"\u2705"),hte.forEach(t),pw=s(pt),Zh=a(pt,"TD",{align:!0});var cte=l(Zh);Dw=o(cte,"\u2705"),cte.forEach(t),yw=s(pt),Vh=a(pt,"TD",{align:!0});var gte=l(Vh);Aw=o(gte,"\u2705"),gte.forEach(t),Rw=s(pt),Kh=a(pt,"TD",{align:!0});var fte=l(Kh);Lw=o(fte,"\u2705"),fte.forEach(t),bw=s(pt),Xh=a(pt,"TD",{align:!0});var ute=l(Xh);Sw=o(ute,"\u274C"),ute.forEach(t),pt.forEach(t),ww=s(f),w=a(f,"TR",{});var Dt=l(w);zh=a(Dt,"TD",{align:!0});var vte=l(zh);Mw=o(vte,"Canine"),vte.forEach(t),Pw=s(Dt),Hh=a(Dt,"TD",{align:!0});var Ete=l(Hh);Gw=o(Ete,"\u2705"),Ete.forEach(t),kw=s(Dt),Uh=a(Dt,"TD",{align:!0});var 
Tte=l(Uh);Bw=o(Tte,"\u274C"),Tte.forEach(t),Cw=s(Dt),qh=a(Dt,"TD",{align:!0});var _te=l(qh);Nw=o(_te,"\u2705"),_te.forEach(t),Iw=s(Dt),Qh=a(Dt,"TD",{align:!0});var mte=l(Qh);xw=o(mte,"\u274C"),mte.forEach(t),Ow=s(Dt),jh=a(Dt,"TD",{align:!0});var pte=l(jh);Fw=o(pte,"\u274C"),pte.forEach(t),Dt.forEach(t),Ww=s(f),M=a(f,"TR",{});var yt=l(M);$h=a(yt,"TD",{align:!0});var Dte=l($h);Yw=o(Dte,"CLIP"),Dte.forEach(t),Jw=s(yt),ec=a(yt,"TD",{align:!0});var yte=l(ec);Zw=o(yte,"\u2705"),yte.forEach(t),Vw=s(yt),tc=a(yt,"TD",{align:!0});var Ate=l(tc);Kw=o(Ate,"\u2705"),Ate.forEach(t),Xw=s(yt),rc=a(yt,"TD",{align:!0});var Rte=l(rc);zw=o(Rte,"\u2705"),Rte.forEach(t),Hw=s(yt),ac=a(yt,"TD",{align:!0});var Lte=l(ac);Uw=o(Lte,"\u2705"),Lte.forEach(t),qw=s(yt),lc=a(yt,"TD",{align:!0});var bte=l(lc);Qw=o(bte,"\u2705"),bte.forEach(t),yt.forEach(t),jw=s(f),P=a(f,"TR",{});var At=l(P);nc=a(At,"TD",{align:!0});var Ste=l(nc);$w=o(Ste,"ConvBERT"),Ste.forEach(t),eM=s(At),oc=a(At,"TD",{align:!0});var wte=l(oc);tM=o(wte,"\u2705"),wte.forEach(t),rM=s(At),ic=a(At,"TD",{align:!0});var Mte=l(ic);aM=o(Mte,"\u2705"),Mte.forEach(t),lM=s(At),dc=a(At,"TD",{align:!0});var Pte=l(dc);nM=o(Pte,"\u2705"),Pte.forEach(t),oM=s(At),sc=a(At,"TD",{align:!0});var Gte=l(sc);iM=o(Gte,"\u2705"),Gte.forEach(t),dM=s(At),hc=a(At,"TD",{align:!0});var kte=l(hc);sM=o(kte,"\u274C"),kte.forEach(t),At.forEach(t),hM=s(f),G=a(f,"TR",{});var Rt=l(G);cc=a(Rt,"TD",{align:!0});var Bte=l(cc);cM=o(Bte,"ConvNext"),Bte.forEach(t),gM=s(Rt),gc=a(Rt,"TD",{align:!0});var Cte=l(gc);fM=o(Cte,"\u274C"),Cte.forEach(t),uM=s(Rt),fc=a(Rt,"TD",{align:!0});var Nte=l(fc);vM=o(Nte,"\u274C"),Nte.forEach(t),EM=s(Rt),uc=a(Rt,"TD",{align:!0});var Ite=l(uc);TM=o(Ite,"\u2705"),Ite.forEach(t),_M=s(Rt),vc=a(Rt,"TD",{align:!0});var xte=l(vc);mM=o(xte,"\u2705"),xte.forEach(t),pM=s(Rt),Ec=a(Rt,"TD",{align:!0});var Ote=l(Ec);DM=o(Ote,"\u274C"),Ote.forEach(t),Rt.forEach(t),yM=s(f),k=a(f,"TR",{});var Lt=l(k);Tc=a(Lt,"TD",{align:!0});var Fte=l(Tc);AM=o(Fte,"CTRL"),Fte.forEach(t),RM=s(Lt),_c=a(Lt,"TD",{align:!0});var Wte=l(_c);LM=o(Wte,"\u2705"),Wte.forEach(t),bM=s(Lt),mc=a(Lt,"TD",{align:!0});var Yte=l(mc);SM=o(Yte,"\u274C"),Yte.forEach(t),wM=s(Lt),pc=a(Lt,"TD",{align:!0});var Jte=l(pc);MM=o(Jte,"\u2705"),Jte.forEach(t),PM=s(Lt),Dc=a(Lt,"TD",{align:!0});var Zte=l(Dc);GM=o(Zte,"\u2705"),Zte.forEach(t),kM=s(Lt),yc=a(Lt,"TD",{align:!0});var Vte=l(yc);BM=o(Vte,"\u274C"),Vte.forEach(t),Lt.forEach(t),CM=s(f),B=a(f,"TR",{});var bt=l(B);Ac=a(bt,"TD",{align:!0});var Kte=l(Ac);NM=o(Kte,"Data2VecAudio"),Kte.forEach(t),IM=s(bt),Rc=a(bt,"TD",{align:!0});var Xte=l(Rc);xM=o(Xte,"\u274C"),Xte.forEach(t),OM=s(bt),Lc=a(bt,"TD",{align:!0});var zte=l(Lc);FM=o(zte,"\u274C"),zte.forEach(t),WM=s(bt),bc=a(bt,"TD",{align:!0});var Hte=l(bc);YM=o(Hte,"\u2705"),Hte.forEach(t),JM=s(bt),Sc=a(bt,"TD",{align:!0});var Ute=l(Sc);ZM=o(Ute,"\u274C"),Ute.forEach(t),VM=s(bt),wc=a(bt,"TD",{align:!0});var qte=l(wc);KM=o(qte,"\u274C"),qte.forEach(t),bt.forEach(t),XM=s(f),C=a(f,"TR",{});var St=l(C);Mc=a(St,"TD",{align:!0});var Qte=l(Mc);zM=o(Qte,"Data2VecText"),Qte.forEach(t),HM=s(St),Pc=a(St,"TD",{align:!0});var jte=l(Pc);UM=o(jte,"\u274C"),jte.forEach(t),qM=s(St),Gc=a(St,"TD",{align:!0});var $te=l(Gc);QM=o($te,"\u274C"),$te.forEach(t),jM=s(St),kc=a(St,"TD",{align:!0});var ere=l(kc);$M=o(ere,"\u2705"),ere.forEach(t),eP=s(St),Bc=a(St,"TD",{align:!0});var tre=l(Bc);tP=o(tre,"\u274C"),tre.forEach(t),rP=s(St),Cc=a(St,"TD",{align:!0});var rre=l(Cc);aP=o(rre,"\u274C"),rre.forEach(t),St.forEach(t),lP=s(f),N=a(f,"TR",{});var 
wt=l(N);Nc=a(wt,"TD",{align:!0});var are=l(Nc);nP=o(are,"DeBERTa"),are.forEach(t),oP=s(wt),Ic=a(wt,"TD",{align:!0});var lre=l(Ic);iP=o(lre,"\u2705"),lre.forEach(t),dP=s(wt),xc=a(wt,"TD",{align:!0});var nre=l(xc);sP=o(nre,"\u2705"),nre.forEach(t),hP=s(wt),Oc=a(wt,"TD",{align:!0});var ore=l(Oc);cP=o(ore,"\u2705"),ore.forEach(t),gP=s(wt),Fc=a(wt,"TD",{align:!0});var ire=l(Fc);fP=o(ire,"\u2705"),ire.forEach(t),uP=s(wt),Wc=a(wt,"TD",{align:!0});var dre=l(Wc);vP=o(dre,"\u274C"),dre.forEach(t),wt.forEach(t),EP=s(f),I=a(f,"TR",{});var Mt=l(I);Yc=a(Mt,"TD",{align:!0});var sre=l(Yc);TP=o(sre,"DeBERTa-v2"),sre.forEach(t),_P=s(Mt),Jc=a(Mt,"TD",{align:!0});var hre=l(Jc);mP=o(hre,"\u2705"),hre.forEach(t),pP=s(Mt),Zc=a(Mt,"TD",{align:!0});var cre=l(Zc);DP=o(cre,"\u274C"),cre.forEach(t),yP=s(Mt),Vc=a(Mt,"TD",{align:!0});var gre=l(Vc);AP=o(gre,"\u2705"),gre.forEach(t),RP=s(Mt),Kc=a(Mt,"TD",{align:!0});var fre=l(Kc);LP=o(fre,"\u2705"),fre.forEach(t),bP=s(Mt),Xc=a(Mt,"TD",{align:!0});var ure=l(Xc);SP=o(ure,"\u274C"),ure.forEach(t),Mt.forEach(t),wP=s(f),x=a(f,"TR",{});var Pt=l(x);zc=a(Pt,"TD",{align:!0});var vre=l(zc);MP=o(vre,"DeiT"),vre.forEach(t),PP=s(Pt),Hc=a(Pt,"TD",{align:!0});var Ere=l(Hc);GP=o(Ere,"\u274C"),Ere.forEach(t),kP=s(Pt),Uc=a(Pt,"TD",{align:!0});var Tre=l(Uc);BP=o(Tre,"\u274C"),Tre.forEach(t),CP=s(Pt),qc=a(Pt,"TD",{align:!0});var _re=l(qc);NP=o(_re,"\u2705"),_re.forEach(t),IP=s(Pt),Qc=a(Pt,"TD",{align:!0});var mre=l(Qc);xP=o(mre,"\u274C"),mre.forEach(t),OP=s(Pt),jc=a(Pt,"TD",{align:!0});var pre=l(jc);FP=o(pre,"\u274C"),pre.forEach(t),Pt.forEach(t),WP=s(f),O=a(f,"TR",{});var Gt=l(O);$c=a(Gt,"TD",{align:!0});var Dre=l($c);YP=o(Dre,"DETR"),Dre.forEach(t),JP=s(Gt),eg=a(Gt,"TD",{align:!0});var yre=l(eg);ZP=o(yre,"\u274C"),yre.forEach(t),VP=s(Gt),tg=a(Gt,"TD",{align:!0});var Are=l(tg);KP=o(Are,"\u274C"),Are.forEach(t),XP=s(Gt),rg=a(Gt,"TD",{align:!0});var Rre=l(rg);zP=o(Rre,"\u2705"),Rre.forEach(t),HP=s(Gt),ag=a(Gt,"TD",{align:!0});var Lre=l(ag);UP=o(Lre,"\u274C"),Lre.forEach(t),qP=s(Gt),lg=a(Gt,"TD",{align:!0});var bre=l(lg);QP=o(bre,"\u274C"),bre.forEach(t),Gt.forEach(t),jP=s(f),F=a(f,"TR",{});var kt=l(F);ng=a(kt,"TD",{align:!0});var Sre=l(ng);$P=o(Sre,"DistilBERT"),Sre.forEach(t),eG=s(kt),og=a(kt,"TD",{align:!0});var wre=l(og);tG=o(wre,"\u2705"),wre.forEach(t),rG=s(kt),ig=a(kt,"TD",{align:!0});var Mre=l(ig);aG=o(Mre,"\u2705"),Mre.forEach(t),lG=s(kt),dg=a(kt,"TD",{align:!0});var Pre=l(dg);nG=o(Pre,"\u2705"),Pre.forEach(t),oG=s(kt),sg=a(kt,"TD",{align:!0});var Gre=l(sg);iG=o(Gre,"\u2705"),Gre.forEach(t),dG=s(kt),hg=a(kt,"TD",{align:!0});var kre=l(hg);sG=o(kre,"\u2705"),kre.forEach(t),kt.forEach(t),hG=s(f),W=a(f,"TR",{});var Bt=l(W);cg=a(Bt,"TD",{align:!0});var Bre=l(cg);cG=o(Bre,"DPR"),Bre.forEach(t),gG=s(Bt),gg=a(Bt,"TD",{align:!0});var Cre=l(gg);fG=o(Cre,"\u2705"),Cre.forEach(t),uG=s(Bt),fg=a(Bt,"TD",{align:!0});var Nre=l(fg);vG=o(Nre,"\u2705"),Nre.forEach(t),EG=s(Bt),ug=a(Bt,"TD",{align:!0});var Ire=l(ug);TG=o(Ire,"\u2705"),Ire.forEach(t),_G=s(Bt),vg=a(Bt,"TD",{align:!0});var xre=l(vg);mG=o(xre,"\u2705"),xre.forEach(t),pG=s(Bt),Eg=a(Bt,"TD",{align:!0});var Ore=l(Eg);DG=o(Ore,"\u274C"),Ore.forEach(t),Bt.forEach(t),yG=s(f),Y=a(f,"TR",{});var Ct=l(Y);Tg=a(Ct,"TD",{align:!0});var Fre=l(Tg);AG=o(Fre,"ELECTRA"),Fre.forEach(t),RG=s(Ct),_g=a(Ct,"TD",{align:!0});var Wre=l(_g);LG=o(Wre,"\u2705"),Wre.forEach(t),bG=s(Ct),mg=a(Ct,"TD",{align:!0});var Yre=l(mg);SG=o(Yre,"\u2705"),Yre.forEach(t),wG=s(Ct),pg=a(Ct,"TD",{align:!0});var 
Jre=l(pg);MG=o(Jre,"\u2705"),Jre.forEach(t),PG=s(Ct),Dg=a(Ct,"TD",{align:!0});var Zre=l(Dg);GG=o(Zre,"\u2705"),Zre.forEach(t),kG=s(Ct),yg=a(Ct,"TD",{align:!0});var Vre=l(yg);BG=o(Vre,"\u2705"),Vre.forEach(t),Ct.forEach(t),CG=s(f),J=a(f,"TR",{});var Nt=l(J);Ag=a(Nt,"TD",{align:!0});var Kre=l(Ag);NG=o(Kre,"Encoder decoder"),Kre.forEach(t),IG=s(Nt),Rg=a(Nt,"TD",{align:!0});var Xre=l(Rg);xG=o(Xre,"\u274C"),Xre.forEach(t),OG=s(Nt),Lg=a(Nt,"TD",{align:!0});var zre=l(Lg);FG=o(zre,"\u274C"),zre.forEach(t),WG=s(Nt),bg=a(Nt,"TD",{align:!0});var Hre=l(bg);YG=o(Hre,"\u2705"),Hre.forEach(t),JG=s(Nt),Sg=a(Nt,"TD",{align:!0});var Ure=l(Sg);ZG=o(Ure,"\u2705"),Ure.forEach(t),VG=s(Nt),wg=a(Nt,"TD",{align:!0});var qre=l(wg);KG=o(qre,"\u2705"),qre.forEach(t),Nt.forEach(t),XG=s(f),Z=a(f,"TR",{});var It=l(Z);Mg=a(It,"TD",{align:!0});var Qre=l(Mg);zG=o(Qre,"FairSeq Machine-Translation"),Qre.forEach(t),HG=s(It),Pg=a(It,"TD",{align:!0});var jre=l(Pg);UG=o(jre,"\u2705"),jre.forEach(t),qG=s(It),Gg=a(It,"TD",{align:!0});var $re=l(Gg);QG=o($re,"\u274C"),$re.forEach(t),jG=s(It),kg=a(It,"TD",{align:!0});var eae=l(kg);$G=o(eae,"\u2705"),eae.forEach(t),ek=s(It),Bg=a(It,"TD",{align:!0});var tae=l(Bg);tk=o(tae,"\u274C"),tae.forEach(t),rk=s(It),Cg=a(It,"TD",{align:!0});var rae=l(Cg);ak=o(rae,"\u274C"),rae.forEach(t),It.forEach(t),lk=s(f),V=a(f,"TR",{});var xt=l(V);Ng=a(xt,"TD",{align:!0});var aae=l(Ng);nk=o(aae,"FlauBERT"),aae.forEach(t),ok=s(xt),Ig=a(xt,"TD",{align:!0});var lae=l(Ig);ik=o(lae,"\u2705"),lae.forEach(t),dk=s(xt),xg=a(xt,"TD",{align:!0});var nae=l(xg);sk=o(nae,"\u274C"),nae.forEach(t),hk=s(xt),Og=a(xt,"TD",{align:!0});var oae=l(Og);ck=o(oae,"\u2705"),oae.forEach(t),gk=s(xt),Fg=a(xt,"TD",{align:!0});var iae=l(Fg);fk=o(iae,"\u2705"),iae.forEach(t),uk=s(xt),Wg=a(xt,"TD",{align:!0});var dae=l(Wg);vk=o(dae,"\u274C"),dae.forEach(t),xt.forEach(t),Ek=s(f),K=a(f,"TR",{});var Ot=l(K);Yg=a(Ot,"TD",{align:!0});var sae=l(Yg);Tk=o(sae,"FNet"),sae.forEach(t),_k=s(Ot),Jg=a(Ot,"TD",{align:!0});var hae=l(Jg);mk=o(hae,"\u2705"),hae.forEach(t),pk=s(Ot),Zg=a(Ot,"TD",{align:!0});var cae=l(Zg);Dk=o(cae,"\u2705"),cae.forEach(t),yk=s(Ot),Vg=a(Ot,"TD",{align:!0});var gae=l(Vg);Ak=o(gae,"\u2705"),gae.forEach(t),Rk=s(Ot),Kg=a(Ot,"TD",{align:!0});var fae=l(Kg);Lk=o(fae,"\u274C"),fae.forEach(t),bk=s(Ot),Xg=a(Ot,"TD",{align:!0});var uae=l(Xg);Sk=o(uae,"\u274C"),uae.forEach(t),Ot.forEach(t),wk=s(f),X=a(f,"TR",{});var Ft=l(X);zg=a(Ft,"TD",{align:!0});var vae=l(zg);Mk=o(vae,"Funnel Transformer"),vae.forEach(t),Pk=s(Ft),Hg=a(Ft,"TD",{align:!0});var Eae=l(Hg);Gk=o(Eae,"\u2705"),Eae.forEach(t),kk=s(Ft),Ug=a(Ft,"TD",{align:!0});var Tae=l(Ug);Bk=o(Tae,"\u2705"),Tae.forEach(t),Ck=s(Ft),qg=a(Ft,"TD",{align:!0});var _ae=l(qg);Nk=o(_ae,"\u2705"),_ae.forEach(t),Ik=s(Ft),Qg=a(Ft,"TD",{align:!0});var mae=l(Qg);xk=o(mae,"\u2705"),mae.forEach(t),Ok=s(Ft),jg=a(Ft,"TD",{align:!0});var pae=l(jg);Fk=o(pae,"\u274C"),pae.forEach(t),Ft.forEach(t),Wk=s(f),z=a(f,"TR",{});var Wt=l(z);$g=a(Wt,"TD",{align:!0});var Dae=l($g);Yk=o(Dae,"GPT Neo"),Dae.forEach(t),Jk=s(Wt),ef=a(Wt,"TD",{align:!0});var yae=l(ef);Zk=o(yae,"\u274C"),yae.forEach(t),Vk=s(Wt),tf=a(Wt,"TD",{align:!0});var Aae=l(tf);Kk=o(Aae,"\u274C"),Aae.forEach(t),Xk=s(Wt),rf=a(Wt,"TD",{align:!0});var Rae=l(rf);zk=o(Rae,"\u2705"),Rae.forEach(t),Hk=s(Wt),af=a(Wt,"TD",{align:!0});var Lae=l(af);Uk=o(Lae,"\u274C"),Lae.forEach(t),qk=s(Wt),lf=a(Wt,"TD",{align:!0});var bae=l(lf);Qk=o(bae,"\u2705"),bae.forEach(t),Wt.forEach(t),jk=s(f),H=a(f,"TR",{});var Yt=l(H);nf=a(Yt,"TD",{align:!0});var 
Sae=l(nf);$k=o(Sae,"GPT-J"),Sae.forEach(t),eB=s(Yt),of=a(Yt,"TD",{align:!0});var wae=l(of);tB=o(wae,"\u274C"),wae.forEach(t),rB=s(Yt),df=a(Yt,"TD",{align:!0});var Mae=l(df);aB=o(Mae,"\u274C"),Mae.forEach(t),lB=s(Yt),sf=a(Yt,"TD",{align:!0});var Pae=l(sf);nB=o(Pae,"\u2705"),Pae.forEach(t),oB=s(Yt),hf=a(Yt,"TD",{align:!0});var Gae=l(hf);iB=o(Gae,"\u274C"),Gae.forEach(t),dB=s(Yt),cf=a(Yt,"TD",{align:!0});var kae=l(cf);sB=o(kae,"\u2705"),kae.forEach(t),Yt.forEach(t),hB=s(f),U=a(f,"TR",{});var Jt=l(U);gf=a(Jt,"TD",{align:!0});var Bae=l(gf);cB=o(Bae,"Hubert"),Bae.forEach(t),gB=s(Jt),ff=a(Jt,"TD",{align:!0});var Cae=l(ff);fB=o(Cae,"\u274C"),Cae.forEach(t),uB=s(Jt),uf=a(Jt,"TD",{align:!0});var Nae=l(uf);vB=o(Nae,"\u274C"),Nae.forEach(t),EB=s(Jt),vf=a(Jt,"TD",{align:!0});var Iae=l(vf);TB=o(Iae,"\u2705"),Iae.forEach(t),_B=s(Jt),Ef=a(Jt,"TD",{align:!0});var xae=l(Ef);mB=o(xae,"\u2705"),xae.forEach(t),pB=s(Jt),Tf=a(Jt,"TD",{align:!0});var Oae=l(Tf);DB=o(Oae,"\u274C"),Oae.forEach(t),Jt.forEach(t),yB=s(f),q=a(f,"TR",{});var Zt=l(q);_f=a(Zt,"TD",{align:!0});var Fae=l(_f);AB=o(Fae,"I-BERT"),Fae.forEach(t),RB=s(Zt),mf=a(Zt,"TD",{align:!0});var Wae=l(mf);LB=o(Wae,"\u274C"),Wae.forEach(t),bB=s(Zt),pf=a(Zt,"TD",{align:!0});var Yae=l(pf);SB=o(Yae,"\u274C"),Yae.forEach(t),wB=s(Zt),Df=a(Zt,"TD",{align:!0});var Jae=l(Df);MB=o(Jae,"\u2705"),Jae.forEach(t),PB=s(Zt),yf=a(Zt,"TD",{align:!0});var Zae=l(yf);GB=o(Zae,"\u274C"),Zae.forEach(t),kB=s(Zt),Af=a(Zt,"TD",{align:!0});var Vae=l(Af);BB=o(Vae,"\u274C"),Vae.forEach(t),Zt.forEach(t),CB=s(f),Q=a(f,"TR",{});var Vt=l(Q);Rf=a(Vt,"TD",{align:!0});var Kae=l(Rf);NB=o(Kae,"ImageGPT"),Kae.forEach(t),IB=s(Vt),Lf=a(Vt,"TD",{align:!0});var Xae=l(Lf);xB=o(Xae,"\u274C"),Xae.forEach(t),OB=s(Vt),bf=a(Vt,"TD",{align:!0});var zae=l(bf);FB=o(zae,"\u274C"),zae.forEach(t),WB=s(Vt),Sf=a(Vt,"TD",{align:!0});var Hae=l(Sf);YB=o(Hae,"\u2705"),Hae.forEach(t),JB=s(Vt),wf=a(Vt,"TD",{align:!0});var Uae=l(wf);ZB=o(Uae,"\u274C"),Uae.forEach(t),VB=s(Vt),Mf=a(Vt,"TD",{align:!0});var qae=l(Mf);KB=o(qae,"\u274C"),qae.forEach(t),Vt.forEach(t),XB=s(f),j=a(f,"TR",{});var Kt=l(j);Pf=a(Kt,"TD",{align:!0});var Qae=l(Pf);zB=o(Qae,"LayoutLM"),Qae.forEach(t),HB=s(Kt),Gf=a(Kt,"TD",{align:!0});var jae=l(Gf);UB=o(jae,"\u2705"),jae.forEach(t),qB=s(Kt),kf=a(Kt,"TD",{align:!0});var $ae=l(kf);QB=o($ae,"\u2705"),$ae.forEach(t),jB=s(Kt),Bf=a(Kt,"TD",{align:!0});var ele=l(Bf);$B=o(ele,"\u2705"),ele.forEach(t),eC=s(Kt),Cf=a(Kt,"TD",{align:!0});var tle=l(Cf);tC=o(tle,"\u2705"),tle.forEach(t),rC=s(Kt),Nf=a(Kt,"TD",{align:!0});var rle=l(Nf);aC=o(rle,"\u274C"),rle.forEach(t),Kt.forEach(t),lC=s(f),$=a(f,"TR",{});var Xt=l($);If=a(Xt,"TD",{align:!0});var ale=l(If);nC=o(ale,"LayoutLMv2"),ale.forEach(t),oC=s(Xt),xf=a(Xt,"TD",{align:!0});var lle=l(xf);iC=o(lle,"\u2705"),lle.forEach(t),dC=s(Xt),Of=a(Xt,"TD",{align:!0});var nle=l(Of);sC=o(nle,"\u2705"),nle.forEach(t),hC=s(Xt),Ff=a(Xt,"TD",{align:!0});var ole=l(Ff);cC=o(ole,"\u2705"),ole.forEach(t),gC=s(Xt),Wf=a(Xt,"TD",{align:!0});var ile=l(Wf);fC=o(ile,"\u274C"),ile.forEach(t),uC=s(Xt),Yf=a(Xt,"TD",{align:!0});var dle=l(Yf);vC=o(dle,"\u274C"),dle.forEach(t),Xt.forEach(t),EC=s(f),ee=a(f,"TR",{});var zt=l(ee);Jf=a(zt,"TD",{align:!0});var sle=l(Jf);TC=o(sle,"LED"),sle.forEach(t),_C=s(zt),Zf=a(zt,"TD",{align:!0});var hle=l(Zf);mC=o(hle,"\u2705"),hle.forEach(t),pC=s(zt),Vf=a(zt,"TD",{align:!0});var cle=l(Vf);DC=o(cle,"\u2705"),cle.forEach(t),yC=s(zt),Kf=a(zt,"TD",{align:!0});var gle=l(Kf);AC=o(gle,"\u2705"),gle.forEach(t),RC=s(zt),Xf=a(zt,"TD",{align:!0});var 
fle=l(Xf);LC=o(fle,"\u2705"),fle.forEach(t),bC=s(zt),zf=a(zt,"TD",{align:!0});var ule=l(zf);SC=o(ule,"\u274C"),ule.forEach(t),zt.forEach(t),wC=s(f),te=a(f,"TR",{});var Ht=l(te);Hf=a(Ht,"TD",{align:!0});var vle=l(Hf);MC=o(vle,"Longformer"),vle.forEach(t),PC=s(Ht),Uf=a(Ht,"TD",{align:!0});var Ele=l(Uf);GC=o(Ele,"\u2705"),Ele.forEach(t),kC=s(Ht),qf=a(Ht,"TD",{align:!0});var Tle=l(qf);BC=o(Tle,"\u2705"),Tle.forEach(t),CC=s(Ht),Qf=a(Ht,"TD",{align:!0});var _le=l(Qf);NC=o(_le,"\u2705"),_le.forEach(t),IC=s(Ht),jf=a(Ht,"TD",{align:!0});var mle=l(jf);xC=o(mle,"\u2705"),mle.forEach(t),OC=s(Ht),$f=a(Ht,"TD",{align:!0});var ple=l($f);FC=o(ple,"\u274C"),ple.forEach(t),Ht.forEach(t),WC=s(f),re=a(f,"TR",{});var Ut=l(re);e1=a(Ut,"TD",{align:!0});var Dle=l(e1);YC=o(Dle,"LUKE"),Dle.forEach(t),JC=s(Ut),t1=a(Ut,"TD",{align:!0});var yle=l(t1);ZC=o(yle,"\u2705"),yle.forEach(t),VC=s(Ut),r1=a(Ut,"TD",{align:!0});var Ale=l(r1);KC=o(Ale,"\u274C"),Ale.forEach(t),XC=s(Ut),a1=a(Ut,"TD",{align:!0});var Rle=l(a1);zC=o(Rle,"\u2705"),Rle.forEach(t),HC=s(Ut),l1=a(Ut,"TD",{align:!0});var Lle=l(l1);UC=o(Lle,"\u274C"),Lle.forEach(t),qC=s(Ut),n1=a(Ut,"TD",{align:!0});var ble=l(n1);QC=o(ble,"\u274C"),ble.forEach(t),Ut.forEach(t),jC=s(f),ae=a(f,"TR",{});var qt=l(ae);o1=a(qt,"TD",{align:!0});var Sle=l(o1);$C=o(Sle,"LXMERT"),Sle.forEach(t),eN=s(qt),i1=a(qt,"TD",{align:!0});var wle=l(i1);tN=o(wle,"\u2705"),wle.forEach(t),rN=s(qt),d1=a(qt,"TD",{align:!0});var Mle=l(d1);aN=o(Mle,"\u2705"),Mle.forEach(t),lN=s(qt),s1=a(qt,"TD",{align:!0});var Ple=l(s1);nN=o(Ple,"\u2705"),Ple.forEach(t),oN=s(qt),h1=a(qt,"TD",{align:!0});var Gle=l(h1);iN=o(Gle,"\u2705"),Gle.forEach(t),dN=s(qt),c1=a(qt,"TD",{align:!0});var kle=l(c1);sN=o(kle,"\u274C"),kle.forEach(t),qt.forEach(t),hN=s(f),le=a(f,"TR",{});var Qt=l(le);g1=a(Qt,"TD",{align:!0});var Ble=l(g1);cN=o(Ble,"M2M100"),Ble.forEach(t),gN=s(Qt),f1=a(Qt,"TD",{align:!0});var Cle=l(f1);fN=o(Cle,"\u2705"),Cle.forEach(t),uN=s(Qt),u1=a(Qt,"TD",{align:!0});var Nle=l(u1);vN=o(Nle,"\u274C"),Nle.forEach(t),EN=s(Qt),v1=a(Qt,"TD",{align:!0});var Ile=l(v1);TN=o(Ile,"\u2705"),Ile.forEach(t),_N=s(Qt),E1=a(Qt,"TD",{align:!0});var xle=l(E1);mN=o(xle,"\u274C"),xle.forEach(t),pN=s(Qt),T1=a(Qt,"TD",{align:!0});var Ole=l(T1);DN=o(Ole,"\u274C"),Ole.forEach(t),Qt.forEach(t),yN=s(f),ne=a(f,"TR",{});var jt=l(ne);_1=a(jt,"TD",{align:!0});var Fle=l(_1);AN=o(Fle,"Marian"),Fle.forEach(t),RN=s(jt),m1=a(jt,"TD",{align:!0});var Wle=l(m1);LN=o(Wle,"\u2705"),Wle.forEach(t),bN=s(jt),p1=a(jt,"TD",{align:!0});var Yle=l(p1);SN=o(Yle,"\u274C"),Yle.forEach(t),wN=s(jt),D1=a(jt,"TD",{align:!0});var Jle=l(D1);MN=o(Jle,"\u2705"),Jle.forEach(t),PN=s(jt),y1=a(jt,"TD",{align:!0});var Zle=l(y1);GN=o(Zle,"\u2705"),Zle.forEach(t),kN=s(jt),A1=a(jt,"TD",{align:!0});var Vle=l(A1);BN=o(Vle,"\u2705"),Vle.forEach(t),jt.forEach(t),CN=s(f),oe=a(f,"TR",{});var $t=l(oe);R1=a($t,"TD",{align:!0});var Kle=l(R1);NN=o(Kle,"MaskFormer"),Kle.forEach(t),IN=s($t),L1=a($t,"TD",{align:!0});var Xle=l(L1);xN=o(Xle,"\u274C"),Xle.forEach(t),ON=s($t),b1=a($t,"TD",{align:!0});var zle=l(b1);FN=o(zle,"\u274C"),zle.forEach(t),WN=s($t),S1=a($t,"TD",{align:!0});var Hle=l(S1);YN=o(Hle,"\u2705"),Hle.forEach(t),JN=s($t),w1=a($t,"TD",{align:!0});var Ule=l(w1);ZN=o(Ule,"\u274C"),Ule.forEach(t),VN=s($t),M1=a($t,"TD",{align:!0});var qle=l(M1);KN=o(qle,"\u274C"),qle.forEach(t),$t.forEach(t),XN=s(f),ie=a(f,"TR",{});var er=l(ie);P1=a(er,"TD",{align:!0});var Qle=l(P1);zN=o(Qle,"mBART"),Qle.forEach(t),HN=s(er),G1=a(er,"TD",{align:!0});var 
jle=l(G1);UN=o(jle,"\u2705"),jle.forEach(t),qN=s(er),k1=a(er,"TD",{align:!0});var $le=l(k1);QN=o($le,"\u2705"),$le.forEach(t),jN=s(er),B1=a(er,"TD",{align:!0});var ene=l(B1);$N=o(ene,"\u2705"),ene.forEach(t),eI=s(er),C1=a(er,"TD",{align:!0});var tne=l(C1);tI=o(tne,"\u2705"),tne.forEach(t),rI=s(er),N1=a(er,"TD",{align:!0});var rne=l(N1);aI=o(rne,"\u2705"),rne.forEach(t),er.forEach(t),lI=s(f),de=a(f,"TR",{});var tr=l(de);I1=a(tr,"TD",{align:!0});var ane=l(I1);nI=o(ane,"MegatronBert"),ane.forEach(t),oI=s(tr),x1=a(tr,"TD",{align:!0});var lne=l(x1);iI=o(lne,"\u274C"),lne.forEach(t),dI=s(tr),O1=a(tr,"TD",{align:!0});var nne=l(O1);sI=o(nne,"\u274C"),nne.forEach(t),hI=s(tr),F1=a(tr,"TD",{align:!0});var one=l(F1);cI=o(one,"\u2705"),one.forEach(t),gI=s(tr),W1=a(tr,"TD",{align:!0});var ine=l(W1);fI=o(ine,"\u274C"),ine.forEach(t),uI=s(tr),Y1=a(tr,"TD",{align:!0});var dne=l(Y1);vI=o(dne,"\u274C"),dne.forEach(t),tr.forEach(t),EI=s(f),se=a(f,"TR",{});var rr=l(se);J1=a(rr,"TD",{align:!0});var sne=l(J1);TI=o(sne,"MobileBERT"),sne.forEach(t),_I=s(rr),Z1=a(rr,"TD",{align:!0});var hne=l(Z1);mI=o(hne,"\u2705"),hne.forEach(t),pI=s(rr),V1=a(rr,"TD",{align:!0});var cne=l(V1);DI=o(cne,"\u2705"),cne.forEach(t),yI=s(rr),K1=a(rr,"TD",{align:!0});var gne=l(K1);AI=o(gne,"\u2705"),gne.forEach(t),RI=s(rr),X1=a(rr,"TD",{align:!0});var fne=l(X1);LI=o(fne,"\u2705"),fne.forEach(t),bI=s(rr),z1=a(rr,"TD",{align:!0});var une=l(z1);SI=o(une,"\u274C"),une.forEach(t),rr.forEach(t),wI=s(f),he=a(f,"TR",{});var ar=l(he);H1=a(ar,"TD",{align:!0});var vne=l(H1);MI=o(vne,"MPNet"),vne.forEach(t),PI=s(ar),U1=a(ar,"TD",{align:!0});var Ene=l(U1);GI=o(Ene,"\u2705"),Ene.forEach(t),kI=s(ar),q1=a(ar,"TD",{align:!0});var Tne=l(q1);BI=o(Tne,"\u2705"),Tne.forEach(t),CI=s(ar),Q1=a(ar,"TD",{align:!0});var _ne=l(Q1);NI=o(_ne,"\u2705"),_ne.forEach(t),II=s(ar),j1=a(ar,"TD",{align:!0});var mne=l(j1);xI=o(mne,"\u2705"),mne.forEach(t),OI=s(ar),$1=a(ar,"TD",{align:!0});var pne=l($1);FI=o(pne,"\u274C"),pne.forEach(t),ar.forEach(t),WI=s(f),ce=a(f,"TR",{});var lr=l(ce);eu=a(lr,"TD",{align:!0});var Dne=l(eu);YI=o(Dne,"mT5"),Dne.forEach(t),JI=s(lr),tu=a(lr,"TD",{align:!0});var yne=l(tu);ZI=o(yne,"\u2705"),yne.forEach(t),VI=s(lr),ru=a(lr,"TD",{align:!0});var Ane=l(ru);KI=o(Ane,"\u2705"),Ane.forEach(t),XI=s(lr),au=a(lr,"TD",{align:!0});var Rne=l(au);zI=o(Rne,"\u2705"),Rne.forEach(t),HI=s(lr),lu=a(lr,"TD",{align:!0});var Lne=l(lu);UI=o(Lne,"\u2705"),Lne.forEach(t),qI=s(lr),nu=a(lr,"TD",{align:!0});var bne=l(nu);QI=o(bne,"\u2705"),bne.forEach(t),lr.forEach(t),jI=s(f),ge=a(f,"TR",{});var nr=l(ge);ou=a(nr,"TD",{align:!0});var Sne=l(ou);$I=o(Sne,"Nystromformer"),Sne.forEach(t),ex=s(nr),iu=a(nr,"TD",{align:!0});var wne=l(iu);tx=o(wne,"\u274C"),wne.forEach(t),rx=s(nr),du=a(nr,"TD",{align:!0});var Mne=l(du);ax=o(Mne,"\u274C"),Mne.forEach(t),lx=s(nr),su=a(nr,"TD",{align:!0});var Pne=l(su);nx=o(Pne,"\u2705"),Pne.forEach(t),ox=s(nr),hu=a(nr,"TD",{align:!0});var Gne=l(hu);ix=o(Gne,"\u274C"),Gne.forEach(t),dx=s(nr),cu=a(nr,"TD",{align:!0});var kne=l(cu);sx=o(kne,"\u274C"),kne.forEach(t),nr.forEach(t),hx=s(f),fe=a(f,"TR",{});var or=l(fe);gu=a(or,"TD",{align:!0});var Bne=l(gu);cx=o(Bne,"OpenAI GPT"),Bne.forEach(t),gx=s(or),fu=a(or,"TD",{align:!0});var Cne=l(fu);fx=o(Cne,"\u2705"),Cne.forEach(t),ux=s(or),uu=a(or,"TD",{align:!0});var Nne=l(uu);vx=o(Nne,"\u2705"),Nne.forEach(t),Ex=s(or),vu=a(or,"TD",{align:!0});var Ine=l(vu);Tx=o(Ine,"\u2705"),Ine.forEach(t),_x=s(or),Eu=a(or,"TD",{align:!0});var xne=l(Eu);mx=o(xne,"\u2705"),xne.forEach(t),px=s(or),Tu=a(or,"TD",{align:!0});var 
One=l(Tu);Dx=o(One,"\u274C"),One.forEach(t),or.forEach(t),yx=s(f),ue=a(f,"TR",{});var ir=l(ue);_u=a(ir,"TD",{align:!0});var Fne=l(_u);Ax=o(Fne,"OpenAI GPT-2"),Fne.forEach(t),Rx=s(ir),mu=a(ir,"TD",{align:!0});var Wne=l(mu);Lx=o(Wne,"\u2705"),Wne.forEach(t),bx=s(ir),pu=a(ir,"TD",{align:!0});var Yne=l(pu);Sx=o(Yne,"\u2705"),Yne.forEach(t),wx=s(ir),Du=a(ir,"TD",{align:!0});var Jne=l(Du);Mx=o(Jne,"\u2705"),Jne.forEach(t),Px=s(ir),yu=a(ir,"TD",{align:!0});var Zne=l(yu);Gx=o(Zne,"\u2705"),Zne.forEach(t),kx=s(ir),Au=a(ir,"TD",{align:!0});var Vne=l(Au);Bx=o(Vne,"\u2705"),Vne.forEach(t),ir.forEach(t),Cx=s(f),ve=a(f,"TR",{});var dr=l(ve);Ru=a(dr,"TD",{align:!0});var Kne=l(Ru);Nx=o(Kne,"Pegasus"),Kne.forEach(t),Ix=s(dr),Lu=a(dr,"TD",{align:!0});var Xne=l(Lu);xx=o(Xne,"\u2705"),Xne.forEach(t),Ox=s(dr),bu=a(dr,"TD",{align:!0});var zne=l(bu);Fx=o(zne,"\u2705"),zne.forEach(t),Wx=s(dr),Su=a(dr,"TD",{align:!0});var Hne=l(Su);Yx=o(Hne,"\u2705"),Hne.forEach(t),Jx=s(dr),wu=a(dr,"TD",{align:!0});var Une=l(wu);Zx=o(Une,"\u2705"),Une.forEach(t),Vx=s(dr),Mu=a(dr,"TD",{align:!0});var qne=l(Mu);Kx=o(qne,"\u2705"),qne.forEach(t),dr.forEach(t),Xx=s(f),Ee=a(f,"TR",{});var sr=l(Ee);Pu=a(sr,"TD",{align:!0});var Qne=l(Pu);zx=o(Qne,"Perceiver"),Qne.forEach(t),Hx=s(sr),Gu=a(sr,"TD",{align:!0});var jne=l(Gu);Ux=o(jne,"\u2705"),jne.forEach(t),qx=s(sr),ku=a(sr,"TD",{align:!0});var $ne=l(ku);Qx=o($ne,"\u274C"),$ne.forEach(t),jx=s(sr),Bu=a(sr,"TD",{align:!0});var eoe=l(Bu);$x=o(eoe,"\u2705"),eoe.forEach(t),eO=s(sr),Cu=a(sr,"TD",{align:!0});var toe=l(Cu);tO=o(toe,"\u274C"),toe.forEach(t),rO=s(sr),Nu=a(sr,"TD",{align:!0});var roe=l(Nu);aO=o(roe,"\u274C"),roe.forEach(t),sr.forEach(t),lO=s(f),Te=a(f,"TR",{});var hr=l(Te);Iu=a(hr,"TD",{align:!0});var aoe=l(Iu);nO=o(aoe,"PLBart"),aoe.forEach(t),oO=s(hr),xu=a(hr,"TD",{align:!0});var loe=l(xu);iO=o(loe,"\u2705"),loe.forEach(t),dO=s(hr),Ou=a(hr,"TD",{align:!0});var noe=l(Ou);sO=o(noe,"\u274C"),noe.forEach(t),hO=s(hr),Fu=a(hr,"TD",{align:!0});var ooe=l(Fu);cO=o(ooe,"\u2705"),ooe.forEach(t),gO=s(hr),Wu=a(hr,"TD",{align:!0});var ioe=l(Wu);fO=o(ioe,"\u274C"),ioe.forEach(t),uO=s(hr),Yu=a(hr,"TD",{align:!0});var doe=l(Yu);vO=o(doe,"\u274C"),doe.forEach(t),hr.forEach(t),EO=s(f),_e=a(f,"TR",{});var cr=l(_e);Ju=a(cr,"TD",{align:!0});var soe=l(Ju);TO=o(soe,"PoolFormer"),soe.forEach(t),_O=s(cr),Zu=a(cr,"TD",{align:!0});var hoe=l(Zu);mO=o(hoe,"\u274C"),hoe.forEach(t),pO=s(cr),Vu=a(cr,"TD",{align:!0});var coe=l(Vu);DO=o(coe,"\u274C"),coe.forEach(t),yO=s(cr),Ku=a(cr,"TD",{align:!0});var goe=l(Ku);AO=o(goe,"\u2705"),goe.forEach(t),RO=s(cr),Xu=a(cr,"TD",{align:!0});var foe=l(Xu);LO=o(foe,"\u274C"),foe.forEach(t),bO=s(cr),zu=a(cr,"TD",{align:!0});var uoe=l(zu);SO=o(uoe,"\u274C"),uoe.forEach(t),cr.forEach(t),wO=s(f),me=a(f,"TR",{});var gr=l(me);Hu=a(gr,"TD",{align:!0});var voe=l(Hu);MO=o(voe,"ProphetNet"),voe.forEach(t),PO=s(gr),Uu=a(gr,"TD",{align:!0});var Eoe=l(Uu);GO=o(Eoe,"\u2705"),Eoe.forEach(t),kO=s(gr),qu=a(gr,"TD",{align:!0});var Toe=l(qu);BO=o(Toe,"\u274C"),Toe.forEach(t),CO=s(gr),Qu=a(gr,"TD",{align:!0});var _oe=l(Qu);NO=o(_oe,"\u2705"),_oe.forEach(t),IO=s(gr),ju=a(gr,"TD",{align:!0});var moe=l(ju);xO=o(moe,"\u274C"),moe.forEach(t),OO=s(gr),$u=a(gr,"TD",{align:!0});var poe=l($u);FO=o(poe,"\u274C"),poe.forEach(t),gr.forEach(t),WO=s(f),pe=a(f,"TR",{});var fr=l(pe);ev=a(fr,"TD",{align:!0});var Doe=l(ev);YO=o(Doe,"QDQBert"),Doe.forEach(t),JO=s(fr),tv=a(fr,"TD",{align:!0});var yoe=l(tv);ZO=o(yoe,"\u274C"),yoe.forEach(t),VO=s(fr),rv=a(fr,"TD",{align:!0});var 
Aoe=l(rv);KO=o(Aoe,"\u274C"),Aoe.forEach(t),XO=s(fr),av=a(fr,"TD",{align:!0});var Roe=l(av);zO=o(Roe,"\u2705"),Roe.forEach(t),HO=s(fr),lv=a(fr,"TD",{align:!0});var Loe=l(lv);UO=o(Loe,"\u274C"),Loe.forEach(t),qO=s(fr),nv=a(fr,"TD",{align:!0});var boe=l(nv);QO=o(boe,"\u274C"),boe.forEach(t),fr.forEach(t),jO=s(f),De=a(f,"TR",{});var ur=l(De);ov=a(ur,"TD",{align:!0});var Soe=l(ov);$O=o(Soe,"RAG"),Soe.forEach(t),eF=s(ur),iv=a(ur,"TD",{align:!0});var woe=l(iv);tF=o(woe,"\u2705"),woe.forEach(t),rF=s(ur),dv=a(ur,"TD",{align:!0});var Moe=l(dv);aF=o(Moe,"\u274C"),Moe.forEach(t),lF=s(ur),sv=a(ur,"TD",{align:!0});var Poe=l(sv);nF=o(Poe,"\u2705"),Poe.forEach(t),oF=s(ur),hv=a(ur,"TD",{align:!0});var Goe=l(hv);iF=o(Goe,"\u2705"),Goe.forEach(t),dF=s(ur),cv=a(ur,"TD",{align:!0});var koe=l(cv);sF=o(koe,"\u274C"),koe.forEach(t),ur.forEach(t),hF=s(f),ye=a(f,"TR",{});var vr=l(ye);gv=a(vr,"TD",{align:!0});var Boe=l(gv);cF=o(Boe,"Realm"),Boe.forEach(t),gF=s(vr),fv=a(vr,"TD",{align:!0});var Coe=l(fv);fF=o(Coe,"\u2705"),Coe.forEach(t),uF=s(vr),uv=a(vr,"TD",{align:!0});var Noe=l(uv);vF=o(Noe,"\u2705"),Noe.forEach(t),EF=s(vr),vv=a(vr,"TD",{align:!0});var Ioe=l(vv);TF=o(Ioe,"\u2705"),Ioe.forEach(t),_F=s(vr),Ev=a(vr,"TD",{align:!0});var xoe=l(Ev);mF=o(xoe,"\u274C"),xoe.forEach(t),pF=s(vr),Tv=a(vr,"TD",{align:!0});var Ooe=l(Tv);DF=o(Ooe,"\u274C"),Ooe.forEach(t),vr.forEach(t),yF=s(f),Ae=a(f,"TR",{});var Er=l(Ae);_v=a(Er,"TD",{align:!0});var Foe=l(_v);AF=o(Foe,"Reformer"),Foe.forEach(t),RF=s(Er),mv=a(Er,"TD",{align:!0});var Woe=l(mv);LF=o(Woe,"\u2705"),Woe.forEach(t),bF=s(Er),pv=a(Er,"TD",{align:!0});var Yoe=l(pv);SF=o(Yoe,"\u2705"),Yoe.forEach(t),wF=s(Er),Dv=a(Er,"TD",{align:!0});var Joe=l(Dv);MF=o(Joe,"\u2705"),Joe.forEach(t),PF=s(Er),yv=a(Er,"TD",{align:!0});var Zoe=l(yv);GF=o(Zoe,"\u274C"),Zoe.forEach(t),kF=s(Er),Av=a(Er,"TD",{align:!0});var Voe=l(Av);BF=o(Voe,"\u274C"),Voe.forEach(t),Er.forEach(t),CF=s(f),Re=a(f,"TR",{});var Tr=l(Re);Rv=a(Tr,"TD",{align:!0});var Koe=l(Rv);NF=o(Koe,"RemBERT"),Koe.forEach(t),IF=s(Tr),Lv=a(Tr,"TD",{align:!0});var Xoe=l(Lv);xF=o(Xoe,"\u2705"),Xoe.forEach(t),OF=s(Tr),bv=a(Tr,"TD",{align:!0});var zoe=l(bv);FF=o(zoe,"\u2705"),zoe.forEach(t),WF=s(Tr),Sv=a(Tr,"TD",{align:!0});var Hoe=l(Sv);YF=o(Hoe,"\u2705"),Hoe.forEach(t),JF=s(Tr),wv=a(Tr,"TD",{align:!0});var Uoe=l(wv);ZF=o(Uoe,"\u2705"),Uoe.forEach(t),VF=s(Tr),Mv=a(Tr,"TD",{align:!0});var qoe=l(Mv);KF=o(qoe,"\u274C"),qoe.forEach(t),Tr.forEach(t),XF=s(f),Le=a(f,"TR",{});var _r=l(Le);Pv=a(_r,"TD",{align:!0});var Qoe=l(Pv);zF=o(Qoe,"RetriBERT"),Qoe.forEach(t),HF=s(_r),Gv=a(_r,"TD",{align:!0});var joe=l(Gv);UF=o(joe,"\u2705"),joe.forEach(t),qF=s(_r),kv=a(_r,"TD",{align:!0});var $oe=l(kv);QF=o($oe,"\u2705"),$oe.forEach(t),jF=s(_r),Bv=a(_r,"TD",{align:!0});var eie=l(Bv);$F=o(eie,"\u2705"),eie.forEach(t),eW=s(_r),Cv=a(_r,"TD",{align:!0});var tie=l(Cv);tW=o(tie,"\u274C"),tie.forEach(t),rW=s(_r),Nv=a(_r,"TD",{align:!0});var rie=l(Nv);aW=o(rie,"\u274C"),rie.forEach(t),_r.forEach(t),lW=s(f),be=a(f,"TR",{});var mr=l(be);Iv=a(mr,"TD",{align:!0});var aie=l(Iv);nW=o(aie,"RoBERTa"),aie.forEach(t),oW=s(mr),xv=a(mr,"TD",{align:!0});var lie=l(xv);iW=o(lie,"\u2705"),lie.forEach(t),dW=s(mr),Ov=a(mr,"TD",{align:!0});var nie=l(Ov);sW=o(nie,"\u2705"),nie.forEach(t),hW=s(mr),Fv=a(mr,"TD",{align:!0});var oie=l(Fv);cW=o(oie,"\u2705"),oie.forEach(t),gW=s(mr),Wv=a(mr,"TD",{align:!0});var iie=l(Wv);fW=o(iie,"\u2705"),iie.forEach(t),uW=s(mr),Yv=a(mr,"TD",{align:!0});var die=l(Yv);vW=o(die,"\u2705"),die.forEach(t),mr.forEach(t),EW=s(f),Se=a(f,"TR",{});var 
pr=l(Se);Jv=a(pr,"TD",{align:!0});var sie=l(Jv);TW=o(sie,"RoFormer"),sie.forEach(t),_W=s(pr),Zv=a(pr,"TD",{align:!0});var hie=l(Zv);mW=o(hie,"\u2705"),hie.forEach(t),pW=s(pr),Vv=a(pr,"TD",{align:!0});var cie=l(Vv);DW=o(cie,"\u2705"),cie.forEach(t),yW=s(pr),Kv=a(pr,"TD",{align:!0});var gie=l(Kv);AW=o(gie,"\u2705"),gie.forEach(t),RW=s(pr),Xv=a(pr,"TD",{align:!0});var fie=l(Xv);LW=o(fie,"\u2705"),fie.forEach(t),bW=s(pr),zv=a(pr,"TD",{align:!0});var uie=l(zv);SW=o(uie,"\u2705"),uie.forEach(t),pr.forEach(t),wW=s(f),we=a(f,"TR",{});var Dr=l(we);Hv=a(Dr,"TD",{align:!0});var vie=l(Hv);MW=o(vie,"SegFormer"),vie.forEach(t),PW=s(Dr),Uv=a(Dr,"TD",{align:!0});var Eie=l(Uv);GW=o(Eie,"\u274C"),Eie.forEach(t),kW=s(Dr),qv=a(Dr,"TD",{align:!0});var Tie=l(qv);BW=o(Tie,"\u274C"),Tie.forEach(t),CW=s(Dr),Qv=a(Dr,"TD",{align:!0});var _ie=l(Qv);NW=o(_ie,"\u2705"),_ie.forEach(t),IW=s(Dr),jv=a(Dr,"TD",{align:!0});var mie=l(jv);xW=o(mie,"\u274C"),mie.forEach(t),OW=s(Dr),$v=a(Dr,"TD",{align:!0});var pie=l($v);FW=o(pie,"\u274C"),pie.forEach(t),Dr.forEach(t),WW=s(f),Me=a(f,"TR",{});var yr=l(Me);eE=a(yr,"TD",{align:!0});var Die=l(eE);YW=o(Die,"SEW"),Die.forEach(t),JW=s(yr),tE=a(yr,"TD",{align:!0});var yie=l(tE);ZW=o(yie,"\u274C"),yie.forEach(t),VW=s(yr),rE=a(yr,"TD",{align:!0});var Aie=l(rE);KW=o(Aie,"\u274C"),Aie.forEach(t),XW=s(yr),aE=a(yr,"TD",{align:!0});var Rie=l(aE);zW=o(Rie,"\u2705"),Rie.forEach(t),HW=s(yr),lE=a(yr,"TD",{align:!0});var Lie=l(lE);UW=o(Lie,"\u274C"),Lie.forEach(t),qW=s(yr),nE=a(yr,"TD",{align:!0});var bie=l(nE);QW=o(bie,"\u274C"),bie.forEach(t),yr.forEach(t),jW=s(f),Pe=a(f,"TR",{});var Ar=l(Pe);oE=a(Ar,"TD",{align:!0});var Sie=l(oE);$W=o(Sie,"SEW-D"),Sie.forEach(t),eY=s(Ar),iE=a(Ar,"TD",{align:!0});var wie=l(iE);tY=o(wie,"\u274C"),wie.forEach(t),rY=s(Ar),dE=a(Ar,"TD",{align:!0});var Mie=l(dE);aY=o(Mie,"\u274C"),Mie.forEach(t),lY=s(Ar),sE=a(Ar,"TD",{align:!0});var Pie=l(sE);nY=o(Pie,"\u2705"),Pie.forEach(t),oY=s(Ar),hE=a(Ar,"TD",{align:!0});var Gie=l(hE);iY=o(Gie,"\u274C"),Gie.forEach(t),dY=s(Ar),cE=a(Ar,"TD",{align:!0});var kie=l(cE);sY=o(kie,"\u274C"),kie.forEach(t),Ar.forEach(t),hY=s(f),Ge=a(f,"TR",{});var Rr=l(Ge);gE=a(Rr,"TD",{align:!0});var Bie=l(gE);cY=o(Bie,"Speech Encoder decoder"),Bie.forEach(t),gY=s(Rr),fE=a(Rr,"TD",{align:!0});var Cie=l(fE);fY=o(Cie,"\u274C"),Cie.forEach(t),uY=s(Rr),uE=a(Rr,"TD",{align:!0});var Nie=l(uE);vY=o(Nie,"\u274C"),Nie.forEach(t),EY=s(Rr),vE=a(Rr,"TD",{align:!0});var Iie=l(vE);TY=o(Iie,"\u2705"),Iie.forEach(t),_Y=s(Rr),EE=a(Rr,"TD",{align:!0});var xie=l(EE);mY=o(xie,"\u274C"),xie.forEach(t),pY=s(Rr),TE=a(Rr,"TD",{align:!0});var Oie=l(TE);DY=o(Oie,"\u2705"),Oie.forEach(t),Rr.forEach(t),yY=s(f),ke=a(f,"TR",{});var Lr=l(ke);_E=a(Lr,"TD",{align:!0});var Fie=l(_E);AY=o(Fie,"Speech2Text"),Fie.forEach(t),RY=s(Lr),mE=a(Lr,"TD",{align:!0});var Wie=l(mE);LY=o(Wie,"\u2705"),Wie.forEach(t),bY=s(Lr),pE=a(Lr,"TD",{align:!0});var Yie=l(pE);SY=o(Yie,"\u274C"),Yie.forEach(t),wY=s(Lr),DE=a(Lr,"TD",{align:!0});var Jie=l(DE);MY=o(Jie,"\u2705"),Jie.forEach(t),PY=s(Lr),yE=a(Lr,"TD",{align:!0});var Zie=l(yE);GY=o(Zie,"\u2705"),Zie.forEach(t),kY=s(Lr),AE=a(Lr,"TD",{align:!0});var Vie=l(AE);BY=o(Vie,"\u274C"),Vie.forEach(t),Lr.forEach(t),CY=s(f),Be=a(f,"TR",{});var br=l(Be);RE=a(br,"TD",{align:!0});var Kie=l(RE);NY=o(Kie,"Speech2Text2"),Kie.forEach(t),IY=s(br),LE=a(br,"TD",{align:!0});var Xie=l(LE);xY=o(Xie,"\u2705"),Xie.forEach(t),OY=s(br),bE=a(br,"TD",{align:!0});var zie=l(bE);FY=o(zie,"\u274C"),zie.forEach(t),WY=s(br),SE=a(br,"TD",{align:!0});var 
Hie=l(SE);YY=o(Hie,"\u274C"),Hie.forEach(t),JY=s(br),wE=a(br,"TD",{align:!0});var Uie=l(wE);ZY=o(Uie,"\u274C"),Uie.forEach(t),VY=s(br),ME=a(br,"TD",{align:!0});var qie=l(ME);KY=o(qie,"\u274C"),qie.forEach(t),br.forEach(t),XY=s(f),Ce=a(f,"TR",{});var Sr=l(Ce);PE=a(Sr,"TD",{align:!0});var Qie=l(PE);zY=o(Qie,"Splinter"),Qie.forEach(t),HY=s(Sr),GE=a(Sr,"TD",{align:!0});var jie=l(GE);UY=o(jie,"\u2705"),jie.forEach(t),qY=s(Sr),kE=a(Sr,"TD",{align:!0});var $ie=l(kE);QY=o($ie,"\u2705"),$ie.forEach(t),jY=s(Sr),BE=a(Sr,"TD",{align:!0});var ede=l(BE);$Y=o(ede,"\u2705"),ede.forEach(t),eJ=s(Sr),CE=a(Sr,"TD",{align:!0});var tde=l(CE);tJ=o(tde,"\u274C"),tde.forEach(t),rJ=s(Sr),NE=a(Sr,"TD",{align:!0});var rde=l(NE);aJ=o(rde,"\u274C"),rde.forEach(t),Sr.forEach(t),lJ=s(f),Ne=a(f,"TR",{});var wr=l(Ne);IE=a(wr,"TD",{align:!0});var ade=l(IE);nJ=o(ade,"SqueezeBERT"),ade.forEach(t),oJ=s(wr),xE=a(wr,"TD",{align:!0});var lde=l(xE);iJ=o(lde,"\u2705"),lde.forEach(t),dJ=s(wr),OE=a(wr,"TD",{align:!0});var nde=l(OE);sJ=o(nde,"\u2705"),nde.forEach(t),hJ=s(wr),FE=a(wr,"TD",{align:!0});var ode=l(FE);cJ=o(ode,"\u2705"),ode.forEach(t),gJ=s(wr),WE=a(wr,"TD",{align:!0});var ide=l(WE);fJ=o(ide,"\u274C"),ide.forEach(t),uJ=s(wr),YE=a(wr,"TD",{align:!0});var dde=l(YE);vJ=o(dde,"\u274C"),dde.forEach(t),wr.forEach(t),EJ=s(f),Ie=a(f,"TR",{});var Mr=l(Ie);JE=a(Mr,"TD",{align:!0});var sde=l(JE);TJ=o(sde,"Swin"),sde.forEach(t),_J=s(Mr),ZE=a(Mr,"TD",{align:!0});var hde=l(ZE);mJ=o(hde,"\u274C"),hde.forEach(t),pJ=s(Mr),VE=a(Mr,"TD",{align:!0});var cde=l(VE);DJ=o(cde,"\u274C"),cde.forEach(t),yJ=s(Mr),KE=a(Mr,"TD",{align:!0});var gde=l(KE);AJ=o(gde,"\u2705"),gde.forEach(t),RJ=s(Mr),XE=a(Mr,"TD",{align:!0});var fde=l(XE);LJ=o(fde,"\u274C"),fde.forEach(t),bJ=s(Mr),zE=a(Mr,"TD",{align:!0});var ude=l(zE);SJ=o(ude,"\u274C"),ude.forEach(t),Mr.forEach(t),wJ=s(f),xe=a(f,"TR",{});var Pr=l(xe);HE=a(Pr,"TD",{align:!0});var vde=l(HE);MJ=o(vde,"T5"),vde.forEach(t),PJ=s(Pr),UE=a(Pr,"TD",{align:!0});var Ede=l(UE);GJ=o(Ede,"\u2705"),Ede.forEach(t),kJ=s(Pr),qE=a(Pr,"TD",{align:!0});var Tde=l(qE);BJ=o(Tde,"\u2705"),Tde.forEach(t),CJ=s(Pr),QE=a(Pr,"TD",{align:!0});var _de=l(QE);NJ=o(_de,"\u2705"),_de.forEach(t),IJ=s(Pr),jE=a(Pr,"TD",{align:!0});var mde=l(jE);xJ=o(mde,"\u2705"),mde.forEach(t),OJ=s(Pr),$E=a(Pr,"TD",{align:!0});var pde=l($E);FJ=o(pde,"\u2705"),pde.forEach(t),Pr.forEach(t),WJ=s(f),Oe=a(f,"TR",{});var Gr=l(Oe);e2=a(Gr,"TD",{align:!0});var Dde=l(e2);YJ=o(Dde,"TAPAS"),Dde.forEach(t),JJ=s(Gr),t2=a(Gr,"TD",{align:!0});var yde=l(t2);ZJ=o(yde,"\u2705"),yde.forEach(t),VJ=s(Gr),r2=a(Gr,"TD",{align:!0});var Ade=l(r2);KJ=o(Ade,"\u274C"),Ade.forEach(t),XJ=s(Gr),a2=a(Gr,"TD",{align:!0});var Rde=l(a2);zJ=o(Rde,"\u2705"),Rde.forEach(t),HJ=s(Gr),l2=a(Gr,"TD",{align:!0});var Lde=l(l2);UJ=o(Lde,"\u2705"),Lde.forEach(t),qJ=s(Gr),n2=a(Gr,"TD",{align:!0});var bde=l(n2);QJ=o(bde,"\u274C"),bde.forEach(t),Gr.forEach(t),jJ=s(f),Fe=a(f,"TR",{});var kr=l(Fe);o2=a(kr,"TD",{align:!0});var Sde=l(o2);$J=o(Sde,"Transformer-XL"),Sde.forEach(t),eZ=s(kr),i2=a(kr,"TD",{align:!0});var wde=l(i2);tZ=o(wde,"\u2705"),wde.forEach(t),rZ=s(kr),d2=a(kr,"TD",{align:!0});var Mde=l(d2);aZ=o(Mde,"\u274C"),Mde.forEach(t),lZ=s(kr),s2=a(kr,"TD",{align:!0});var Pde=l(s2);nZ=o(Pde,"\u2705"),Pde.forEach(t),oZ=s(kr),h2=a(kr,"TD",{align:!0});var Gde=l(h2);iZ=o(Gde,"\u2705"),Gde.forEach(t),dZ=s(kr),c2=a(kr,"TD",{align:!0});var kde=l(c2);sZ=o(kde,"\u274C"),kde.forEach(t),kr.forEach(t),hZ=s(f),We=a(f,"TR",{});var Br=l(We);g2=a(Br,"TD",{align:!0});var 
Bde=l(g2);cZ=o(Bde,"TrOCR"),Bde.forEach(t),gZ=s(Br),f2=a(Br,"TD",{align:!0});var Cde=l(f2);fZ=o(Cde,"\u274C"),Cde.forEach(t),uZ=s(Br),u2=a(Br,"TD",{align:!0});var Nde=l(u2);vZ=o(Nde,"\u274C"),Nde.forEach(t),EZ=s(Br),v2=a(Br,"TD",{align:!0});var Ide=l(v2);TZ=o(Ide,"\u2705"),Ide.forEach(t),_Z=s(Br),E2=a(Br,"TD",{align:!0});var xde=l(E2);mZ=o(xde,"\u274C"),xde.forEach(t),pZ=s(Br),T2=a(Br,"TD",{align:!0});var Ode=l(T2);DZ=o(Ode,"\u274C"),Ode.forEach(t),Br.forEach(t),yZ=s(f),Ye=a(f,"TR",{});var Cr=l(Ye);_2=a(Cr,"TD",{align:!0});var Fde=l(_2);AZ=o(Fde,"UniSpeech"),Fde.forEach(t),RZ=s(Cr),m2=a(Cr,"TD",{align:!0});var Wde=l(m2);LZ=o(Wde,"\u274C"),Wde.forEach(t),bZ=s(Cr),p2=a(Cr,"TD",{align:!0});var Yde=l(p2);SZ=o(Yde,"\u274C"),Yde.forEach(t),wZ=s(Cr),D2=a(Cr,"TD",{align:!0});var Jde=l(D2);MZ=o(Jde,"\u2705"),Jde.forEach(t),PZ=s(Cr),y2=a(Cr,"TD",{align:!0});var Zde=l(y2);GZ=o(Zde,"\u274C"),Zde.forEach(t),kZ=s(Cr),A2=a(Cr,"TD",{align:!0});var Vde=l(A2);BZ=o(Vde,"\u274C"),Vde.forEach(t),Cr.forEach(t),CZ=s(f),Je=a(f,"TR",{});var Nr=l(Je);R2=a(Nr,"TD",{align:!0});var Kde=l(R2);NZ=o(Kde,"UniSpeechSat"),Kde.forEach(t),IZ=s(Nr),L2=a(Nr,"TD",{align:!0});var Xde=l(L2);xZ=o(Xde,"\u274C"),Xde.forEach(t),OZ=s(Nr),b2=a(Nr,"TD",{align:!0});var zde=l(b2);FZ=o(zde,"\u274C"),zde.forEach(t),WZ=s(Nr),S2=a(Nr,"TD",{align:!0});var Hde=l(S2);YZ=o(Hde,"\u2705"),Hde.forEach(t),JZ=s(Nr),w2=a(Nr,"TD",{align:!0});var Ude=l(w2);ZZ=o(Ude,"\u274C"),Ude.forEach(t),VZ=s(Nr),M2=a(Nr,"TD",{align:!0});var qde=l(M2);KZ=o(qde,"\u274C"),qde.forEach(t),Nr.forEach(t),XZ=s(f),Ze=a(f,"TR",{});var Ir=l(Ze);P2=a(Ir,"TD",{align:!0});var Qde=l(P2);zZ=o(Qde,"ViLT"),Qde.forEach(t),HZ=s(Ir),G2=a(Ir,"TD",{align:!0});var jde=l(G2);UZ=o(jde,"\u274C"),jde.forEach(t),qZ=s(Ir),k2=a(Ir,"TD",{align:!0});var $de=l(k2);QZ=o($de,"\u274C"),$de.forEach(t),jZ=s(Ir),B2=a(Ir,"TD",{align:!0});var ese=l(B2);$Z=o(ese,"\u2705"),ese.forEach(t),eV=s(Ir),C2=a(Ir,"TD",{align:!0});var tse=l(C2);tV=o(tse,"\u274C"),tse.forEach(t),rV=s(Ir),N2=a(Ir,"TD",{align:!0});var rse=l(N2);aV=o(rse,"\u274C"),rse.forEach(t),Ir.forEach(t),lV=s(f),Ve=a(f,"TR",{});var xr=l(Ve);I2=a(xr,"TD",{align:!0});var ase=l(I2);nV=o(ase,"Vision Encoder decoder"),ase.forEach(t),oV=s(xr),x2=a(xr,"TD",{align:!0});var lse=l(x2);iV=o(lse,"\u274C"),lse.forEach(t),dV=s(xr),O2=a(xr,"TD",{align:!0});var nse=l(O2);sV=o(nse,"\u274C"),nse.forEach(t),hV=s(xr),F2=a(xr,"TD",{align:!0});var ose=l(F2);cV=o(ose,"\u2705"),ose.forEach(t),gV=s(xr),W2=a(xr,"TD",{align:!0});var ise=l(W2);fV=o(ise,"\u2705"),ise.forEach(t),uV=s(xr),Y2=a(xr,"TD",{align:!0});var dse=l(Y2);vV=o(dse,"\u2705"),dse.forEach(t),xr.forEach(t),EV=s(f),Ke=a(f,"TR",{});var Or=l(Ke);J2=a(Or,"TD",{align:!0});var sse=l(J2);TV=o(sse,"VisionTextDualEncoder"),sse.forEach(t),_V=s(Or),Z2=a(Or,"TD",{align:!0});var hse=l(Z2);mV=o(hse,"\u274C"),hse.forEach(t),pV=s(Or),V2=a(Or,"TD",{align:!0});var cse=l(V2);DV=o(cse,"\u274C"),cse.forEach(t),yV=s(Or),K2=a(Or,"TD",{align:!0});var gse=l(K2);AV=o(gse,"\u2705"),gse.forEach(t),RV=s(Or),X2=a(Or,"TD",{align:!0});var fse=l(X2);LV=o(fse,"\u274C"),fse.forEach(t),bV=s(Or),z2=a(Or,"TD",{align:!0});var use=l(z2);SV=o(use,"\u2705"),use.forEach(t),Or.forEach(t),wV=s(f),Xe=a(f,"TR",{});var Fr=l(Xe);H2=a(Fr,"TD",{align:!0});var vse=l(H2);MV=o(vse,"VisualBert"),vse.forEach(t),PV=s(Fr),U2=a(Fr,"TD",{align:!0});var Ese=l(U2);GV=o(Ese,"\u274C"),Ese.forEach(t),kV=s(Fr),q2=a(Fr,"TD",{align:!0});var Tse=l(q2);BV=o(Tse,"\u274C"),Tse.forEach(t),CV=s(Fr),Q2=a(Fr,"TD",{align:!0});var 
_se=l(Q2);NV=o(_se,"\u2705"),_se.forEach(t),IV=s(Fr),j2=a(Fr,"TD",{align:!0});var mse=l(j2);xV=o(mse,"\u274C"),mse.forEach(t),OV=s(Fr),$2=a(Fr,"TD",{align:!0});var pse=l($2);FV=o(pse,"\u274C"),pse.forEach(t),Fr.forEach(t),WV=s(f),ze=a(f,"TR",{});var Wr=l(ze);eT=a(Wr,"TD",{align:!0});var Dse=l(eT);YV=o(Dse,"ViT"),Dse.forEach(t),JV=s(Wr),tT=a(Wr,"TD",{align:!0});var yse=l(tT);ZV=o(yse,"\u274C"),yse.forEach(t),VV=s(Wr),rT=a(Wr,"TD",{align:!0});var Ase=l(rT);KV=o(Ase,"\u274C"),Ase.forEach(t),XV=s(Wr),aT=a(Wr,"TD",{align:!0});var Rse=l(aT);zV=o(Rse,"\u2705"),Rse.forEach(t),HV=s(Wr),lT=a(Wr,"TD",{align:!0});var Lse=l(lT);UV=o(Lse,"\u2705"),Lse.forEach(t),qV=s(Wr),nT=a(Wr,"TD",{align:!0});var bse=l(nT);QV=o(bse,"\u2705"),bse.forEach(t),Wr.forEach(t),jV=s(f),He=a(f,"TR",{});var Yr=l(He);oT=a(Yr,"TD",{align:!0});var Sse=l(oT);$V=o(Sse,"ViTMAE"),Sse.forEach(t),eK=s(Yr),iT=a(Yr,"TD",{align:!0});var wse=l(iT);tK=o(wse,"\u274C"),wse.forEach(t),rK=s(Yr),dT=a(Yr,"TD",{align:!0});var Mse=l(dT);aK=o(Mse,"\u274C"),Mse.forEach(t),lK=s(Yr),sT=a(Yr,"TD",{align:!0});var Pse=l(sT);nK=o(Pse,"\u2705"),Pse.forEach(t),oK=s(Yr),hT=a(Yr,"TD",{align:!0});var Gse=l(hT);iK=o(Gse,"\u274C"),Gse.forEach(t),dK=s(Yr),cT=a(Yr,"TD",{align:!0});var kse=l(cT);sK=o(kse,"\u274C"),kse.forEach(t),Yr.forEach(t),hK=s(f),Ue=a(f,"TR",{});var Jr=l(Ue);gT=a(Jr,"TD",{align:!0});var Bse=l(gT);cK=o(Bse,"Wav2Vec2"),Bse.forEach(t),gK=s(Jr),fT=a(Jr,"TD",{align:!0});var Cse=l(fT);fK=o(Cse,"\u2705"),Cse.forEach(t),uK=s(Jr),uT=a(Jr,"TD",{align:!0});var Nse=l(uT);vK=o(Nse,"\u274C"),Nse.forEach(t),EK=s(Jr),vT=a(Jr,"TD",{align:!0});var Ise=l(vT);TK=o(Ise,"\u2705"),Ise.forEach(t),_K=s(Jr),ET=a(Jr,"TD",{align:!0});var xse=l(ET);mK=o(xse,"\u2705"),xse.forEach(t),pK=s(Jr),TT=a(Jr,"TD",{align:!0});var Ose=l(TT);DK=o(Ose,"\u2705"),Ose.forEach(t),Jr.forEach(t),yK=s(f),qe=a(f,"TR",{});var Zr=l(qe);_T=a(Zr,"TD",{align:!0});var Fse=l(_T);AK=o(Fse,"WavLM"),Fse.forEach(t),RK=s(Zr),mT=a(Zr,"TD",{align:!0});var Wse=l(mT);LK=o(Wse,"\u274C"),Wse.forEach(t),bK=s(Zr),pT=a(Zr,"TD",{align:!0});var Yse=l(pT);SK=o(Yse,"\u274C"),Yse.forEach(t),wK=s(Zr),DT=a(Zr,"TD",{align:!0});var Jse=l(DT);MK=o(Jse,"\u2705"),Jse.forEach(t),PK=s(Zr),yT=a(Zr,"TD",{align:!0});var Zse=l(yT);GK=o(Zse,"\u274C"),Zse.forEach(t),kK=s(Zr),AT=a(Zr,"TD",{align:!0});var Vse=l(AT);BK=o(Vse,"\u274C"),Vse.forEach(t),Zr.forEach(t),CK=s(f),Qe=a(f,"TR",{});var Vr=l(Qe);RT=a(Vr,"TD",{align:!0});var Kse=l(RT);NK=o(Kse,"XGLM"),Kse.forEach(t),IK=s(Vr),LT=a(Vr,"TD",{align:!0});var Xse=l(LT);xK=o(Xse,"\u2705"),Xse.forEach(t),OK=s(Vr),bT=a(Vr,"TD",{align:!0});var zse=l(bT);FK=o(zse,"\u2705"),zse.forEach(t),WK=s(Vr),ST=a(Vr,"TD",{align:!0});var Hse=l(ST);YK=o(Hse,"\u2705"),Hse.forEach(t),JK=s(Vr),wT=a(Vr,"TD",{align:!0});var Use=l(wT);ZK=o(Use,"\u274C"),Use.forEach(t),VK=s(Vr),MT=a(Vr,"TD",{align:!0});var qse=l(MT);KK=o(qse,"\u2705"),qse.forEach(t),Vr.forEach(t),XK=s(f),je=a(f,"TR",{});var Kr=l(je);PT=a(Kr,"TD",{align:!0});var Qse=l(PT);zK=o(Qse,"XLM"),Qse.forEach(t),HK=s(Kr),GT=a(Kr,"TD",{align:!0});var jse=l(GT);UK=o(jse,"\u2705"),jse.forEach(t),qK=s(Kr),kT=a(Kr,"TD",{align:!0});var $se=l(kT);QK=o($se,"\u274C"),$se.forEach(t),jK=s(Kr),BT=a(Kr,"TD",{align:!0});var ehe=l(BT);$K=o(ehe,"\u2705"),ehe.forEach(t),eX=s(Kr),CT=a(Kr,"TD",{align:!0});var the=l(CT);tX=o(the,"\u2705"),the.forEach(t),rX=s(Kr),NT=a(Kr,"TD",{align:!0});var rhe=l(NT);aX=o(rhe,"\u274C"),rhe.forEach(t),Kr.forEach(t),lX=s(f),$e=a(f,"TR",{});var Xr=l($e);IT=a(Xr,"TD",{align:!0});var 
ahe=l(IT);nX=o(ahe,"XLM-RoBERTa"),ahe.forEach(t),oX=s(Xr),xT=a(Xr,"TD",{align:!0});var lhe=l(xT);iX=o(lhe,"\u2705"),lhe.forEach(t),dX=s(Xr),OT=a(Xr,"TD",{align:!0});var nhe=l(OT);sX=o(nhe,"\u2705"),nhe.forEach(t),hX=s(Xr),FT=a(Xr,"TD",{align:!0});var ohe=l(FT);cX=o(ohe,"\u2705"),ohe.forEach(t),gX=s(Xr),WT=a(Xr,"TD",{align:!0});var ihe=l(WT);fX=o(ihe,"\u2705"),ihe.forEach(t),uX=s(Xr),YT=a(Xr,"TD",{align:!0});var dhe=l(YT);vX=o(dhe,"\u2705"),dhe.forEach(t),Xr.forEach(t),EX=s(f),et=a(f,"TR",{});var zr=l(et);JT=a(zr,"TD",{align:!0});var she=l(JT);TX=o(she,"XLM-RoBERTa-XL"),she.forEach(t),_X=s(zr),ZT=a(zr,"TD",{align:!0});var hhe=l(ZT);mX=o(hhe,"\u274C"),hhe.forEach(t),pX=s(zr),VT=a(zr,"TD",{align:!0});var che=l(VT);DX=o(che,"\u274C"),che.forEach(t),yX=s(zr),KT=a(zr,"TD",{align:!0});var ghe=l(KT);AX=o(ghe,"\u2705"),ghe.forEach(t),RX=s(zr),XT=a(zr,"TD",{align:!0});var fhe=l(XT);LX=o(fhe,"\u274C"),fhe.forEach(t),bX=s(zr),zT=a(zr,"TD",{align:!0});var uhe=l(zT);SX=o(uhe,"\u274C"),uhe.forEach(t),zr.forEach(t),wX=s(f),tt=a(f,"TR",{});var Hr=l(tt);HT=a(Hr,"TD",{align:!0});var vhe=l(HT);MX=o(vhe,"XLMProphetNet"),vhe.forEach(t),PX=s(Hr),UT=a(Hr,"TD",{align:!0});var Ehe=l(UT);GX=o(Ehe,"\u2705"),Ehe.forEach(t),kX=s(Hr),qT=a(Hr,"TD",{align:!0});var The=l(qT);BX=o(The,"\u274C"),The.forEach(t),CX=s(Hr),QT=a(Hr,"TD",{align:!0});var _he=l(QT);NX=o(_he,"\u2705"),_he.forEach(t),IX=s(Hr),jT=a(Hr,"TD",{align:!0});var mhe=l(jT);xX=o(mhe,"\u274C"),mhe.forEach(t),OX=s(Hr),$T=a(Hr,"TD",{align:!0});var phe=l($T);FX=o(phe,"\u274C"),phe.forEach(t),Hr.forEach(t),WX=s(f),rt=a(f,"TR",{});var Ur=l(rt);e_=a(Ur,"TD",{align:!0});var Dhe=l(e_);YX=o(Dhe,"XLNet"),Dhe.forEach(t),JX=s(Ur),t_=a(Ur,"TD",{align:!0});var yhe=l(t_);ZX=o(yhe,"\u2705"),yhe.forEach(t),VX=s(Ur),r_=a(Ur,"TD",{align:!0});var Ahe=l(r_);KX=o(Ahe,"\u2705"),Ahe.forEach(t),XX=s(Ur),a_=a(Ur,"TD",{align:!0});var Rhe=l(a_);zX=o(Rhe,"\u2705"),Rhe.forEach(t),HX=s(Ur),l_=a(Ur,"TD",{align:!0});var Lhe=l(l_);UX=o(Lhe,"\u2705"),Lhe.forEach(t),qX=s(Ur),n_=a(Ur,"TD",{align:!0});var bhe=l(n_);QX=o(bhe,"\u274C"),bhe.forEach(t),Ur.forEach(t),jX=s(f),at=a(f,"TR",{});var qr=l(at);o_=a(qr,"TD",{align:!0});var She=l(o_);$X=o(She,"YOSO"),She.forEach(t),ez=s(qr),i_=a(qr,"TD",{align:!0});var whe=l(i_);tz=o(whe,"\u274C"),whe.forEach(t),rz=s(qr),d_=a(qr,"TD",{align:!0});var Mhe=l(d_);az=o(Mhe,"\u274C"),Mhe.forEach(t),lz=s(qr),s_=a(qr,"TD",{align:!0});var Phe=l(s_);nz=o(Phe,"\u2705"),Phe.forEach(t),oz=s(qr),h_=a(qr,"TD",{align:!0});var Ghe=l(h_);iz=o(Ghe,"\u274C"),Ghe.forEach(t),dz=s(qr),c_=a(qr,"TD",{align:!0});var khe=l(c_);sz=o(khe,"\u274C"),khe.forEach(t),qr.forEach(t),f.forEach(t),o0.forEach(t),this.h()},h(){i($r,"name","hf:doc:metadata"),i($r,"content",JSON.stringify(Whe)),i(Qr,"id","transformers"),i(Qr,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),i(Qr,"href","#transformers"),i(ot,"class","relative group"),i(cn,"href","https://pytorch.org/"),i(cn,"rel","nofollow"),i(gn,"href","https://www.tensorflow.org/"),i(gn,"rel","nofollow"),i(fn,"href","https://jax.readthedocs.io/en/latest/"),i(fn,"rel","nofollow"),i(va,"id","if-you-are-looking-for-custom-support-from-the-hugging-face-team"),i(va,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full"),i(va,"href","#if-you-are-looking-for-custom-support-from-the-hugging-face-team"),i(da,"class","relative group"),i(ea,"alt","HuggingFace Expert Acceleration Program"),xhe(ea.src,mz="https://huggingface.co/front/thumbnails/support.png")||i(ea,"src",mz),i0(ea,"max-width","600px"),i0(ea,"border","1px solid #eee"),i0(ea,"border-radius","4px"),i0(ea,"box-shadow","0 1px 2px 0 rgba(0, 0, 0, 0.05)"),i(sa,"target","_blank"),i(sa,"href","https://huggingface.co/support"),i(Ea,"id","contents"),i(Ea,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),i(Ea,"href","#contents"),i(ha,"class","relative group"),i(Ta,"id","supported-models"),i(Ta,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),i(Ta,"href","#supported-models"),i(ga,"class","relative group"),i(Zi,"href","model_doc/albert"),i(_n,"href","https://arxiv.org/abs/1909.11942"),i(_n,"rel","nofollow"),i(Vi,"href","model_doc/bart"),i(mn,"href","https://arxiv.org/abs/1910.13461"),i(mn,"rel","nofollow"),i(Ki,"href","model_doc/barthez"),i(pn,"href","https://arxiv.org/abs/2010.12321"),i(pn,"rel","nofollow"),i(Xi,"href","model_doc/bartpho"),i(Dn,"href","https://arxiv.org/abs/2109.09701"),i(Dn,"rel","nofollow"),i(zi,"href","model_doc/beit"),i(yn,"href","https://arxiv.org/abs/2106.08254"),i(yn,"rel","nofollow"),i(Hi,"href","model_doc/bert"),i(An,"href","https://arxiv.org/abs/1810.04805"),i(An,"rel","nofollow"),i(Ui,"href","model_doc/bertweet"),i(Rn,"href","https://aclanthology.org/2020.emnlp-demos.2/"),i(Rn,"rel","nofollow"),i(qi,"href","model_doc/bert-generation"),i(Ln,"href","https://arxiv.org/abs/1907.12461"),i(Ln,"rel","nofollow"),i(Qi,"href","model_doc/big_bird"),i(bn,"href","https://arxiv.org/abs/2007.14062"),i(bn,"rel","nofollow"),i(ji,"href","model_doc/bigbird_pegasus"),i(Sn,"href","https://arxiv.org/abs/2007.14062"),i(Sn,"rel","nofollow"),i($i,"href","model_doc/blenderbot"),i(wn,"href","https://arxiv.org/abs/2004.13637"),i(wn,"rel","nofollow"),i(ed,"href","model_doc/blenderbot-small"),i(Mn,"href","https://arxiv.org/abs/2004.13637"),i(Mn,"rel","nofollow"),i(td,"href","model_doc/bort"),i(Pn,"href","https://arxiv.org/abs/2010.10499"),i(Pn,"rel","nofollow"),i(rd,"href","model_doc/byt5"),i(Gn,"href","https://arxiv.org/abs/2105.13626"),i(Gn,"rel","nofollow"),i(ad,"href","model_doc/camembert"),i(kn,"href","https://arxiv.org/abs/1911.03894"),i(kn,"rel","nofollow"),i(ld,"href","model_doc/canine"),i(Bn,"href","https://arxiv.org/abs/2103.06874"),i(Bn,"rel","nofollow"),i(nd,"href","model_doc/convnext"),i(Cn,"href","https://arxiv.org/abs/2201.03545"),i(Cn,"rel","nofollow"),i(od,"href","model_doc/clip"),i(Nn,"href","https://arxiv.org/abs/2103.00020"),i(Nn,"rel","nofollow"),i(id,"href","model_doc/convbert"),i(In,"href","https://arxiv.org/abs/2008.02496"),i(In,"rel","nofollow"),i(dd,"href","model_doc/cpm"),i(xn,"href","https://arxiv.org/abs/2012.00413"),i(xn,"rel","nofollow"),i(sd,"href","model_doc/ctrl"),i(On,"href","https://arxiv.org/abs/1909.05858"),i(On,"rel","nofollow"),i(hd,"href","model_doc/data2vec"),i(Fn,"href","https://arxiv.org/abs/2202.03555"),i(Fn,"rel","nofollow"),i(cd,"href","model_doc/deberta"),i(Wn,"href","https://arxiv.org/abs/2006.03654"),i(Wn,"rel","nofollow"),i(gd,"href","model_doc/deberta-v2"),i(Yn,"href","https://arxiv.org/abs/2006.03654"),i(Yn,"rel","nofollow"),i(fd,"href","mod
el_doc/dit"),i(Jn,"href","https://arxiv.org/abs/2203.02378"),i(Jn,"rel","nofollow"),i(ud,"href","model_doc/deit"),i(Zn,"href","https://arxiv.org/abs/2012.12877"),i(Zn,"rel","nofollow"),i(vd,"href","model_doc/detr"),i(Vn,"href","https://arxiv.org/abs/2005.12872"),i(Vn,"rel","nofollow"),i(Ed,"href","model_doc/dialogpt"),i(Kn,"href","https://arxiv.org/abs/1911.00536"),i(Kn,"rel","nofollow"),i(Td,"href","model_doc/distilbert"),i(Xn,"href","https://arxiv.org/abs/1910.01108"),i(Xn,"rel","nofollow"),i(zn,"href","https://github.com/huggingface/transformers/tree/master/examples/research_projects/distillation"),i(zn,"rel","nofollow"),i(Hn,"href","https://github.com/huggingface/transformers/tree/master/examples/research_projects/distillation"),i(Hn,"rel","nofollow"),i(Un,"href","https://github.com/huggingface/transformers/tree/master/examples/research_projects/distillation"),i(Un,"rel","nofollow"),i(_d,"href","model_doc/dpr"),i(qn,"href","https://arxiv.org/abs/2004.04906"),i(qn,"rel","nofollow"),i(md,"href","model_doc/encoder-decoder"),i(Qn,"href","https://arxiv.org/abs/1907.12461"),i(Qn,"rel","nofollow"),i(pd,"href","model_doc/electra"),i(jn,"href","https://arxiv.org/abs/2003.10555"),i(jn,"rel","nofollow"),i(Dd,"href","model_doc/flaubert"),i($n,"href","https://arxiv.org/abs/1912.05372"),i($n,"rel","nofollow"),i(yd,"href","model_doc/fnet"),i(eo,"href","https://arxiv.org/abs/2105.03824"),i(eo,"rel","nofollow"),i(Ad,"href","model_doc/funnel"),i(to,"href","https://arxiv.org/abs/2006.03236"),i(to,"rel","nofollow"),i(Rd,"href","model_doc/openai-gpt"),i(ro,"href","https://blog.openai.com/language-unsupervised/"),i(ro,"rel","nofollow"),i(Ld,"href","model_doc/gpt2"),i(ao,"href","https://blog.openai.com/better-language-models/"),i(ao,"rel","nofollow"),i(bd,"href","model_doc/gptj"),i(lo,"href","https://github.com/kingoflolz/mesh-transformer-jax/"),i(lo,"rel","nofollow"),i(Sd,"href","model_doc/gpt_neo"),i(no,"href","https://github.com/EleutherAI/gpt-neo"),i(no,"rel","nofollow"),i(wd,"href","model_doc/hubert"),i(oo,"href","https://arxiv.org/abs/2106.07447"),i(oo,"rel","nofollow"),i(Md,"href","model_doc/ibert"),i(io,"href","https://arxiv.org/abs/2101.01321"),i(io,"rel","nofollow"),i(Pd,"href","model_doc/imagegpt"),i(so,"href","https://openai.com/blog/image-gpt/"),i(so,"rel","nofollow"),i(Gd,"href","model_doc/layoutlm"),i(ho,"href","https://arxiv.org/abs/1912.13318"),i(ho,"rel","nofollow"),i(kd,"href","model_doc/layoutlmv2"),i(co,"href","https://arxiv.org/abs/2012.14740"),i(co,"rel","nofollow"),i(Bd,"href","model_doc/layoutlmv2"),i(go,"href","https://arxiv.org/abs/2104.08836"),i(go,"rel","nofollow"),i(Cd,"href","model_doc/led"),i(fo,"href","https://arxiv.org/abs/2004.05150"),i(fo,"rel","nofollow"),i(Nd,"href","model_doc/longformer"),i(uo,"href","https://arxiv.org/abs/2004.05150"),i(uo,"rel","nofollow"),i(Id,"href","model_doc/luke"),i(vo,"href","https://arxiv.org/abs/2010.01057"),i(vo,"rel","nofollow"),i(xd,"href","model_doc/mluke"),i(Eo,"href","https://arxiv.org/abs/2110.08151"),i(Eo,"rel","nofollow"),i(Od,"href","model_doc/lxmert"),i(To,"href","https://arxiv.org/abs/1908.07490"),i(To,"rel","nofollow"),i(Fd,"href","model_doc/m2m_100"),i(_o,"href","https://arxiv.org/abs/2010.11125"),i(_o,"rel","nofollow"),i(Wd,"href","model_doc/marian"),i(mo,"href","http://opus.nlpl.eu/"),i(mo,"rel","nofollow"),i(po,"href","https://marian-nmt.github.io/"),i(po,"rel","nofollow"),i(Yd,"href","model_doc/maskformer"),i(Do,"href","https://arxiv.org/abs/2107.06278"),i(Do,"rel","nofollow"),i(Jd,"href","model_doc/mbart"),i(yo,"href","https:
//arxiv.org/abs/2001.08210"),i(yo,"rel","nofollow"),i(Zd,"href","model_doc/mbart"),i(Ao,"href","https://arxiv.org/abs/2008.00401"),i(Ao,"rel","nofollow"),i(Vd,"href","model_doc/megatron-bert"),i(Ro,"href","https://arxiv.org/abs/1909.08053"),i(Ro,"rel","nofollow"),i(Kd,"href","model_doc/megatron_gpt2"),i(Lo,"href","https://arxiv.org/abs/1909.08053"),i(Lo,"rel","nofollow"),i(Xd,"href","model_doc/mpnet"),i(bo,"href","https://arxiv.org/abs/2004.09297"),i(bo,"rel","nofollow"),i(zd,"href","model_doc/mt5"),i(So,"href","https://arxiv.org/abs/2010.11934"),i(So,"rel","nofollow"),i(Hd,"href","model_doc/nystromformer"),i(wo,"href","https://arxiv.org/abs/2102.03902"),i(wo,"rel","nofollow"),i(Ud,"href","model_doc/pegasus"),i(Mo,"href","https://arxiv.org/abs/1912.08777"),i(Mo,"rel","nofollow"),i(qd,"href","model_doc/perceiver"),i(Po,"href","https://arxiv.org/abs/2107.14795"),i(Po,"rel","nofollow"),i(Qd,"href","model_doc/phobert"),i(Go,"href","https://www.aclweb.org/anthology/2020.findings-emnlp.92/"),i(Go,"rel","nofollow"),i(jd,"href","model_doc/plbart"),i(ko,"href","https://arxiv.org/abs/2103.06333"),i(ko,"rel","nofollow"),i($d,"href","model_doc/poolformer"),i(Bo,"href","https://arxiv.org/abs/2111.11418"),i(Bo,"rel","nofollow"),i(es,"href","model_doc/prophetnet"),i(Co,"href","https://arxiv.org/abs/2001.04063"),i(Co,"rel","nofollow"),i(ts,"href","model_doc/qdqbert"),i(No,"href","https://arxiv.org/abs/2004.09602"),i(No,"rel","nofollow"),i(Io,"href","https://huggingface.co/transformers/model_doc/realm.html"),i(Io,"rel","nofollow"),i(xo,"href","https://arxiv.org/abs/2002.08909"),i(xo,"rel","nofollow"),i(rs,"href","model_doc/reformer"),i(Oo,"href","https://arxiv.org/abs/2001.04451"),i(Oo,"rel","nofollow"),i(as,"href","model_doc/rembert"),i(Fo,"href","https://arxiv.org/abs/2010.12821"),i(Fo,"rel","nofollow"),i(ls,"href","model_doc/roberta"),i(Wo,"href","https://arxiv.org/abs/1907.11692"),i(Wo,"rel","nofollow"),i(ns,"href","model_doc/roformer"),i(Yo,"href","https://arxiv.org/abs/2104.09864"),i(Yo,"rel","nofollow"),i(os,"href","model_doc/segformer"),i(Jo,"href","https://arxiv.org/abs/2105.15203"),i(Jo,"rel","nofollow"),i(is,"href","model_doc/sew"),i(Zo,"href","https://arxiv.org/abs/2109.06870"),i(Zo,"rel","nofollow"),i(ds,"href","model_doc/sew_d"),i(Vo,"href","https://arxiv.org/abs/2109.06870"),i(Vo,"rel","nofollow"),i(ss,"href","model_doc/speech_to_text"),i(Ko,"href","https://arxiv.org/abs/2010.05171"),i(Ko,"rel","nofollow"),i(hs,"href","model_doc/speech_to_text_2"),i(Xo,"href","https://arxiv.org/abs/2104.06678"),i(Xo,"rel","nofollow"),i(cs,"href","model_doc/splinter"),i(zo,"href","https://arxiv.org/abs/2101.00438"),i(zo,"rel","nofollow"),i(gs,"href","model_doc/squeezebert"),i(Ho,"href","https://arxiv.org/abs/2006.11316"),i(Ho,"rel","nofollow"),i(fs,"href","model_doc/swin"),i(Uo,"href","https://arxiv.org/abs/2103.14030"),i(Uo,"rel","nofollow"),i(us,"href","model_doc/t5"),i(qo,"href","https://arxiv.org/abs/1910.10683"),i(qo,"rel","nofollow"),i(vs,"href","model_doc/t5v1.1"),i(Qo,"href","https://github.com/google-research/text-to-text-transfer-transformer/blob/main/released_checkpoints.md#t511"),i(Qo,"rel","nofollow"),i(Es,"href","model_doc/tapas"),i(jo,"href","https://arxiv.org/abs/2004.02349"),i(jo,"rel","nofollow"),i(Ts,"href","model_doc/transfo-xl"),i($o,"href","https://arxiv.org/abs/1901.02860"),i($o,"rel","nofollow"),i(_s,"href","model_doc/trocr"),i(ei,"href","https://arxiv.org/abs/2109.10282"),i(ei,"rel","nofollow"),i(ms,"href","model_doc/unispeech"),i(ti,"href","https://arxiv.org/abs/2101.07597"),i(ti,"rel
","nofollow"),i(ps,"href","model_doc/unispeech-sat"),i(ri,"href","https://arxiv.org/abs/2110.05752"),i(ri,"rel","nofollow"),i(Ds,"href","model_doc/vilt"),i(ai,"href","https://arxiv.org/abs/2102.03334"),i(ai,"rel","nofollow"),i(ys,"href","model_doc/vit"),i(li,"href","https://arxiv.org/abs/2010.11929"),i(li,"rel","nofollow"),i(As,"href","model_doc/vit_mae"),i(ni,"href","https://arxiv.org/abs/2111.06377"),i(ni,"rel","nofollow"),i(Rs,"href","model_doc/visual_bert"),i(oi,"href","https://arxiv.org/pdf/1908.03557"),i(oi,"rel","nofollow"),i(Ls,"href","model_doc/wavlm"),i(ii,"href","https://arxiv.org/abs/2110.13900"),i(ii,"rel","nofollow"),i(bs,"href","model_doc/wav2vec2"),i(di,"href","https://arxiv.org/abs/2006.11477"),i(di,"rel","nofollow"),i(si,"href","https://huggingface.co/docs/master/transformers/model_doc/wav2vec2_phoneme"),i(si,"rel","nofollow"),i(hi,"href","https://arxiv.org/abs/2109.11680"),i(hi,"rel","nofollow"),i(ci,"href","https://huggingface.co/docs/master/transformers/model_doc/xglm"),i(ci,"rel","nofollow"),i(gi,"href","https://arxiv.org/abs/2112.10668"),i(gi,"rel","nofollow"),i(Ss,"href","model_doc/xlm"),i(fi,"href","https://arxiv.org/abs/1901.07291"),i(fi,"rel","nofollow"),i(ws,"href","model_doc/xlm-prophetnet"),i(ui,"href","https://arxiv.org/abs/2001.04063"),i(ui,"rel","nofollow"),i(Ms,"href","model_doc/xlm-roberta"),i(vi,"href","https://arxiv.org/abs/1911.02116"),i(vi,"rel","nofollow"),i(Ps,"href","model_doc/xlm-roberta-xl"),i(Ei,"href","https://arxiv.org/abs/2105.00572"),i(Ei,"rel","nofollow"),i(Gs,"href","model_doc/xlnet"),i(Ti,"href","https://arxiv.org/abs/1906.08237"),i(Ti,"rel","nofollow"),i(ks,"href","model_doc/xlsr_wav2vec2"),i(_i,"href","https://arxiv.org/abs/2006.13979"),i(_i,"rel","nofollow"),i(mi,"href","https://huggingface.co/docs/master/transformers/model_doc/xls_r"),i(mi,"rel","nofollow"),i(pi,"href","https://arxiv.org/abs/2111.09296"),i(pi,"rel","nofollow"),i(Bs,"href","model_doc/yoso"),i(Di,"href","https://arxiv.org/abs/2111.09714"),i(Di,"rel","nofollow"),i(nn,"id","supported-frameworks"),i(nn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),i(nn,"href","#supported-frameworks"),i(fa,"class","relative 
group"),i(Ns,"align","center"),i(Is,"align","center"),i(xs,"align","center"),i(Os,"align","center"),i(Fs,"align","center"),i(Ws,"align","center"),i(Ys,"align","center"),i(Js,"align","center"),i(Zs,"align","center"),i(Vs,"align","center"),i(Ks,"align","center"),i(Xs,"align","center"),i(zs,"align","center"),i(Hs,"align","center"),i(Us,"align","center"),i(qs,"align","center"),i(Qs,"align","center"),i(js,"align","center"),i($s,"align","center"),i(eh,"align","center"),i(th,"align","center"),i(rh,"align","center"),i(ah,"align","center"),i(lh,"align","center"),i(nh,"align","center"),i(oh,"align","center"),i(ih,"align","center"),i(dh,"align","center"),i(sh,"align","center"),i(hh,"align","center"),i(ch,"align","center"),i(gh,"align","center"),i(fh,"align","center"),i(uh,"align","center"),i(vh,"align","center"),i(Eh,"align","center"),i(Th,"align","center"),i(_h,"align","center"),i(mh,"align","center"),i(ph,"align","center"),i(Dh,"align","center"),i(yh,"align","center"),i(Ah,"align","center"),i(Rh,"align","center"),i(Lh,"align","center"),i(bh,"align","center"),i(Sh,"align","center"),i(wh,"align","center"),i(Mh,"align","center"),i(Ph,"align","center"),i(Gh,"align","center"),i(kh,"align","center"),i(Bh,"align","center"),i(Ch,"align","center"),i(Nh,"align","center"),i(Ih,"align","center"),i(xh,"align","center"),i(Oh,"align","center"),i(Fh,"align","center"),i(Wh,"align","center"),i(Yh,"align","center"),i(Jh,"align","center"),i(Zh,"align","center"),i(Vh,"align","center"),i(Kh,"align","center"),i(Xh,"align","center"),i(zh,"align","center"),i(Hh,"align","center"),i(Uh,"align","center"),i(qh,"align","center"),i(Qh,"align","center"),i(jh,"align","center"),i($h,"align","center"),i(ec,"align","center"),i(tc,"align","center"),i(rc,"align","center"),i(ac,"align","center"),i(lc,"align","center"),i(nc,"align","center"),i(oc,"align","center"),i(ic,"align","center"),i(dc,"align","center"),i(sc,"align","center"),i(hc,"align","center"),i(cc,"align","center"),i(gc,"align","center"),i(fc,"align","center"),i(uc,"align","center"),i(vc,"align","center"),i(Ec,"align","center"),i(Tc,"align","center"),i(_c,"align","center"),i(mc,"align","center"),i(pc,"align","center"),i(Dc,"align","center"),i(yc,"align","center"),i(Ac,"align","center"),i(Rc,"align","center"),i(Lc,"align","center"),i(bc,"align","center"),i(Sc,"align","center"),i(wc,"align","center"),i(Mc,"align","center"),i(Pc,"align","center"),i(Gc,"align","center"),i(kc,"align","center"),i(Bc,"align","center"),i(Cc,"align","center"),i(Nc,"align","center"),i(Ic,"align","center"),i(xc,"align","center"),i(Oc,"align","center"),i(Fc,"align","center"),i(Wc,"align","center"),i(Yc,"align","center"),i(Jc,"align","center"),i(Zc,"align","center"),i(Vc,"align","center"),i(Kc,"align","center"),i(Xc,"align","center"),i(zc,"align","center"),i(Hc,"align","center"),i(Uc,"align","center"),i(qc,"align","center"),i(Qc,"align","center"),i(jc,"align","center"),i($c,"align","center"),i(eg,"align","center"),i(tg,"align","center"),i(rg,"align","center"),i(ag,"align","center"),i(lg,"align","center"),i(ng,"align","center"),i(og,"align","center"),i(ig,"align","center"),i(dg,"align","center"),i(sg,"align","center"),i(hg,"align","center"),i(cg,"align","center"),i(gg,"align","center"),i(fg,"align","center"),i(ug,"align","center"),i(vg,"align","center"),i(Eg,"align","center"),i(Tg,"align","center"),i(_g,"align","center"),i(mg,"align","center"),i(pg,"align","center"),i(Dg,"align","center"),i(yg,"align","center"),i(Ag,"align","center"),i(Rg,"align","center"),i(Lg,"align","center"),i(bg,"align","center"),i(Sg,
"align","center"),i(wg,"align","center"),i(Mg,"align","center"),i(Pg,"align","center"),i(Gg,"align","center"),i(kg,"align","center"),i(Bg,"align","center"),i(Cg,"align","center"),i(Ng,"align","center"),i(Ig,"align","center"),i(xg,"align","center"),i(Og,"align","center"),i(Fg,"align","center"),i(Wg,"align","center"),i(Yg,"align","center"),i(Jg,"align","center"),i(Zg,"align","center"),i(Vg,"align","center"),i(Kg,"align","center"),i(Xg,"align","center"),i(zg,"align","center"),i(Hg,"align","center"),i(Ug,"align","center"),i(qg,"align","center"),i(Qg,"align","center"),i(jg,"align","center"),i($g,"align","center"),i(ef,"align","center"),i(tf,"align","center"),i(rf,"align","center"),i(af,"align","center"),i(lf,"align","center"),i(nf,"align","center"),i(of,"align","center"),i(df,"align","center"),i(sf,"align","center"),i(hf,"align","center"),i(cf,"align","center"),i(gf,"align","center"),i(ff,"align","center"),i(uf,"align","center"),i(vf,"align","center"),i(Ef,"align","center"),i(Tf,"align","center"),i(_f,"align","center"),i(mf,"align","center"),i(pf,"align","center"),i(Df,"align","center"),i(yf,"align","center"),i(Af,"align","center"),i(Rf,"align","center"),i(Lf,"align","center"),i(bf,"align","center"),i(Sf,"align","center"),i(wf,"align","center"),i(Mf,"align","center"),i(Pf,"align","center"),i(Gf,"align","center"),i(kf,"align","center"),i(Bf,"align","center"),i(Cf,"align","center"),i(Nf,"align","center"),i(If,"align","center"),i(xf,"align","center"),i(Of,"align","center"),i(Ff,"align","center"),i(Wf,"align","center"),i(Yf,"align","center"),i(Jf,"align","center"),i(Zf,"align","center"),i(Vf,"align","center"),i(Kf,"align","center"),i(Xf,"align","center"),i(zf,"align","center"),i(Hf,"align","center"),i(Uf,"align","center"),i(qf,"align","center"),i(Qf,"align","center"),i(jf,"align","center"),i($f,"align","center"),i(e1,"align","center"),i(t1,"align","center"),i(r1,"align","center"),i(a1,"align","center"),i(l1,"align","center"),i(n1,"align","center"),i(o1,"align","center"),i(i1,"align","center"),i(d1,"align","center"),i(s1,"align","center"),i(h1,"align","center"),i(c1,"align","center"),i(g1,"align","center"),i(f1,"align","center"),i(u1,"align","center"),i(v1,"align","center"),i(E1,"align","center"),i(T1,"align","center"),i(_1,"align","center"),i(m1,"align","center"),i(p1,"align","center"),i(D1,"align","center"),i(y1,"align","center"),i(A1,"align","center"),i(R1,"align","center"),i(L1,"align","center"),i(b1,"align","center"),i(S1,"align","center"),i(w1,"align","center"),i(M1,"align","center"),i(P1,"align","center"),i(G1,"align","center"),i(k1,"align","center"),i(B1,"align","center"),i(C1,"align","center"),i(N1,"align","center"),i(I1,"align","center"),i(x1,"align","center"),i(O1,"align","center"),i(F1,"align","center"),i(W1,"align","center"),i(Y1,"align","center"),i(J1,"align","center"),i(Z1,"align","center"),i(V1,"align","center"),i(K1,"align","center"),i(X1,"align","center"),i(z1,"align","center"),i(H1,"align","center"),i(U1,"align","center"),i(q1,"align","center"),i(Q1,"align","center"),i(j1,"align","center"),i($1,"align","center"),i(eu,"align","center"),i(tu,"align","center"),i(ru,"align","center"),i(au,"align","center"),i(lu,"align","center"),i(nu,"align","center"),i(ou,"align","center"),i(iu,"align","center"),i(du,"align","center"),i(su,"align","center"),i(hu,"align","center"),i(cu,"align","center"),i(gu,"align","center"),i(fu,"align","center"),i(uu,"align","center"),i(vu,"align","center"),i(Eu,"align","center"),i(Tu,"align","center"),i(_u,"align","center"),i(mu,"align","center"),i(pu,"align","cent
er"),i(Du,"align","center"),i(yu,"align","center"),i(Au,"align","center"),i(Ru,"align","center"),i(Lu,"align","center"),i(bu,"align","center"),i(Su,"align","center"),i(wu,"align","center"),i(Mu,"align","center"),i(Pu,"align","center"),i(Gu,"align","center"),i(ku,"align","center"),i(Bu,"align","center"),i(Cu,"align","center"),i(Nu,"align","center"),i(Iu,"align","center"),i(xu,"align","center"),i(Ou,"align","center"),i(Fu,"align","center"),i(Wu,"align","center"),i(Yu,"align","center"),i(Ju,"align","center"),i(Zu,"align","center"),i(Vu,"align","center"),i(Ku,"align","center"),i(Xu,"align","center"),i(zu,"align","center"),i(Hu,"align","center"),i(Uu,"align","center"),i(qu,"align","center"),i(Qu,"align","center"),i(ju,"align","center"),i($u,"align","center"),i(ev,"align","center"),i(tv,"align","center"),i(rv,"align","center"),i(av,"align","center"),i(lv,"align","center"),i(nv,"align","center"),i(ov,"align","center"),i(iv,"align","center"),i(dv,"align","center"),i(sv,"align","center"),i(hv,"align","center"),i(cv,"align","center"),i(gv,"align","center"),i(fv,"align","center"),i(uv,"align","center"),i(vv,"align","center"),i(Ev,"align","center"),i(Tv,"align","center"),i(_v,"align","center"),i(mv,"align","center"),i(pv,"align","center"),i(Dv,"align","center"),i(yv,"align","center"),i(Av,"align","center"),i(Rv,"align","center"),i(Lv,"align","center"),i(bv,"align","center"),i(Sv,"align","center"),i(wv,"align","center"),i(Mv,"align","center"),i(Pv,"align","center"),i(Gv,"align","center"),i(kv,"align","center"),i(Bv,"align","center"),i(Cv,"align","center"),i(Nv,"align","center"),i(Iv,"align","center"),i(xv,"align","center"),i(Ov,"align","center"),i(Fv,"align","center"),i(Wv,"align","center"),i(Yv,"align","center"),i(Jv,"align","center"),i(Zv,"align","center"),i(Vv,"align","center"),i(Kv,"align","center"),i(Xv,"align","center"),i(zv,"align","center"),i(Hv,"align","center"),i(Uv,"align","center"),i(qv,"align","center"),i(Qv,"align","center"),i(jv,"align","center"),i($v,"align","center"),i(eE,"align","center"),i(tE,"align","center"),i(rE,"align","center"),i(aE,"align","center"),i(lE,"align","center"),i(nE,"align","center"),i(oE,"align","center"),i(iE,"align","center"),i(dE,"align","center"),i(sE,"align","center"),i(hE,"align","center"),i(cE,"align","center"),i(gE,"align","center"),i(fE,"align","center"),i(uE,"align","center"),i(vE,"align","center"),i(EE,"align","center"),i(TE,"align","center"),i(_E,"align","center"),i(mE,"align","center"),i(pE,"align","center"),i(DE,"align","center"),i(yE,"align","center"),i(AE,"align","center"),i(RE,"align","center"),i(LE,"align","center"),i(bE,"align","center"),i(SE,"align","center"),i(wE,"align","center"),i(ME,"align","center"),i(PE,"align","center"),i(GE,"align","center"),i(kE,"align","center"),i(BE,"align","center"),i(CE,"align","center"),i(NE,"align","center"),i(IE,"align","center"),i(xE,"align","center"),i(OE,"align","center"),i(FE,"align","center"),i(WE,"align","center"),i(YE,"align","center"),i(JE,"align","center"),i(ZE,"align","center"),i(VE,"align","center"),i(KE,"align","center"),i(XE,"align","center"),i(zE,"align","center"),i(HE,"align","center"),i(UE,"align","center"),i(qE,"align","center"),i(QE,"align","center"),i(jE,"align","center"),i($E,"align","center"),i(e2,"align","center"),i(t2,"align","center"),i(r2,"align","center"),i(a2,"align","center"),i(l2,"align","center"),i(n2,"align","center"),i(o2,"align","center"),i(i2,"align","center"),i(d2,"align","center"),i(s2,"align","center"),i(h2,"align","center"),i(c2,"align","center"),i(g2,"align","center"),i(f2,"al
ign","center"),i(u2,"align","center"),i(v2,"align","center"),i(E2,"align","center"),i(T2,"align","center"),i(_2,"align","center"),i(m2,"align","center"),i(p2,"align","center"),i(D2,"align","center"),i(y2,"align","center"),i(A2,"align","center"),i(R2,"align","center"),i(L2,"align","center"),i(b2,"align","center"),i(S2,"align","center"),i(w2,"align","center"),i(M2,"align","center"),i(P2,"align","center"),i(G2,"align","center"),i(k2,"align","center"),i(B2,"align","center"),i(C2,"align","center"),i(N2,"align","center"),i(I2,"align","center"),i(x2,"align","center"),i(O2,"align","center"),i(F2,"align","center"),i(W2,"align","center"),i(Y2,"align","center"),i(J2,"align","center"),i(Z2,"align","center"),i(V2,"align","center"),i(K2,"align","center"),i(X2,"align","center"),i(z2,"align","center"),i(H2,"align","center"),i(U2,"align","center"),i(q2,"align","center"),i(Q2,"align","center"),i(j2,"align","center"),i($2,"align","center"),i(eT,"align","center"),i(tT,"align","center"),i(rT,"align","center"),i(aT,"align","center"),i(lT,"align","center"),i(nT,"align","center"),i(oT,"align","center"),i(iT,"align","center"),i(dT,"align","center"),i(sT,"align","center"),i(hT,"align","center"),i(cT,"align","center"),i(gT,"align","center"),i(fT,"align","center"),i(uT,"align","center"),i(vT,"align","center"),i(ET,"align","center"),i(TT,"align","center"),i(_T,"align","center"),i(mT,"align","center"),i(pT,"align","center"),i(DT,"align","center"),i(yT,"align","center"),i(AT,"align","center"),i(RT,"align","center"),i(LT,"align","center"),i(bT,"align","center"),i(ST,"align","center"),i(wT,"align","center"),i(MT,"align","center"),i(PT,"align","center"),i(GT,"align","center"),i(kT,"align","center"),i(BT,"align","center"),i(CT,"align","center"),i(NT,"align","center"),i(IT,"align","center"),i(xT,"align","center"),i(OT,"align","center"),i(FT,"align","center"),i(WT,"align","center"),i(YT,"align","center"),i(JT,"align","center"),i(ZT,"align","center"),i(VT,"align","center"),i(KT,"align","center"),i(XT,"align","center"),i(zT,"align","center"),i(HT,"align","center"),i(UT,"align","center"),i(qT,"align","center"),i(QT,"align","center"),i(jT,"align","center"),i($T,"align","center"),i(e_,"align","center"),i(t_,"align","center"),i(r_,"align","center"),i(a_,"align","center"),i(l_,"align","center"),i(n_,"align","center"),i(o_,"align","center"),i(i_,"align","center"),i(d_,"align","center"),i(s_,"align","center"),i(h_,"align","center"),i(c_,"align","center")},m(u,v){e(document.head,$r),E(u,Mi,v),E(u,ot,v),e(ot,Qr),e(Qr,f_),Gm(hn,f_,null),e(ot,s0),e(ot,u_),e(u_,h0),E(u,Im,v),E(u,Pi,v),e(Pi,c0),E(u,xm,v),E(u,Gi,v),e(Gi,g0),E(u,Om,v),E(u,it,v),e(it,v_),e(v_,f0),e(it,u0),e(it,E_),e(E_,v0),e(it,E0),e(it,T_),e(T_,T0),e(it,_0),e(it,__),e(__,m0),E(u,Fm,v),E(u,dt,v),e(dt,p0),e(dt,cn),e(cn,D0),e(dt,y0),e(dt,gn),e(gn,A0),e(dt,R0),e(dt,fn),e(fn,L0),e(dt,b0),E(u,Wm,v),E(u,ki,v),e(ki,S0),E(u,Ym,v),E(u,da,v),e(da,va),e(va,m_),Gm(un,m_,null),e(da,w0),e(da,p_),e(p_,M0),E(u,Jm,v),E(u,sa,v),e(sa,ea),e(sa,P0),E(u,Zm,v),E(u,Vm,v),E(u,ha,v),e(ha,Ea),e(Ea,D_),Gm(vn,D_,null),e(ha,G0),e(ha,y_),e(y_,k0),E(u,Km,v),E(u,Bi,v),e(Bi,B0),E(u,Xm,v),E(u,lt,v),e(lt,A_),e(A_,Ci),e(Ci,R_),e(R_,C0),e(Ci,N0),e(lt,I0),e(lt,L_),e(L_,Ni),e(Ni,b_),e(b_,x0),e(Ni,O0),e(lt,F0),e(lt,S_),e(S_,Ii),e(Ii,w_),e(w_,W0),e(Ii,Y0),e(lt,J0),e(lt,M_),e(M_,xi),e(xi,P_),e(P_,Z0),e(xi,V0),e(lt,K0),e(lt,En),e(En,Oi),e(Oi,G_),e(G_,X0),e(Oi,z0),e(En,H0),e(En,ca),e(ca,Fi),e(Fi,k_),e(k_,U0),e(Fi,q0),e(ca,Q0),e(ca,Wi),e(Wi,B_),e(B_,j0),e(Wi,$0),e(ca,ep),e(ca,Yi),e(Yi,C_),e(C_,tp),e(Yi,rp),E(u,zm,v),E(u,Ji
,v),e(Ji,ap),E(u,Hm,v),E(u,ga,v),e(ga,Ta),e(Ta,N_),Gm(Tn,N_,null),e(ga,lp),e(ga,I_),e(I_,np),E(u,Um,v),E(u,h,v),e(h,_a),e(_a,x_),e(x_,Zi),e(Zi,op),e(_a,ip),e(_a,_n),e(_n,dp),e(_a,sp),e(h,hp),e(h,ma),e(ma,O_),e(O_,Vi),e(Vi,cp),e(ma,gp),e(ma,mn),e(mn,fp),e(ma,up),e(h,vp),e(h,pa),e(pa,F_),e(F_,Ki),e(Ki,Ep),e(pa,Tp),e(pa,pn),e(pn,_p),e(pa,mp),e(h,pp),e(h,Da),e(Da,W_),e(W_,Xi),e(Xi,Dp),e(Da,yp),e(Da,Dn),e(Dn,Ap),e(Da,Rp),e(h,Lp),e(h,ya),e(ya,Y_),e(Y_,zi),e(zi,bp),e(ya,Sp),e(ya,yn),e(yn,wp),e(ya,Mp),e(h,Pp),e(h,Aa),e(Aa,J_),e(J_,Hi),e(Hi,Gp),e(Aa,kp),e(Aa,An),e(An,Bp),e(Aa,Cp),e(h,Np),e(h,Ra),e(Ra,Z_),e(Z_,Ui),e(Ui,Ip),e(Ra,xp),e(Ra,Rn),e(Rn,Op),e(Ra,Fp),e(h,Wp),e(h,La),e(La,V_),e(V_,qi),e(qi,Yp),e(La,Jp),e(La,Ln),e(Ln,Zp),e(La,Vp),e(h,Kp),e(h,ba),e(ba,K_),e(K_,Qi),e(Qi,Xp),e(ba,zp),e(ba,bn),e(bn,Hp),e(ba,Up),e(h,qp),e(h,Sa),e(Sa,X_),e(X_,ji),e(ji,Qp),e(Sa,jp),e(Sa,Sn),e(Sn,$p),e(Sa,e6),e(h,t6),e(h,wa),e(wa,z_),e(z_,$i),e($i,r6),e(wa,a6),e(wa,wn),e(wn,l6),e(wa,n6),e(h,o6),e(h,Ma),e(Ma,H_),e(H_,ed),e(ed,i6),e(Ma,d6),e(Ma,Mn),e(Mn,s6),e(Ma,h6),e(h,c6),e(h,Pa),e(Pa,U_),e(U_,td),e(td,g6),e(Pa,f6),e(Pa,Pn),e(Pn,u6),e(Pa,v6),e(h,E6),e(h,Ga),e(Ga,q_),e(q_,rd),e(rd,T6),e(Ga,_6),e(Ga,Gn),e(Gn,m6),e(Ga,p6),e(h,D6),e(h,ta),e(ta,Q_),e(Q_,ad),e(ad,y6),e(ta,A6),e(ta,kn),e(kn,R6),e(ta,L6),e(ta,j_),e(j_,b6),e(ta,S6),e(h,w6),e(h,ka),e(ka,$_),e($_,ld),e(ld,M6),e(ka,P6),e(ka,Bn),e(Bn,G6),e(ka,k6),e(h,B6),e(h,Ba),e(Ba,e3),e(e3,nd),e(nd,C6),e(Ba,N6),e(Ba,Cn),e(Cn,I6),e(Ba,x6),e(h,O6),e(h,Ca),e(Ca,t3),e(t3,od),e(od,F6),e(Ca,W6),e(Ca,Nn),e(Nn,Y6),e(Ca,J6),e(h,Z6),e(h,Na),e(Na,r3),e(r3,id),e(id,V6),e(Na,K6),e(Na,In),e(In,X6),e(Na,z6),e(h,H6),e(h,Ia),e(Ia,a3),e(a3,dd),e(dd,U6),e(Ia,q6),e(Ia,xn),e(xn,Q6),e(Ia,j6),e(h,$6),e(h,ra),e(ra,l3),e(l3,sd),e(sd,e7),e(ra,t7),e(ra,On),e(On,r7),e(ra,a7),e(ra,n3),e(n3,l7),e(ra,n7),e(h,o7),e(h,xa),e(xa,o3),e(o3,hd),e(hd,i7),e(xa,d7),e(xa,Fn),e(Fn,s7),e(xa,h7),e(h,c7),e(h,Oa),e(Oa,i3),e(i3,cd),e(cd,g7),e(Oa,f7),e(Oa,Wn),e(Wn,u7),e(Oa,v7),e(h,E7),e(h,Fa),e(Fa,d3),e(d3,gd),e(gd,T7),e(Fa,_7),e(Fa,Yn),e(Yn,m7),e(Fa,p7),e(h,D7),e(h,Wa),e(Wa,s3),e(s3,fd),e(fd,y7),e(Wa,A7),e(Wa,Jn),e(Jn,R7),e(Wa,L7),e(h,b7),e(h,Ya),e(Ya,h3),e(h3,ud),e(ud,S7),e(Ya,w7),e(Ya,Zn),e(Zn,M7),e(Ya,P7),e(h,G7),e(h,Ja),e(Ja,c3),e(c3,vd),e(vd,k7),e(Ja,B7),e(Ja,Vn),e(Vn,C7),e(Ja,N7),e(h,I7),e(h,Za),e(Za,g3),e(g3,Ed),e(Ed,x7),e(Za,O7),e(Za,Kn),e(Kn,F7),e(Za,W7),e(h,Y7),e(h,nt),e(nt,f3),e(f3,Td),e(Td,J7),e(nt,Z7),e(nt,Xn),e(Xn,V7),e(nt,K7),e(nt,zn),e(zn,X7),e(nt,z7),e(nt,Hn),e(Hn,H7),e(nt,U7),e(nt,Un),e(Un,q7),e(nt,Q7),e(h,j7),e(h,Va),e(Va,u3),e(u3,_d),e(_d,$7),e(Va,e9),e(Va,qn),e(qn,t9),e(Va,r9),e(h,a9),e(h,Ka),e(Ka,v3),e(v3,md),e(md,l9),e(Ka,n9),e(Ka,Qn),e(Qn,o9),e(Ka,i9),e(h,d9),e(h,Xa),e(Xa,E3),e(E3,pd),e(pd,s9),e(Xa,h9),e(Xa,jn),e(jn,c9),e(Xa,g9),e(h,f9),e(h,za),e(za,T3),e(T3,Dd),e(Dd,u9),e(za,v9),e(za,$n),e($n,E9),e(za,T9),e(h,_9),e(h,Ha),e(Ha,_3),e(_3,yd),e(yd,m9),e(Ha,p9),e(Ha,eo),e(eo,D9),e(Ha,y9),e(h,A9),e(h,Ua),e(Ua,m3),e(m3,Ad),e(Ad,R9),e(Ua,L9),e(Ua,to),e(to,b9),e(Ua,S9),e(h,w9),e(h,qa),e(qa,p3),e(p3,Rd),e(Rd,M9),e(qa,P9),e(qa,ro),e(ro,G9),e(qa,k9),e(h,B9),e(h,st),e(st,D3),e(D3,Ld),e(Ld,C9),e(st,N9),e(st,ao),e(ao,I9),e(st,x9),e(st,y3),e(y3,O9),e(st,F9),e(st,A3),e(A3,W9),e(st,Y9),e(h,J9),e(h,Qa),e(Qa,R3),e(R3,bd),e(bd,Z9),e(Qa,V9),e(Qa,lo),e(lo,K9),e(Qa,X9),e(h,z9),e(h,ja),e(ja,L3),e(L3,Sd),e(Sd,H9),e(ja,U9),e(ja,no),e(no,q9),e(ja,Q9),e(h,j9),e(h,$a),e($a,b3),e(b3,wd),e(wd,$9),e($a,e8),e($a,oo),e(oo,t8),e($a,r8),e(h,a8),e(h,el),e(el,S3),e(S3,Md),e(Md,l8),e(el,n8),e(el,io),e(io,o8),e(el,i8),e(h,d8),e(h,tl),e(tl,w3
),e(w3,Pd),e(Pd,s8),e(tl,h8),e(tl,so),e(so,c8),e(tl,g8),e(h,f8),e(h,rl),e(rl,M3),e(M3,Gd),e(Gd,u8),e(rl,v8),e(rl,ho),e(ho,E8),e(rl,T8),e(h,_8),e(h,al),e(al,P3),e(P3,kd),e(kd,m8),e(al,p8),e(al,co),e(co,D8),e(al,y8),e(h,A8),e(h,ll),e(ll,G3),e(G3,Bd),e(Bd,R8),e(ll,L8),e(ll,go),e(go,b8),e(ll,S8),e(h,w8),e(h,nl),e(nl,k3),e(k3,Cd),e(Cd,M8),e(nl,P8),e(nl,fo),e(fo,G8),e(nl,k8),e(h,B8),e(h,ol),e(ol,B3),e(B3,Nd),e(Nd,C8),e(ol,N8),e(ol,uo),e(uo,I8),e(ol,x8),e(h,O8),e(h,il),e(il,C3),e(C3,Id),e(Id,F8),e(il,W8),e(il,vo),e(vo,Y8),e(il,J8),e(h,Z8),e(h,dl),e(dl,N3),e(N3,xd),e(xd,V8),e(dl,K8),e(dl,Eo),e(Eo,X8),e(dl,z8),e(h,H8),e(h,sl),e(sl,I3),e(I3,Od),e(Od,U8),e(sl,q8),e(sl,To),e(To,Q8),e(sl,j8),e(h,$8),e(h,hl),e(hl,x3),e(x3,Fd),e(Fd,eD),e(hl,tD),e(hl,_o),e(_o,rD),e(hl,aD),e(h,lD),e(h,aa),e(aa,O3),e(O3,Wd),e(Wd,nD),e(aa,oD),e(aa,mo),e(mo,iD),e(aa,dD),e(aa,po),e(po,sD),e(aa,hD),e(h,cD),e(h,cl),e(cl,F3),e(F3,Yd),e(Yd,gD),e(cl,fD),e(cl,Do),e(Do,uD),e(cl,vD),e(h,ED),e(h,gl),e(gl,W3),e(W3,Jd),e(Jd,TD),e(gl,_D),e(gl,yo),e(yo,mD),e(gl,pD),e(h,DD),e(h,fl),e(fl,Y3),e(Y3,Zd),e(Zd,yD),e(fl,AD),e(fl,Ao),e(Ao,RD),e(fl,LD),e(h,bD),e(h,ul),e(ul,J3),e(J3,Vd),e(Vd,SD),e(ul,wD),e(ul,Ro),e(Ro,MD),e(ul,PD),e(h,GD),e(h,vl),e(vl,Z3),e(Z3,Kd),e(Kd,kD),e(vl,BD),e(vl,Lo),e(Lo,CD),e(vl,ND),e(h,ID),e(h,El),e(El,V3),e(V3,Xd),e(Xd,xD),e(El,OD),e(El,bo),e(bo,FD),e(El,WD),e(h,YD),e(h,Tl),e(Tl,K3),e(K3,zd),e(zd,JD),e(Tl,ZD),e(Tl,So),e(So,VD),e(Tl,KD),e(h,XD),e(h,_l),e(_l,X3),e(X3,Hd),e(Hd,zD),e(_l,HD),e(_l,wo),e(wo,UD),e(_l,qD),e(h,QD),e(h,ml),e(ml,z3),e(z3,Ud),e(Ud,jD),e(ml,$D),e(ml,Mo),e(Mo,ey),e(ml,ty),e(h,ry),e(h,pl),e(pl,H3),e(H3,qd),e(qd,ay),e(pl,ly),e(pl,Po),e(Po,ny),e(pl,oy),e(h,iy),e(h,Dl),e(Dl,U3),e(U3,Qd),e(Qd,dy),e(Dl,sy),e(Dl,Go),e(Go,hy),e(Dl,cy),e(h,gy),e(h,yl),e(yl,q3),e(q3,jd),e(jd,fy),e(yl,uy),e(yl,ko),e(ko,vy),e(yl,Ey),e(h,Ty),e(h,Al),e(Al,Q3),e(Q3,$d),e($d,_y),e(Al,my),e(Al,Bo),e(Bo,py),e(Al,Dy),e(h,yy),e(h,Rl),e(Rl,j3),e(j3,es),e(es,Ay),e(Rl,Ry),e(Rl,Co),e(Co,Ly),e(Rl,by),e(h,Sy),e(h,Ll),e(Ll,$3),e($3,ts),e(ts,wy),e(Ll,My),e(Ll,No),e(No,Py),e(Ll,Gy),e(h,ky),e(h,bl),e(bl,e4),e(e4,Io),e(Io,By),e(bl,Cy),e(bl,xo),e(xo,Ny),e(bl,Iy),e(h,xy),e(h,Sl),e(Sl,t4),e(t4,rs),e(rs,Oy),e(Sl,Fy),e(Sl,Oo),e(Oo,Wy),e(Sl,Yy),e(h,Jy),e(h,wl),e(wl,r4),e(r4,as),e(as,Zy),e(wl,Vy),e(wl,Fo),e(Fo,Ky),e(wl,Xy),e(h,zy),e(h,Ml),e(Ml,a4),e(a4,ls),e(ls,Hy),e(Ml,Uy),e(Ml,Wo),e(Wo,qy),e(Ml,Qy),e(h,jy),e(h,Pl),e(Pl,l4),e(l4,ns),e(ns,$y),e(Pl,eA),e(Pl,Yo),e(Yo,tA),e(Pl,rA),e(h,aA),e(h,Gl),e(Gl,n4),e(n4,os),e(os,lA),e(Gl,nA),e(Gl,Jo),e(Jo,oA),e(Gl,iA),e(h,dA),e(h,kl),e(kl,o4),e(o4,is),e(is,sA),e(kl,hA),e(kl,Zo),e(Zo,cA),e(kl,gA),e(h,fA),e(h,Bl),e(Bl,i4),e(i4,ds),e(ds,uA),e(Bl,vA),e(Bl,Vo),e(Vo,EA),e(Bl,TA),e(h,_A),e(h,Cl),e(Cl,d4),e(d4,ss),e(ss,mA),e(Cl,pA),e(Cl,Ko),e(Ko,DA),e(Cl,yA),e(h,AA),e(h,Nl),e(Nl,s4),e(s4,hs),e(hs,RA),e(Nl,LA),e(Nl,Xo),e(Xo,bA),e(Nl,SA),e(h,wA),e(h,Il),e(Il,h4),e(h4,cs),e(cs,MA),e(Il,PA),e(Il,zo),e(zo,GA),e(Il,kA),e(h,BA),e(h,xl),e(xl,c4),e(c4,gs),e(gs,CA),e(xl,NA),e(xl,Ho),e(Ho,IA),e(xl,xA),e(h,OA),e(h,Ol),e(Ol,g4),e(g4,fs),e(fs,FA),e(Ol,WA),e(Ol,Uo),e(Uo,YA),e(Ol,JA),e(h,ZA),e(h,Fl),e(Fl,f4),e(f4,us),e(us,VA),e(Fl,KA),e(Fl,qo),e(qo,XA),e(Fl,zA),e(h,HA),e(h,Wl),e(Wl,u4),e(u4,vs),e(vs,UA),e(Wl,qA),e(Wl,Qo),e(Qo,QA),e(Wl,jA),e(h,$A),e(h,Yl),e(Yl,v4),e(v4,Es),e(Es,eR),e(Yl,tR),e(Yl,jo),e(jo,rR),e(Yl,aR),e(h,lR),e(h,la),e(la,E4),e(E4,Ts),e(Ts,nR),e(la,oR),e(la,$o),e($o,iR),e(la,dR),e(la,T4),e(T4,sR),e(la,hR),e(h,cR),e(h,Jl),e(Jl,_4),e(_4,_s),e(_s,gR),e(Jl,fR),e(Jl,ei),e(ei,uR),e(Jl,vR),e(h,ER),e(h,Zl),e(Zl,m4),e(m4,ms),e(ms,TR),e(Zl,
_R),e(Zl,ti),e(ti,mR),e(Zl,pR),e(h,DR),e(h,Vl),e(Vl,p4),e(p4,ps),e(ps,yR),e(Vl,AR),e(Vl,ri),e(ri,RR),e(Vl,LR),e(h,bR),e(h,Kl),e(Kl,D4),e(D4,Ds),e(Ds,SR),e(Kl,wR),e(Kl,ai),e(ai,MR),e(Kl,PR),e(h,GR),e(h,Xl),e(Xl,y4),e(y4,ys),e(ys,kR),e(Xl,BR),e(Xl,li),e(li,CR),e(Xl,NR),e(h,IR),e(h,zl),e(zl,A4),e(A4,As),e(As,xR),e(zl,OR),e(zl,ni),e(ni,FR),e(zl,WR),e(h,YR),e(h,Hl),e(Hl,R4),e(R4,Rs),e(Rs,JR),e(Hl,ZR),e(Hl,oi),e(oi,VR),e(Hl,KR),e(h,XR),e(h,Ul),e(Ul,L4),e(L4,Ls),e(Ls,zR),e(Ul,HR),e(Ul,ii),e(ii,UR),e(Ul,qR),e(h,QR),e(h,ql),e(ql,b4),e(b4,bs),e(bs,jR),e(ql,$R),e(ql,di),e(di,eL),e(ql,tL),e(h,rL),e(h,Ql),e(Ql,S4),e(S4,si),e(si,aL),e(Ql,lL),e(Ql,hi),e(hi,nL),e(Ql,oL),e(h,iL),e(h,jl),e(jl,w4),e(w4,ci),e(ci,dL),e(jl,sL),e(jl,gi),e(gi,hL),e(jl,cL),e(h,gL),e(h,$l),e($l,M4),e(M4,Ss),e(Ss,fL),e($l,uL),e($l,fi),e(fi,vL),e($l,EL),e(h,TL),e(h,en),e(en,P4),e(P4,ws),e(ws,_L),e(en,mL),e(en,ui),e(ui,pL),e(en,DL),e(h,yL),e(h,na),e(na,G4),e(G4,Ms),e(Ms,AL),e(na,RL),e(na,vi),e(vi,LL),e(na,bL),e(na,k4),e(k4,SL),e(na,wL),e(h,ML),e(h,tn),e(tn,B4),e(B4,Ps),e(Ps,PL),e(tn,GL),e(tn,Ei),e(Ei,kL),e(tn,BL),e(h,CL),e(h,oa),e(oa,C4),e(C4,Gs),e(Gs,NL),e(oa,IL),e(oa,Ti),e(Ti,xL),e(oa,OL),e(oa,N4),e(N4,FL),e(oa,WL),e(h,YL),e(h,rn),e(rn,I4),e(I4,ks),e(ks,JL),e(rn,ZL),e(rn,_i),e(_i,VL),e(rn,KL),e(h,XL),e(h,an),e(an,x4),e(x4,mi),e(mi,zL),e(an,HL),e(an,pi),e(pi,UL),e(an,qL),e(h,QL),e(h,ln),e(ln,O4),e(O4,Bs),e(Bs,jL),e(ln,$L),e(ln,Di),e(Di,eb),e(ln,tb),E(u,qm,v),E(u,fa,v),e(fa,nn),e(nn,F4),Gm(yi,F4,null),e(fa,rb),e(fa,W4),e(W4,ab),E(u,Qm,v),E(u,Cs,v),e(Cs,lb),E(u,jm,v),E(u,on,v),e(on,Y4),e(Y4,T),e(T,Ns),e(Ns,nb),e(T,ob),e(T,Is),e(Is,ib),e(T,db),e(T,xs),e(xs,sb),e(T,hb),e(T,Os),e(Os,cb),e(T,gb),e(T,Fs),e(Fs,fb),e(T,ub),e(T,Ws),e(Ws,vb),e(on,Eb),e(on,g),e(g,_),e(_,Ys),e(Ys,Tb),e(_,_b),e(_,Js),e(Js,mb),e(_,pb),e(_,Zs),e(Zs,Db),e(_,yb),e(_,Vs),e(Vs,Ab),e(_,Rb),e(_,Ks),e(Ks,Lb),e(_,bb),e(_,Xs),e(Xs,Sb),e(g,wb),e(g,m),e(m,zs),e(zs,Mb),e(m,Pb),e(m,Hs),e(Hs,Gb),e(m,kb),e(m,Us),e(Us,Bb),e(m,Cb),e(m,qs),e(qs,Nb),e(m,Ib),e(m,Qs),e(Qs,xb),e(m,Ob),e(m,js),e(js,Fb),e(g,Wb),e(g,p),e(p,$s),e($s,Yb),e(p,Jb),e(p,eh),e(eh,Zb),e(p,Vb),e(p,th),e(th,Kb),e(p,Xb),e(p,rh),e(rh,zb),e(p,Hb),e(p,ah),e(ah,Ub),e(p,qb),e(p,lh),e(lh,Qb),e(g,jb),e(g,D),e(D,nh),e(nh,$b),e(D,eS),e(D,oh),e(oh,tS),e(D,rS),e(D,ih),e(ih,aS),e(D,lS),e(D,dh),e(dh,nS),e(D,oS),e(D,sh),e(sh,iS),e(D,dS),e(D,hh),e(hh,sS),e(g,hS),e(g,y),e(y,ch),e(ch,cS),e(y,gS),e(y,gh),e(gh,fS),e(y,uS),e(y,fh),e(fh,vS),e(y,ES),e(y,uh),e(uh,TS),e(y,_S),e(y,vh),e(vh,mS),e(y,pS),e(y,Eh),e(Eh,DS),e(g,yS),e(g,A),e(A,Th),e(Th,AS),e(A,RS),e(A,_h),e(_h,LS),e(A,bS),e(A,mh),e(mh,SS),e(A,wS),e(A,ph),e(ph,MS),e(A,PS),e(A,Dh),e(Dh,GS),e(A,kS),e(A,yh),e(yh,BS),e(g,CS),e(g,R),e(R,Ah),e(Ah,NS),e(R,IS),e(R,Rh),e(Rh,xS),e(R,OS),e(R,Lh),e(Lh,FS),e(R,WS),e(R,bh),e(bh,YS),e(R,JS),e(R,Sh),e(Sh,ZS),e(R,VS),e(R,wh),e(wh,KS),e(g,XS),e(g,L),e(L,Mh),e(Mh,zS),e(L,HS),e(L,Ph),e(Ph,US),e(L,qS),e(L,Gh),e(Gh,QS),e(L,jS),e(L,kh),e(kh,$S),e(L,ew),e(L,Bh),e(Bh,tw),e(L,rw),e(L,Ch),e(Ch,aw),e(g,lw),e(g,b),e(b,Nh),e(Nh,nw),e(b,ow),e(b,Ih),e(Ih,iw),e(b,dw),e(b,xh),e(xh,sw),e(b,hw),e(b,Oh),e(Oh,cw),e(b,gw),e(b,Fh),e(Fh,fw),e(b,uw),e(b,Wh),e(Wh,vw),e(g,Ew),e(g,S),e(S,Yh),e(Yh,Tw),e(S,_w),e(S,Jh),e(Jh,mw),e(S,pw),e(S,Zh),e(Zh,Dw),e(S,yw),e(S,Vh),e(Vh,Aw),e(S,Rw),e(S,Kh),e(Kh,Lw),e(S,bw),e(S,Xh),e(Xh,Sw),e(g,ww),e(g,w),e(w,zh),e(zh,Mw),e(w,Pw),e(w,Hh),e(Hh,Gw),e(w,kw),e(w,Uh),e(Uh,Bw),e(w,Cw),e(w,qh),e(qh,Nw),e(w,Iw),e(w,Qh),e(Qh,xw),e(w,Ow),e(w,jh),e(jh,Fw),e(g,Ww),e(g,M),e(M,$h),e($h,Yw),e(M,Jw),e(M,ec),e(ec,Zw),e(M,Vw),e(M,tc),e(tc,Kw),e(M,Xw),e(M,rc),e(rc,
zw),e(M,Hw),e(M,ac),e(ac,Uw),e(M,qw),e(M,lc),e(lc,Qw),e(g,jw),e(g,P),e(P,nc),e(nc,$w),e(P,eM),e(P,oc),e(oc,tM),e(P,rM),e(P,ic),e(ic,aM),e(P,lM),e(P,dc),e(dc,nM),e(P,oM),e(P,sc),e(sc,iM),e(P,dM),e(P,hc),e(hc,sM),e(g,hM),e(g,G),e(G,cc),e(cc,cM),e(G,gM),e(G,gc),e(gc,fM),e(G,uM),e(G,fc),e(fc,vM),e(G,EM),e(G,uc),e(uc,TM),e(G,_M),e(G,vc),e(vc,mM),e(G,pM),e(G,Ec),e(Ec,DM),e(g,yM),e(g,k),e(k,Tc),e(Tc,AM),e(k,RM),e(k,_c),e(_c,LM),e(k,bM),e(k,mc),e(mc,SM),e(k,wM),e(k,pc),e(pc,MM),e(k,PM),e(k,Dc),e(Dc,GM),e(k,kM),e(k,yc),e(yc,BM),e(g,CM),e(g,B),e(B,Ac),e(Ac,NM),e(B,IM),e(B,Rc),e(Rc,xM),e(B,OM),e(B,Lc),e(Lc,FM),e(B,WM),e(B,bc),e(bc,YM),e(B,JM),e(B,Sc),e(Sc,ZM),e(B,VM),e(B,wc),e(wc,KM),e(g,XM),e(g,C),e(C,Mc),e(Mc,zM),e(C,HM),e(C,Pc),e(Pc,UM),e(C,qM),e(C,Gc),e(Gc,QM),e(C,jM),e(C,kc),e(kc,$M),e(C,eP),e(C,Bc),e(Bc,tP),e(C,rP),e(C,Cc),e(Cc,aP),e(g,lP),e(g,N),e(N,Nc),e(Nc,nP),e(N,oP),e(N,Ic),e(Ic,iP),e(N,dP),e(N,xc),e(xc,sP),e(N,hP),e(N,Oc),e(Oc,cP),e(N,gP),e(N,Fc),e(Fc,fP),e(N,uP),e(N,Wc),e(Wc,vP),e(g,EP),e(g,I),e(I,Yc),e(Yc,TP),e(I,_P),e(I,Jc),e(Jc,mP),e(I,pP),e(I,Zc),e(Zc,DP),e(I,yP),e(I,Vc),e(Vc,AP),e(I,RP),e(I,Kc),e(Kc,LP),e(I,bP),e(I,Xc),e(Xc,SP),e(g,wP),e(g,x),e(x,zc),e(zc,MP),e(x,PP),e(x,Hc),e(Hc,GP),e(x,kP),e(x,Uc),e(Uc,BP),e(x,CP),e(x,qc),e(qc,NP),e(x,IP),e(x,Qc),e(Qc,xP),e(x,OP),e(x,jc),e(jc,FP),e(g,WP),e(g,O),e(O,$c),e($c,YP),e(O,JP),e(O,eg),e(eg,ZP),e(O,VP),e(O,tg),e(tg,KP),e(O,XP),e(O,rg),e(rg,zP),e(O,HP),e(O,ag),e(ag,UP),e(O,qP),e(O,lg),e(lg,QP),e(g,jP),e(g,F),e(F,ng),e(ng,$P),e(F,eG),e(F,og),e(og,tG),e(F,rG),e(F,ig),e(ig,aG),e(F,lG),e(F,dg),e(dg,nG),e(F,oG),e(F,sg),e(sg,iG),e(F,dG),e(F,hg),e(hg,sG),e(g,hG),e(g,W),e(W,cg),e(cg,cG),e(W,gG),e(W,gg),e(gg,fG),e(W,uG),e(W,fg),e(fg,vG),e(W,EG),e(W,ug),e(ug,TG),e(W,_G),e(W,vg),e(vg,mG),e(W,pG),e(W,Eg),e(Eg,DG),e(g,yG),e(g,Y),e(Y,Tg),e(Tg,AG),e(Y,RG),e(Y,_g),e(_g,LG),e(Y,bG),e(Y,mg),e(mg,SG),e(Y,wG),e(Y,pg),e(pg,MG),e(Y,PG),e(Y,Dg),e(Dg,GG),e(Y,kG),e(Y,yg),e(yg,BG),e(g,CG),e(g,J),e(J,Ag),e(Ag,NG),e(J,IG),e(J,Rg),e(Rg,xG),e(J,OG),e(J,Lg),e(Lg,FG),e(J,WG),e(J,bg),e(bg,YG),e(J,JG),e(J,Sg),e(Sg,ZG),e(J,VG),e(J,wg),e(wg,KG),e(g,XG),e(g,Z),e(Z,Mg),e(Mg,zG),e(Z,HG),e(Z,Pg),e(Pg,UG),e(Z,qG),e(Z,Gg),e(Gg,QG),e(Z,jG),e(Z,kg),e(kg,$G),e(Z,ek),e(Z,Bg),e(Bg,tk),e(Z,rk),e(Z,Cg),e(Cg,ak),e(g,lk),e(g,V),e(V,Ng),e(Ng,nk),e(V,ok),e(V,Ig),e(Ig,ik),e(V,dk),e(V,xg),e(xg,sk),e(V,hk),e(V,Og),e(Og,ck),e(V,gk),e(V,Fg),e(Fg,fk),e(V,uk),e(V,Wg),e(Wg,vk),e(g,Ek),e(g,K),e(K,Yg),e(Yg,Tk),e(K,_k),e(K,Jg),e(Jg,mk),e(K,pk),e(K,Zg),e(Zg,Dk),e(K,yk),e(K,Vg),e(Vg,Ak),e(K,Rk),e(K,Kg),e(Kg,Lk),e(K,bk),e(K,Xg),e(Xg,Sk),e(g,wk),e(g,X),e(X,zg),e(zg,Mk),e(X,Pk),e(X,Hg),e(Hg,Gk),e(X,kk),e(X,Ug),e(Ug,Bk),e(X,Ck),e(X,qg),e(qg,Nk),e(X,Ik),e(X,Qg),e(Qg,xk),e(X,Ok),e(X,jg),e(jg,Fk),e(g,Wk),e(g,z),e(z,$g),e($g,Yk),e(z,Jk),e(z,ef),e(ef,Zk),e(z,Vk),e(z,tf),e(tf,Kk),e(z,Xk),e(z,rf),e(rf,zk),e(z,Hk),e(z,af),e(af,Uk),e(z,qk),e(z,lf),e(lf,Qk),e(g,jk),e(g,H),e(H,nf),e(nf,$k),e(H,eB),e(H,of),e(of,tB),e(H,rB),e(H,df),e(df,aB),e(H,lB),e(H,sf),e(sf,nB),e(H,oB),e(H,hf),e(hf,iB),e(H,dB),e(H,cf),e(cf,sB),e(g,hB),e(g,U),e(U,gf),e(gf,cB),e(U,gB),e(U,ff),e(ff,fB),e(U,uB),e(U,uf),e(uf,vB),e(U,EB),e(U,vf),e(vf,TB),e(U,_B),e(U,Ef),e(Ef,mB),e(U,pB),e(U,Tf),e(Tf,DB),e(g,yB),e(g,q),e(q,_f),e(_f,AB),e(q,RB),e(q,mf),e(mf,LB),e(q,bB),e(q,pf),e(pf,SB),e(q,wB),e(q,Df),e(Df,MB),e(q,PB),e(q,yf),e(yf,GB),e(q,kB),e(q,Af),e(Af,BB),e(g,CB),e(g,Q),e(Q,Rf),e(Rf,NB),e(Q,IB),e(Q,Lf),e(Lf,xB),e(Q,OB),e(Q,bf),e(bf,FB),e(Q,WB),e(Q,Sf),e(Sf,YB),e(Q,JB),e(Q,wf),e(wf,ZB),e(Q,VB),e(Q,Mf),e(Mf,KB),e(g,XB),e(g,j),e(j,Pf),e(Pf,zB),e(j,HB),e(j,Gf)
,e(Gf,UB),e(j,qB),e(j,kf),e(kf,QB),e(j,jB),e(j,Bf),e(Bf,$B),e(j,eC),e(j,Cf),e(Cf,tC),e(j,rC),e(j,Nf),e(Nf,aC),e(g,lC),e(g,$),e($,If),e(If,nC),e($,oC),e($,xf),e(xf,iC),e($,dC),e($,Of),e(Of,sC),e($,hC),e($,Ff),e(Ff,cC),e($,gC),e($,Wf),e(Wf,fC),e($,uC),e($,Yf),e(Yf,vC),e(g,EC),e(g,ee),e(ee,Jf),e(Jf,TC),e(ee,_C),e(ee,Zf),e(Zf,mC),e(ee,pC),e(ee,Vf),e(Vf,DC),e(ee,yC),e(ee,Kf),e(Kf,AC),e(ee,RC),e(ee,Xf),e(Xf,LC),e(ee,bC),e(ee,zf),e(zf,SC),e(g,wC),e(g,te),e(te,Hf),e(Hf,MC),e(te,PC),e(te,Uf),e(Uf,GC),e(te,kC),e(te,qf),e(qf,BC),e(te,CC),e(te,Qf),e(Qf,NC),e(te,IC),e(te,jf),e(jf,xC),e(te,OC),e(te,$f),e($f,FC),e(g,WC),e(g,re),e(re,e1),e(e1,YC),e(re,JC),e(re,t1),e(t1,ZC),e(re,VC),e(re,r1),e(r1,KC),e(re,XC),e(re,a1),e(a1,zC),e(re,HC),e(re,l1),e(l1,UC),e(re,qC),e(re,n1),e(n1,QC),e(g,jC),e(g,ae),e(ae,o1),e(o1,$C),e(ae,eN),e(ae,i1),e(i1,tN),e(ae,rN),e(ae,d1),e(d1,aN),e(ae,lN),e(ae,s1),e(s1,nN),e(ae,oN),e(ae,h1),e(h1,iN),e(ae,dN),e(ae,c1),e(c1,sN),e(g,hN),e(g,le),e(le,g1),e(g1,cN),e(le,gN),e(le,f1),e(f1,fN),e(le,uN),e(le,u1),e(u1,vN),e(le,EN),e(le,v1),e(v1,TN),e(le,_N),e(le,E1),e(E1,mN),e(le,pN),e(le,T1),e(T1,DN),e(g,yN),e(g,ne),e(ne,_1),e(_1,AN),e(ne,RN),e(ne,m1),e(m1,LN),e(ne,bN),e(ne,p1),e(p1,SN),e(ne,wN),e(ne,D1),e(D1,MN),e(ne,PN),e(ne,y1),e(y1,GN),e(ne,kN),e(ne,A1),e(A1,BN),e(g,CN),e(g,oe),e(oe,R1),e(R1,NN),e(oe,IN),e(oe,L1),e(L1,xN),e(oe,ON),e(oe,b1),e(b1,FN),e(oe,WN),e(oe,S1),e(S1,YN),e(oe,JN),e(oe,w1),e(w1,ZN),e(oe,VN),e(oe,M1),e(M1,KN),e(g,XN),e(g,ie),e(ie,P1),e(P1,zN),e(ie,HN),e(ie,G1),e(G1,UN),e(ie,qN),e(ie,k1),e(k1,QN),e(ie,jN),e(ie,B1),e(B1,$N),e(ie,eI),e(ie,C1),e(C1,tI),e(ie,rI),e(ie,N1),e(N1,aI),e(g,lI),e(g,de),e(de,I1),e(I1,nI),e(de,oI),e(de,x1),e(x1,iI),e(de,dI),e(de,O1),e(O1,sI),e(de,hI),e(de,F1),e(F1,cI),e(de,gI),e(de,W1),e(W1,fI),e(de,uI),e(de,Y1),e(Y1,vI),e(g,EI),e(g,se),e(se,J1),e(J1,TI),e(se,_I),e(se,Z1),e(Z1,mI),e(se,pI),e(se,V1),e(V1,DI),e(se,yI),e(se,K1),e(K1,AI),e(se,RI),e(se,X1),e(X1,LI),e(se,bI),e(se,z1),e(z1,SI),e(g,wI),e(g,he),e(he,H1),e(H1,MI),e(he,PI),e(he,U1),e(U1,GI),e(he,kI),e(he,q1),e(q1,BI),e(he,CI),e(he,Q1),e(Q1,NI),e(he,II),e(he,j1),e(j1,xI),e(he,OI),e(he,$1),e($1,FI),e(g,WI),e(g,ce),e(ce,eu),e(eu,YI),e(ce,JI),e(ce,tu),e(tu,ZI),e(ce,VI),e(ce,ru),e(ru,KI),e(ce,XI),e(ce,au),e(au,zI),e(ce,HI),e(ce,lu),e(lu,UI),e(ce,qI),e(ce,nu),e(nu,QI),e(g,jI),e(g,ge),e(ge,ou),e(ou,$I),e(ge,ex),e(ge,iu),e(iu,tx),e(ge,rx),e(ge,du),e(du,ax),e(ge,lx),e(ge,su),e(su,nx),e(ge,ox),e(ge,hu),e(hu,ix),e(ge,dx),e(ge,cu),e(cu,sx),e(g,hx),e(g,fe),e(fe,gu),e(gu,cx),e(fe,gx),e(fe,fu),e(fu,fx),e(fe,ux),e(fe,uu),e(uu,vx),e(fe,Ex),e(fe,vu),e(vu,Tx),e(fe,_x),e(fe,Eu),e(Eu,mx),e(fe,px),e(fe,Tu),e(Tu,Dx),e(g,yx),e(g,ue),e(ue,_u),e(_u,Ax),e(ue,Rx),e(ue,mu),e(mu,Lx),e(ue,bx),e(ue,pu),e(pu,Sx),e(ue,wx),e(ue,Du),e(Du,Mx),e(ue,Px),e(ue,yu),e(yu,Gx),e(ue,kx),e(ue,Au),e(Au,Bx),e(g,Cx),e(g,ve),e(ve,Ru),e(Ru,Nx),e(ve,Ix),e(ve,Lu),e(Lu,xx),e(ve,Ox),e(ve,bu),e(bu,Fx),e(ve,Wx),e(ve,Su),e(Su,Yx),e(ve,Jx),e(ve,wu),e(wu,Zx),e(ve,Vx),e(ve,Mu),e(Mu,Kx),e(g,Xx),e(g,Ee),e(Ee,Pu),e(Pu,zx),e(Ee,Hx),e(Ee,Gu),e(Gu,Ux),e(Ee,qx),e(Ee,ku),e(ku,Qx),e(Ee,jx),e(Ee,Bu),e(Bu,$x),e(Ee,eO),e(Ee,Cu),e(Cu,tO),e(Ee,rO),e(Ee,Nu),e(Nu,aO),e(g,lO),e(g,Te),e(Te,Iu),e(Iu,nO),e(Te,oO),e(Te,xu),e(xu,iO),e(Te,dO),e(Te,Ou),e(Ou,sO),e(Te,hO),e(Te,Fu),e(Fu,cO),e(Te,gO),e(Te,Wu),e(Wu,fO),e(Te,uO),e(Te,Yu),e(Yu,vO),e(g,EO),e(g,_e),e(_e,Ju),e(Ju,TO),e(_e,_O),e(_e,Zu),e(Zu,mO),e(_e,pO),e(_e,Vu),e(Vu,DO),e(_e,yO),e(_e,Ku),e(Ku,AO),e(_e,RO),e(_e,Xu),e(Xu,LO),e(_e,bO),e(_e,zu),e(zu,SO),e(g,wO),e(g,me),e(me,Hu),e(Hu,MO),e(me,PO),e(me,Uu),e(Uu,GO),e(me,kO),e(me,qu
),e(qu,BO),e(me,CO),e(me,Qu),e(Qu,NO),e(me,IO),e(me,ju),e(ju,xO),e(me,OO),e(me,$u),e($u,FO),e(g,WO),e(g,pe),e(pe,ev),e(ev,YO),e(pe,JO),e(pe,tv),e(tv,ZO),e(pe,VO),e(pe,rv),e(rv,KO),e(pe,XO),e(pe,av),e(av,zO),e(pe,HO),e(pe,lv),e(lv,UO),e(pe,qO),e(pe,nv),e(nv,QO),e(g,jO),e(g,De),e(De,ov),e(ov,$O),e(De,eF),e(De,iv),e(iv,tF),e(De,rF),e(De,dv),e(dv,aF),e(De,lF),e(De,sv),e(sv,nF),e(De,oF),e(De,hv),e(hv,iF),e(De,dF),e(De,cv),e(cv,sF),e(g,hF),e(g,ye),e(ye,gv),e(gv,cF),e(ye,gF),e(ye,fv),e(fv,fF),e(ye,uF),e(ye,uv),e(uv,vF),e(ye,EF),e(ye,vv),e(vv,TF),e(ye,_F),e(ye,Ev),e(Ev,mF),e(ye,pF),e(ye,Tv),e(Tv,DF),e(g,yF),e(g,Ae),e(Ae,_v),e(_v,AF),e(Ae,RF),e(Ae,mv),e(mv,LF),e(Ae,bF),e(Ae,pv),e(pv,SF),e(Ae,wF),e(Ae,Dv),e(Dv,MF),e(Ae,PF),e(Ae,yv),e(yv,GF),e(Ae,kF),e(Ae,Av),e(Av,BF),e(g,CF),e(g,Re),e(Re,Rv),e(Rv,NF),e(Re,IF),e(Re,Lv),e(Lv,xF),e(Re,OF),e(Re,bv),e(bv,FF),e(Re,WF),e(Re,Sv),e(Sv,YF),e(Re,JF),e(Re,wv),e(wv,ZF),e(Re,VF),e(Re,Mv),e(Mv,KF),e(g,XF),e(g,Le),e(Le,Pv),e(Pv,zF),e(Le,HF),e(Le,Gv),e(Gv,UF),e(Le,qF),e(Le,kv),e(kv,QF),e(Le,jF),e(Le,Bv),e(Bv,$F),e(Le,eW),e(Le,Cv),e(Cv,tW),e(Le,rW),e(Le,Nv),e(Nv,aW),e(g,lW),e(g,be),e(be,Iv),e(Iv,nW),e(be,oW),e(be,xv),e(xv,iW),e(be,dW),e(be,Ov),e(Ov,sW),e(be,hW),e(be,Fv),e(Fv,cW),e(be,gW),e(be,Wv),e(Wv,fW),e(be,uW),e(be,Yv),e(Yv,vW),e(g,EW),e(g,Se),e(Se,Jv),e(Jv,TW),e(Se,_W),e(Se,Zv),e(Zv,mW),e(Se,pW),e(Se,Vv),e(Vv,DW),e(Se,yW),e(Se,Kv),e(Kv,AW),e(Se,RW),e(Se,Xv),e(Xv,LW),e(Se,bW),e(Se,zv),e(zv,SW),e(g,wW),e(g,we),e(we,Hv),e(Hv,MW),e(we,PW),e(we,Uv),e(Uv,GW),e(we,kW),e(we,qv),e(qv,BW),e(we,CW),e(we,Qv),e(Qv,NW),e(we,IW),e(we,jv),e(jv,xW),e(we,OW),e(we,$v),e($v,FW),e(g,WW),e(g,Me),e(Me,eE),e(eE,YW),e(Me,JW),e(Me,tE),e(tE,ZW),e(Me,VW),e(Me,rE),e(rE,KW),e(Me,XW),e(Me,aE),e(aE,zW),e(Me,HW),e(Me,lE),e(lE,UW),e(Me,qW),e(Me,nE),e(nE,QW),e(g,jW),e(g,Pe),e(Pe,oE),e(oE,$W),e(Pe,eY),e(Pe,iE),e(iE,tY),e(Pe,rY),e(Pe,dE),e(dE,aY),e(Pe,lY),e(Pe,sE),e(sE,nY),e(Pe,oY),e(Pe,hE),e(hE,iY),e(Pe,dY),e(Pe,cE),e(cE,sY),e(g,hY),e(g,Ge),e(Ge,gE),e(gE,cY),e(Ge,gY),e(Ge,fE),e(fE,fY),e(Ge,uY),e(Ge,uE),e(uE,vY),e(Ge,EY),e(Ge,vE),e(vE,TY),e(Ge,_Y),e(Ge,EE),e(EE,mY),e(Ge,pY),e(Ge,TE),e(TE,DY),e(g,yY),e(g,ke),e(ke,_E),e(_E,AY),e(ke,RY),e(ke,mE),e(mE,LY),e(ke,bY),e(ke,pE),e(pE,SY),e(ke,wY),e(ke,DE),e(DE,MY),e(ke,PY),e(ke,yE),e(yE,GY),e(ke,kY),e(ke,AE),e(AE,BY),e(g,CY),e(g,Be),e(Be,RE),e(RE,NY),e(Be,IY),e(Be,LE),e(LE,xY),e(Be,OY),e(Be,bE),e(bE,FY),e(Be,WY),e(Be,SE),e(SE,YY),e(Be,JY),e(Be,wE),e(wE,ZY),e(Be,VY),e(Be,ME),e(ME,KY),e(g,XY),e(g,Ce),e(Ce,PE),e(PE,zY),e(Ce,HY),e(Ce,GE),e(GE,UY),e(Ce,qY),e(Ce,kE),e(kE,QY),e(Ce,jY),e(Ce,BE),e(BE,$Y),e(Ce,eJ),e(Ce,CE),e(CE,tJ),e(Ce,rJ),e(Ce,NE),e(NE,aJ),e(g,lJ),e(g,Ne),e(Ne,IE),e(IE,nJ),e(Ne,oJ),e(Ne,xE),e(xE,iJ),e(Ne,dJ),e(Ne,OE),e(OE,sJ),e(Ne,hJ),e(Ne,FE),e(FE,cJ),e(Ne,gJ),e(Ne,WE),e(WE,fJ),e(Ne,uJ),e(Ne,YE),e(YE,vJ),e(g,EJ),e(g,Ie),e(Ie,JE),e(JE,TJ),e(Ie,_J),e(Ie,ZE),e(ZE,mJ),e(Ie,pJ),e(Ie,VE),e(VE,DJ),e(Ie,yJ),e(Ie,KE),e(KE,AJ),e(Ie,RJ),e(Ie,XE),e(XE,LJ),e(Ie,bJ),e(Ie,zE),e(zE,SJ),e(g,wJ),e(g,xe),e(xe,HE),e(HE,MJ),e(xe,PJ),e(xe,UE),e(UE,GJ),e(xe,kJ),e(xe,qE),e(qE,BJ),e(xe,CJ),e(xe,QE),e(QE,NJ),e(xe,IJ),e(xe,jE),e(jE,xJ),e(xe,OJ),e(xe,$E),e($E,FJ),e(g,WJ),e(g,Oe),e(Oe,e2),e(e2,YJ),e(Oe,JJ),e(Oe,t2),e(t2,ZJ),e(Oe,VJ),e(Oe,r2),e(r2,KJ),e(Oe,XJ),e(Oe,a2),e(a2,zJ),e(Oe,HJ),e(Oe,l2),e(l2,UJ),e(Oe,qJ),e(Oe,n2),e(n2,QJ),e(g,jJ),e(g,Fe),e(Fe,o2),e(o2,$J),e(Fe,eZ),e(Fe,i2),e(i2,tZ),e(Fe,rZ),e(Fe,d2),e(d2,aZ),e(Fe,lZ),e(Fe,s2),e(s2,nZ),e(Fe,oZ),e(Fe,h2),e(h2,iZ),e(Fe,dZ),e(Fe,c2),e(c2,sZ),e(g,hZ),e(g,We),e(We,g2),e(g2,cZ),e(We,gZ),e(We,f2),e(f2,fZ),e(We,uZ),e(We,u2),e(u2
,vZ),e(We,EZ),e(We,v2),e(v2,TZ),e(We,_Z),e(We,E2),e(E2,mZ),e(We,pZ),e(We,T2),e(T2,DZ),e(g,yZ),e(g,Ye),e(Ye,_2),e(_2,AZ),e(Ye,RZ),e(Ye,m2),e(m2,LZ),e(Ye,bZ),e(Ye,p2),e(p2,SZ),e(Ye,wZ),e(Ye,D2),e(D2,MZ),e(Ye,PZ),e(Ye,y2),e(y2,GZ),e(Ye,kZ),e(Ye,A2),e(A2,BZ),e(g,CZ),e(g,Je),e(Je,R2),e(R2,NZ),e(Je,IZ),e(Je,L2),e(L2,xZ),e(Je,OZ),e(Je,b2),e(b2,FZ),e(Je,WZ),e(Je,S2),e(S2,YZ),e(Je,JZ),e(Je,w2),e(w2,ZZ),e(Je,VZ),e(Je,M2),e(M2,KZ),e(g,XZ),e(g,Ze),e(Ze,P2),e(P2,zZ),e(Ze,HZ),e(Ze,G2),e(G2,UZ),e(Ze,qZ),e(Ze,k2),e(k2,QZ),e(Ze,jZ),e(Ze,B2),e(B2,$Z),e(Ze,eV),e(Ze,C2),e(C2,tV),e(Ze,rV),e(Ze,N2),e(N2,aV),e(g,lV),e(g,Ve),e(Ve,I2),e(I2,nV),e(Ve,oV),e(Ve,x2),e(x2,iV),e(Ve,dV),e(Ve,O2),e(O2,sV),e(Ve,hV),e(Ve,F2),e(F2,cV),e(Ve,gV),e(Ve,W2),e(W2,fV),e(Ve,uV),e(Ve,Y2),e(Y2,vV),e(g,EV),e(g,Ke),e(Ke,J2),e(J2,TV),e(Ke,_V),e(Ke,Z2),e(Z2,mV),e(Ke,pV),e(Ke,V2),e(V2,DV),e(Ke,yV),e(Ke,K2),e(K2,AV),e(Ke,RV),e(Ke,X2),e(X2,LV),e(Ke,bV),e(Ke,z2),e(z2,SV),e(g,wV),e(g,Xe),e(Xe,H2),e(H2,MV),e(Xe,PV),e(Xe,U2),e(U2,GV),e(Xe,kV),e(Xe,q2),e(q2,BV),e(Xe,CV),e(Xe,Q2),e(Q2,NV),e(Xe,IV),e(Xe,j2),e(j2,xV),e(Xe,OV),e(Xe,$2),e($2,FV),e(g,WV),e(g,ze),e(ze,eT),e(eT,YV),e(ze,JV),e(ze,tT),e(tT,ZV),e(ze,VV),e(ze,rT),e(rT,KV),e(ze,XV),e(ze,aT),e(aT,zV),e(ze,HV),e(ze,lT),e(lT,UV),e(ze,qV),e(ze,nT),e(nT,QV),e(g,jV),e(g,He),e(He,oT),e(oT,$V),e(He,eK),e(He,iT),e(iT,tK),e(He,rK),e(He,dT),e(dT,aK),e(He,lK),e(He,sT),e(sT,nK),e(He,oK),e(He,hT),e(hT,iK),e(He,dK),e(He,cT),e(cT,sK),e(g,hK),e(g,Ue),e(Ue,gT),e(gT,cK),e(Ue,gK),e(Ue,fT),e(fT,fK),e(Ue,uK),e(Ue,uT),e(uT,vK),e(Ue,EK),e(Ue,vT),e(vT,TK),e(Ue,_K),e(Ue,ET),e(ET,mK),e(Ue,pK),e(Ue,TT),e(TT,DK),e(g,yK),e(g,qe),e(qe,_T),e(_T,AK),e(qe,RK),e(qe,mT),e(mT,LK),e(qe,bK),e(qe,pT),e(pT,SK),e(qe,wK),e(qe,DT),e(DT,MK),e(qe,PK),e(qe,yT),e(yT,GK),e(qe,kK),e(qe,AT),e(AT,BK),e(g,CK),e(g,Qe),e(Qe,RT),e(RT,NK),e(Qe,IK),e(Qe,LT),e(LT,xK),e(Qe,OK),e(Qe,bT),e(bT,FK),e(Qe,WK),e(Qe,ST),e(ST,YK),e(Qe,JK),e(Qe,wT),e(wT,ZK),e(Qe,VK),e(Qe,MT),e(MT,KK),e(g,XK),e(g,je),e(je,PT),e(PT,zK),e(je,HK),e(je,GT),e(GT,UK),e(je,qK),e(je,kT),e(kT,QK),e(je,jK),e(je,BT),e(BT,$K),e(je,eX),e(je,CT),e(CT,tX),e(je,rX),e(je,NT),e(NT,aX),e(g,lX),e(g,$e),e($e,IT),e(IT,nX),e($e,oX),e($e,xT),e(xT,iX),e($e,dX),e($e,OT),e(OT,sX),e($e,hX),e($e,FT),e(FT,cX),e($e,gX),e($e,WT),e(WT,fX),e($e,uX),e($e,YT),e(YT,vX),e(g,EX),e(g,et),e(et,JT),e(JT,TX),e(et,_X),e(et,ZT),e(ZT,mX),e(et,pX),e(et,VT),e(VT,DX),e(et,yX),e(et,KT),e(KT,AX),e(et,RX),e(et,XT),e(XT,LX),e(et,bX),e(et,zT),e(zT,SX),e(g,wX),e(g,tt),e(tt,HT),e(HT,MX),e(tt,PX),e(tt,UT),e(UT,GX),e(tt,kX),e(tt,qT),e(qT,BX),e(tt,CX),e(tt,QT),e(QT,NX),e(tt,IX),e(tt,jT),e(jT,xX),e(tt,OX),e(tt,$T),e($T,FX),e(g,WX),e(g,rt),e(rt,e_),e(e_,YX),e(rt,JX),e(rt,t_),e(t_,ZX),e(rt,VX),e(rt,r_),e(r_,KX),e(rt,XX),e(rt,a_),e(a_,zX),e(rt,HX),e(rt,l_),e(l_,UX),e(rt,qX),e(rt,n_),e(n_,QX),e(g,jX),e(g,at),e(at,o_),e(o_,$X),e(at,ez),e(at,i_),e(i_,tz),e(at,rz),e(at,d_),e(d_,az),e(at,lz),e(at,s_),e(s_,nz),e(at,oz),e(at,h_),e(h_,iz),e(at,dz),e(at,c_),e(c_,sz),$m=!0},p:Ohe,i(u){$m||(km(hn.$$.fragment,u),km(un.$$.fragment,u),km(vn.$$.fragment,u),km(Tn.$$.fragment,u),km(yi.$$.fragment,u),$m=!0)},o(u){Bm(hn.$$.fragment,u),Bm(un.$$.fragment,u),Bm(vn.$$.fragment,u),Bm(Tn.$$.fragment,u),Bm(yi.$$.fragment,u),$m=!1},d(u){t($r),u&&t(Mi),u&&t(ot),Cm(hn),u&&t(Im),u&&t(Pi),u&&t(xm),u&&t(Gi),u&&t(Om),u&&t(it),u&&t(Fm),u&&t(dt),u&&t(Wm),u&&t(ki),u&&t(Ym),u&&t(da),Cm(un),u&&t(Jm),u&&t(sa),u&&t(Zm),u&&t(Vm),u&&t(ha),Cm(vn),u&&t(Km),u&&t(Bi),u&&t(Xm),u&&t(lt),u&&t(zm),u&&t(Ji),u&&t(Hm),u&&t(ga),Cm(Tn),u&&t(Um),u&&t(h),u&&t(qm),u&&t(fa),Cm(yi),u&&t(Qm),u&&t(Cs),u
&&t(jm),u&&t(on)}}}const Whe={local:"transformers",sections:[{local:"if-you-are-looking-for-custom-support-from-the-hugging-face-team",title:"If you are looking for custom support from the Hugging Face team"},{local:"contents",sections:[{local:"supported-models",title:"Supported models"},{local:"supported-frameworks",title:"Supported frameworks"}],title:"Contents"}],title:"\u{1F917} Transformers"};function Yhe(d0,$r,Mi){let{fw:ot}=$r;return d0.$$set=Qr=>{"fw"in Qr&&Mi(0,ot=Qr.fw)},[ot]}class Vhe extends Bhe{constructor($r){super();Che(this,$r,Yhe,Fhe,Nhe,{fw:0})}}export{Vhe as default,Whe as metadata};
270
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages/sagemaker.mdx-d221e67e.js
import{S as $e,i as be,s as Me,e as o,k as v,w as _e,t as m,M as ze,c as n,d as t,m as _,a as r,x as ke,h as g,b as l,F as a,g as k,y as we,L as Te,q as Se,o as ye,B as Ee}from"../chunks/vendor-4833417e.js";import{I as Ae}from"../chunks/IconCopyLink-4b81c553.js";function Pe(Q){let c,b,s,i,T,w,B,P,J,L,f,U,S,j,G,x,V,W,I,d,p,D,y,X,F,Y,N,u,q,E,Z,ee,C,A,ae,te,H,$,oe,K;return w=new Ae({}),y=new Ae({}),{c(){c=o("meta"),b=v(),s=o("h1"),i=o("a"),T=o("span"),_e(w.$$.fragment),B=v(),P=o("span"),J=m("Run training on Amazon SageMaker"),L=v(),f=o("p"),U=m("The documentation has been moved to "),S=o("a"),j=m("hf.co/docs/sagemaker"),G=m(". This page will be removed in "),x=o("code"),V=m("transformers"),W=m(" 5.0."),I=v(),d=o("h3"),p=o("a"),D=o("span"),_e(y.$$.fragment),X=v(),F=o("span"),Y=m("Table of Content"),N=v(),u=o("ul"),q=o("li"),E=o("a"),Z=m("Train Hugging Face models on Amazon SageMaker with the SageMaker Python SDK"),ee=v(),C=o("li"),A=o("a"),ae=m("Deploy Hugging Face models to Amazon SageMaker with the SageMaker Python SDK"),te=v(),H=o("li"),$=o("a"),oe=m("Frequently Asked Questions"),this.h()},l(e){const h=ze('[data-svelte="svelte-1phssyn"]',document.head);c=n(h,"META",{name:!0,content:!0}),h.forEach(t),b=_(e),s=n(e,"H1",{class:!0});var R=r(s);i=n(R,"A",{id:!0,class:!0,href:!0});var ne=r(i);T=n(ne,"SPAN",{});var re=r(T);ke(w.$$.fragment,re),re.forEach(t),ne.forEach(t),B=_(R),P=n(R,"SPAN",{});var le=r(P);J=g(le,"Run training on Amazon SageMaker"),le.forEach(t),R.forEach(t),L=_(e),f=n(e,"P",{});var M=r(f);U=g(M,"The documentation has been moved to "),S=n(M,"A",{href:!0,rel:!0});var se=r(S);j=g(se,"hf.co/docs/sagemaker"),se.forEach(t),G=g(M,". This page will be removed in "),x=n(M,"CODE",{});var ie=r(x);V=g(ie,"transformers"),ie.forEach(t),W=g(M," 5.0."),M.forEach(t),I=_(e),d=n(e,"H3",{class:!0});var O=r(d);p=n(O,"A",{id:!0,class:!0,href:!0});var he=r(p);D=n(he,"SPAN",{});var ce=r(D);ke(y.$$.fragment,ce),ce.forEach(t),he.forEach(t),X=_(O),F=n(O,"SPAN",{});var fe=r(F);Y=g(fe,"Table of Content"),fe.forEach(t),O.forEach(t),N=_(e),u=n(e,"UL",{});var z=r(u);q=n(z,"LI",{});var ue=r(q);E=n(ue,"A",{href:!0,rel:!0});var me=r(E);Z=g(me,"Train Hugging Face models on Amazon SageMaker with the SageMaker Python SDK"),me.forEach(t),ue.forEach(t),ee=_(z),C=n(z,"LI",{});var ge=r(C);A=n(ge,"A",{href:!0,rel:!0});var de=r(A);ae=g(de,"Deploy Hugging Face models to Amazon SageMaker with the SageMaker Python SDK"),de.forEach(t),ge.forEach(t),te=_(z),H=n(z,"LI",{});var pe=r(H);$=n(pe,"A",{href:!0,rel:!0});var ve=r($);oe=g(ve,"Frequently Asked Questions"),ve.forEach(t),pe.forEach(t),z.forEach(t),this.h()},h(){l(c,"name","hf:doc:metadata"),l(c,"content",JSON.stringify(xe)),l(i,"id","run-training-on-amazon-sagemaker"),l(i,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(i,"href","#run-training-on-amazon-sagemaker"),l(s,"class","relative group"),l(S,"href","https://huggingface.co/docs/sagemaker"),l(S,"rel","nofollow"),l(p,"id","table-of-content"),l(p,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(p,"href","#table-of-content"),l(d,"class","relative 
group"),l(E,"href","https://huggingface.co/docs/sagemaker/train"),l(E,"rel","nofollow"),l(A,"href","https://huggingface.co/docs/sagemaker/inference"),l(A,"rel","nofollow"),l($,"href","https://huggingface.co/docs/sagemaker/faq"),l($,"rel","nofollow")},m(e,h){a(document.head,c),k(e,b,h),k(e,s,h),a(s,i),a(i,T),we(w,T,null),a(s,B),a(s,P),a(P,J),k(e,L,h),k(e,f,h),a(f,U),a(f,S),a(S,j),a(f,G),a(f,x),a(x,V),a(f,W),k(e,I,h),k(e,d,h),a(d,p),a(p,D),we(y,D,null),a(d,X),a(d,F),a(F,Y),k(e,N,h),k(e,u,h),a(u,q),a(q,E),a(E,Z),a(u,ee),a(u,C),a(C,A),a(A,ae),a(u,te),a(u,H),a(H,$),a($,oe),K=!0},p:Te,i(e){K||(Se(w.$$.fragment,e),Se(y.$$.fragment,e),K=!0)},o(e){ye(w.$$.fragment,e),ye(y.$$.fragment,e),K=!1},d(e){t(c),e&&t(b),e&&t(s),Ee(w),e&&t(L),e&&t(f),e&&t(I),e&&t(d),Ee(y),e&&t(N),e&&t(u)}}}const xe={local:"run-training-on-amazon-sagemaker",sections:[{local:"table-of-content",title:"Table of Content"}],title:"Run training on Amazon SageMaker"};function De(Q,c,b){let{fw:s}=c;return Q.$$set=i=>{"fw"in i&&b(0,s=i.fw)},[s]}class Ce extends $e{constructor(c){super();be(this,c,De,Pe,Me,{fw:0})}}export{Ce as default,xe as metadata};
271
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages/serialization.mdx-809e82fb.js
import{S as rE,i as lE,s as iE,e as s,k as d,w as u,t as r,M as pE,c as n,d as o,m as h,a,x as m,h as l,b as f,F as t,g as p,y as g,q as _,o as v,B as w}from"../chunks/vendor-4833417e.js";import{T as gn}from"../chunks/Tip-fffd6df1.js";import{I as S}from"../chunks/IconCopyLink-4b81c553.js";import{C as D}from"../chunks/CodeBlock-6a3d1b46.js";import{C as aE}from"../chunks/CodeBlockFw-27a176a0.js";import"../chunks/CopyButton-dacfbfaf.js";function dE(L){let c,k,E,x,j,y,O,A;return{c(){c=s("p"),k=r("The features that have a "),E=s("code"),x=r("with-past"),j=r(" suffix (e.g. "),y=s("code"),O=r("causal-lm-with-past"),A=r(`) correspond to model topologies with precomputed hidden states (key and values in the attention blocks) that can be used for fast autoregressive decoding.`)},l(N){c=n(N,"P",{});var T=a(c);k=l(T,"The features that have a "),E=n(T,"CODE",{});var q=a(E);x=l(q,"with-past"),q.forEach(o),j=l(T," suffix (e.g. "),y=n(T,"CODE",{});var P=a(y);O=l(P,"causal-lm-with-past"),P.forEach(o),A=l(T,`) correspond to model topologies with precomputed hidden states (key and values in the attention blocks) that can be used for fast autoregressive decoding.`),T.forEach(o)},m(N,T){p(N,c,T),t(c,k),t(c,E),t(E,x),t(c,j),t(c,y),t(y,O),t(c,A)},d(N){N&&o(c)}}}function hE(L){let c,k,E,x,j;return{c(){c=s("p"),k=r(`A good way to implement a custom ONNX configuration is to look at the existing implementation in the `),E=s("code"),x=r("configuration_<model_name>.py"),j=r(" file of a similar architecture.")},l(y){c=n(y,"P",{});var O=a(c);k=l(O,`A good way to implement a custom ONNX configuration is to look at the existing implementation in the `),E=n(O,"CODE",{});var A=a(E);x=l(A,"configuration_<model_name>.py"),A.forEach(o),j=l(O," file of a similar architecture."),O.forEach(o)},m(y,O){p(y,c,O),t(c,k),t(c,E),t(E,x),t(c,j)},d(y){y&&o(c)}}}function fE(L){let c,k,E,x,j,y,O,A,N,T,q,P,z,X,J,F,zt,we,He,U,R,Ee,se;return{c(){c=s("p"),k=r("Notice that "),E=s("code"),x=r("inputs"),j=r(" property for "),y=s("code"),O=r("DistilBertOnnxConfig"),A=r(` returns an `),N=s("code"),T=r("OrderedDict"),q=r(`. This ensures that the inputs are matched with their relative position within the `),P=s("code"),z=r("PreTrainedModel.forward()"),X=r(` method when tracing the graph. We recommend using an `),J=s("code"),F=r("OrderedDict"),zt=r(" for the "),we=s("code"),He=r("inputs"),U=r(" and "),R=s("code"),Ee=r("outputs"),se=r(` properties when implementing custom ONNX configurations.`)},l($e){c=n($e,"P",{});var C=a(c);k=l(C,"Notice that "),E=n(C,"CODE",{});var hs=a(E);x=l(hs,"inputs"),hs.forEach(o),j=l(C," property for "),y=n(C,"CODE",{});var It=a(y);O=l(It,"DistilBertOnnxConfig"),It.forEach(o),A=l(C,` returns an `),N=n(C,"CODE",{});var V=a(N);T=l(V,"OrderedDict"),V.forEach(o),q=l(C,`. This ensures that the inputs are matched with their relative position within the `),P=n(C,"CODE",{});var fs=a(P);z=l(fs,"PreTrainedModel.forward()"),fs.forEach(o),X=l(C,` method when tracing the graph. 
We recommend using an `),J=n(C,"CODE",{});var ne=a(J);F=l(ne,"OrderedDict"),ne.forEach(o),zt=l(C," for the "),we=n(C,"CODE",{});var cs=a(we);He=l(cs,"inputs"),cs.forEach(o),U=l(C," and "),R=n(C,"CODE",{});var us=a(R);Ee=l(us,"outputs"),us.forEach(o),se=l(C,` properties when implementing custom ONNX configurations.`),C.forEach(o)},m($e,C){p($e,c,C),t(c,k),t(c,E),t(E,x),t(c,j),t(c,y),t(y,O),t(c,A),t(c,N),t(N,T),t(c,q),t(c,P),t(P,z),t(c,X),t(c,J),t(J,F),t(c,zt),t(c,we),t(we,He),t(c,U),t(c,R),t(R,Ee),t(c,se)},d($e){$e&&o(c)}}}function cE(L){let c,k,E,x,j,y,O,A;return{c(){c=s("p"),k=r("All of the base properties and methods associated with "),E=s("a"),x=r("OnnxConfig"),j=r(` and the other configuration classes can be overriden if needed. Check out `),y=s("code"),O=r("BartOnnxConfig"),A=r(" for an advanced example."),this.h()},l(N){c=n(N,"P",{});var T=a(c);k=l(T,"All of the base properties and methods associated with "),E=n(T,"A",{href:!0});var q=a(E);x=l(q,"OnnxConfig"),q.forEach(o),j=l(T,` and the other configuration classes can be overriden if needed. Check out `),y=n(T,"CODE",{});var P=a(y);O=l(P,"BartOnnxConfig"),P.forEach(o),A=l(T," for an advanced example."),T.forEach(o),this.h()},h(){f(E,"href","/docs/transformers/pr_16143/en/main_classes/onnx#transformers.onnx.OnnxConfig")},m(N,T){p(N,c,T),t(c,k),t(c,E),t(E,x),t(c,j),t(c,y),t(y,O),t(c,A)},d(N){N&&o(c)}}}function uE(L){let c,k,E,x,j,y,O,A,N,T,q;return{c(){c=s("p"),k=r(`If your model is larger than 2GB, you will see that many additional files are created during the export. This is `),E=s("em"),x=r("expected"),j=r(" because ONNX uses "),y=s("a"),O=r(`Protocol Buffers`),A=r(` to store the model and these have a size limit of 2GB. See the `),N=s("a"),T=r(`ONNX documentation`),q=r(` for instructions on how to load models with external data.`),this.h()},l(P){c=n(P,"P",{});var z=a(c);k=l(z,`If your model is larger than 2GB, you will see that many additional files are created during the export. This is `),E=n(z,"EM",{});var X=a(E);x=l(X,"expected"),X.forEach(o),j=l(z," because ONNX uses "),y=n(z,"A",{href:!0,rel:!0});var J=a(y);O=l(J,`Protocol Buffers`),J.forEach(o),A=l(z,` to store the model and these have a size limit of 2GB. See the `),N=n(z,"A",{href:!0,rel:!0});var F=a(N);T=l(F,`ONNX documentation`),F.forEach(o),q=l(z,` for instructions on how to load models with external data.`),z.forEach(o),this.h()},h(){f(y,"href","https://developers.google.com/protocol-buffers/"),f(y,"rel","nofollow"),f(N,"href","https://github.com/onnx/onnx/blob/master/docs/ExternalData.md"),f(N,"rel","nofollow")},m(P,z){p(P,c,z),t(c,k),t(c,E),t(E,x),t(c,j),t(c,y),t(y,O),t(c,A),t(c,N),t(N,T),t(c,q)},d(P){P&&o(c)}}}function mE(L){let c,k;return{c(){c=s("p"),k=r(`This is the very beginning of our experiments with TorchScript and we are still exploring its capabilities with variable-input-size models. It is a focus of interest to us and we will deepen our analysis in upcoming releases, with more code examples, a more flexible implementation, and benchmarks comparing python-based codes with compiled TorchScript.`)},l(E){c=n(E,"P",{});var x=a(c);k=l(x,`This is the very beginning of our experiments with TorchScript and we are still exploring its capabilities with variable-input-size models. 
It is a focus of interest to us and we will deepen our analysis in upcoming releases, with more code examples, a more flexible implementation, and benchmarks comparing python-based codes with compiled TorchScript.`),x.forEach(o)},m(E,x){p(E,c,x),t(c,k)},d(E){E&&o(c)}}}function gE(L){let c,k,E,x,j,y,O,A,N,T,q,P,z,X,J,F,zt,we,He,U,R,Ee,se,$e,C,hs,It,V,fs,ne,cs,us,_n,vd,wd,dl,ms,Ed,hl,We,$d,vn,bd,yd,fl,gs,xd,cl,$,wn,kd,Td,En,Nd,Od,$n,jd,Ad,bn,Cd,Dd,yn,Pd,Sd,xn,qd,zd,kn,Id,Md,Tn,Bd,Ld,Nn,Xd,Fd,On,Rd,Hd,jn,Wd,Kd,An,Ud,Vd,Cn,Gd,Jd,Dn,Yd,Qd,Pn,Zd,eh,Sn,th,oh,qn,sh,nh,zn,ah,rh,In,lh,ih,Mn,ph,ul,_s,dh,ml,Ke,Mt,hh,Bn,fh,ch,uh,Ln,mh,gl,be,Ue,Xn,Bt,gh,Fn,_h,_l,vs,vh,vl,Lt,wl,Ve,wh,Rn,Eh,$h,El,Xt,$l,ws,bh,bl,Ft,yl,Es,yh,xl,Rt,kl,ae,xh,Hn,kh,Th,Wn,Nh,Oh,Tl,Y,jh,Kn,Ah,Ch,Ht,Dh,Ph,Wt,Sh,qh,Nl,Kt,Ol,Ge,zh,Un,Ih,Mh,jl,Ut,Al,Je,Bh,Vt,Lh,Xh,Cl,Gt,Dl,$s,Fh,Pl,Jt,Sl,re,Rh,Vn,Hh,Wh,Gn,Kh,Uh,ql,Yt,zl,ye,Ye,Jn,Qt,Vh,Yn,Gh,Il,Qe,Jh,Qn,Yh,Qh,Ml,Ze,Zn,Zt,ea,Zh,ef,ta,tf,of,I,eo,to,oa,sf,nf,sa,af,rf,na,aa,lf,pf,oo,so,ra,df,hf,la,ff,cf,ia,pa,uf,mf,no,da,ha,gf,_f,fa,ca,vf,wf,ao,ua,ma,Ef,$f,ga,_a,bf,yf,ro,lo,va,xf,kf,wa,Tf,Nf,Ea,$a,Of,jf,io,ba,ya,Af,Cf,xa,ka,Df,Pf,po,Ta,Na,Sf,qf,Oa,ja,zf,Bl,et,If,Aa,Mf,Bf,Ll,ho,Xl,le,Lf,Ca,Xf,Ff,Da,Rf,Hf,Fl,fo,Rl,bs,Wf,Hl,co,Wl,Q,Kf,Pa,Uf,Vf,Sa,Gf,Jf,qa,Yf,Qf,Kl,tt,Ul,xe,ot,za,uo,Zf,Ia,ec,Vl,ys,tc,Gl,ie,Ma,oc,sc,Ba,nc,ac,La,rc,Jl,xs,lc,Yl,ke,st,Xa,mo,ic,Fa,pc,Ql,ks,dc,Zl,pe,Ts,hc,Ns,fc,cc,Os,uc,js,mc,gc,As,_c,Cs,vc,ei,nt,ti,at,wc,Ra,Ec,$c,oi,go,si,H,bc,Ha,yc,xc,Wa,kc,Tc,Ka,Nc,Oc,Ua,jc,Ac,ni,rt,ai,Ds,Cc,ri,_o,li,Ps,Dc,ii,vo,pi,Ss,Pc,di,wo,hi,W,Sc,Va,qc,zc,Ga,Ic,Mc,Ja,Bc,Lc,Ya,Xc,Fc,fi,Eo,ci,lt,ui,Te,it,Qa,$o,Rc,Za,Hc,mi,de,Wc,er,Kc,Uc,tr,Vc,Gc,gi,bo,_i,M,Jc,or,Yc,Qc,sr,Zc,eu,nr,tu,ou,ar,su,nu,rr,au,ru,vi,yo,wi,pt,Ei,Ne,dt,lr,xo,lu,ir,iu,$i,he,pu,pr,du,hu,dr,fu,cu,bi,ko,yi,ht,uu,hr,mu,gu,xi,Oe,ft,fr,To,_u,cr,vu,ki,qs,wu,Ti,fe,No,Eu,ur,$u,bu,yu,zs,xu,mr,ku,Tu,Is,Nu,gr,Ou,Ni,ct,ju,Oo,Au,Cu,Oi,je,ut,_r,jo,Du,vr,Pu,ji,mt,Ai,gt,Su,Ao,qu,zu,Ci,Ms,Iu,Di,Bs,Mu,Pi,_t,wr,Bu,Lu,Co,Xu,Er,Fu,Ru,Si,Ls,Hu,qi,Ae,vt,$r,Do,Wu,br,Ku,zi,Ce,wt,yr,Po,Uu,xr,Vu,Ii,ce,Gu,kr,Ju,Yu,Tr,Qu,Zu,Mi,Z,em,Nr,tm,om,Or,sm,nm,jr,am,rm,Bi,Et,lm,Ar,im,pm,Li,De,$t,Cr,So,dm,Dr,hm,Xi,Xs,fm,Fi,Fs,cm,Ri,Rs,Pr,um,Hi,Hs,mm,Wi,Ws,gm,Ki,Pe,bt,Sr,qo,_m,qr,vm,Ui,Ks,wm,Vi,Se,yt,zr,zo,Em,Ir,$m,Gi,G,bm,Mr,ym,xm,Br,km,Tm,Lr,Nm,Om,Xr,jm,Ji,Io,Yi,qe,xt,Fr,Mo,Am,Rr,Cm,Qi,ee,Dm,Hr,Pm,Sm,Wr,qm,zm,Kr,Im,Mm,Zi,Bo,ep,ze,kt,Ur,Lo,Bm,Vr,Lm,tp,Tt,Xm,Gr,Fm,Rm,op,Xo,sp,Ie,Nt,Jr,Fo,Hm,Yr,Wm,np,ue,Km,Ro,Um,Vm,Ho,Gm,Jm,ap,me,Qr,Ym,Qm,Us,Zm,Wo,eg,tg,Me,og,Ko,sg,ng,Uo,ag,rg,rp,Be,Ot,Zr,Vo,lg,el,ig,lp,B,pg,Go,dg,hg,Jo,fg,cg,Yo,ug,mg,Qo,gg,_g,Zo,vg,wg,ip,Le,jt,tl,es,Eg,ol,$g,pp,Vs,bg,dp,Gs,Xe,yg,ts,xg,kg,os,Tg,Ng,hp,Fe,At,sl,ss,Og,nl,jg,fp,ge,Ag,ns,Cg,Dg,al,Pg,Sg,cp,as,up,Js,qg,mp,Ys,zg,gp,rs,_p,Qs,Ig,vp,ls,wp,Zs,Mg,Ep,Ct,Bg,is,Lg,Xg,$p;return y=new S({}),se=new S({}),Bt=new S({}),Lt=new D({props:{code:"pip install transformers[onnx]",highlighted:"pip install transformers[onnx]"}}),Xt=new D({props:{code:`python -m transformers.onnx --help usage: Hugging Face Transformers ONNX exporter [-h] -m MODEL [--feature {causal-lm, ...}] [--opset OPSET] [--atol ATOL] output positional arguments: output Path indicating where to store generated ONNX model. optional arguments: -h, --help show this help message and exit -m MODEL, --model MODEL Model ID on huggingface.co or path on disk to load model from. --feature {causal-lm, ...} The type of features to export the model with. --opset OPSET ONNX opset version to export the model with. 
--atol ATOL Absolute difference tolerence when validating the model.`,highlighted:`python -m transformers.onnx --<span class="hljs-built_in">help</span> usage: Hugging Face Transformers ONNX exporter [-h] -m MODEL [--feature {causal-lm, ...}] [--opset OPSET] [--atol ATOL] output positional arguments: output Path indicating <span class="hljs-built_in">where</span> to store generated ONNX model. optional arguments: -h, --<span class="hljs-built_in">help</span> show this <span class="hljs-built_in">help</span> message and <span class="hljs-built_in">exit</span> -m MODEL, --model MODEL Model ID on huggingface.co or path on disk to load model from. --feature {causal-lm, ...} The <span class="hljs-built_in">type</span> of features to <span class="hljs-built_in">export</span> the model with. --opset OPSET ONNX opset version to <span class="hljs-built_in">export</span> the model with. --atol ATOL Absolute difference tolerence when validating the model.`}}),Ft=new D({props:{code:"python -m transformers.onnx --model=distilbert-base-uncased onnx/",highlighted:"python -m transformers.onnx --model=distilbert-base-uncased onnx/"}}),Rt=new D({props:{code:`Validating ONNX model... -[\u2713] ONNX model output names match reference model ({'last_hidden_state'}) - Validating ONNX Model output "last_hidden_state": -[\u2713] (2, 8, 768) matches (2, 8, 768) -[\u2713] all values close (atol: 1e-05) All good, model saved at: onnx/model.onnx`,highlighted:`Validating ONNX model... -[\u2713] ONNX model output names match reference model ({<span class="hljs-string">&#x27;last_hidden_state&#x27;</span>}) - Validating ONNX Model output <span class="hljs-string">&quot;last_hidden_state&quot;</span>: -[\u2713] (2, 8, 768) matches (2, 8, 768) -[\u2713] all values close (atol: 1e-05) All good, model saved at: onnx/model.onnx`}}),Kt=new D({props:{code:`from transformers import AutoTokenizer from onnxruntime import InferenceSession tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased") session = InferenceSession("onnx/model.onnx") # ONNX Runtime expects NumPy arrays as input inputs = tokenizer("Using DistilBERT with ONNX Runtime!", return_tensors="np") outputs = session.run(output_names=["last_hidden_state"], input_feed=dict(inputs))`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> onnxruntime <span class="hljs-keyword">import</span> InferenceSession <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>session = InferenceSession(<span class="hljs-string">&quot;onnx/model.onnx&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># ONNX Runtime expects NumPy arrays as input</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Using DistilBERT with ONNX Runtime!&quot;</span>, return_tensors=<span class="hljs-string">&quot;np&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = session.run(output_names=[<span class="hljs-string">&quot;last_hidden_state&quot;</span>], input_feed=<span class="hljs-built_in">dict</span>(inputs))`}}),Ut=new D({props:{code:`from transformers.models.distilbert import DistilBertConfig, DistilBertOnnxConfig config = DistilBertConfig() onnx_config = 
DistilBertOnnxConfig(config) print(list(onnx_config.outputs.keys()))`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers.models.distilbert <span class="hljs-keyword">import</span> DistilBertConfig, DistilBertOnnxConfig <span class="hljs-meta">&gt;&gt;&gt; </span>config = DistilBertConfig() <span class="hljs-meta">&gt;&gt;&gt; </span>onnx_config = DistilBertOnnxConfig(config) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(<span class="hljs-built_in">list</span>(onnx_config.outputs.keys())) [<span class="hljs-string">&quot;last_hidden_state&quot;</span>]`}}),Gt=new D({props:{code:"python -m transformers.onnx --model=keras-io/transformers-qa onnx/",highlighted:"python -m transformers.onnx --model=keras-io/transformers-qa onnx/"}}),Jt=new aE({props:{group1:{id:"pt",code:`from transformers import AutoTokenizer, AutoModelForSequenceClassification # Load tokenizer and PyTorch weights form the Hub tokenizer = tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased") pt_model = AutoModelForSequenceClassification.from_pretrained("distilbert-base-uncased") # Save to disk tokenizer.save_pretrained("local-pt-checkpoint") pt_model.save_pretrained("local-pt-checkpoint")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer, AutoModelForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Load tokenizer and PyTorch weights form the Hub</span> <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>pt_model = AutoModelForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Save to disk</span> <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.save_pretrained(<span class="hljs-string">&quot;local-pt-checkpoint&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>pt_model.save_pretrained(<span class="hljs-string">&quot;local-pt-checkpoint&quot;</span>)`},group2:{id:"tf",code:`from transformers import AutoTokenizer, TFAutoModelForSequenceClassification # Load tokenizer and TensorFlow weights from the Hub tokenizer = tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased") tf_model = TFAutoModelForSequenceClassification.from_pretrained("distilbert-base-uncased") # Save to disk tokenizer.save_pretrained("local-tf-checkpoint") tf_model.save_pretrained("local-tf-checkpoint")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer, TFAutoModelForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Load tokenizer and TensorFlow weights from the Hub</span> <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tf_model = TFAutoModelForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Save to disk</span> <span 
class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.save_pretrained(<span class="hljs-string">&quot;local-tf-checkpoint&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tf_model.save_pretrained(<span class="hljs-string">&quot;local-tf-checkpoint&quot;</span>)`}}}),Yt=new aE({props:{group1:{id:"pt",code:"python -m transformers.onnx --model=local-pt-checkpoint onnx/",highlighted:"python -m transformers.onnx --model=local-pt-checkpoint onnx/"},group2:{id:"tf",code:"python -m transformers.onnx --model=local-tf-checkpoint onnx/",highlighted:"python -m transformers.onnx --model=local-tf-checkpoint onnx/"}}}),Qt=new S({}),ho=new D({props:{code:`from transformers.onnx.features import FeaturesManager distilbert_features = list(FeaturesManager.get_supported_features_for_model_type("distilbert").keys()) print(distilbert_features)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers.onnx.features <span class="hljs-keyword">import</span> FeaturesManager <span class="hljs-meta">&gt;&gt;&gt; </span>distilbert_features = <span class="hljs-built_in">list</span>(FeaturesManager.get_supported_features_for_model_type(<span class="hljs-string">&quot;distilbert&quot;</span>).keys()) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(distilbert_features) [<span class="hljs-string">&quot;default&quot;</span>, <span class="hljs-string">&quot;masked-lm&quot;</span>, <span class="hljs-string">&quot;causal-lm&quot;</span>, <span class="hljs-string">&quot;sequence-classification&quot;</span>, <span class="hljs-string">&quot;token-classification&quot;</span>, <span class="hljs-string">&quot;question-answering&quot;</span>]`}}),fo=new D({props:{code:`python -m transformers.onnx --model=distilbert-base-uncased-finetuned-sst-2-english \\ --feature=sequence-classification onnx/`,highlighted:`python -m transformers.onnx --model=distilbert-base-uncased-finetuned-sst-2-english \\ --feature=sequence-classification onnx/`}}),co=new D({props:{code:`Validating ONNX model... -[\u2713] ONNX model output names match reference model ({'logits'}) - Validating ONNX Model output "logits": -[\u2713] (2, 2) matches (2, 2) -[\u2713] all values close (atol: 1e-05) All good, model saved at: onnx/model.onnx`,highlighted:`Validating ONNX model... 
-[\u2713] ONNX model output names match reference model ({<span class="hljs-string">&#x27;logits&#x27;</span>}) - Validating ONNX Model output <span class="hljs-string">&quot;logits&quot;</span>: -[\u2713] (2, 2) matches (2, 2) -[\u2713] all values close (atol: 1e-05) All good, model saved at: onnx/model.onnx`}}),tt=new gn({props:{$$slots:{default:[dE]},$$scope:{ctx:L}}}),uo=new S({}),mo=new S({}),nt=new gn({props:{$$slots:{default:[hE]},$$scope:{ctx:L}}}),go=new D({props:{code:`from typing import Mapping, OrderedDict from transformers.onnx import OnnxConfig class DistilBertOnnxConfig(OnnxConfig): @property def inputs(self) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("input_ids", {0: "batch", 1: "sequence"}), ("attention_mask", {0: "batch", 1: "sequence"}), ] )`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> typing <span class="hljs-keyword">import</span> Mapping, OrderedDict <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers.onnx <span class="hljs-keyword">import</span> OnnxConfig <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">class</span> <span class="hljs-title class_">DistilBertOnnxConfig</span>(<span class="hljs-title class_ inherited__">OnnxConfig</span>): <span class="hljs-meta">... </span> @<span class="hljs-built_in">property</span> <span class="hljs-meta">... </span> <span class="hljs-keyword">def</span> <span class="hljs-title function_">inputs</span>(<span class="hljs-params">self</span>) -&gt; Mapping[<span class="hljs-built_in">str</span>, Mapping[<span class="hljs-built_in">int</span>, <span class="hljs-built_in">str</span>]]: <span class="hljs-meta">... </span> <span class="hljs-keyword">return</span> OrderedDict( <span class="hljs-meta">... </span> [ <span class="hljs-meta">... </span> (<span class="hljs-string">&quot;input_ids&quot;</span>, {<span class="hljs-number">0</span>: <span class="hljs-string">&quot;batch&quot;</span>, <span class="hljs-number">1</span>: <span class="hljs-string">&quot;sequence&quot;</span>}), <span class="hljs-meta">... </span> (<span class="hljs-string">&quot;attention_mask&quot;</span>, {<span class="hljs-number">0</span>: <span class="hljs-string">&quot;batch&quot;</span>, <span class="hljs-number">1</span>: <span class="hljs-string">&quot;sequence&quot;</span>}), <span class="hljs-meta">... </span> ] <span class="hljs-meta">... 
</span> )`}}),rt=new gn({props:{$$slots:{default:[fE]},$$scope:{ctx:L}}}),_o=new D({props:{code:`from transformers import AutoConfig config = AutoConfig.from_pretrained("distilbert-base-uncased") onnx_config = DistilBertOnnxConfig(config)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>onnx_config = DistilBertOnnxConfig(config)`}}),vo=new D({props:{code:"print(onnx_config.default_onnx_opset)",highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(onnx_config.default_onnx_opset) <span class="hljs-number">11</span>`}}),wo=new D({props:{code:"print(onnx_config.outputs)",highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(onnx_config.outputs) OrderedDict([(<span class="hljs-string">&quot;last_hidden_state&quot;</span>, {<span class="hljs-number">0</span>: <span class="hljs-string">&quot;batch&quot;</span>, <span class="hljs-number">1</span>: <span class="hljs-string">&quot;sequence&quot;</span>})])`}}),Eo=new D({props:{code:`from transformers import AutoConfig config = AutoConfig.from_pretrained("distilbert-base-uncased") onnx_config_for_seq_clf = DistilBertOnnxConfig(config, task="sequence-classification") print(onnx_config_for_seq_clf.outputs)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>onnx_config_for_seq_clf = DistilBertOnnxConfig(config, task=<span class="hljs-string">&quot;sequence-classification&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(onnx_config_for_seq_clf.outputs) OrderedDict([(<span class="hljs-string">&#x27;logits&#x27;</span>, {<span class="hljs-number">0</span>: <span class="hljs-string">&#x27;batch&#x27;</span>})])`}}),lt=new gn({props:{$$slots:{default:[cE]},$$scope:{ctx:L}}}),$o=new S({}),bo=new D({props:{code:`from pathlib import Path from transformers.onnx import export from transformers import AutoTokenizer, AutoModel onnx_path = Path("model.onnx") model_ckpt = "distilbert-base-uncased" base_model = AutoModel.from_pretrained(model_ckpt) tokenizer = AutoTokenizer.from_pretrained(model_ckpt) onnx_inputs, onnx_outputs = export(tokenizer, base_model, onnx_config, onnx_config.default_onnx_opset, onnx_path)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> pathlib <span class="hljs-keyword">import</span> Path <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers.onnx <span class="hljs-keyword">import</span> export <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer, AutoModel <span class="hljs-meta">&gt;&gt;&gt; </span>onnx_path = Path(<span class="hljs-string">&quot;model.onnx&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model_ckpt = <span class="hljs-string">&quot;distilbert-base-uncased&quot;</span> <span 
class="hljs-meta">&gt;&gt;&gt; </span>base_model = AutoModel.from_pretrained(model_ckpt) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(model_ckpt) <span class="hljs-meta">&gt;&gt;&gt; </span>onnx_inputs, onnx_outputs = export(tokenizer, base_model, onnx_config, onnx_config.default_onnx_opset, onnx_path)`}}),yo=new D({props:{code:`import onnx onnx_model = onnx.load("model.onnx") onnx.checker.check_model(onnx_model)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> onnx <span class="hljs-meta">&gt;&gt;&gt; </span>onnx_model = onnx.load(<span class="hljs-string">&quot;model.onnx&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>onnx.checker.check_model(onnx_model)`}}),pt=new gn({props:{$$slots:{default:[uE]},$$scope:{ctx:L}}}),xo=new S({}),ko=new D({props:{code:`from transformers.onnx import validate_model_outputs validate_model_outputs( onnx_config, tokenizer, base_model, onnx_path, onnx_outputs, onnx_config.atol_for_validation )`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers.onnx <span class="hljs-keyword">import</span> validate_model_outputs <span class="hljs-meta">&gt;&gt;&gt; </span>validate_model_outputs( <span class="hljs-meta">... </span> onnx_config, tokenizer, base_model, onnx_path, onnx_outputs, onnx_config.atol_for_validation <span class="hljs-meta">... </span>)`}}),To=new S({}),jo=new S({}),mt=new gn({props:{$$slots:{default:[mE]},$$scope:{ctx:L}}}),Do=new S({}),Po=new S({}),So=new S({}),qo=new S({}),zo=new S({}),Io=new D({props:{code:`from transformers import BertModel, BertTokenizer, BertConfig import torch enc = BertTokenizer.from_pretrained("bert-base-uncased") # Tokenizing input text text = "[CLS] Who was Jim Henson ? [SEP] Jim Henson was a puppeteer [SEP]" tokenized_text = enc.tokenize(text) # Masking one of the input tokens masked_index = 8 tokenized_text[masked_index] = "[MASK]" indexed_tokens = enc.convert_tokens_to_ids(tokenized_text) segments_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1] # Creating a dummy input tokens_tensor = torch.tensor([indexed_tokens]) segments_tensors = torch.tensor([segments_ids]) dummy_input = [tokens_tensor, segments_tensors] # Initializing the model with the torchscript flag # Flag set to True even though it is not necessary as this model does not have an LM Head. config = BertConfig( vocab_size_or_config_json_file=32000, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, torchscript=True, ) # Instantiating the model model = BertModel(config) # The model needs to be in evaluation mode model.eval() # If you are instantiating the model with *from_pretrained* you can also easily set the TorchScript flag model = BertModel.from_pretrained("bert-base-uncased", torchscript=True) # Creating the trace traced_model = torch.jit.trace(model, [tokens_tensor, segments_tensors]) torch.jit.save(traced_model, "traced_bert.pt")`,highlighted:`<span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BertModel, BertTokenizer, BertConfig <span class="hljs-keyword">import</span> torch enc = BertTokenizer.from_pretrained(<span class="hljs-string">&quot;bert-base-uncased&quot;</span>) <span class="hljs-comment"># Tokenizing input text</span> text = <span class="hljs-string">&quot;[CLS] Who was Jim Henson ? 
[SEP] Jim Henson was a puppeteer [SEP]&quot;</span> tokenized_text = enc.tokenize(text) <span class="hljs-comment"># Masking one of the input tokens</span> masked_index = <span class="hljs-number">8</span> tokenized_text[masked_index] = <span class="hljs-string">&quot;[MASK]&quot;</span> indexed_tokens = enc.convert_tokens_to_ids(tokenized_text) segments_ids = [<span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>] <span class="hljs-comment"># Creating a dummy input</span> tokens_tensor = torch.tensor([indexed_tokens]) segments_tensors = torch.tensor([segments_ids]) dummy_input = [tokens_tensor, segments_tensors] <span class="hljs-comment"># Initializing the model with the torchscript flag</span> <span class="hljs-comment"># Flag set to True even though it is not necessary as this model does not have an LM Head.</span> config = BertConfig( vocab_size_or_config_json_file=<span class="hljs-number">32000</span>, hidden_size=<span class="hljs-number">768</span>, num_hidden_layers=<span class="hljs-number">12</span>, num_attention_heads=<span class="hljs-number">12</span>, intermediate_size=<span class="hljs-number">3072</span>, torchscript=<span class="hljs-literal">True</span>, ) <span class="hljs-comment"># Instantiating the model</span> model = BertModel(config) <span class="hljs-comment"># The model needs to be in evaluation mode</span> model.<span class="hljs-built_in">eval</span>() <span class="hljs-comment"># If you are instantiating the model with *from_pretrained* you can also easily set the TorchScript flag</span> model = BertModel.from_pretrained(<span class="hljs-string">&quot;bert-base-uncased&quot;</span>, torchscript=<span class="hljs-literal">True</span>) <span class="hljs-comment"># Creating the trace</span> traced_model = torch.jit.trace(model, [tokens_tensor, segments_tensors]) torch.jit.save(traced_model, <span class="hljs-string">&quot;traced_bert.pt&quot;</span>)`}}),Mo=new S({}),Bo=new D({props:{code:`loaded_model = torch.jit.load("traced_bert.pt") loaded_model.eval() all_encoder_layers, pooled_output = loaded_model(*dummy_input)`,highlighted:`loaded_model = torch.jit.load(<span class="hljs-string">&quot;traced_bert.pt&quot;</span>) loaded_model.<span class="hljs-built_in">eval</span>() all_encoder_layers, pooled_output = loaded_model(*dummy_input)`}}),Lo=new S({}),Xo=new D({props:{code:"traced_model(tokens_tensor, segments_tensors)",highlighted:"traced_model(tokens_tensor, segments_tensors)"}}),Fo=new S({}),Vo=new S({}),es=new S({}),ss=new S({}),as=new D({props:{code:`from transformers import BertModel, BertTokenizer, BertConfig import torch import torch.neuron`,highlighted:`<span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BertModel, BertTokenizer, BertConfig <span class="hljs-keyword">import</span> torch <span class="hljs-keyword">import</span> torch.neuron`}}),rs=new D({props:{code:"torch.jit.trace(model, [tokens_tensor, segments_tensors])",highlighted:"torch.jit.trace(model, [tokens_tensor, segments_tensors])"}}),ls=new D({props:{code:"torch.neuron.trace(model, [token_tensor, 
segments_tensors])",highlighted:"torch.neuron.trace(model, [token_tensor, segments_tensors])"}}),{c(){c=s("meta"),k=d(),E=s("h1"),x=s("a"),j=s("span"),u(y.$$.fragment),O=d(),A=s("span"),N=r("Exporting \u{1F917} Transformers Models"),T=d(),q=s("p"),P=r(`If you need to deploy \u{1F917} Transformers models in production environments, we recommend exporting them to a serialized format that can be loaded and executed on specialized runtimes and hardware. In this guide, we\u2019ll show you how to export \u{1F917} Transformers models in two widely used formats: ONNX and TorchScript.`),z=d(),X=s("p"),J=r(`Once exported, a model can optimized for inference via techniques such as quantization and pruning. If you are interested in optimizing your models to run with maximum efficiency, check out the `),F=s("a"),zt=r(`\u{1F917} Optimum library`),we=r("."),He=d(),U=s("h2"),R=s("a"),Ee=s("span"),u(se.$$.fragment),$e=d(),C=s("span"),hs=r("ONNX"),It=d(),V=s("p"),fs=r("The "),ne=s("a"),cs=r("ONNX (Open Neural Network eXchange)"),us=r(` project is an open standard that defines a common set of operators and a common file format to represent deep learning models in a wide variety of frameworks, including PyTorch and TensorFlow. When a model is exported to the ONNX format, these operators are used to construct a computational graph (often called an `),_n=s("em"),vd=r("intermediate representation"),wd=r(`) which represents the flow of data through the neural network.`),dl=d(),ms=s("p"),Ed=r(`By exposing a graph with standardized operators and data types, ONNX makes it easy to switch between frameworks. For example, a model trained in PyTorch can be exported to ONNX format and then imported in TensorFlow (and vice versa).`),hl=d(),We=s("p"),$d=r("\u{1F917} Transformers provides a "),vn=s("code"),bd=r("transformers.onnx"),yd=r(` package that enables you to convert model checkpoints to an ONNX graph by leveraging configuration objects. 
These configuration objects come ready made for a number of model architectures, and are designed to be easily extendable to other architectures.`),fl=d(),gs=s("p"),xd=r("Ready-made configurations include the following architectures:"),cl=d(),$=s("ul"),wn=s("li"),kd=r("ALBERT"),Td=d(),En=s("li"),Nd=r("BART"),Od=d(),$n=s("li"),jd=r("BERT"),Ad=d(),bn=s("li"),Cd=r("CamemBERT"),Dd=d(),yn=s("li"),Pd=r("Data2VecText"),Sd=d(),xn=s("li"),qd=r("DistilBERT"),zd=d(),kn=s("li"),Id=r("ELECTRA"),Md=d(),Tn=s("li"),Bd=r("GPT Neo"),Ld=d(),Nn=s("li"),Xd=r("I-BERT"),Fd=d(),On=s("li"),Rd=r("LayoutLM"),Hd=d(),jn=s("li"),Wd=r("M2M100"),Kd=d(),An=s("li"),Ud=r("Marian"),Vd=d(),Cn=s("li"),Gd=r("mBART"),Jd=d(),Dn=s("li"),Yd=r("OpenAI GPT-2"),Qd=d(),Pn=s("li"),Zd=r("PLBart"),eh=d(),Sn=s("li"),th=r("RoBERTa"),oh=d(),qn=s("li"),sh=r("T5"),nh=d(),zn=s("li"),ah=r("ViT"),rh=d(),In=s("li"),lh=r("XLM-RoBERTa"),ih=d(),Mn=s("li"),ph=r("XLM-RoBERTa-XL"),ul=d(),_s=s("p"),dh=r("In the next two sections, we\u2019ll show you how to:"),ml=d(),Ke=s("ul"),Mt=s("li"),hh=r("Export a supported model using the "),Bn=s("code"),fh=r("transformers.onnx"),ch=r(" package."),uh=d(),Ln=s("li"),mh=r("Export a custom model for an unsupported architecture."),gl=d(),be=s("h3"),Ue=s("a"),Xn=s("span"),u(Bt.$$.fragment),gh=d(),Fn=s("span"),_h=r("Exporting a model to ONNX"),_l=d(),vs=s("p"),vh=r(`To export a \u{1F917} Transformers model to ONNX, you\u2019ll first need to install some extra dependencies:`),vl=d(),u(Lt.$$.fragment),wl=d(),Ve=s("p"),wh=r("The "),Rn=s("code"),Eh=r("transformers.onnx"),$h=r(" package can then be used as a Python module:"),El=d(),u(Xt.$$.fragment),$l=d(),ws=s("p"),bh=r("Exporting a checkpoint using a ready-made configuration can be done as follows:"),bl=d(),u(Ft.$$.fragment),yl=d(),Es=s("p"),yh=r("which should show the following logs:"),xl=d(),u(Rt.$$.fragment),kl=d(),ae=s("p"),xh=r("This exports an ONNX graph of the checkpoint defined by the "),Hn=s("code"),kh=r("--model"),Th=r(` argument. In this example it is `),Wn=s("code"),Nh=r("distilbert-base-uncased"),Oh=r(`, but it can be any checkpoint on the Hugging Face Hub or one that\u2019s stored locally.`),Tl=d(),Y=s("p"),jh=r("The resulting "),Kn=s("code"),Ah=r("model.onnx"),Ch=r(" file can then be run on one of the "),Ht=s("a"),Dh=r(`many accelerators`),Ph=r(` that support the ONNX standard. For example, we can load and run the model with `),Wt=s("a"),Sh=r(`ONNX Runtime`),qh=r(" as follows:"),Nl=d(),u(Kt.$$.fragment),Ol=d(),Ge=s("p"),zh=r("The required output names (i.e. "),Un=s("code"),Ih=r('["last_hidden_state"]'),Mh=r(`) can be obtained by taking a look at the ONNX configuration of each model. For example, for DistilBERT we have:`),jl=d(),u(Ut.$$.fragment),Al=d(),Je=s("p"),Bh=r(`The process is identical for TensorFlow checkpoints on the Hub. For example, we can export a pure TensorFlow checkpoint from the `),Vt=s("a"),Lh=r(`Keras organization`),Xh=r(" as follows:"),Cl=d(),u(Gt.$$.fragment),Dl=d(),$s=s("p"),Fh=r(`To export a model that\u2019s stored locally, you\u2019ll need to have the model\u2019s weights and tokenizer files stored in a directory. 
For example, we can load and save a checkpoint as follows:`),Pl=d(),u(Jt.$$.fragment),Sl=d(),re=s("p"),Rh=r("Once the checkpoint is saved, we can export it to ONNX by pointing the "),Vn=s("code"),Hh=r("--model"),Wh=r(` argument of the `),Gn=s("code"),Kh=r("transformers.onnx"),Uh=r(" package to the desired directory:"),ql=d(),u(Yt.$$.fragment),zl=d(),ye=s("h3"),Ye=s("a"),Jn=s("span"),u(Qt.$$.fragment),Vh=d(),Yn=s("span"),Gh=r("Selecting features for different model topologies"),Il=d(),Qe=s("p"),Jh=r("Each ready-made configuration comes with a set of "),Qn=s("em"),Yh=r("features"),Qh=r(` that enable you to export models for different types of topologies or tasks. As shown in the table below, each feature is associated with a different auto class:`),Ml=d(),Ze=s("table"),Zn=s("thead"),Zt=s("tr"),ea=s("th"),Zh=r("Feature"),ef=d(),ta=s("th"),tf=r("Auto Class"),of=d(),I=s("tbody"),eo=s("tr"),to=s("td"),oa=s("code"),sf=r("causal-lm"),nf=r(", "),sa=s("code"),af=r("causal-lm-with-past"),rf=d(),na=s("td"),aa=s("code"),lf=r("AutoModelForCausalLM"),pf=d(),oo=s("tr"),so=s("td"),ra=s("code"),df=r("default"),hf=r(", "),la=s("code"),ff=r("default-with-past"),cf=d(),ia=s("td"),pa=s("code"),uf=r("AutoModel"),mf=d(),no=s("tr"),da=s("td"),ha=s("code"),gf=r("masked-lm"),_f=d(),fa=s("td"),ca=s("code"),vf=r("AutoModelForMaskedLM"),wf=d(),ao=s("tr"),ua=s("td"),ma=s("code"),Ef=r("question-answering"),$f=d(),ga=s("td"),_a=s("code"),bf=r("AutoModelForQuestionAnswering"),yf=d(),ro=s("tr"),lo=s("td"),va=s("code"),xf=r("seq2seq-lm"),kf=r(", "),wa=s("code"),Tf=r("seq2seq-lm-with-past"),Nf=d(),Ea=s("td"),$a=s("code"),Of=r("AutoModelForSeq2SeqLM"),jf=d(),io=s("tr"),ba=s("td"),ya=s("code"),Af=r("sequence-classification"),Cf=d(),xa=s("td"),ka=s("code"),Df=r("AutoModelForSequenceClassification"),Pf=d(),po=s("tr"),Ta=s("td"),Na=s("code"),Sf=r("token-classification"),qf=d(),Oa=s("td"),ja=s("code"),zf=r("AutoModelForTokenClassification"),Bl=d(),et=s("p"),If=r(`For each configuration, you can find the list of supported features via the `),Aa=s("code"),Mf=r("FeaturesManager"),Bf=r(". For example, for DistilBERT we have:"),Ll=d(),u(ho.$$.fragment),Xl=d(),le=s("p"),Lf=r("You can then pass one of these features to the "),Ca=s("code"),Xf=r("--feature"),Ff=r(` argument in the `),Da=s("code"),Rf=r("transformers.onnx"),Hf=r(` package. For example, to export a text-classification model we can pick a fine-tuned model from the Hub and run:`),Fl=d(),u(fo.$$.fragment),Rl=d(),bs=s("p"),Wf=r("which will display the following logs:"),Hl=d(),u(co.$$.fragment),Wl=d(),Q=s("p"),Kf=r(`Notice that in this case, the output names from the fine-tuned model are `),Pa=s("code"),Uf=r("logits"),Vf=r(" instead of the "),Sa=s("code"),Gf=r("last_hidden_state"),Jf=r(` we saw with the `),qa=s("code"),Yf=r("distilbert-base-uncased"),Qf=r(` checkpoint earlier. 
This is expected since the fine-tuned model has a sequence classification head.`),Kl=d(),u(tt.$$.fragment),Ul=d(),xe=s("h3"),ot=s("a"),za=s("span"),u(uo.$$.fragment),Zf=d(),Ia=s("span"),ec=r("Exporting a model for an unsupported architecture"),Vl=d(),ys=s("p"),tc=r(`If you wish to export a model whose architecture is not natively supported by the library, there are three main steps to follow:`),Gl=d(),ie=s("ol"),Ma=s("li"),oc=r("Implement a custom ONNX configuration."),sc=d(),Ba=s("li"),nc=r("Export the model to ONNX."),ac=d(),La=s("li"),rc=r("Validate the outputs of the PyTorch and exported models."),Jl=d(),xs=s("p"),lc=r(`In this section, we\u2019ll look at how DistilBERT was implemented to show what\u2019s involved with each step.`),Yl=d(),ke=s("h4"),st=s("a"),Xa=s("span"),u(mo.$$.fragment),ic=d(),Fa=s("span"),pc=r("Implementing a custom ONNX configuration"),Ql=d(),ks=s("p"),dc=r(`Let\u2019s start with the ONNX configuration object. We provide three abstract classes that you should inherit from, depending on the type of model architecture you wish to export:`),Zl=d(),pe=s("ul"),Ts=s("li"),hc=r("Encoder-based models inherit from "),Ns=s("a"),fc=r("OnnxConfig"),cc=d(),Os=s("li"),uc=r("Decoder-based models inherit from "),js=s("a"),mc=r("OnnxConfigWithPast"),gc=d(),As=s("li"),_c=r("Encoder-decoder models inherit from "),Cs=s("a"),vc=r("OnnxSeq2SeqConfigWithPast"),ei=d(),u(nt.$$.fragment),ti=d(),at=s("p"),wc=r(`Since DistilBERT is an encoder-based model, its configuration inherits from `),Ra=s("code"),Ec=r("OnnxConfig"),$c=r(":"),oi=d(),u(go.$$.fragment),si=d(),H=s("p"),bc=r("Every configuration object must implement the "),Ha=s("code"),yc=r("inputs"),xc=r(` property and return a mapping, where each key corresponds to an expected input, and each value indicates the axis of that input. For DistilBERT, we can see that two inputs are required: `),Wa=s("code"),kc=r("input_ids"),Tc=r(" and "),Ka=s("code"),Nc=r("attention_mask"),Oc=r(`. These inputs have the same shape of `),Ua=s("code"),jc=r("(batch_size, sequence_length)"),Ac=r(` which is why we see the same axes used in the configuration.`),ni=d(),u(rt.$$.fragment),ai=d(),Ds=s("p"),Cc=r(`Once you have implemented an ONNX configuration, you can instantiate it by providing the base model\u2019s configuration as follows:`),ri=d(),u(_o.$$.fragment),li=d(),Ps=s("p"),Dc=r(`The resulting object has several useful properties. For example you can view the ONNX operator set that will be used during the export:`),ii=d(),u(vo.$$.fragment),pi=d(),Ss=s("p"),Pc=r("You can also view the outputs associated with the model as follows:"),di=d(),u(wo.$$.fragment),hi=d(),W=s("p"),Sc=r(`Notice that the outputs property follows the same structure as the inputs; it returns an `),Va=s("code"),qc=r("OrderedDict"),zc=r(` of named outputs and their shapes. The output structure is linked to the choice of feature that the configuration is initialised with. By default, the ONNX configuration is initialized with the `),Ga=s("code"),Ic=r("default"),Mc=r(` feature that corresponds to exporting a model loaded with the `),Ja=s("code"),Bc=r("AutoModel"),Lc=r(` class. If you want to export a different model topology, just provide a different feature to the `),Ya=s("code"),Xc=r("task"),Fc=r(` argument when you initialize the ONNX configuration. 
For example, if we wished to export DistilBERT with a sequence classification head, we could use:`),fi=d(),u(Eo.$$.fragment),ci=d(),u(lt.$$.fragment),ui=d(),Te=s("h4"),it=s("a"),Qa=s("span"),u($o.$$.fragment),Rc=d(),Za=s("span"),Hc=r("Exporting the model"),mi=d(),de=s("p"),Wc=r(`Once you have implemented the ONNX configuration, the next step is to export the model. Here we can use the `),er=s("code"),Kc=r("export()"),Uc=r(` function provided by the `),tr=s("code"),Vc=r("transformers.onnx"),Gc=r(` package. This function expects the ONNX configuration, along with the base model and tokenizer, and the path to save the exported file:`),gi=d(),u(bo.$$.fragment),_i=d(),M=s("p"),Jc=r("The "),or=s("code"),Yc=r("onnx_inputs"),Qc=r(" and "),sr=s("code"),Zc=r("onnx_outputs"),eu=r(" returned by the "),nr=s("code"),tu=r("export()"),ou=r(` function are lists of the keys defined in the `),ar=s("code"),su=r("inputs"),nu=r(" and "),rr=s("code"),au=r("outputs"),ru=r(` properties of the configuration. Once the model is exported, you can test that the model is well formed as follows:`),vi=d(),u(yo.$$.fragment),wi=d(),u(pt.$$.fragment),Ei=d(),Ne=s("h4"),dt=s("a"),lr=s("span"),u(xo.$$.fragment),lu=d(),ir=s("span"),iu=r("Validating the model outputs"),$i=d(),he=s("p"),pu=r(`The final step is to validate that the outputs from the base and exported model agree within some absolute tolerance. Here we can use the `),pr=s("code"),du=r("validate_model_outputs()"),hu=r(" function provided by the "),dr=s("code"),fu=r("transformers.onnx"),cu=r(` package as follows:`),bi=d(),u(ko.$$.fragment),yi=d(),ht=s("p"),uu=r("This function uses the "),hr=s("code"),mu=r("OnnxConfig.generate_dummy_inputs()"),gu=r(` method to generate inputs for the base and exported model, and the absolute tolerance can be defined in the configuration. We generally find numerical agreement in the 1e-6 to 1e-4 range, although anything smaller than 1e-3 is likely to be OK.`),xi=d(),Oe=s("h3"),ft=s("a"),fr=s("span"),u(To.$$.fragment),_u=d(),cr=s("span"),vu=r("Contributing a new configuration to \u{1F917} Transformers"),ki=d(),qs=s("p"),wu=r(`We are looking to expand the set of ready-made configurations and welcome contributions from the community! If you would like to contribute your addition to the library, you will need to:`),Ti=d(),fe=s("ul"),No=s("li"),Eu=r("Implement the ONNX configuration in the corresponding "),ur=s("code"),$u=r("configuration_<model_name>.py"),bu=r(` file`),yu=d(),zs=s("li"),xu=r("Include the model architecture and corresponding features in "),mr=s("code"),ku=r("FeatureManager"),Tu=d(),Is=s("li"),Nu=r("Add your model architecture to the tests in "),gr=s("code"),Ou=r("test_onnx_v2.py"),Ni=d(),ct=s("p"),ju=r("Check out how the configuration for "),Oo=s("a"),Au=r(`IBERT was contributed`),Cu=r(` to get an idea of what\u2019s involved.`),Oi=d(),je=s("h2"),ut=s("a"),_r=s("span"),u(jo.$$.fragment),Du=d(),vr=s("span"),Pu=r("TorchScript"),ji=d(),u(mt.$$.fragment),Ai=d(),gt=s("p"),Su=r(`According to Pytorch\u2019s documentation: \u201CTorchScript is a way to create serializable and optimizable models from PyTorch code\u201D. Pytorch\u2019s two modules `),Ao=s("a"),qu=r("JIT and TRACE"),zu=r(` allow the developer to export their model to be re-used in other programs, such as efficiency-oriented C++ programs.`),Ci=d(),Ms=s("p"),Iu=r(`We have provided an interface that allows the export of \u{1F917} Transformers models to TorchScript so that they can be reused in a different environment than a Pytorch-based python program. 
Here we explain how to export and use our models using TorchScript.`),Di=d(),Bs=s("p"),Mu=r("Exporting a model requires two things:"),Pi=d(),_t=s("ul"),wr=s("li"),Bu=r("a forward pass with dummy inputs."),Lu=d(),Co=s("li"),Xu=r("model instantiation with the "),Er=s("code"),Fu=r("torchscript"),Ru=r(" flag."),Si=d(),Ls=s("p"),Hu=r("These necessities imply several things developers should be careful about. These are detailed below."),qi=d(),Ae=s("h3"),vt=s("a"),$r=s("span"),u(Do.$$.fragment),Wu=d(),br=s("span"),Ku=r("Implications"),zi=d(),Ce=s("h3"),wt=s("a"),yr=s("span"),u(Po.$$.fragment),Uu=d(),xr=s("span"),Vu=r("TorchScript flag and tied weights"),Ii=d(),ce=s("p"),Gu=r(`This flag is necessary because most of the language models in this repository have tied weights between their `),kr=s("code"),Ju=r("Embedding"),Yu=r(" layer and their "),Tr=s("code"),Qu=r("Decoding"),Zu=r(` layer. TorchScript does not allow the export of models that have tied weights, therefore it is necessary to untie and clone the weights beforehand.`),Mi=d(),Z=s("p"),em=r("This implies that models instantiated with the "),Nr=s("code"),tm=r("torchscript"),om=r(" flag have their "),Or=s("code"),sm=r("Embedding"),nm=r(" layer and "),jr=s("code"),am=r("Decoding"),rm=r(` layer separate, which means that they should not be trained down the line. Training would de-synchronize the two layers, leading to unexpected results.`),Bi=d(),Et=s("p"),lm=r(`This is not the case for models that do not have a Language Model head, as those do not have tied weights. These models can be safely exported without the `),Ar=s("code"),im=r("torchscript"),pm=r(" flag."),Li=d(),De=s("h3"),$t=s("a"),Cr=s("span"),u(So.$$.fragment),dm=d(),Dr=s("span"),hm=r("Dummy inputs and standard lengths"),Xi=d(),Xs=s("p"),fm=r(`The dummy inputs are used to do a model forward pass. While the inputs\u2019 values are propagating through the layers, Pytorch keeps track of the different operations executed on each tensor. These recorded operations are then used to create the \u201Ctrace\u201D of the model.`),Fi=d(),Fs=s("p"),cm=r(`The trace is created relatively to the inputs\u2019 dimensions. It is therefore constrained by the dimensions of the dummy input, and will not work for any other sequence length or batch size. When trying with a different size, an error such as:`),Ri=d(),Rs=s("p"),Pr=s("code"),um=r("The expanded size of the tensor (3) must match the existing size (7) at non-singleton dimension 2"),Hi=d(),Hs=s("p"),mm=r(`will be raised. It is therefore recommended to trace the model with a dummy input size at least as large as the largest input that will be fed to the model during inference. Padding can be performed to fill the missing values. As the model will have been traced with a large input size however, the dimensions of the different matrix will be large as well, resulting in more calculations.`),Wi=d(),Ws=s("p"),gm=r(`It is recommended to be careful of the total number of operations done on each input and to follow performance closely when exporting varying sequence-length models.`),Ki=d(),Pe=s("h3"),bt=s("a"),Sr=s("span"),u(qo.$$.fragment),_m=d(),qr=s("span"),vm=r("Using TorchScript in Python"),Ui=d(),Ks=s("p"),wm=r("Below is an example, showing how to save, load models as well as how to use the trace for inference."),Vi=d(),Se=s("h4"),yt=s("a"),zr=s("span"),u(zo.$$.fragment),Em=d(),Ir=s("span"),$m=r("Saving a model"),Gi=d(),G=s("p"),bm=r("This snippet shows how to use TorchScript to export a "),Mr=s("code"),ym=r("BertModel"),xm=r(". 
Here the "),Br=s("code"),km=r("BertModel"),Tm=r(` is instantiated according to a `),Lr=s("code"),Nm=r("BertConfig"),Om=r(" class and then saved to disk under the filename "),Xr=s("code"),jm=r("traced_bert.pt"),Ji=d(),u(Io.$$.fragment),Yi=d(),qe=s("h4"),xt=s("a"),Fr=s("span"),u(Mo.$$.fragment),Am=d(),Rr=s("span"),Cm=r("Loading a model"),Qi=d(),ee=s("p"),Dm=r("This snippet shows how to load the "),Hr=s("code"),Pm=r("BertModel"),Sm=r(" that was previously saved to disk under the name "),Wr=s("code"),qm=r("traced_bert.pt"),zm=r(`. We are re-using the previously initialised `),Kr=s("code"),Im=r("dummy_input"),Mm=r("."),Zi=d(),u(Bo.$$.fragment),ep=d(),ze=s("h4"),kt=s("a"),Ur=s("span"),u(Lo.$$.fragment),Bm=d(),Vr=s("span"),Lm=r("Using a traced model for inference"),tp=d(),Tt=s("p"),Xm=r("Using the traced model for inference is as simple as using its "),Gr=s("code"),Fm=r("__call__"),Rm=r(" dunder method:"),op=d(),u(Xo.$$.fragment),sp=d(),Ie=s("h3"),Nt=s("a"),Jr=s("span"),u(Fo.$$.fragment),Hm=d(),Yr=s("span"),Wm=r("Deploying HuggingFace TorchScript models on AWS using the Neuron SDK"),np=d(),ue=s("p"),Km=r("AWS introduced the "),Ro=s("a"),Um=r("Amazon EC2 Inf1"),Vm=r(` instance family for low cost, high performance machine learning inference in the cloud. The Inf1 instances are powered by the AWS Inferentia chip, a custom-built hardware accelerator, specializing in deep learning inferencing workloads. `),Ho=s("a"),Gm=r("AWS Neuron"),Jm=r(` is the SDK for Inferentia that supports tracing and optimizing transformers models for deployment on Inf1. The Neuron SDK provides:`),ap=d(),me=s("ol"),Qr=s("li"),Ym=r("Easy-to-use API with one line of code change to trace and optimize a TorchScript model for inference in the cloud."),Qm=d(),Us=s("li"),Zm=r("Out of the box performance optimizations for "),Wo=s("a"),eg=r("improved cost-performance"),tg=d(),Me=s("li"),og=r("Support for HuggingFace transformers models built with either "),Ko=s("a"),sg=r("PyTorch"),ng=r(` or `),Uo=s("a"),ag=r("TensorFlow"),rg=r("."),rp=d(),Be=s("h4"),Ot=s("a"),Zr=s("span"),u(Vo.$$.fragment),lg=d(),el=s("span"),ig=r("Implications"),lp=d(),B=s("p"),pg=r("Transformers Models based on the "),Go=s("a"),dg=r("BERT (Bidirectional Encoder Representations from Transformers)"),hg=r(` architecture, or its variants such as `),Jo=s("a"),fg=r("distilBERT"),cg=r(` and `),Yo=s("a"),ug=r("roBERTa"),mg=r(` will run best on Inf1 for non-generative tasks such as Extractive Question Answering, Sequence Classification, Token Classification. Alternatively, text generation tasks can be adapted to run on Inf1, according to this `),Qo=s("a"),gg=r("AWS Neuron MarianMT tutorial"),_g=r(`. 
More information about models that can be converted out of the box on Inferentia can be found in the `),Zo=s("a"),vg=r("Model Architecture Fit section of the Neuron documentation"),wg=r("."),ip=d(),Le=s("h4"),jt=s("a"),tl=s("span"),u(es.$$.fragment),Eg=d(),ol=s("span"),$g=r("Dependencies"),pp=d(),Vs=s("p"),bg=r("Using AWS Neuron to convert models requires the following dependencies and environment:"),dp=d(),Gs=s("ul"),Xe=s("li"),yg=r("A "),ts=s("a"),xg=r("Neuron SDK environment"),kg=r(`, which comes pre-configured on `),os=s("a"),Tg=r("AWS Deep Learning AMI"),Ng=r("."),hp=d(),Fe=s("h4"),At=s("a"),sl=s("span"),u(ss.$$.fragment),Og=d(),nl=s("span"),jg=r("Converting a Model for AWS Neuron"),fp=d(),ge=s("p"),Ag=r("Using the same script as in "),ns=s("a"),Cg=r("Using TorchScript in Python"),Dg=r(` to trace a \u201CBertModel\u201D, you import `),al=s("code"),Pg=r("torch.neuron"),Sg=r(` framework extension to access the components of the Neuron SDK through a Python API.`),cp=d(),u(as.$$.fragment),up=d(),Js=s("p"),qg=r("And only modify the tracing line of code"),mp=d(),Ys=s("p"),zg=r("from:"),gp=d(),u(rs.$$.fragment),_p=d(),Qs=s("p"),Ig=r("to:"),vp=d(),u(ls.$$.fragment),wp=d(),Zs=s("p"),Mg=r("This change enables Neuron SDK to trace the model and optimize it to run in Inf1 instances."),Ep=d(),Ct=s("p"),Bg=r(`To learn more about AWS Neuron SDK features, tools, example tutorials and latest updates, please see the `),is=s("a"),Lg=r("AWS NeuronSDK documentation"),Xg=r("."),this.h()},l(e){const i=pE('[data-svelte="svelte-1phssyn"]',document.head);c=n(i,"META",{name:!0,content:!0}),i.forEach(o),k=h(e),E=n(e,"H1",{class:!0});var ps=a(E);x=n(ps,"A",{id:!0,class:!0,href:!0});var rl=a(x);j=n(rl,"SPAN",{});var ll=a(j);m(y.$$.fragment,ll),ll.forEach(o),rl.forEach(o),O=h(ps),A=n(ps,"SPAN",{});var il=a(A);N=l(il,"Exporting \u{1F917} Transformers Models"),il.forEach(o),ps.forEach(o),T=h(e),q=n(e,"P",{});var pl=a(q);P=l(pl,`If you need to deploy \u{1F917} Transformers models in production environments, we recommend exporting them to a serialized format that can be loaded and executed on specialized runtimes and hardware. In this guide, we\u2019ll show you how to export \u{1F917} Transformers models in two widely used formats: ONNX and TorchScript.`),pl.forEach(o),z=h(e),X=n(e,"P",{});var ds=a(X);J=l(ds,`Once exported, a model can optimized for inference via techniques such as quantization and pruning. If you are interested in optimizing your models to run with maximum efficiency, check out the `),F=n(ds,"A",{href:!0,rel:!0});var Vg=a(F);zt=l(Vg,`\u{1F917} Optimum library`),Vg.forEach(o),we=l(ds,"."),ds.forEach(o),He=h(e),U=n(e,"H2",{class:!0});var bp=a(U);R=n(bp,"A",{id:!0,class:!0,href:!0});var Gg=a(R);Ee=n(Gg,"SPAN",{});var Jg=a(Ee);m(se.$$.fragment,Jg),Jg.forEach(o),Gg.forEach(o),$e=h(bp),C=n(bp,"SPAN",{});var Yg=a(C);hs=l(Yg,"ONNX"),Yg.forEach(o),bp.forEach(o),It=h(e),V=n(e,"P",{});var en=a(V);fs=l(en,"The "),ne=n(en,"A",{href:!0,rel:!0});var Qg=a(ne);cs=l(Qg,"ONNX (Open Neural Network eXchange)"),Qg.forEach(o),us=l(en,` project is an open standard that defines a common set of operators and a common file format to represent deep learning models in a wide variety of frameworks, including PyTorch and TensorFlow. 
When a model is exported to the ONNX format, these operators are used to construct a computational graph (often called an `),_n=n(en,"EM",{});var Zg=a(_n);vd=l(Zg,"intermediate representation"),Zg.forEach(o),wd=l(en,`) which represents the flow of data through the neural network.`),en.forEach(o),dl=h(e),ms=n(e,"P",{});var e_=a(ms);Ed=l(e_,`By exposing a graph with standardized operators and data types, ONNX makes it easy to switch between frameworks. For example, a model trained in PyTorch can be exported to ONNX format and then imported in TensorFlow (and vice versa).`),e_.forEach(o),hl=h(e),We=n(e,"P",{});var yp=a(We);$d=l(yp,"\u{1F917} Transformers provides a "),vn=n(yp,"CODE",{});var t_=a(vn);bd=l(t_,"transformers.onnx"),t_.forEach(o),yd=l(yp,` package that enables you to convert model checkpoints to an ONNX graph by leveraging configuration objects. These configuration objects come ready made for a number of model architectures, and are designed to be easily extendable to other architectures.`),yp.forEach(o),fl=h(e),gs=n(e,"P",{});var o_=a(gs);xd=l(o_,"Ready-made configurations include the following architectures:"),o_.forEach(o),cl=h(e),$=n(e,"UL",{});var b=a($);wn=n(b,"LI",{});var s_=a(wn);kd=l(s_,"ALBERT"),s_.forEach(o),Td=h(b),En=n(b,"LI",{});var n_=a(En);Nd=l(n_,"BART"),n_.forEach(o),Od=h(b),$n=n(b,"LI",{});var a_=a($n);jd=l(a_,"BERT"),a_.forEach(o),Ad=h(b),bn=n(b,"LI",{});var r_=a(bn);Cd=l(r_,"CamemBERT"),r_.forEach(o),Dd=h(b),yn=n(b,"LI",{});var l_=a(yn);Pd=l(l_,"Data2VecText"),l_.forEach(o),Sd=h(b),xn=n(b,"LI",{});var i_=a(xn);qd=l(i_,"DistilBERT"),i_.forEach(o),zd=h(b),kn=n(b,"LI",{});var p_=a(kn);Id=l(p_,"ELECTRA"),p_.forEach(o),Md=h(b),Tn=n(b,"LI",{});var d_=a(Tn);Bd=l(d_,"GPT Neo"),d_.forEach(o),Ld=h(b),Nn=n(b,"LI",{});var h_=a(Nn);Xd=l(h_,"I-BERT"),h_.forEach(o),Fd=h(b),On=n(b,"LI",{});var f_=a(On);Rd=l(f_,"LayoutLM"),f_.forEach(o),Hd=h(b),jn=n(b,"LI",{});var c_=a(jn);Wd=l(c_,"M2M100"),c_.forEach(o),Kd=h(b),An=n(b,"LI",{});var u_=a(An);Ud=l(u_,"Marian"),u_.forEach(o),Vd=h(b),Cn=n(b,"LI",{});var m_=a(Cn);Gd=l(m_,"mBART"),m_.forEach(o),Jd=h(b),Dn=n(b,"LI",{});var g_=a(Dn);Yd=l(g_,"OpenAI GPT-2"),g_.forEach(o),Qd=h(b),Pn=n(b,"LI",{});var __=a(Pn);Zd=l(__,"PLBart"),__.forEach(o),eh=h(b),Sn=n(b,"LI",{});var v_=a(Sn);th=l(v_,"RoBERTa"),v_.forEach(o),oh=h(b),qn=n(b,"LI",{});var w_=a(qn);sh=l(w_,"T5"),w_.forEach(o),nh=h(b),zn=n(b,"LI",{});var E_=a(zn);ah=l(E_,"ViT"),E_.forEach(o),rh=h(b),In=n(b,"LI",{});var $_=a(In);lh=l($_,"XLM-RoBERTa"),$_.forEach(o),ih=h(b),Mn=n(b,"LI",{});var b_=a(Mn);ph=l(b_,"XLM-RoBERTa-XL"),b_.forEach(o),b.forEach(o),ul=h(e),_s=n(e,"P",{});var y_=a(_s);dh=l(y_,"In the next two sections, we\u2019ll show you how to:"),y_.forEach(o),ml=h(e),Ke=n(e,"UL",{});var xp=a(Ke);Mt=n(xp,"LI",{});var kp=a(Mt);hh=l(kp,"Export a supported model using the "),Bn=n(kp,"CODE",{});var x_=a(Bn);fh=l(x_,"transformers.onnx"),x_.forEach(o),ch=l(kp," package."),kp.forEach(o),uh=h(xp),Ln=n(xp,"LI",{});var k_=a(Ln);mh=l(k_,"Export a custom model for an unsupported architecture."),k_.forEach(o),xp.forEach(o),gl=h(e),be=n(e,"H3",{class:!0});var Tp=a(be);Ue=n(Tp,"A",{id:!0,class:!0,href:!0});var T_=a(Ue);Xn=n(T_,"SPAN",{});var N_=a(Xn);m(Bt.$$.fragment,N_),N_.forEach(o),T_.forEach(o),gh=h(Tp),Fn=n(Tp,"SPAN",{});var O_=a(Fn);_h=l(O_,"Exporting a model to ONNX"),O_.forEach(o),Tp.forEach(o),_l=h(e),vs=n(e,"P",{});var j_=a(vs);vh=l(j_,`To export a \u{1F917} Transformers model to ONNX, you\u2019ll first need to install some extra 
dependencies:`),j_.forEach(o),vl=h(e),m(Lt.$$.fragment,e),wl=h(e),Ve=n(e,"P",{});var Np=a(Ve);wh=l(Np,"The "),Rn=n(Np,"CODE",{});var A_=a(Rn);Eh=l(A_,"transformers.onnx"),A_.forEach(o),$h=l(Np," package can then be used as a Python module:"),Np.forEach(o),El=h(e),m(Xt.$$.fragment,e),$l=h(e),ws=n(e,"P",{});var C_=a(ws);bh=l(C_,"Exporting a checkpoint using a ready-made configuration can be done as follows:"),C_.forEach(o),bl=h(e),m(Ft.$$.fragment,e),yl=h(e),Es=n(e,"P",{});var D_=a(Es);yh=l(D_,"which should show the following logs:"),D_.forEach(o),xl=h(e),m(Rt.$$.fragment,e),kl=h(e),ae=n(e,"P",{});var tn=a(ae);xh=l(tn,"This exports an ONNX graph of the checkpoint defined by the "),Hn=n(tn,"CODE",{});var P_=a(Hn);kh=l(P_,"--model"),P_.forEach(o),Th=l(tn,` argument. In this example it is `),Wn=n(tn,"CODE",{});var S_=a(Wn);Nh=l(S_,"distilbert-base-uncased"),S_.forEach(o),Oh=l(tn,`, but it can be any checkpoint on the Hugging Face Hub or one that\u2019s stored locally.`),tn.forEach(o),Tl=h(e),Y=n(e,"P",{});var Dt=a(Y);jh=l(Dt,"The resulting "),Kn=n(Dt,"CODE",{});var q_=a(Kn);Ah=l(q_,"model.onnx"),q_.forEach(o),Ch=l(Dt," file can then be run on one of the "),Ht=n(Dt,"A",{href:!0,rel:!0});var z_=a(Ht);Dh=l(z_,`many accelerators`),z_.forEach(o),Ph=l(Dt,` that support the ONNX standard. For example, we can load and run the model with `),Wt=n(Dt,"A",{href:!0,rel:!0});var I_=a(Wt);Sh=l(I_,`ONNX Runtime`),I_.forEach(o),qh=l(Dt," as follows:"),Dt.forEach(o),Nl=h(e),m(Kt.$$.fragment,e),Ol=h(e),Ge=n(e,"P",{});var Op=a(Ge);zh=l(Op,"The required output names (i.e. "),Un=n(Op,"CODE",{});var M_=a(Un);Ih=l(M_,'["last_hidden_state"]'),M_.forEach(o),Mh=l(Op,`) can be obtained by taking a look at the ONNX configuration of each model. For example, for DistilBERT we have:`),Op.forEach(o),jl=h(e),m(Ut.$$.fragment,e),Al=h(e),Je=n(e,"P",{});var jp=a(Je);Bh=l(jp,`The process is identical for TensorFlow checkpoints on the Hub. For example, we can export a pure TensorFlow checkpoint from the `),Vt=n(jp,"A",{href:!0,rel:!0});var B_=a(Vt);Lh=l(B_,`Keras organization`),B_.forEach(o),Xh=l(jp," as follows:"),jp.forEach(o),Cl=h(e),m(Gt.$$.fragment,e),Dl=h(e),$s=n(e,"P",{});var L_=a($s);Fh=l(L_,`To export a model that\u2019s stored locally, you\u2019ll need to have the model\u2019s weights and tokenizer files stored in a directory. For example, we can load and save a checkpoint as follows:`),L_.forEach(o),Pl=h(e),m(Jt.$$.fragment,e),Sl=h(e),re=n(e,"P",{});var on=a(re);Rh=l(on,"Once the checkpoint is saved, we can export it to ONNX by pointing the "),Vn=n(on,"CODE",{});var X_=a(Vn);Hh=l(X_,"--model"),X_.forEach(o),Wh=l(on,` argument of the `),Gn=n(on,"CODE",{});var F_=a(Gn);Kh=l(F_,"transformers.onnx"),F_.forEach(o),Uh=l(on," package to the desired directory:"),on.forEach(o),ql=h(e),m(Yt.$$.fragment,e),zl=h(e),ye=n(e,"H3",{class:!0});var Ap=a(ye);Ye=n(Ap,"A",{id:!0,class:!0,href:!0});var R_=a(Ye);Jn=n(R_,"SPAN",{});var H_=a(Jn);m(Qt.$$.fragment,H_),H_.forEach(o),R_.forEach(o),Vh=h(Ap),Yn=n(Ap,"SPAN",{});var W_=a(Yn);Gh=l(W_,"Selecting features for different model topologies"),W_.forEach(o),Ap.forEach(o),Il=h(e),Qe=n(e,"P",{});var Cp=a(Qe);Jh=l(Cp,"Each ready-made configuration comes with a set of "),Qn=n(Cp,"EM",{});var K_=a(Qn);Yh=l(K_,"features"),K_.forEach(o),Qh=l(Cp,` that enable you to export models for different types of topologies or tasks. 
As shown in the table below, each feature is associated with a different auto class:`),Cp.forEach(o),Ml=h(e),Ze=n(e,"TABLE",{});var Dp=a(Ze);Zn=n(Dp,"THEAD",{});var U_=a(Zn);Zt=n(U_,"TR",{});var Pp=a(Zt);ea=n(Pp,"TH",{});var V_=a(ea);Zh=l(V_,"Feature"),V_.forEach(o),ef=h(Pp),ta=n(Pp,"TH",{});var G_=a(ta);tf=l(G_,"Auto Class"),G_.forEach(o),Pp.forEach(o),U_.forEach(o),of=h(Dp),I=n(Dp,"TBODY",{});var K=a(I);eo=n(K,"TR",{});var Sp=a(eo);to=n(Sp,"TD",{});var qp=a(to);oa=n(qp,"CODE",{});var J_=a(oa);sf=l(J_,"causal-lm"),J_.forEach(o),nf=l(qp,", "),sa=n(qp,"CODE",{});var Y_=a(sa);af=l(Y_,"causal-lm-with-past"),Y_.forEach(o),qp.forEach(o),rf=h(Sp),na=n(Sp,"TD",{});var Q_=a(na);aa=n(Q_,"CODE",{});var Z_=a(aa);lf=l(Z_,"AutoModelForCausalLM"),Z_.forEach(o),Q_.forEach(o),Sp.forEach(o),pf=h(K),oo=n(K,"TR",{});var zp=a(oo);so=n(zp,"TD",{});var Ip=a(so);ra=n(Ip,"CODE",{});var ev=a(ra);df=l(ev,"default"),ev.forEach(o),hf=l(Ip,", "),la=n(Ip,"CODE",{});var tv=a(la);ff=l(tv,"default-with-past"),tv.forEach(o),Ip.forEach(o),cf=h(zp),ia=n(zp,"TD",{});var ov=a(ia);pa=n(ov,"CODE",{});var sv=a(pa);uf=l(sv,"AutoModel"),sv.forEach(o),ov.forEach(o),zp.forEach(o),mf=h(K),no=n(K,"TR",{});var Mp=a(no);da=n(Mp,"TD",{});var nv=a(da);ha=n(nv,"CODE",{});var av=a(ha);gf=l(av,"masked-lm"),av.forEach(o),nv.forEach(o),_f=h(Mp),fa=n(Mp,"TD",{});var rv=a(fa);ca=n(rv,"CODE",{});var lv=a(ca);vf=l(lv,"AutoModelForMaskedLM"),lv.forEach(o),rv.forEach(o),Mp.forEach(o),wf=h(K),ao=n(K,"TR",{});var Bp=a(ao);ua=n(Bp,"TD",{});var iv=a(ua);ma=n(iv,"CODE",{});var pv=a(ma);Ef=l(pv,"question-answering"),pv.forEach(o),iv.forEach(o),$f=h(Bp),ga=n(Bp,"TD",{});var dv=a(ga);_a=n(dv,"CODE",{});var hv=a(_a);bf=l(hv,"AutoModelForQuestionAnswering"),hv.forEach(o),dv.forEach(o),Bp.forEach(o),yf=h(K),ro=n(K,"TR",{});var Lp=a(ro);lo=n(Lp,"TD",{});var Xp=a(lo);va=n(Xp,"CODE",{});var fv=a(va);xf=l(fv,"seq2seq-lm"),fv.forEach(o),kf=l(Xp,", "),wa=n(Xp,"CODE",{});var cv=a(wa);Tf=l(cv,"seq2seq-lm-with-past"),cv.forEach(o),Xp.forEach(o),Nf=h(Lp),Ea=n(Lp,"TD",{});var uv=a(Ea);$a=n(uv,"CODE",{});var mv=a($a);Of=l(mv,"AutoModelForSeq2SeqLM"),mv.forEach(o),uv.forEach(o),Lp.forEach(o),jf=h(K),io=n(K,"TR",{});var Fp=a(io);ba=n(Fp,"TD",{});var gv=a(ba);ya=n(gv,"CODE",{});var _v=a(ya);Af=l(_v,"sequence-classification"),_v.forEach(o),gv.forEach(o),Cf=h(Fp),xa=n(Fp,"TD",{});var vv=a(xa);ka=n(vv,"CODE",{});var wv=a(ka);Df=l(wv,"AutoModelForSequenceClassification"),wv.forEach(o),vv.forEach(o),Fp.forEach(o),Pf=h(K),po=n(K,"TR",{});var Rp=a(po);Ta=n(Rp,"TD",{});var Ev=a(Ta);Na=n(Ev,"CODE",{});var $v=a(Na);Sf=l($v,"token-classification"),$v.forEach(o),Ev.forEach(o),qf=h(Rp),Oa=n(Rp,"TD",{});var bv=a(Oa);ja=n(bv,"CODE",{});var yv=a(ja);zf=l(yv,"AutoModelForTokenClassification"),yv.forEach(o),bv.forEach(o),Rp.forEach(o),K.forEach(o),Dp.forEach(o),Bl=h(e),et=n(e,"P",{});var Hp=a(et);If=l(Hp,`For each configuration, you can find the list of supported features via the `),Aa=n(Hp,"CODE",{});var xv=a(Aa);Mf=l(xv,"FeaturesManager"),xv.forEach(o),Bf=l(Hp,". For example, for DistilBERT we have:"),Hp.forEach(o),Ll=h(e),m(ho.$$.fragment,e),Xl=h(e),le=n(e,"P",{});var sn=a(le);Lf=l(sn,"You can then pass one of these features to the "),Ca=n(sn,"CODE",{});var kv=a(Ca);Xf=l(kv,"--feature"),kv.forEach(o),Ff=l(sn,` argument in the `),Da=n(sn,"CODE",{});var Tv=a(Da);Rf=l(Tv,"transformers.onnx"),Tv.forEach(o),Hf=l(sn,` package. 
For example, to export a text-classification model we can pick a fine-tuned model from the Hub and run:`),sn.forEach(o),Fl=h(e),m(fo.$$.fragment,e),Rl=h(e),bs=n(e,"P",{});var Nv=a(bs);Wf=l(Nv,"which will display the following logs:"),Nv.forEach(o),Hl=h(e),m(co.$$.fragment,e),Wl=h(e),Q=n(e,"P",{});var Pt=a(Q);Kf=l(Pt,`Notice that in this case, the output names from the fine-tuned model are `),Pa=n(Pt,"CODE",{});var Ov=a(Pa);Uf=l(Ov,"logits"),Ov.forEach(o),Vf=l(Pt," instead of the "),Sa=n(Pt,"CODE",{});var jv=a(Sa);Gf=l(jv,"last_hidden_state"),jv.forEach(o),Jf=l(Pt,` we saw with the `),qa=n(Pt,"CODE",{});var Av=a(qa);Yf=l(Av,"distilbert-base-uncased"),Av.forEach(o),Qf=l(Pt,` checkpoint earlier. This is expected since the fine-tuned model has a sequence classification head.`),Pt.forEach(o),Kl=h(e),m(tt.$$.fragment,e),Ul=h(e),xe=n(e,"H3",{class:!0});var Wp=a(xe);ot=n(Wp,"A",{id:!0,class:!0,href:!0});var Cv=a(ot);za=n(Cv,"SPAN",{});var Dv=a(za);m(uo.$$.fragment,Dv),Dv.forEach(o),Cv.forEach(o),Zf=h(Wp),Ia=n(Wp,"SPAN",{});var Pv=a(Ia);ec=l(Pv,"Exporting a model for an unsupported architecture"),Pv.forEach(o),Wp.forEach(o),Vl=h(e),ys=n(e,"P",{});var Sv=a(ys);tc=l(Sv,`If you wish to export a model whose architecture is not natively supported by the library, there are three main steps to follow:`),Sv.forEach(o),Gl=h(e),ie=n(e,"OL",{});var nn=a(ie);Ma=n(nn,"LI",{});var qv=a(Ma);oc=l(qv,"Implement a custom ONNX configuration."),qv.forEach(o),sc=h(nn),Ba=n(nn,"LI",{});var zv=a(Ba);nc=l(zv,"Export the model to ONNX."),zv.forEach(o),ac=h(nn),La=n(nn,"LI",{});var Iv=a(La);rc=l(Iv,"Validate the outputs of the PyTorch and exported models."),Iv.forEach(o),nn.forEach(o),Jl=h(e),xs=n(e,"P",{});var Mv=a(xs);lc=l(Mv,`In this section, we\u2019ll look at how DistilBERT was implemented to show what\u2019s involved with each step.`),Mv.forEach(o),Yl=h(e),ke=n(e,"H4",{class:!0});var Kp=a(ke);st=n(Kp,"A",{id:!0,class:!0,href:!0});var Bv=a(st);Xa=n(Bv,"SPAN",{});var Lv=a(Xa);m(mo.$$.fragment,Lv),Lv.forEach(o),Bv.forEach(o),ic=h(Kp),Fa=n(Kp,"SPAN",{});var Xv=a(Fa);pc=l(Xv,"Implementing a custom ONNX configuration"),Xv.forEach(o),Kp.forEach(o),Ql=h(e),ks=n(e,"P",{});var Fv=a(ks);dc=l(Fv,`Let\u2019s start with the ONNX configuration object. We provide three abstract classes that you should inherit from, depending on the type of model architecture you wish to export:`),Fv.forEach(o),Zl=h(e),pe=n(e,"UL",{});var an=a(pe);Ts=n(an,"LI",{});var Fg=a(Ts);hc=l(Fg,"Encoder-based models inherit from "),Ns=n(Fg,"A",{href:!0});var Rv=a(Ns);fc=l(Rv,"OnnxConfig"),Rv.forEach(o),Fg.forEach(o),cc=h(an),Os=n(an,"LI",{});var Rg=a(Os);uc=l(Rg,"Decoder-based models inherit from "),js=n(Rg,"A",{href:!0});var Hv=a(js);mc=l(Hv,"OnnxConfigWithPast"),Hv.forEach(o),Rg.forEach(o),gc=h(an),As=n(an,"LI",{});var Hg=a(As);_c=l(Hg,"Encoder-decoder models inherit from "),Cs=n(Hg,"A",{href:!0});var Wv=a(Cs);vc=l(Wv,"OnnxSeq2SeqConfigWithPast"),Wv.forEach(o),Hg.forEach(o),an.forEach(o),ei=h(e),m(nt.$$.fragment,e),ti=h(e),at=n(e,"P",{});var Up=a(at);wc=l(Up,`Since DistilBERT is an encoder-based model, its configuration inherits from `),Ra=n(Up,"CODE",{});var Kv=a(Ra);Ec=l(Kv,"OnnxConfig"),Kv.forEach(o),$c=l(Up,":"),Up.forEach(o),oi=h(e),m(go.$$.fragment,e),si=h(e),H=n(e,"P",{});var _e=a(H);bc=l(_e,"Every configuration object must implement the "),Ha=n(_e,"CODE",{});var Uv=a(Ha);yc=l(Uv,"inputs"),Uv.forEach(o),xc=l(_e,` property and return a mapping, where each key corresponds to an expected input, and each value indicates the axis of that input. 
For DistilBERT, we can see that two inputs are required: `),Wa=n(_e,"CODE",{});var Vv=a(Wa);kc=l(Vv,"input_ids"),Vv.forEach(o),Tc=l(_e," and "),Ka=n(_e,"CODE",{});var Gv=a(Ka);Nc=l(Gv,"attention_mask"),Gv.forEach(o),Oc=l(_e,`. These inputs have the same shape of `),Ua=n(_e,"CODE",{});var Jv=a(Ua);jc=l(Jv,"(batch_size, sequence_length)"),Jv.forEach(o),Ac=l(_e,` which is why we see the same axes used in the configuration.`),_e.forEach(o),ni=h(e),m(rt.$$.fragment,e),ai=h(e),Ds=n(e,"P",{});var Yv=a(Ds);Cc=l(Yv,`Once you have implemented an ONNX configuration, you can instantiate it by providing the base model\u2019s configuration as follows:`),Yv.forEach(o),ri=h(e),m(_o.$$.fragment,e),li=h(e),Ps=n(e,"P",{});var Qv=a(Ps);Dc=l(Qv,`The resulting object has several useful properties. For example you can view the ONNX operator set that will be used during the export:`),Qv.forEach(o),ii=h(e),m(vo.$$.fragment,e),pi=h(e),Ss=n(e,"P",{});var Zv=a(Ss);Pc=l(Zv,"You can also view the outputs associated with the model as follows:"),Zv.forEach(o),di=h(e),m(wo.$$.fragment,e),hi=h(e),W=n(e,"P",{});var ve=a(W);Sc=l(ve,`Notice that the outputs property follows the same structure as the inputs; it returns an `),Va=n(ve,"CODE",{});var ew=a(Va);qc=l(ew,"OrderedDict"),ew.forEach(o),zc=l(ve,` of named outputs and their shapes. The output structure is linked to the choice of feature that the configuration is initialised with. By default, the ONNX configuration is initialized with the `),Ga=n(ve,"CODE",{});var tw=a(Ga);Ic=l(tw,"default"),tw.forEach(o),Mc=l(ve,` feature that corresponds to exporting a model loaded with the `),Ja=n(ve,"CODE",{});var ow=a(Ja);Bc=l(ow,"AutoModel"),ow.forEach(o),Lc=l(ve,` class. If you want to export a different model topology, just provide a different feature to the `),Ya=n(ve,"CODE",{});var sw=a(Ya);Xc=l(sw,"task"),sw.forEach(o),Fc=l(ve,` argument when you initialize the ONNX configuration. For example, if we wished to export DistilBERT with a sequence classification head, we could use:`),ve.forEach(o),fi=h(e),m(Eo.$$.fragment,e),ci=h(e),m(lt.$$.fragment,e),ui=h(e),Te=n(e,"H4",{class:!0});var Vp=a(Te);it=n(Vp,"A",{id:!0,class:!0,href:!0});var nw=a(it);Qa=n(nw,"SPAN",{});var aw=a(Qa);m($o.$$.fragment,aw),aw.forEach(o),nw.forEach(o),Rc=h(Vp),Za=n(Vp,"SPAN",{});var rw=a(Za);Hc=l(rw,"Exporting the model"),rw.forEach(o),Vp.forEach(o),mi=h(e),de=n(e,"P",{});var rn=a(de);Wc=l(rn,`Once you have implemented the ONNX configuration, the next step is to export the model. Here we can use the `),er=n(rn,"CODE",{});var lw=a(er);Kc=l(lw,"export()"),lw.forEach(o),Uc=l(rn,` function provided by the `),tr=n(rn,"CODE",{});var iw=a(tr);Vc=l(iw,"transformers.onnx"),iw.forEach(o),Gc=l(rn,` package. This function expects the ONNX configuration, along with the base model and tokenizer, and the path to save the exported file:`),rn.forEach(o),gi=h(e),m(bo.$$.fragment,e),_i=h(e),M=n(e,"P",{});var te=a(M);Jc=l(te,"The "),or=n(te,"CODE",{});var pw=a(or);Yc=l(pw,"onnx_inputs"),pw.forEach(o),Qc=l(te," and "),sr=n(te,"CODE",{});var dw=a(sr);Zc=l(dw,"onnx_outputs"),dw.forEach(o),eu=l(te," returned by the "),nr=n(te,"CODE",{});var hw=a(nr);tu=l(hw,"export()"),hw.forEach(o),ou=l(te,` function are lists of the keys defined in the `),ar=n(te,"CODE",{});var fw=a(ar);su=l(fw,"inputs"),fw.forEach(o),nu=l(te," and "),rr=n(te,"CODE",{});var cw=a(rr);au=l(cw,"outputs"),cw.forEach(o),ru=l(te,` properties of the configuration. 
Once the model is exported, you can test that the model is well formed as follows:`),te.forEach(o),vi=h(e),m(yo.$$.fragment,e),wi=h(e),m(pt.$$.fragment,e),Ei=h(e),Ne=n(e,"H4",{class:!0});var Gp=a(Ne);dt=n(Gp,"A",{id:!0,class:!0,href:!0});var uw=a(dt);lr=n(uw,"SPAN",{});var mw=a(lr);m(xo.$$.fragment,mw),mw.forEach(o),uw.forEach(o),lu=h(Gp),ir=n(Gp,"SPAN",{});var gw=a(ir);iu=l(gw,"Validating the model outputs"),gw.forEach(o),Gp.forEach(o),$i=h(e),he=n(e,"P",{});var ln=a(he);pu=l(ln,`The final step is to validate that the outputs from the base and exported model agree within some absolute tolerance. Here we can use the `),pr=n(ln,"CODE",{});var _w=a(pr);du=l(_w,"validate_model_outputs()"),_w.forEach(o),hu=l(ln," function provided by the "),dr=n(ln,"CODE",{});var vw=a(dr);fu=l(vw,"transformers.onnx"),vw.forEach(o),cu=l(ln,` package as follows:`),ln.forEach(o),bi=h(e),m(ko.$$.fragment,e),yi=h(e),ht=n(e,"P",{});var Jp=a(ht);uu=l(Jp,"This function uses the "),hr=n(Jp,"CODE",{});var ww=a(hr);mu=l(ww,"OnnxConfig.generate_dummy_inputs()"),ww.forEach(o),gu=l(Jp,` method to generate inputs for the base and exported model, and the absolute tolerance can be defined in the configuration. We generally find numerical agreement in the 1e-6 to 1e-4 range, although anything smaller than 1e-3 is likely to be OK.`),Jp.forEach(o),xi=h(e),Oe=n(e,"H3",{class:!0});var Yp=a(Oe);ft=n(Yp,"A",{id:!0,class:!0,href:!0});var Ew=a(ft);fr=n(Ew,"SPAN",{});var $w=a(fr);m(To.$$.fragment,$w),$w.forEach(o),Ew.forEach(o),_u=h(Yp),cr=n(Yp,"SPAN",{});var bw=a(cr);vu=l(bw,"Contributing a new configuration to \u{1F917} Transformers"),bw.forEach(o),Yp.forEach(o),ki=h(e),qs=n(e,"P",{});var yw=a(qs);wu=l(yw,`We are looking to expand the set of ready-made configurations and welcome contributions from the community! If you would like to contribute your addition to the library, you will need to:`),yw.forEach(o),Ti=h(e),fe=n(e,"UL",{});var pn=a(fe);No=n(pn,"LI",{});var Qp=a(No);Eu=l(Qp,"Implement the ONNX configuration in the corresponding "),ur=n(Qp,"CODE",{});var xw=a(ur);$u=l(xw,"configuration_<model_name>.py"),xw.forEach(o),bu=l(Qp,` file`),Qp.forEach(o),yu=h(pn),zs=n(pn,"LI",{});var Wg=a(zs);xu=l(Wg,"Include the model architecture and corresponding features in "),mr=n(Wg,"CODE",{});var kw=a(mr);ku=l(kw,"FeatureManager"),kw.forEach(o),Wg.forEach(o),Tu=h(pn),Is=n(pn,"LI",{});var Kg=a(Is);Nu=l(Kg,"Add your model architecture to the tests in "),gr=n(Kg,"CODE",{});var Tw=a(gr);Ou=l(Tw,"test_onnx_v2.py"),Tw.forEach(o),Kg.forEach(o),pn.forEach(o),Ni=h(e),ct=n(e,"P",{});var Zp=a(ct);ju=l(Zp,"Check out how the configuration for "),Oo=n(Zp,"A",{href:!0,rel:!0});var Nw=a(Oo);Au=l(Nw,`IBERT was contributed`),Nw.forEach(o),Cu=l(Zp,` to get an idea of what\u2019s involved.`),Zp.forEach(o),Oi=h(e),je=n(e,"H2",{class:!0});var ed=a(je);ut=n(ed,"A",{id:!0,class:!0,href:!0});var Ow=a(ut);_r=n(Ow,"SPAN",{});var jw=a(_r);m(jo.$$.fragment,jw),jw.forEach(o),Ow.forEach(o),Du=h(ed),vr=n(ed,"SPAN",{});var Aw=a(vr);Pu=l(Aw,"TorchScript"),Aw.forEach(o),ed.forEach(o),ji=h(e),m(mt.$$.fragment,e),Ai=h(e),gt=n(e,"P",{});var td=a(gt);Su=l(td,`According to Pytorch\u2019s documentation: \u201CTorchScript is a way to create serializable and optimizable models from PyTorch code\u201D. 
Pytorch\u2019s two modules `),Ao=n(td,"A",{href:!0,rel:!0});var Cw=a(Ao);qu=l(Cw,"JIT and TRACE"),Cw.forEach(o),zu=l(td,` allow the developer to export their model to be re-used in other programs, such as efficiency-oriented C++ programs.`),td.forEach(o),Ci=h(e),Ms=n(e,"P",{});var Dw=a(Ms);Iu=l(Dw,`We have provided an interface that allows the export of \u{1F917} Transformers models to TorchScript so that they can be reused in a different environment than a Pytorch-based python program. Here we explain how to export and use our models using TorchScript.`),Dw.forEach(o),Di=h(e),Bs=n(e,"P",{});var Pw=a(Bs);Mu=l(Pw,"Exporting a model requires two things:"),Pw.forEach(o),Pi=h(e),_t=n(e,"UL",{});var od=a(_t);wr=n(od,"LI",{});var Sw=a(wr);Bu=l(Sw,"a forward pass with dummy inputs."),Sw.forEach(o),Lu=h(od),Co=n(od,"LI",{});var sd=a(Co);Xu=l(sd,"model instantiation with the "),Er=n(sd,"CODE",{});var qw=a(Er);Fu=l(qw,"torchscript"),qw.forEach(o),Ru=l(sd," flag."),sd.forEach(o),od.forEach(o),Si=h(e),Ls=n(e,"P",{});var zw=a(Ls);Hu=l(zw,"These necessities imply several things developers should be careful about. These are detailed below."),zw.forEach(o),qi=h(e),Ae=n(e,"H3",{class:!0});var nd=a(Ae);vt=n(nd,"A",{id:!0,class:!0,href:!0});var Iw=a(vt);$r=n(Iw,"SPAN",{});var Mw=a($r);m(Do.$$.fragment,Mw),Mw.forEach(o),Iw.forEach(o),Wu=h(nd),br=n(nd,"SPAN",{});var Bw=a(br);Ku=l(Bw,"Implications"),Bw.forEach(o),nd.forEach(o),zi=h(e),Ce=n(e,"H3",{class:!0});var ad=a(Ce);wt=n(ad,"A",{id:!0,class:!0,href:!0});var Lw=a(wt);yr=n(Lw,"SPAN",{});var Xw=a(yr);m(Po.$$.fragment,Xw),Xw.forEach(o),Lw.forEach(o),Uu=h(ad),xr=n(ad,"SPAN",{});var Fw=a(xr);Vu=l(Fw,"TorchScript flag and tied weights"),Fw.forEach(o),ad.forEach(o),Ii=h(e),ce=n(e,"P",{});var dn=a(ce);Gu=l(dn,`This flag is necessary because most of the language models in this repository have tied weights between their `),kr=n(dn,"CODE",{});var Rw=a(kr);Ju=l(Rw,"Embedding"),Rw.forEach(o),Yu=l(dn," layer and their "),Tr=n(dn,"CODE",{});var Hw=a(Tr);Qu=l(Hw,"Decoding"),Hw.forEach(o),Zu=l(dn,` layer. TorchScript does not allow the export of models that have tied weights, therefore it is necessary to untie and clone the weights beforehand.`),dn.forEach(o),Mi=h(e),Z=n(e,"P",{});var St=a(Z);em=l(St,"This implies that models instantiated with the "),Nr=n(St,"CODE",{});var Ww=a(Nr);tm=l(Ww,"torchscript"),Ww.forEach(o),om=l(St," flag have their "),Or=n(St,"CODE",{});var Kw=a(Or);sm=l(Kw,"Embedding"),Kw.forEach(o),nm=l(St," layer and "),jr=n(St,"CODE",{});var Uw=a(jr);am=l(Uw,"Decoding"),Uw.forEach(o),rm=l(St,` layer separate, which means that they should not be trained down the line. Training would de-synchronize the two layers, leading to unexpected results.`),St.forEach(o),Bi=h(e),Et=n(e,"P",{});var rd=a(Et);lm=l(rd,`This is not the case for models that do not have a Language Model head, as those do not have tied weights. These models can be safely exported without the `),Ar=n(rd,"CODE",{});var Vw=a(Ar);im=l(Vw,"torchscript"),Vw.forEach(o),pm=l(rd," flag."),rd.forEach(o),Li=h(e),De=n(e,"H3",{class:!0});var ld=a(De);$t=n(ld,"A",{id:!0,class:!0,href:!0});var Gw=a($t);Cr=n(Gw,"SPAN",{});var Jw=a(Cr);m(So.$$.fragment,Jw),Jw.forEach(o),Gw.forEach(o),dm=h(ld),Dr=n(ld,"SPAN",{});var Yw=a(Dr);hm=l(Yw,"Dummy inputs and standard lengths"),Yw.forEach(o),ld.forEach(o),Xi=h(e),Xs=n(e,"P",{});var Qw=a(Xs);fm=l(Qw,`The dummy inputs are used to do a model forward pass. 
While the inputs\u2019 values are propagating through the layers, Pytorch keeps track of the different operations executed on each tensor. These recorded operations are then used to create the \u201Ctrace\u201D of the model.`),Qw.forEach(o),Fi=h(e),Fs=n(e,"P",{});var Zw=a(Fs);cm=l(Zw,`The trace is created relatively to the inputs\u2019 dimensions. It is therefore constrained by the dimensions of the dummy input, and will not work for any other sequence length or batch size. When trying with a different size, an error such as:`),Zw.forEach(o),Ri=h(e),Rs=n(e,"P",{});var e1=a(Rs);Pr=n(e1,"CODE",{});var t1=a(Pr);um=l(t1,"The expanded size of the tensor (3) must match the existing size (7) at non-singleton dimension 2"),t1.forEach(o),e1.forEach(o),Hi=h(e),Hs=n(e,"P",{});var o1=a(Hs);mm=l(o1,`will be raised. It is therefore recommended to trace the model with a dummy input size at least as large as the largest input that will be fed to the model during inference. Padding can be performed to fill the missing values. As the model will have been traced with a large input size however, the dimensions of the different matrix will be large as well, resulting in more calculations.`),o1.forEach(o),Wi=h(e),Ws=n(e,"P",{});var s1=a(Ws);gm=l(s1,`It is recommended to be careful of the total number of operations done on each input and to follow performance closely when exporting varying sequence-length models.`),s1.forEach(o),Ki=h(e),Pe=n(e,"H3",{class:!0});var id=a(Pe);bt=n(id,"A",{id:!0,class:!0,href:!0});var n1=a(bt);Sr=n(n1,"SPAN",{});var a1=a(Sr);m(qo.$$.fragment,a1),a1.forEach(o),n1.forEach(o),_m=h(id),qr=n(id,"SPAN",{});var r1=a(qr);vm=l(r1,"Using TorchScript in Python"),r1.forEach(o),id.forEach(o),Ui=h(e),Ks=n(e,"P",{});var l1=a(Ks);wm=l(l1,"Below is an example, showing how to save, load models as well as how to use the trace for inference."),l1.forEach(o),Vi=h(e),Se=n(e,"H4",{class:!0});var pd=a(Se);yt=n(pd,"A",{id:!0,class:!0,href:!0});var i1=a(yt);zr=n(i1,"SPAN",{});var p1=a(zr);m(zo.$$.fragment,p1),p1.forEach(o),i1.forEach(o),Em=h(pd),Ir=n(pd,"SPAN",{});var d1=a(Ir);$m=l(d1,"Saving a model"),d1.forEach(o),pd.forEach(o),Gi=h(e),G=n(e,"P",{});var Re=a(G);bm=l(Re,"This snippet shows how to use TorchScript to export a "),Mr=n(Re,"CODE",{});var h1=a(Mr);ym=l(h1,"BertModel"),h1.forEach(o),xm=l(Re,". Here the "),Br=n(Re,"CODE",{});var f1=a(Br);km=l(f1,"BertModel"),f1.forEach(o),Tm=l(Re,` is instantiated according to a `),Lr=n(Re,"CODE",{});var c1=a(Lr);Nm=l(c1,"BertConfig"),c1.forEach(o),Om=l(Re," class and then saved to disk under the filename "),Xr=n(Re,"CODE",{});var u1=a(Xr);jm=l(u1,"traced_bert.pt"),u1.forEach(o),Re.forEach(o),Ji=h(e),m(Io.$$.fragment,e),Yi=h(e),qe=n(e,"H4",{class:!0});var dd=a(qe);xt=n(dd,"A",{id:!0,class:!0,href:!0});var m1=a(xt);Fr=n(m1,"SPAN",{});var g1=a(Fr);m(Mo.$$.fragment,g1),g1.forEach(o),m1.forEach(o),Am=h(dd),Rr=n(dd,"SPAN",{});var _1=a(Rr);Cm=l(_1,"Loading a model"),_1.forEach(o),dd.forEach(o),Qi=h(e),ee=n(e,"P",{});var qt=a(ee);Dm=l(qt,"This snippet shows how to load the "),Hr=n(qt,"CODE",{});var v1=a(Hr);Pm=l(v1,"BertModel"),v1.forEach(o),Sm=l(qt," that was previously saved to disk under the name "),Wr=n(qt,"CODE",{});var w1=a(Wr);qm=l(w1,"traced_bert.pt"),w1.forEach(o),zm=l(qt,`. 
We are re-using the previously initialised `),Kr=n(qt,"CODE",{});var E1=a(Kr);Im=l(E1,"dummy_input"),E1.forEach(o),Mm=l(qt,"."),qt.forEach(o),Zi=h(e),m(Bo.$$.fragment,e),ep=h(e),ze=n(e,"H4",{class:!0});var hd=a(ze);kt=n(hd,"A",{id:!0,class:!0,href:!0});var $1=a(kt);Ur=n($1,"SPAN",{});var b1=a(Ur);m(Lo.$$.fragment,b1),b1.forEach(o),$1.forEach(o),Bm=h(hd),Vr=n(hd,"SPAN",{});var y1=a(Vr);Lm=l(y1,"Using a traced model for inference"),y1.forEach(o),hd.forEach(o),tp=h(e),Tt=n(e,"P",{});var fd=a(Tt);Xm=l(fd,"Using the traced model for inference is as simple as using its "),Gr=n(fd,"CODE",{});var x1=a(Gr);Fm=l(x1,"__call__"),x1.forEach(o),Rm=l(fd," dunder method:"),fd.forEach(o),op=h(e),m(Xo.$$.fragment,e),sp=h(e),Ie=n(e,"H3",{class:!0});var cd=a(Ie);Nt=n(cd,"A",{id:!0,class:!0,href:!0});var k1=a(Nt);Jr=n(k1,"SPAN",{});var T1=a(Jr);m(Fo.$$.fragment,T1),T1.forEach(o),k1.forEach(o),Hm=h(cd),Yr=n(cd,"SPAN",{});var N1=a(Yr);Wm=l(N1,"Deploying HuggingFace TorchScript models on AWS using the Neuron SDK"),N1.forEach(o),cd.forEach(o),np=h(e),ue=n(e,"P",{});var hn=a(ue);Km=l(hn,"AWS introduced the "),Ro=n(hn,"A",{href:!0,rel:!0});var O1=a(Ro);Um=l(O1,"Amazon EC2 Inf1"),O1.forEach(o),Vm=l(hn,` instance family for low cost, high performance machine learning inference in the cloud. The Inf1 instances are powered by the AWS Inferentia chip, a custom-built hardware accelerator, specializing in deep learning inferencing workloads. `),Ho=n(hn,"A",{href:!0,rel:!0});var j1=a(Ho);Gm=l(j1,"AWS Neuron"),j1.forEach(o),Jm=l(hn,` is the SDK for Inferentia that supports tracing and optimizing transformers models for deployment on Inf1. The Neuron SDK provides:`),hn.forEach(o),ap=h(e),me=n(e,"OL",{});var fn=a(me);Qr=n(fn,"LI",{});var A1=a(Qr);Ym=l(A1,"Easy-to-use API with one line of code change to trace and optimize a TorchScript model for inference in the cloud."),A1.forEach(o),Qm=h(fn),Us=n(fn,"LI",{});var Ug=a(Us);Zm=l(Ug,"Out of the box performance optimizations for "),Wo=n(Ug,"A",{href:!0,rel:!0});var C1=a(Wo);eg=l(C1,"improved cost-performance"),C1.forEach(o),Ug.forEach(o),tg=h(fn),Me=n(fn,"LI",{});var cn=a(Me);og=l(cn,"Support for HuggingFace transformers models built with either "),Ko=n(cn,"A",{href:!0,rel:!0});var D1=a(Ko);sg=l(D1,"PyTorch"),D1.forEach(o),ng=l(cn,` or `),Uo=n(cn,"A",{href:!0,rel:!0});var P1=a(Uo);ag=l(P1,"TensorFlow"),P1.forEach(o),rg=l(cn,"."),cn.forEach(o),fn.forEach(o),rp=h(e),Be=n(e,"H4",{class:!0});var ud=a(Be);Ot=n(ud,"A",{id:!0,class:!0,href:!0});var S1=a(Ot);Zr=n(S1,"SPAN",{});var q1=a(Zr);m(Vo.$$.fragment,q1),q1.forEach(o),S1.forEach(o),lg=h(ud),el=n(ud,"SPAN",{});var z1=a(el);ig=l(z1,"Implications"),z1.forEach(o),ud.forEach(o),lp=h(e),B=n(e,"P",{});var oe=a(B);pg=l(oe,"Transformers Models based on the "),Go=n(oe,"A",{href:!0,rel:!0});var I1=a(Go);dg=l(I1,"BERT (Bidirectional Encoder Representations from Transformers)"),I1.forEach(o),hg=l(oe,` architecture, or its variants such as `),Jo=n(oe,"A",{href:!0,rel:!0});var M1=a(Jo);fg=l(M1,"distilBERT"),M1.forEach(o),cg=l(oe,` and `),Yo=n(oe,"A",{href:!0,rel:!0});var B1=a(Yo);ug=l(B1,"roBERTa"),B1.forEach(o),mg=l(oe,` will run best on Inf1 for non-generative tasks such as Extractive Question Answering, Sequence Classification, Token Classification. Alternatively, text generation tasks can be adapted to run on Inf1, according to this `),Qo=n(oe,"A",{href:!0,rel:!0});var L1=a(Qo);gg=l(L1,"AWS Neuron MarianMT tutorial"),L1.forEach(o),_g=l(oe,`. 
More information about models that can be converted out of the box on Inferentia can be found in the `),Zo=n(oe,"A",{href:!0,rel:!0});var X1=a(Zo);vg=l(X1,"Model Architecture Fit section of the Neuron documentation"),X1.forEach(o),wg=l(oe,"."),oe.forEach(o),ip=h(e),Le=n(e,"H4",{class:!0});var md=a(Le);jt=n(md,"A",{id:!0,class:!0,href:!0});var F1=a(jt);tl=n(F1,"SPAN",{});var R1=a(tl);m(es.$$.fragment,R1),R1.forEach(o),F1.forEach(o),Eg=h(md),ol=n(md,"SPAN",{});var H1=a(ol);$g=l(H1,"Dependencies"),H1.forEach(o),md.forEach(o),pp=h(e),Vs=n(e,"P",{});var W1=a(Vs);bg=l(W1,"Using AWS Neuron to convert models requires the following dependencies and environment:"),W1.forEach(o),dp=h(e),Gs=n(e,"UL",{});var K1=a(Gs);Xe=n(K1,"LI",{});var un=a(Xe);yg=l(un,"A "),ts=n(un,"A",{href:!0,rel:!0});var U1=a(ts);xg=l(U1,"Neuron SDK environment"),U1.forEach(o),kg=l(un,`, which comes pre-configured on `),os=n(un,"A",{href:!0,rel:!0});var V1=a(os);Tg=l(V1,"AWS Deep Learning AMI"),V1.forEach(o),Ng=l(un,"."),un.forEach(o),K1.forEach(o),hp=h(e),Fe=n(e,"H4",{class:!0});var gd=a(Fe);At=n(gd,"A",{id:!0,class:!0,href:!0});var G1=a(At);sl=n(G1,"SPAN",{});var J1=a(sl);m(ss.$$.fragment,J1),J1.forEach(o),G1.forEach(o),Og=h(gd),nl=n(gd,"SPAN",{});var Y1=a(nl);jg=l(Y1,"Converting a Model for AWS Neuron"),Y1.forEach(o),gd.forEach(o),fp=h(e),ge=n(e,"P",{});var mn=a(ge);Ag=l(mn,"Using the same script as in "),ns=n(mn,"A",{href:!0,rel:!0});var Q1=a(ns);Cg=l(Q1,"Using TorchScript in Python"),Q1.forEach(o),Dg=l(mn,` to trace a \u201CBertModel\u201D, you import `),al=n(mn,"CODE",{});var Z1=a(al);Pg=l(Z1,"torch.neuron"),Z1.forEach(o),Sg=l(mn,` framework extension to access the components of the Neuron SDK through a Python API.`),mn.forEach(o),cp=h(e),m(as.$$.fragment,e),up=h(e),Js=n(e,"P",{});var eE=a(Js);qg=l(eE,"And only modify the tracing line of code"),eE.forEach(o),mp=h(e),Ys=n(e,"P",{});var tE=a(Ys);zg=l(tE,"from:"),tE.forEach(o),gp=h(e),m(rs.$$.fragment,e),_p=h(e),Qs=n(e,"P",{});var oE=a(Qs);Ig=l(oE,"to:"),oE.forEach(o),vp=h(e),m(ls.$$.fragment,e),wp=h(e),Zs=n(e,"P",{});var sE=a(Zs);Mg=l(sE,"This change enables Neuron SDK to trace the model and optimize it to run in Inf1 instances."),sE.forEach(o),Ep=h(e),Ct=n(e,"P",{});var _d=a(Ct);Bg=l(_d,`To learn more about AWS Neuron SDK features, tools, example tutorials and latest updates, please see the `),is=n(_d,"A",{href:!0,rel:!0});var nE=a(is);Lg=l(nE,"AWS NeuronSDK documentation"),nE.forEach(o),Xg=l(_d,"."),_d.forEach(o),this.h()},h(){f(c,"name","hf:doc:metadata"),f(c,"content",JSON.stringify(_E)),f(x,"id","exporting-transformers-models"),f(x,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(x,"href","#exporting-transformers-models"),f(E,"class","relative group"),f(F,"href","https://github.com/huggingface/optimum"),f(F,"rel","nofollow"),f(R,"id","onnx"),f(R,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(R,"href","#onnx"),f(U,"class","relative group"),f(ne,"href","http://onnx.ai"),f(ne,"rel","nofollow"),f(Ue,"id","exporting-a-model-to-onnx"),f(Ue,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(Ue,"href","#exporting-a-model-to-onnx"),f(be,"class","relative 
group"),f(Ht,"href","https://onnx.ai/supported-tools.html#deployModel"),f(Ht,"rel","nofollow"),f(Wt,"href","https://onnxruntime.ai/"),f(Wt,"rel","nofollow"),f(Vt,"href","https://huggingface.co/keras-io"),f(Vt,"rel","nofollow"),f(Ye,"id","selecting-features-for-different-model-topologies"),f(Ye,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(Ye,"href","#selecting-features-for-different-model-topologies"),f(ye,"class","relative group"),f(ot,"id","exporting-a-model-for-an-unsupported-architecture"),f(ot,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(ot,"href","#exporting-a-model-for-an-unsupported-architecture"),f(xe,"class","relative group"),f(st,"id","implementing-a-custom-onnx-configuration"),f(st,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(st,"href","#implementing-a-custom-onnx-configuration"),f(ke,"class","relative group"),f(Ns,"href","/docs/transformers/pr_16143/en/main_classes/onnx#transformers.onnx.OnnxConfig"),f(js,"href","/docs/transformers/pr_16143/en/main_classes/onnx#transformers.onnx.OnnxConfigWithPast"),f(Cs,"href","/docs/transformers/pr_16143/en/main_classes/onnx#transformers.onnx.OnnxSeq2SeqConfigWithPast"),f(it,"id","exporting-the-model"),f(it,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(it,"href","#exporting-the-model"),f(Te,"class","relative group"),f(dt,"id","validating-the-model-outputs"),f(dt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(dt,"href","#validating-the-model-outputs"),f(Ne,"class","relative group"),f(ft,"id","contributing-a-new-configuration-to-transformers"),f(ft,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(ft,"href","#contributing-a-new-configuration-to-transformers"),f(Oe,"class","relative group"),f(Oo,"href","https://github.com/huggingface/transformers/pull/14868/files"),f(Oo,"rel","nofollow"),f(ut,"id","torchscript"),f(ut,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(ut,"href","#torchscript"),f(je,"class","relative group"),f(Ao,"href","https://pytorch.org/docs/stable/jit.html"),f(Ao,"rel","nofollow"),f(vt,"id","implications"),f(vt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(vt,"href","#implications"),f(Ae,"class","relative group"),f(wt,"id","torchscript-flag-and-tied-weights"),f(wt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(wt,"href","#torchscript-flag-and-tied-weights"),f(Ce,"class","relative group"),f($t,"id","dummy-inputs-and-standard-lengths"),f($t,"class","header-link block pr-1.5 
text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f($t,"href","#dummy-inputs-and-standard-lengths"),f(De,"class","relative group"),f(bt,"id","using-torchscript-in-python"),f(bt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(bt,"href","#using-torchscript-in-python"),f(Pe,"class","relative group"),f(yt,"id","saving-a-model"),f(yt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(yt,"href","#saving-a-model"),f(Se,"class","relative group"),f(xt,"id","loading-a-model"),f(xt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(xt,"href","#loading-a-model"),f(qe,"class","relative group"),f(kt,"id","using-a-traced-model-for-inference"),f(kt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(kt,"href","#using-a-traced-model-for-inference"),f(ze,"class","relative group"),f(Nt,"id","deploying-huggingface-torchscript-models-on-aws-using-the-neuron-sdk"),f(Nt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(Nt,"href","#deploying-huggingface-torchscript-models-on-aws-using-the-neuron-sdk"),f(Ie,"class","relative group"),f(Ro,"href","https://aws.amazon.com/ec2/instance-types/inf1/"),f(Ro,"rel","nofollow"),f(Ho,"href","https://awsdocs-neuron.readthedocs-hosted.com/en/latest/#"),f(Ho,"rel","nofollow"),f(Wo,"href","https://awsdocs-neuron.readthedocs-hosted.com/en/latest/neuron-guide/benchmark/%3E"),f(Wo,"rel","nofollow"),f(Ko,"href","https://awsdocs-neuron.readthedocs-hosted.com/en/latest/src/examples/pytorch/bert_tutorial/tutorial_pretrained_bert.html"),f(Ko,"rel","nofollow"),f(Uo,"href","https://awsdocs-neuron.readthedocs-hosted.com/en/latest/src/examples/tensorflow/huggingface_bert/huggingface_bert.html"),f(Uo,"rel","nofollow"),f(Ot,"id","implications"),f(Ot,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(Ot,"href","#implications"),f(Be,"class","relative group"),f(Go,"href","https://huggingface.co/docs/transformers/master/model_doc/bert"),f(Go,"rel","nofollow"),f(Jo,"href","https://huggingface.co/docs/transformers/master/model_doc/distilbert"),f(Jo,"rel","nofollow"),f(Yo,"href","https://huggingface.co/docs/transformers/master/model_doc/roberta"),f(Yo,"rel","nofollow"),f(Qo,"href","https://awsdocs-neuron.readthedocs-hosted.com/en/latest/src/examples/pytorch/transformers-marianmt.html"),f(Qo,"rel","nofollow"),f(Zo,"href","https://awsdocs-neuron.readthedocs-hosted.com/en/latest/neuron-guide/models/models-inferentia.html#models-inferentia"),f(Zo,"rel","nofollow"),f(jt,"id","dependencies"),f(jt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(jt,"href","#dependencies"),f(Le,"class","relative 
group"),f(ts,"href","https://awsdocs-neuron.readthedocs-hosted.com/en/latest/neuron-guide/neuron-frameworks/pytorch-neuron/index.html#installation-guide"),f(ts,"rel","nofollow"),f(os,"href","https://docs.aws.amazon.com/dlami/latest/devguide/tutorial-inferentia-launching.html"),f(os,"rel","nofollow"),f(At,"id","converting-a-model-for-aws-neuron"),f(At,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(At,"href","#converting-a-model-for-aws-neuron"),f(Fe,"class","relative group"),f(ns,"href","https://huggingface.co/docs/transformers/master/en/serialization#using-torchscript-in-python"),f(ns,"rel","nofollow"),f(is,"href","https://awsdocs-neuron.readthedocs-hosted.com/en/latest/index.html"),f(is,"rel","nofollow")},m(e,i){t(document.head,c),p(e,k,i),p(e,E,i),t(E,x),t(x,j),g(y,j,null),t(E,O),t(E,A),t(A,N),p(e,T,i),p(e,q,i),t(q,P),p(e,z,i),p(e,X,i),t(X,J),t(X,F),t(F,zt),t(X,we),p(e,He,i),p(e,U,i),t(U,R),t(R,Ee),g(se,Ee,null),t(U,$e),t(U,C),t(C,hs),p(e,It,i),p(e,V,i),t(V,fs),t(V,ne),t(ne,cs),t(V,us),t(V,_n),t(_n,vd),t(V,wd),p(e,dl,i),p(e,ms,i),t(ms,Ed),p(e,hl,i),p(e,We,i),t(We,$d),t(We,vn),t(vn,bd),t(We,yd),p(e,fl,i),p(e,gs,i),t(gs,xd),p(e,cl,i),p(e,$,i),t($,wn),t(wn,kd),t($,Td),t($,En),t(En,Nd),t($,Od),t($,$n),t($n,jd),t($,Ad),t($,bn),t(bn,Cd),t($,Dd),t($,yn),t(yn,Pd),t($,Sd),t($,xn),t(xn,qd),t($,zd),t($,kn),t(kn,Id),t($,Md),t($,Tn),t(Tn,Bd),t($,Ld),t($,Nn),t(Nn,Xd),t($,Fd),t($,On),t(On,Rd),t($,Hd),t($,jn),t(jn,Wd),t($,Kd),t($,An),t(An,Ud),t($,Vd),t($,Cn),t(Cn,Gd),t($,Jd),t($,Dn),t(Dn,Yd),t($,Qd),t($,Pn),t(Pn,Zd),t($,eh),t($,Sn),t(Sn,th),t($,oh),t($,qn),t(qn,sh),t($,nh),t($,zn),t(zn,ah),t($,rh),t($,In),t(In,lh),t($,ih),t($,Mn),t(Mn,ph),p(e,ul,i),p(e,_s,i),t(_s,dh),p(e,ml,i),p(e,Ke,i),t(Ke,Mt),t(Mt,hh),t(Mt,Bn),t(Bn,fh),t(Mt,ch),t(Ke,uh),t(Ke,Ln),t(Ln,mh),p(e,gl,i),p(e,be,i),t(be,Ue),t(Ue,Xn),g(Bt,Xn,null),t(be,gh),t(be,Fn),t(Fn,_h),p(e,_l,i),p(e,vs,i),t(vs,vh),p(e,vl,i),g(Lt,e,i),p(e,wl,i),p(e,Ve,i),t(Ve,wh),t(Ve,Rn),t(Rn,Eh),t(Ve,$h),p(e,El,i),g(Xt,e,i),p(e,$l,i),p(e,ws,i),t(ws,bh),p(e,bl,i),g(Ft,e,i),p(e,yl,i),p(e,Es,i),t(Es,yh),p(e,xl,i),g(Rt,e,i),p(e,kl,i),p(e,ae,i),t(ae,xh),t(ae,Hn),t(Hn,kh),t(ae,Th),t(ae,Wn),t(Wn,Nh),t(ae,Oh),p(e,Tl,i),p(e,Y,i),t(Y,jh),t(Y,Kn),t(Kn,Ah),t(Y,Ch),t(Y,Ht),t(Ht,Dh),t(Y,Ph),t(Y,Wt),t(Wt,Sh),t(Y,qh),p(e,Nl,i),g(Kt,e,i),p(e,Ol,i),p(e,Ge,i),t(Ge,zh),t(Ge,Un),t(Un,Ih),t(Ge,Mh),p(e,jl,i),g(Ut,e,i),p(e,Al,i),p(e,Je,i),t(Je,Bh),t(Je,Vt),t(Vt,Lh),t(Je,Xh),p(e,Cl,i),g(Gt,e,i),p(e,Dl,i),p(e,$s,i),t($s,Fh),p(e,Pl,i),g(Jt,e,i),p(e,Sl,i),p(e,re,i),t(re,Rh),t(re,Vn),t(Vn,Hh),t(re,Wh),t(re,Gn),t(Gn,Kh),t(re,Uh),p(e,ql,i),g(Yt,e,i),p(e,zl,i),p(e,ye,i),t(ye,Ye),t(Ye,Jn),g(Qt,Jn,null),t(ye,Vh),t(ye,Yn),t(Yn,Gh),p(e,Il,i),p(e,Qe,i),t(Qe,Jh),t(Qe,Qn),t(Qn,Yh),t(Qe,Qh),p(e,Ml,i),p(e,Ze,i),t(Ze,Zn),t(Zn,Zt),t(Zt,ea),t(ea,Zh),t(Zt,ef),t(Zt,ta),t(ta,tf),t(Ze,of),t(Ze,I),t(I,eo),t(eo,to),t(to,oa),t(oa,sf),t(to,nf),t(to,sa),t(sa,af),t(eo,rf),t(eo,na),t(na,aa),t(aa,lf),t(I,pf),t(I,oo),t(oo,so),t(so,ra),t(ra,df),t(so,hf),t(so,la),t(la,ff),t(oo,cf),t(oo,ia),t(ia,pa),t(pa,uf),t(I,mf),t(I,no),t(no,da),t(da,ha),t(ha,gf),t(no,_f),t(no,fa),t(fa,ca),t(ca,vf),t(I,wf),t(I,ao),t(ao,ua),t(ua,ma),t(ma,Ef),t(ao,$f),t(ao,ga),t(ga,_a),t(_a,bf),t(I,yf),t(I,ro),t(ro,lo),t(lo,va),t(va,xf),t(lo,kf),t(lo,wa),t(wa,Tf),t(ro,Nf),t(ro,Ea),t(Ea,$a),t($a,Of),t(I,jf),t(I,io),t(io,ba),t(ba,ya),t(ya,Af),t(io,Cf),t(io,xa),t(xa,ka),t(ka,Df),t(I,Pf),t(I,po),t(po,Ta),t(Ta,Na),t(Na,Sf),t(po,qf),t(po,O
a),t(Oa,ja),t(ja,zf),p(e,Bl,i),p(e,et,i),t(et,If),t(et,Aa),t(Aa,Mf),t(et,Bf),p(e,Ll,i),g(ho,e,i),p(e,Xl,i),p(e,le,i),t(le,Lf),t(le,Ca),t(Ca,Xf),t(le,Ff),t(le,Da),t(Da,Rf),t(le,Hf),p(e,Fl,i),g(fo,e,i),p(e,Rl,i),p(e,bs,i),t(bs,Wf),p(e,Hl,i),g(co,e,i),p(e,Wl,i),p(e,Q,i),t(Q,Kf),t(Q,Pa),t(Pa,Uf),t(Q,Vf),t(Q,Sa),t(Sa,Gf),t(Q,Jf),t(Q,qa),t(qa,Yf),t(Q,Qf),p(e,Kl,i),g(tt,e,i),p(e,Ul,i),p(e,xe,i),t(xe,ot),t(ot,za),g(uo,za,null),t(xe,Zf),t(xe,Ia),t(Ia,ec),p(e,Vl,i),p(e,ys,i),t(ys,tc),p(e,Gl,i),p(e,ie,i),t(ie,Ma),t(Ma,oc),t(ie,sc),t(ie,Ba),t(Ba,nc),t(ie,ac),t(ie,La),t(La,rc),p(e,Jl,i),p(e,xs,i),t(xs,lc),p(e,Yl,i),p(e,ke,i),t(ke,st),t(st,Xa),g(mo,Xa,null),t(ke,ic),t(ke,Fa),t(Fa,pc),p(e,Ql,i),p(e,ks,i),t(ks,dc),p(e,Zl,i),p(e,pe,i),t(pe,Ts),t(Ts,hc),t(Ts,Ns),t(Ns,fc),t(pe,cc),t(pe,Os),t(Os,uc),t(Os,js),t(js,mc),t(pe,gc),t(pe,As),t(As,_c),t(As,Cs),t(Cs,vc),p(e,ei,i),g(nt,e,i),p(e,ti,i),p(e,at,i),t(at,wc),t(at,Ra),t(Ra,Ec),t(at,$c),p(e,oi,i),g(go,e,i),p(e,si,i),p(e,H,i),t(H,bc),t(H,Ha),t(Ha,yc),t(H,xc),t(H,Wa),t(Wa,kc),t(H,Tc),t(H,Ka),t(Ka,Nc),t(H,Oc),t(H,Ua),t(Ua,jc),t(H,Ac),p(e,ni,i),g(rt,e,i),p(e,ai,i),p(e,Ds,i),t(Ds,Cc),p(e,ri,i),g(_o,e,i),p(e,li,i),p(e,Ps,i),t(Ps,Dc),p(e,ii,i),g(vo,e,i),p(e,pi,i),p(e,Ss,i),t(Ss,Pc),p(e,di,i),g(wo,e,i),p(e,hi,i),p(e,W,i),t(W,Sc),t(W,Va),t(Va,qc),t(W,zc),t(W,Ga),t(Ga,Ic),t(W,Mc),t(W,Ja),t(Ja,Bc),t(W,Lc),t(W,Ya),t(Ya,Xc),t(W,Fc),p(e,fi,i),g(Eo,e,i),p(e,ci,i),g(lt,e,i),p(e,ui,i),p(e,Te,i),t(Te,it),t(it,Qa),g($o,Qa,null),t(Te,Rc),t(Te,Za),t(Za,Hc),p(e,mi,i),p(e,de,i),t(de,Wc),t(de,er),t(er,Kc),t(de,Uc),t(de,tr),t(tr,Vc),t(de,Gc),p(e,gi,i),g(bo,e,i),p(e,_i,i),p(e,M,i),t(M,Jc),t(M,or),t(or,Yc),t(M,Qc),t(M,sr),t(sr,Zc),t(M,eu),t(M,nr),t(nr,tu),t(M,ou),t(M,ar),t(ar,su),t(M,nu),t(M,rr),t(rr,au),t(M,ru),p(e,vi,i),g(yo,e,i),p(e,wi,i),g(pt,e,i),p(e,Ei,i),p(e,Ne,i),t(Ne,dt),t(dt,lr),g(xo,lr,null),t(Ne,lu),t(Ne,ir),t(ir,iu),p(e,$i,i),p(e,he,i),t(he,pu),t(he,pr),t(pr,du),t(he,hu),t(he,dr),t(dr,fu),t(he,cu),p(e,bi,i),g(ko,e,i),p(e,yi,i),p(e,ht,i),t(ht,uu),t(ht,hr),t(hr,mu),t(ht,gu),p(e,xi,i),p(e,Oe,i),t(Oe,ft),t(ft,fr),g(To,fr,null),t(Oe,_u),t(Oe,cr),t(cr,vu),p(e,ki,i),p(e,qs,i),t(qs,wu),p(e,Ti,i),p(e,fe,i),t(fe,No),t(No,Eu),t(No,ur),t(ur,$u),t(No,bu),t(fe,yu),t(fe,zs),t(zs,xu),t(zs,mr),t(mr,ku),t(fe,Tu),t(fe,Is),t(Is,Nu),t(Is,gr),t(gr,Ou),p(e,Ni,i),p(e,ct,i),t(ct,ju),t(ct,Oo),t(Oo,Au),t(ct,Cu),p(e,Oi,i),p(e,je,i),t(je,ut),t(ut,_r),g(jo,_r,null),t(je,Du),t(je,vr),t(vr,Pu),p(e,ji,i),g(mt,e,i),p(e,Ai,i),p(e,gt,i),t(gt,Su),t(gt,Ao),t(Ao,qu),t(gt,zu),p(e,Ci,i),p(e,Ms,i),t(Ms,Iu),p(e,Di,i),p(e,Bs,i),t(Bs,Mu),p(e,Pi,i),p(e,_t,i),t(_t,wr),t(wr,Bu),t(_t,Lu),t(_t,Co),t(Co,Xu),t(Co,Er),t(Er,Fu),t(Co,Ru),p(e,Si,i),p(e,Ls,i),t(Ls,Hu),p(e,qi,i),p(e,Ae,i),t(Ae,vt),t(vt,$r),g(Do,$r,null),t(Ae,Wu),t(Ae,br),t(br,Ku),p(e,zi,i),p(e,Ce,i),t(Ce,wt),t(wt,yr),g(Po,yr,null),t(Ce,Uu),t(Ce,xr),t(xr,Vu),p(e,Ii,i),p(e,ce,i),t(ce,Gu),t(ce,kr),t(kr,Ju),t(ce,Yu),t(ce,Tr),t(Tr,Qu),t(ce,Zu),p(e,Mi,i),p(e,Z,i),t(Z,em),t(Z,Nr),t(Nr,tm),t(Z,om),t(Z,Or),t(Or,sm),t(Z,nm),t(Z,jr),t(jr,am),t(Z,rm),p(e,Bi,i),p(e,Et,i),t(Et,lm),t(Et,Ar),t(Ar,im),t(Et,pm),p(e,Li,i),p(e,De,i),t(De,$t),t($t,Cr),g(So,Cr,null),t(De,dm),t(De,Dr),t(Dr,hm),p(e,Xi,i),p(e,Xs,i),t(Xs,fm),p(e,Fi,i),p(e,Fs,i),t(Fs,cm),p(e,Ri,i),p(e,Rs,i),t(Rs,Pr),t(Pr,um),p(e,Hi,i),p(e,Hs,i),t(Hs,mm),p(e,Wi,i),p(e,Ws,i),t(Ws,gm),p(e,Ki,i),p(e,Pe,i),t(Pe,bt),t(bt,Sr),g(qo,Sr,null),t(Pe,_m),t(Pe,qr),t(qr,vm),p(e,Ui,i),p(e,Ks,i),t(Ks,wm),p(e,Vi,i),p(e,Se,i),t(Se,yt),t(yt,zr),g(zo,zr,null),t(Se,Em),t(Se,Ir),t(Ir,$m),p(e,Gi,i),p(e,G,i),t(G,bm),t(G,Mr),t(Mr,ym),t(G,xm),t(G,Br),
t(Br,km),t(G,Tm),t(G,Lr),t(Lr,Nm),t(G,Om),t(G,Xr),t(Xr,jm),p(e,Ji,i),g(Io,e,i),p(e,Yi,i),p(e,qe,i),t(qe,xt),t(xt,Fr),g(Mo,Fr,null),t(qe,Am),t(qe,Rr),t(Rr,Cm),p(e,Qi,i),p(e,ee,i),t(ee,Dm),t(ee,Hr),t(Hr,Pm),t(ee,Sm),t(ee,Wr),t(Wr,qm),t(ee,zm),t(ee,Kr),t(Kr,Im),t(ee,Mm),p(e,Zi,i),g(Bo,e,i),p(e,ep,i),p(e,ze,i),t(ze,kt),t(kt,Ur),g(Lo,Ur,null),t(ze,Bm),t(ze,Vr),t(Vr,Lm),p(e,tp,i),p(e,Tt,i),t(Tt,Xm),t(Tt,Gr),t(Gr,Fm),t(Tt,Rm),p(e,op,i),g(Xo,e,i),p(e,sp,i),p(e,Ie,i),t(Ie,Nt),t(Nt,Jr),g(Fo,Jr,null),t(Ie,Hm),t(Ie,Yr),t(Yr,Wm),p(e,np,i),p(e,ue,i),t(ue,Km),t(ue,Ro),t(Ro,Um),t(ue,Vm),t(ue,Ho),t(Ho,Gm),t(ue,Jm),p(e,ap,i),p(e,me,i),t(me,Qr),t(Qr,Ym),t(me,Qm),t(me,Us),t(Us,Zm),t(Us,Wo),t(Wo,eg),t(me,tg),t(me,Me),t(Me,og),t(Me,Ko),t(Ko,sg),t(Me,ng),t(Me,Uo),t(Uo,ag),t(Me,rg),p(e,rp,i),p(e,Be,i),t(Be,Ot),t(Ot,Zr),g(Vo,Zr,null),t(Be,lg),t(Be,el),t(el,ig),p(e,lp,i),p(e,B,i),t(B,pg),t(B,Go),t(Go,dg),t(B,hg),t(B,Jo),t(Jo,fg),t(B,cg),t(B,Yo),t(Yo,ug),t(B,mg),t(B,Qo),t(Qo,gg),t(B,_g),t(B,Zo),t(Zo,vg),t(B,wg),p(e,ip,i),p(e,Le,i),t(Le,jt),t(jt,tl),g(es,tl,null),t(Le,Eg),t(Le,ol),t(ol,$g),p(e,pp,i),p(e,Vs,i),t(Vs,bg),p(e,dp,i),p(e,Gs,i),t(Gs,Xe),t(Xe,yg),t(Xe,ts),t(ts,xg),t(Xe,kg),t(Xe,os),t(os,Tg),t(Xe,Ng),p(e,hp,i),p(e,Fe,i),t(Fe,At),t(At,sl),g(ss,sl,null),t(Fe,Og),t(Fe,nl),t(nl,jg),p(e,fp,i),p(e,ge,i),t(ge,Ag),t(ge,ns),t(ns,Cg),t(ge,Dg),t(ge,al),t(al,Pg),t(ge,Sg),p(e,cp,i),g(as,e,i),p(e,up,i),p(e,Js,i),t(Js,qg),p(e,mp,i),p(e,Ys,i),t(Ys,zg),p(e,gp,i),g(rs,e,i),p(e,_p,i),p(e,Qs,i),t(Qs,Ig),p(e,vp,i),g(ls,e,i),p(e,wp,i),p(e,Zs,i),t(Zs,Mg),p(e,Ep,i),p(e,Ct,i),t(Ct,Bg),t(Ct,is),t(is,Lg),t(Ct,Xg),$p=!0},p(e,[i]){const ps={};i&2&&(ps.$$scope={dirty:i,ctx:e}),tt.$set(ps);const rl={};i&2&&(rl.$$scope={dirty:i,ctx:e}),nt.$set(rl);const ll={};i&2&&(ll.$$scope={dirty:i,ctx:e}),rt.$set(ll);const il={};i&2&&(il.$$scope={dirty:i,ctx:e}),lt.$set(il);const pl={};i&2&&(pl.$$scope={dirty:i,ctx:e}),pt.$set(pl);const 
ds={};i&2&&(ds.$$scope={dirty:i,ctx:e}),mt.$set(ds)},i(e){$p||(_(y.$$.fragment,e),_(se.$$.fragment,e),_(Bt.$$.fragment,e),_(Lt.$$.fragment,e),_(Xt.$$.fragment,e),_(Ft.$$.fragment,e),_(Rt.$$.fragment,e),_(Kt.$$.fragment,e),_(Ut.$$.fragment,e),_(Gt.$$.fragment,e),_(Jt.$$.fragment,e),_(Yt.$$.fragment,e),_(Qt.$$.fragment,e),_(ho.$$.fragment,e),_(fo.$$.fragment,e),_(co.$$.fragment,e),_(tt.$$.fragment,e),_(uo.$$.fragment,e),_(mo.$$.fragment,e),_(nt.$$.fragment,e),_(go.$$.fragment,e),_(rt.$$.fragment,e),_(_o.$$.fragment,e),_(vo.$$.fragment,e),_(wo.$$.fragment,e),_(Eo.$$.fragment,e),_(lt.$$.fragment,e),_($o.$$.fragment,e),_(bo.$$.fragment,e),_(yo.$$.fragment,e),_(pt.$$.fragment,e),_(xo.$$.fragment,e),_(ko.$$.fragment,e),_(To.$$.fragment,e),_(jo.$$.fragment,e),_(mt.$$.fragment,e),_(Do.$$.fragment,e),_(Po.$$.fragment,e),_(So.$$.fragment,e),_(qo.$$.fragment,e),_(zo.$$.fragment,e),_(Io.$$.fragment,e),_(Mo.$$.fragment,e),_(Bo.$$.fragment,e),_(Lo.$$.fragment,e),_(Xo.$$.fragment,e),_(Fo.$$.fragment,e),_(Vo.$$.fragment,e),_(es.$$.fragment,e),_(ss.$$.fragment,e),_(as.$$.fragment,e),_(rs.$$.fragment,e),_(ls.$$.fragment,e),$p=!0)},o(e){v(y.$$.fragment,e),v(se.$$.fragment,e),v(Bt.$$.fragment,e),v(Lt.$$.fragment,e),v(Xt.$$.fragment,e),v(Ft.$$.fragment,e),v(Rt.$$.fragment,e),v(Kt.$$.fragment,e),v(Ut.$$.fragment,e),v(Gt.$$.fragment,e),v(Jt.$$.fragment,e),v(Yt.$$.fragment,e),v(Qt.$$.fragment,e),v(ho.$$.fragment,e),v(fo.$$.fragment,e),v(co.$$.fragment,e),v(tt.$$.fragment,e),v(uo.$$.fragment,e),v(mo.$$.fragment,e),v(nt.$$.fragment,e),v(go.$$.fragment,e),v(rt.$$.fragment,e),v(_o.$$.fragment,e),v(vo.$$.fragment,e),v(wo.$$.fragment,e),v(Eo.$$.fragment,e),v(lt.$$.fragment,e),v($o.$$.fragment,e),v(bo.$$.fragment,e),v(yo.$$.fragment,e),v(pt.$$.fragment,e),v(xo.$$.fragment,e),v(ko.$$.fragment,e),v(To.$$.fragment,e),v(jo.$$.fragment,e),v(mt.$$.fragment,e),v(Do.$$.fragment,e),v(Po.$$.fragment,e),v(So.$$.fragment,e),v(qo.$$.fragment,e),v(zo.$$.fragment,e),v(Io.$$.fragment,e),v(Mo.$$.fragment,e),v(Bo.$$.fragment,e),v(Lo.$$.fragment,e),v(Xo.$$.fragment,e),v(Fo.$$.fragment,e),v(Vo.$$.fragment,e),v(es.$$.fragment,e),v(ss.$$.fragment,e),v(as.$$.fragment,e),v(rs.$$.fragment,e),v(ls.$$.fragment,e),$p=!1},d(e){o(c),e&&o(k),e&&o(E),w(y),e&&o(T),e&&o(q),e&&o(z),e&&o(X),e&&o(He),e&&o(U),w(se),e&&o(It),e&&o(V),e&&o(dl),e&&o(ms),e&&o(hl),e&&o(We),e&&o(fl),e&&o(gs),e&&o(cl),e&&o($),e&&o(ul),e&&o(_s),e&&o(ml),e&&o(Ke),e&&o(gl),e&&o(be),w(Bt),e&&o(_l),e&&o(vs),e&&o(vl),w(Lt,e),e&&o(wl),e&&o(Ve),e&&o(El),w(Xt,e),e&&o($l),e&&o(ws),e&&o(bl),w(Ft,e),e&&o(yl),e&&o(Es),e&&o(xl),w(Rt,e),e&&o(kl),e&&o(ae),e&&o(Tl),e&&o(Y),e&&o(Nl),w(Kt,e),e&&o(Ol),e&&o(Ge),e&&o(jl),w(Ut,e),e&&o(Al),e&&o(Je),e&&o(Cl),w(Gt,e),e&&o(Dl),e&&o($s),e&&o(Pl),w(Jt,e),e&&o(Sl),e&&o(re),e&&o(ql),w(Yt,e),e&&o(zl),e&&o(ye),w(Qt),e&&o(Il),e&&o(Qe),e&&o(Ml),e&&o(Ze),e&&o(Bl),e&&o(et),e&&o(Ll),w(ho,e),e&&o(Xl),e&&o(le),e&&o(Fl),w(fo,e),e&&o(Rl),e&&o(bs),e&&o(Hl),w(co,e),e&&o(Wl),e&&o(Q),e&&o(Kl),w(tt,e),e&&o(Ul),e&&o(xe),w(uo),e&&o(Vl),e&&o(ys),e&&o(Gl),e&&o(ie),e&&o(Jl),e&&o(xs),e&&o(Yl),e&&o(ke),w(mo),e&&o(Ql),e&&o(ks),e&&o(Zl),e&&o(pe),e&&o(ei),w(nt,e),e&&o(ti),e&&o(at),e&&o(oi),w(go,e),e&&o(si),e&&o(H),e&&o(ni),w(rt,e),e&&o(ai),e&&o(Ds),e&&o(ri),w(_o,e),e&&o(li),e&&o(Ps),e&&o(ii),w(vo,e),e&&o(pi),e&&o(Ss),e&&o(di),w(wo,e),e&&o(hi),e&&o(W),e&&o(fi),w(Eo,e),e&&o(ci),w(lt,e),e&&o(ui),e&&o(Te),w($o),e&&o(mi),e&&o(de),e&&o(gi),w(bo,e),e&&o(_i),e&&o(M),e&&o(vi),w(yo,e),e&&o(wi),w(pt,e),e&&o(Ei),e&&o(Ne),w(xo),e&&o($i),e&&o(he),e&&o(bi),w(ko,e),e&&o(yi),e&&o(ht),e&&o(xi),e&&o(Oe),w(To
),e&&o(ki),e&&o(qs),e&&o(Ti),e&&o(fe),e&&o(Ni),e&&o(ct),e&&o(Oi),e&&o(je),w(jo),e&&o(ji),w(mt,e),e&&o(Ai),e&&o(gt),e&&o(Ci),e&&o(Ms),e&&o(Di),e&&o(Bs),e&&o(Pi),e&&o(_t),e&&o(Si),e&&o(Ls),e&&o(qi),e&&o(Ae),w(Do),e&&o(zi),e&&o(Ce),w(Po),e&&o(Ii),e&&o(ce),e&&o(Mi),e&&o(Z),e&&o(Bi),e&&o(Et),e&&o(Li),e&&o(De),w(So),e&&o(Xi),e&&o(Xs),e&&o(Fi),e&&o(Fs),e&&o(Ri),e&&o(Rs),e&&o(Hi),e&&o(Hs),e&&o(Wi),e&&o(Ws),e&&o(Ki),e&&o(Pe),w(qo),e&&o(Ui),e&&o(Ks),e&&o(Vi),e&&o(Se),w(zo),e&&o(Gi),e&&o(G),e&&o(Ji),w(Io,e),e&&o(Yi),e&&o(qe),w(Mo),e&&o(Qi),e&&o(ee),e&&o(Zi),w(Bo,e),e&&o(ep),e&&o(ze),w(Lo),e&&o(tp),e&&o(Tt),e&&o(op),w(Xo,e),e&&o(sp),e&&o(Ie),w(Fo),e&&o(np),e&&o(ue),e&&o(ap),e&&o(me),e&&o(rp),e&&o(Be),w(Vo),e&&o(lp),e&&o(B),e&&o(ip),e&&o(Le),w(es),e&&o(pp),e&&o(Vs),e&&o(dp),e&&o(Gs),e&&o(hp),e&&o(Fe),w(ss),e&&o(fp),e&&o(ge),e&&o(cp),w(as,e),e&&o(up),e&&o(Js),e&&o(mp),e&&o(Ys),e&&o(gp),w(rs,e),e&&o(_p),e&&o(Qs),e&&o(vp),w(ls,e),e&&o(wp),e&&o(Zs),e&&o(Ep),e&&o(Ct)}}}const _E={local:"exporting-transformers-models",sections:[{local:"onnx",sections:[{local:"exporting-a-model-to-onnx",title:"Exporting a model to ONNX"},{local:"selecting-features-for-different-model-topologies",title:"Selecting features for different model topologies"},{local:"exporting-a-model-for-an-unsupported-architecture",sections:[{local:"implementing-a-custom-onnx-configuration",title:"Implementing a custom ONNX configuration"},{local:"exporting-the-model",title:"Exporting the model"},{local:"validating-the-model-outputs",title:"Validating the model outputs"}],title:"Exporting a model for an unsupported architecture"},{local:"contributing-a-new-configuration-to-transformers",title:"Contributing a new configuration to \u{1F917} Transformers"}],title:"ONNX"},{local:"torchscript",sections:[{local:"implications",title:"Implications"},{local:"torchscript-flag-and-tied-weights",title:"TorchScript flag and tied weights"},{local:"dummy-inputs-and-standard-lengths",title:"Dummy inputs and standard lengths"},{local:"using-torchscript-in-python",sections:[{local:"saving-a-model",title:"Saving a model"},{local:"loading-a-model",title:"Loading a model"},{local:"using-a-traced-model-for-inference",title:"Using a traced model for inference"}],title:"Using TorchScript in Python"},{local:"deploying-huggingface-torchscript-models-on-aws-using-the-neuron-sdk",sections:[{local:"implications",title:"Implications"},{local:"dependencies",title:"Dependencies"},{local:"converting-a-model-for-aws-neuron",title:"Converting a Model for AWS Neuron"}],title:"Deploying HuggingFace TorchScript models on AWS using the Neuron SDK"}],title:"TorchScript"}],title:"Exporting \u{1F917} Transformers Models"};function vE(L,c,k){let{fw:E}=c;return L.$$set=x=>{"fw"in x&&k(0,E=x.fw)},[E]}class kE extends rE{constructor(c){super();lE(this,c,vE,gE,iE,{fw:0})}}export{kE as default,_E as metadata};
272
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages/testing.mdx-614baedb.js
import{S as sS,i as oS,s as rS,e as l,k as f,w as d,t as o,M as lS,c as a,d as s,m as h,a as i,x as c,h as r,b as u,F as t,g as p,y as m,q as _,o as v,B as w}from"../chunks/vendor-4833417e.js";import{T as PE}from"../chunks/Tip-fffd6df1.js";import{I as E}from"../chunks/IconCopyLink-4b81c553.js";import{C as y}from"../chunks/CodeBlock-6a3d1b46.js";import"../chunks/CopyButton-dacfbfaf.js";function aS(me){let $,x,g,b,A,j,z,D;return{c(){$=l("p"),x=o("This plugin doesn\u2019t work with "),g=l("code"),b=o("-n"),A=o(" flag from "),j=l("code"),z=o("pytest-xdist"),D=o(".")},l(O){$=a(O,"P",{});var k=i($);x=r(k,"This plugin doesn\u2019t work with "),g=a(k,"CODE",{});var S=i(g);b=r(S,"-n"),S.forEach(s),A=r(k," flag from "),j=a(k,"CODE",{});var R=i(j);z=r(R,"pytest-xdist"),R.forEach(s),D=r(k,"."),k.forEach(s)},m(O,k){p(O,$,k),t($,x),t($,g),t(g,b),t($,A),t($,j),t(j,z),t($,D)},d(O){O&&s($)}}}function iS(me){let $,x,g,b,A,j,z,D;return{c(){$=l("p"),x=o("There is another plugin "),g=l("code"),b=o("pytest-repeat"),A=o(", but it doesn\u2019t work with "),j=l("code"),z=o("unittest"),D=o(".")},l(O){$=a(O,"P",{});var k=i($);x=r(k,"There is another plugin "),g=a(k,"CODE",{});var S=i(g);b=r(S,"pytest-repeat"),S.forEach(s),A=r(k,", but it doesn\u2019t work with "),j=a(k,"CODE",{});var R=i(j);z=r(R,"unittest"),R.forEach(s),D=r(k,"."),k.forEach(s)},m(O,k){p(O,$,k),t($,x),t($,g),t(g,b),t($,A),t($,j),t(j,z),t($,D)},d(O){O&&s($)}}}function nS(me){let $,x,g,b,A,j,z,D,O,k,S,R,Nt,ye;return{c(){$=l("p"),x=o("In order to run the equivalent of "),g=l("code"),b=o("rm -r"),A=o(` safely, only subdirs of the project repository checkout are allowed if an explicit `),j=l("code"),z=o("tmp_dir"),D=o(" is used, so that by mistake no "),O=l("code"),k=o("/tmp"),S=o(` or similar important part of the filesystem will get nuked. i.e. please always pass paths that start with `),R=l("code"),Nt=o("./"),ye=o(".")},l(Be){$=a(Be,"P",{});var q=i($);x=r(q,"In order to run the equivalent of "),g=a(q,"CODE",{});var _e=i(g);b=r(_e,"rm -r"),_e.forEach(s),A=r(q,` safely, only subdirs of the project repository checkout are allowed if an explicit `),j=a(q,"CODE",{});var Ye=i(j);z=r(Ye,"tmp_dir"),Ye.forEach(s),D=r(q," is used, so that by mistake no "),O=a(q,"CODE",{});var Ht=i(O);k=r(Ht,"/tmp"),Ht.forEach(s),S=r(q,` or similar important part of the filesystem will get nuked. i.e. 
please always pass paths that start with `),R=a(q,"CODE",{});var ti=i(R);Nt=r(ti,"./"),ti.forEach(s),ye=r(q,"."),q.forEach(s)},m(Be,q){p(Be,$,q),t($,x),t($,g),t(g,b),t($,A),t($,j),t(j,z),t($,D),t($,O),t(O,k),t($,S),t($,R),t(R,Nt),t($,ye)},d(Be){Be&&s($)}}}function pS(me){let $,x;return{c(){$=l("p"),x=o(`Each test can register multiple temporary directories and they all will get auto-removed, unless requested otherwise.`)},l(g){$=a(g,"P",{});var b=i($);x=r(b,`Each test can register multiple temporary directories and they all will get auto-removed, unless requested otherwise.`),b.forEach(s)},m(g,b){p(g,$,b),t($,x)},d(g){g&&s($)}}}function fS(me){let $,x,g,b,A,j,z,D,O,k,S,R,Nt,ye,Be,q,_e,Ye,Ht,ti,OE,IE,si,fp,TE,AE,fm,Ve,zt,hp,Lo,DE,up,qE,hm,Rt,Uo,No,SE,Ho,LE,UE,NE,zo,HE,dp,zE,RE,GE,Ro,Go,FE,Fo,ME,WE,BE,Xe,cp,oi,Mo,YE,VE,XE,mp,L,Wo,JE,ZE,_p,KE,QE,vp,eg,tg,wp,sg,og,yp,rg,lg,$p,ag,ig,ng,Ep,$e,Bo,pg,fg,gp,hg,ug,bp,dg,cg,um,Yo,dm,Gt,mg,Vo,_g,vg,cm,Je,Ft,kp,Xo,wg,Cp,yg,mm,Ze,Mt,jp,Jo,$g,xp,Eg,_m,Wt,gg,Zo,bg,kg,vm,ri,Cg,wm,li,jg,ym,Ko,$m,ai,xg,Em,Qo,gm,ii,Pg,bm,er,km,ni,Og,Cm,Q,Pp,Ig,Tg,Op,Ag,Dg,Ip,qg,Sg,Tp,Lg,jm,Ke,Bt,Ap,tr,Ug,Dp,Ng,xm,pi,Hg,Pm,sr,Om,fi,zg,Im,or,Tm,Qe,Yt,qp,rr,Rg,Sp,Gg,Am,hi,Fg,Dm,lr,qm,et,Vt,Lp,ar,Mg,Up,Wg,Sm,ui,Bg,Lm,ir,Um,di,Yg,Nm,Ee,ci,Np,Vg,Xg,Jg,mi,Hp,Zg,Kg,Qg,_i,zp,eb,tb,Hm,vi,sb,zm,nr,Rm,wi,ob,Gm,Xt,rb,Rp,lb,ab,Fm,pr,Mm,yi,ib,Wm,Jt,nb,Gp,pb,fb,Bm,fr,Ym,ee,hb,Fp,ub,db,Mp,cb,mb,Wp,_b,vb,Vm,Zt,wb,Bp,yb,$b,Xm,hr,Jm,$i,Eb,Zm,ur,Km,ge,gb,Yp,bb,kb,Vp,Cb,jb,Qm,dr,e_,Kt,xb,Xp,Pb,Ob,t_,Qt,Ib,Jp,Tb,Ab,s_,cr,o_,tt,es,Zp,mr,Db,Kp,qb,r_,ts,Sb,_r,Lb,Ub,l_,vr,a_,wr,i_,Ei,Nb,n_,st,ss,Qp,yr,Hb,ef,zb,p_,$r,Er,Rb,Gb,f_,gr,h_,ot,Fb,tf,Mb,Wb,sf,Bb,u_,be,Yb,of,Vb,Xb,rf,Jb,Zb,d_,br,c_,ke,Kb,lf,Qb,e3,af,t3,s3,m_,kr,__,gi,o3,v_,Cr,jr,r3,l3,w_,rt,os,nf,xr,a3,pf,i3,y_,rs,n3,ff,p3,f3,$_,Pr,E_,lt,ls,hf,Or,h3,uf,u3,g_,bi,d3,b_,Ir,k_,at,as,df,Tr,c3,cf,m3,C_,M,_3,mf,v3,w3,_f,y3,$3,vf,E3,g3,wf,b3,k3,j_,ve,yf,C3,j3,$f,x3,P3,Ef,O3,I3,x_,Ce,T3,gf,A3,D3,Ar,q3,S3,P_,it,is,bf,Dr,L3,kf,U3,O_,ki,N3,I_,nt,ns,Cf,qr,H3,jf,z3,T_,Ci,ji,Sr,R3,G3,A_,Lr,D_,xi,F3,q_,Ur,S_,ps,L_,fs,U_,pt,hs,xf,Nr,M3,Pf,W3,N_,Hr,H_,us,B3,Of,Y3,V3,z_,ds,X3,If,J3,Z3,R_,zr,G_,Pi,K3,F_,Rr,M_,cs,Q3,Tf,e2,t2,W_,Gr,B_,Oi,s2,Y_,Fr,V_,I,o2,Af,r2,l2,Df,a2,i2,qf,n2,p2,Sf,f2,h2,Lf,u2,d2,Mr,c2,m2,X_,je,_2,Wr,Uf,v2,w2,Nf,y2,$2,J_,ft,ms,Hf,Br,E2,zf,g2,Z_,ht,_s,Rf,Yr,b2,Gf,k2,K_,Vr,Xr,C2,j2,Q_,Jr,ev,Ii,x2,tv,Zr,sv,Ti,P2,ov,ut,vs,Ff,Kr,O2,Mf,I2,rv,xe,T2,Wf,A2,D2,Bf,q2,S2,lv,Qr,av,dt,ws,Yf,el,L2,Vf,U2,iv,tl,sl,N2,H2,nv,ol,pv,rl,fv,ct,ys,Xf,ll,z2,Jf,R2,hv,$s,G2,Zf,F2,M2,uv,al,dv,te,W2,Kf,B2,Y2,Qf,V2,X2,eh,J2,Z2,cv,il,mv,Ai,K2,_v,Di,Q2,vv,U,qi,th,e5,t5,s5,Es,sh,o5,r5,oh,l5,a5,i5,gs,rh,n5,p5,lh,f5,h5,u5,bs,ah,d5,c5,ih,m5,_5,v5,ks,nh,w5,y5,ph,$5,E5,g5,Cs,fh,b5,k5,hh,C5,j5,wv,Si,x5,yv,C,P5,uh,O5,I5,dh,T5,A5,ch,D5,q5,mh,S5,L5,_h,U5,N5,vh,H5,z5,wh,R5,G5,yh,F5,M5,$h,W5,B5,Eh,Y5,V5,$v,Li,X5,Ev,nl,gv,Pe,J5,gh,Z5,K5,bh,Q5,ek,bv,pl,kv,Ui,tk,Cv,fl,jv,Oe,sk,kh,ok,rk,Ch,lk,ak,xv,hl,Pv,js,ik,jh,nk,pk,Ov,Ni,fk,Iv,Hi,xh,hk,Tv,ul,Av,mt,xs,Ph,dl,uk,Oh,dk,Dv,_t,Ih,ck,mk,Th,_k,vk,qv,zi,wk,Sv,Ps,Ah,cl,yk,$k,Dh,ml,Ek,Lv,Os,gk,qh,bk,kk,Uv,Ri,Ck,Nv,_l,Hv,vt,Is,Sh,vl,jk,Lh,xk,zv,Ie,Pk,Uh,Ok,Ik,Nh,Tk,Ak,Rv,W,Dk,Hh,qk,Sk,zh,Lk,Uk,Rh,Nk,Hk,Gh,zk,Rk,Gv,wl,Fv,Gi,Gk,Mv,yl,Wv,wt,Ts,Fh,$l,Fk,Mh,Mk,Bv,Fi,Wk,Yv,El,Vv,yt,As,Wh,gl,Bk,Bh,Yk,Xv,Mi,Vk,Jv,bl,Zv,Wi,Xk,Kv,Bi,Jk,Qv,kl,e1,$t,Ds,Yh,Cl,Zk,Vh,Kk,t1,Te,Qk,Xh,e4,t4,Jh,s4,o4,s1,se,r4,jl,l4,a4,Zh,i4,n4,Kh,p4,f4,o1,Et,qs,Qh,xl,h4,eu,u4,r1,Yi,d4,l1,Pl,a1,Ss,c4,tu,m4,_4,i1,Ae,v4,su,w4,y4,ou,$4,E4,n1,Ol,p1,Ls,g4,ru,b4,k4,f1,Il,h1,Us,C4,lu,j4,x4,u1,Tl,
d1,Vi,P4,c1,Al,m1,Xi,O4,_1,Dl,v1,B,I4,ql,T4,A4,au,D4,q4,iu,S4,L4,nu,U4,N4,w1,oe,H4,pu,z4,R4,fu,G4,F4,hu,M4,W4,y1,De,B4,uu,Y4,V4,du,X4,J4,$1,Sl,E1,re,Z4,cu,K4,Q4,mu,e0,t0,_u,s0,o0,g1,Ll,b1,Ji,r0,k1,Ul,C1,Zi,l0,j1,Nl,x1,Ki,a0,P1,gt,Ns,vu,Hl,i0,wu,n0,O1,Hs,p0,yu,f0,h0,I1,zs,zl,Qi,$u,u0,d0,c0,G,Rl,Eu,m0,_0,gu,v0,w0,en,bu,y0,$0,E0,Rs,ku,g0,b0,Cu,k0,C0,j0,Gs,ju,x0,P0,xu,O0,I0,T0,tn,Pu,A0,D0,q0,qe,Ou,S0,L0,Iu,U0,N0,Tu,H0,z0,R0,Gl,Fl,G0,Au,F0,M0,W0,F,Du,qu,B0,Y0,Su,Lu,V0,X0,Uu,Nu,J0,Z0,Hu,zu,K0,Q0,Ru,Gu,e6,t6,Fu,Mu,s6,T1,Fs,o6,Wu,r6,l6,A1,Ml,D1,Y,a6,Bu,i6,n6,Yu,p6,f6,Vu,h6,u6,Xu,d6,c6,q1,Wl,S1,bt,Ms,Ju,Bl,m6,Zu,_6,L1,Ws,v6,Ku,w6,y6,U1,sn,$6,N1,Se,E6,Qu,g6,b6,ed,k6,C6,H1,on,j6,z1,Yl,R1,Bs,x6,td,P6,O6,G1,rn,sd,I6,F1,Vl,M1,Xl,od,T6,A6,W1,ln,rd,D6,B1,Jl,Y1,an,q6,V1,nn,Zl,kt,S6,ld,L6,U6,ad,N6,H6,z6,we,pn,id,R6,G6,F6,fn,nd,M6,W6,B6,hn,pd,Y6,V6,X6,un,fd,J6,Z6,X1,Ys,J1,Vs,Z1,Ct,Xs,hd,Kl,K6,ud,Q6,K1,Le,e7,dd,t7,s7,cd,o7,r7,Q1,Ql,ew,jt,Js,md,ea,l7,_d,a7,tw,Zs,i7,vd,n7,p7,sw,dn,f7,ow,Ks,wd,ta,h7,yd,u7,d7,c7,$d,sa,m7,Ed,_7,v7,rw,le,w7,gd,y7,$7,bd,E7,g7,kd,b7,k7,lw,xt,Qs,Cd,oa,C7,jd,j7,aw,cn,xd,x7,iw,ra,nw,mn,P7,pw,la,fw,eo,O7,Pd,I7,T7,hw,aa,uw,_n,Od,A7,dw,ia,cw,vn,D7,mw,na,_w,to,q7,Id,S7,L7,vw,pa,ww,wn,Td,U7,yw,fa,$w,yn,Ad,N7,Ew,ha,gw,$n,H7,bw,ua,kw,En,z7,Cw,da,jw,so,R7,ca,G7,F7,xw,Pt,oo,Dd,ma,M7,qd,W7,Pw,gn,B7,Ow,_a,Iw,Ue,Y7,Sd,V7,X7,Ld,J7,Z7,Tw,va,Aw,ae,K7,Ud,Q7,e8,Nd,t8,s8,Hd,o8,r8,Dw,wa,qw,bn,l8,Sw,kn,a8,Lw,Cn,i8,Uw,ie,zd,n8,p8,Rd,f8,h8,Ot,u8,Gd,d8,c8,Fd,m8,_8,v8,Md,w8,Nw,Ne,y8,Wd,$8,E8,Bd,g8,b8,Hw,ya,zw,He,k8,$a,C8,j8,Ea,x8,P8,Rw,ro,O8,Yd,I8,T8,Gw,jn,A8,Fw,It,lo,Vd,ga,D8,Xd,q8,Mw,V,S8,Jd,L8,U8,Zd,N8,H8,Kd,z8,R8,ba,G8,F8,Ww,ka,Bw,ao,M8,Qd,W8,B8,Yw,Ca,Vw,io,Y8,ec,V8,X8,Xw,ja,Jw,T,J8,tc,Z8,K8,sc,Q8,e9,oc,t9,s9,rc,o9,r9,lc,l9,a9,ac,i9,n9,Zw,no,p9,ic,f9,h9,Kw,xa,Qw,xn,u9,ey,Pa,ty,ze,d9,nc,c9,m9,pc,_9,v9,sy,Oa,oy,po,w9,fc,y9,$9,ry,Ia,ly,Pn,E9,ay,Tt,fo,hc,Ta,g9,uc,b9,iy,ho,k9,dc,C9,j9,ny,Aa,py,At,uo,cc,Da,x9,mc,P9,fy,qa,O9,_c,I9,hy,Sa,uy,ne,T9,vc,A9,D9,wc,q9,S9,yc,L9,U9,dy,La,cy,N,N9,$c,H9,z9,Ec,R9,G9,gc,F9,M9,bc,W9,B9,kc,Y9,V9,my,co,X9,Cc,J9,Z9,_y,Dt,mo,jc,Ua,K9,xc,Q9,vy,On,eC,wy,Na,yy,qt,_o,Pc,Ha,tC,Oc,sC,$y,In,oC,Ey,za,gy,St,vo,Ic,Ra,rC,Tc,lC,by,Tn,aC,ky,pe,Ga,iC,Ac,nC,pC,fC,H,hC,Dc,uC,dC,qc,cC,mC,Sc,_C,vC,Lc,wC,yC,Fa,$C,EC,Uc,gC,bC,Nc,kC,CC,Ma,jC,Wa,xC,PC,Cy,Lt,wo,Hc,Ba,OC,zc,IC,jy,An,TC,xy,fe,Rc,AC,DC,Gc,qC,SC,Fc,LC,UC,Mc,NC,Py,Dn,HC,Oy,qn,zC,Iy,Sn,RC,Ty,Ln,GC,Ay,yo,Un,Wc,FC,MC,WC,Ut,BC,Bc,YC,VC,Yc,XC,JC,Dy,Nn,ZC,qy,Ya,Sy,Hn,KC,Ly,Va,Uy,$o,QC,Vc,ej,tj,Ny,Eo,sj,Xc,oj,rj,Hy,zn,lj,zy,go,Jc,Xa,aj,ij,Zc,Ja,nj,Ry;return j=new E({}),Lo=new E({}),Yo=new y({props:{code:`RUN_SLOW=1 pytest tests/ RUN_SLOW=1 pytest examples/`,highlighted:`RUN_SLOW=1 pytest tests/ RUN_SLOW=1 pytest examples/`}}),Xo=new E({}),Jo=new E({}),Ko=new y({props:{code:"pytest",highlighted:"pytest"}}),Qo=new y({props:{code:"make test",highlighted:'make <span class="hljs-built_in">test</span>'}}),er=new y({props:{code:"python -m pytest -n auto --dist=loadfile -s -v ./tests/",highlighted:"python -m pytest -n auto --dist=loadfile -s -v ./tests/"}}),tr=new E({}),sr=new y({props:{code:"pytest --collect-only -q",highlighted:"pytest --collect-only -q"}}),or=new y({props:{code:"pytest tests/test_optimization.py --collect-only -q",highlighted:"pytest tests/test_optimization.py --collect-only -q"}}),rr=new E({}),lr=new y({props:{code:"pytest tests/test_logging.py",highlighted:"pytest tests/test_logging.py"}}),ar=new E({}),ir=new y({props:{code:"pytest tests/test_optimization.py::OptimizationTest::test_adam_w",highlighted:"pytest 
tests/test_optimization.py::OptimizationTest::test_adam_w"}}),nr=new y({props:{code:"pytest tests/test_optimization.py::OptimizationTest",highlighted:"pytest tests/test_optimization.py::OptimizationTest"}}),pr=new y({props:{code:"pytest tests/test_optimization.py::OptimizationTest --collect-only -q",highlighted:"pytest tests/test_optimization.py::OptimizationTest --collect-only -q"}}),fr=new y({props:{code:"pytest -k adam tests/test_optimization.py",highlighted:"pytest -k adam tests/test_optimization.py"}}),hr=new y({props:{code:'pytest -k "not adam" tests/test_optimization.py',highlighted:'pytest -k <span class="hljs-string">&quot;not adam&quot;</span> tests/test_optimization.py'}}),ur=new y({props:{code:'pytest -k "ada and not adam" tests/test_optimization.py',highlighted:'pytest -k <span class="hljs-string">&quot;ada and not adam&quot;</span> tests/test_optimization.py'}}),dr=new y({props:{code:'pytest -k "test_adam_w or test_adam_w" tests/test_optimization.py',highlighted:'pytest -k <span class="hljs-string">&quot;test_adam_w or test_adam_w&quot;</span> tests/test_optimization.py'}}),cr=new y({props:{code:'pytest -k "test and ada" tests/test_optimization.py',highlighted:'pytest -k <span class="hljs-string">&quot;test and ada&quot;</span> tests/test_optimization.py'}}),mr=new E({}),vr=new y({props:{code:"pip install pytest-picked",highlighted:"pip install pytest-picked"}}),wr=new y({props:{code:"pytest --picked",highlighted:"pytest --picked"}}),yr=new E({}),gr=new y({props:{code:"pip install pytest-xdist",highlighted:"pip install pytest-xdist"}}),br=new y({props:{code:`[tool:pytest] looponfailroots = transformers tests`,highlighted:`<span class="hljs-section">[tool:pytest]</span> <span class="hljs-attr">looponfailroots</span> = transformers tests`}}),kr=new y({props:{code:`[pytest] looponfailroots = transformers tests`,highlighted:`<span class="hljs-section">[pytest]</span> <span class="hljs-attr">looponfailroots</span> = transformers tests`}}),xr=new E({}),Pr=new y({props:{code:"pytest *ls -1 tests/*py | grep -v test_modeling*",highlighted:'pytest *<span class="hljs-built_in">ls</span> -1 tests/*py | grep -v test_modeling*'}}),Or=new E({}),Ir=new y({props:{code:"pytest --cache-clear tests",highlighted:"pytest --cache-clear tests"}}),Tr=new E({}),Dr=new E({}),qr=new E({}),Lr=new y({props:{code:"pip install pytest-flakefinder",highlighted:"pip install pytest-flakefinder"}}),Ur=new y({props:{code:"pytest --flake-finder --flake-runs=5 tests/test_failing_test.py",highlighted:"pytest --flake-finder --flake-runs=5 tests/test_failing_test.py"}}),ps=new PE({props:{$$slots:{default:[aS]},$$scope:{ctx:me}}}),fs=new PE({props:{$$slots:{default:[iS]},$$scope:{ctx:me}}}),Nr=new E({}),Hr=new y({props:{code:"pip install pytest-random-order",highlighted:"pip install pytest-random-order"}}),zr=new y({props:{code:`pytest tests [...] Using --random-order-bucket=module Using --random-order-seed=573663`,highlighted:`pytest tests [...] Using --random-order-bucket=module Using --random-order-seed=573663`}}),Rr=new y({props:{code:`pytest --random-order-seed=573663 [...] Using --random-order-bucket=module Using --random-order-seed=573663`,highlighted:`pytest --random-order-seed=573663 [...] 
Using --random-order-bucket=module Using --random-order-seed=573663`}}),Gr=new y({props:{code:"pytest --random-order-bucket=none tests/test_a.py tests/test_c.py tests/test_b.py",highlighted:"pytest --random-order-bucket=none tests/test_a.py tests/test_c.py tests/test_b.py"}}),Fr=new y({props:{code:"pytest --random-order-bucket=none",highlighted:"pytest --random-order-bucket=none"}}),Br=new E({}),Yr=new E({}),Jr=new y({props:{code:"pip install pytest-sugar",highlighted:"pip install pytest-sugar"}}),Zr=new y({props:{code:"pytest -p no:sugar",highlighted:"pytest -p no:sugar"}}),Kr=new E({}),Qr=new y({props:{code:"pytest --pspec tests/test_optimization.py",highlighted:"pytest --pspec tests/test_optimization.py"}}),el=new E({}),ol=new y({props:{code:"pip install pytest-instafail",highlighted:"pip install pytest-instafail"}}),rl=new y({props:{code:"pytest --instafail",highlighted:"pytest --instafail"}}),ll=new E({}),al=new y({props:{code:'CUDA_VISIBLE_DEVICES="" pytest tests/test_logging.py',highlighted:'CUDA_VISIBLE_DEVICES=<span class="hljs-string">&quot;&quot;</span> pytest tests/test_logging.py'}}),il=new y({props:{code:'CUDA_VISIBLE_DEVICES="1" pytest tests/test_logging.py',highlighted:'CUDA_VISIBLE_DEVICES=<span class="hljs-string">&quot;1&quot;</span> pytest tests/test_logging.py'}}),nl=new y({props:{code:`@require_torch_multi_gpu def test_example_with_multi_gpu():`,highlighted:`<span class="hljs-meta">@require_torch_multi_gpu</span> <span class="hljs-keyword">def</span> <span class="hljs-title function_">test_example_with_multi_gpu</span>():`}}),pl=new y({props:{code:`@require_tf def test_tf_thing_with_tensorflow():`,highlighted:`<span class="hljs-meta">@require_tf</span> <span class="hljs-keyword">def</span> <span class="hljs-title function_">test_tf_thing_with_tensorflow</span>():`}}),fl=new y({props:{code:`@require_torch_gpu @slow def test_example_slow_on_gpu():`,highlighted:`<span class="hljs-meta">@require_torch_gpu</span> <span class="hljs-meta">@slow</span> <span class="hljs-keyword">def</span> <span class="hljs-title function_">test_example_slow_on_gpu</span>():`}}),hl=new y({props:{code:`@parameterized.expand(...) 
@require_torch_multi_gpu def test_integration_foo():`,highlighted:`<span class="hljs-meta">@parameterized.expand(<span class="hljs-params">...</span>)</span> <span class="hljs-meta">@require_torch_multi_gpu</span> <span class="hljs-keyword">def</span> <span class="hljs-title function_">test_integration_foo</span>():`}}),ul=new y({props:{code:`from transformers.testing_utils import get_gpu_count n_gpu = get_gpu_count() # works with torch and tf`,highlighted:`<span class="hljs-keyword">from</span> transformers.testing_utils <span class="hljs-keyword">import</span> get_gpu_count n_gpu = get_gpu_count() <span class="hljs-comment"># works with torch and tf</span>`}}),dl=new E({}),_l=new y({props:{code:"CUDA_VISIBLE_DEVICES=0,1 RUN_SLOW=1 pytest -sv tests/test_trainer_distributed.py",highlighted:"CUDA_VISIBLE_DEVICES=0,1 RUN_SLOW=1 pytest -sv tests/test_trainer_distributed.py"}}),vl=new E({}),wl=new y({props:{code:"pytest -s tests/test_logging.py",highlighted:"pytest -s tests/test_logging.py"}}),yl=new y({props:{code:"py.test tests --junitxml=result.xml",highlighted:"py.test tests --junitxml=result.xml"}}),$l=new E({}),El=new y({props:{code:"pytest --color=no tests/test_logging.py",highlighted:"pytest --color=no tests/test_logging.py"}}),gl=new E({}),bl=new y({props:{code:"pytest --pastebin=failed tests/test_logging.py",highlighted:"pytest --pastebin=failed tests/test_logging.py"}}),kl=new y({props:{code:"pytest --pastebin=all tests/test_logging.py",highlighted:"pytest --pastebin=all tests/test_logging.py"}}),Cl=new E({}),xl=new E({}),Pl=new y({props:{code:`# test_this1.py import unittest from parameterized import parameterized class TestMathUnitTest(unittest.TestCase): @parameterized.expand( [ ("negative", -1.5, -2.0), ("integer", 1, 1.0), ("large fraction", 1.6, 1), ] ) def test_floor(self, name, input, expected): assert_equal(math.floor(input), expected)`,highlighted:`<span class="hljs-comment"># test_this1.py</span> <span class="hljs-keyword">import</span> unittest <span class="hljs-keyword">from</span> parameterized <span class="hljs-keyword">import</span> parameterized <span class="hljs-keyword">class</span> <span class="hljs-title class_">TestMathUnitTest</span>(unittest.TestCase): <span class="hljs-meta"> @parameterized.expand(<span class="hljs-params"> [ (<span class="hljs-params"><span class="hljs-string">&quot;negative&quot;</span>, -<span class="hljs-number">1.5</span>, -<span class="hljs-number">2.0</span></span>), (<span class="hljs-params"><span class="hljs-string">&quot;integer&quot;</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1.0</span></span>), (<span class="hljs-params"><span class="hljs-string">&quot;large fraction&quot;</span>, <span class="hljs-number">1.6</span>, <span class="hljs-number">1</span></span>), ] </span>)</span> <span class="hljs-keyword">def</span> <span class="hljs-title function_">test_floor</span>(<span class="hljs-params">self, name, <span class="hljs-built_in">input</span>, expected</span>): assert_equal(math.floor(<span class="hljs-built_in">input</span>), expected)`}}),Ol=new y({props:{code:'pytest -k "negative and integer" tests/test_mytest.py',highlighted:'pytest -k <span class="hljs-string">&quot;negative and integer&quot;</span> tests/test_mytest.py'}}),Il=new y({props:{code:'pytest -k "not negative" tests/test_mytest.py',highlighted:'pytest -k <span class="hljs-string">&quot;not negative&quot;</span> tests/test_mytest.py'}}),Tl=new y({props:{code:"pytest test_this1.py --collect-only -q",highlighted:"pytest test_this1.py 
--collect-only -q"}}),Al=new y({props:{code:`test_this1.py::TestMathUnitTest::test_floor_0_negative test_this1.py::TestMathUnitTest::test_floor_1_integer test_this1.py::TestMathUnitTest::test_floor_2_large_fraction`,highlighted:`test_this1.py::TestMathUnitTest::test_floor_0_negative test_this1.py::TestMathUnitTest::test_floor_1_integer test_this1.py::TestMathUnitTest::test_floor_2_large_fraction`}}),Dl=new y({props:{code:"pytest test_this1.py::TestMathUnitTest::test_floor_0_negative test_this1.py::TestMathUnitTest::test_floor_1_integer",highlighted:"pytest test_this1.py::TestMathUnitTest::test_floor_0_negative test_this1.py::TestMathUnitTest::test_floor_1_integer"}}),Sl=new y({props:{code:`# test_this2.py import pytest @pytest.mark.parametrize( "name, input, expected", [ ("negative", -1.5, -2.0), ("integer", 1, 1.0), ("large fraction", 1.6, 1), ], ) def test_floor(name, input, expected): assert_equal(math.floor(input), expected)`,highlighted:`<span class="hljs-comment"># test_this2.py</span> <span class="hljs-keyword">import</span> pytest <span class="hljs-meta">@pytest.mark.parametrize(<span class="hljs-params"> <span class="hljs-string">&quot;name, input, expected&quot;</span>, [ (<span class="hljs-params"><span class="hljs-string">&quot;negative&quot;</span>, -<span class="hljs-number">1.5</span>, -<span class="hljs-number">2.0</span></span>), (<span class="hljs-params"><span class="hljs-string">&quot;integer&quot;</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1.0</span></span>), (<span class="hljs-params"><span class="hljs-string">&quot;large fraction&quot;</span>, <span class="hljs-number">1.6</span>, <span class="hljs-number">1</span></span>), ], </span>)</span> <span class="hljs-keyword">def</span> <span class="hljs-title function_">test_floor</span>(<span class="hljs-params">name, <span class="hljs-built_in">input</span>, expected</span>): assert_equal(math.floor(<span class="hljs-built_in">input</span>), expected)`}}),Ll=new y({props:{code:"pytest test_this2.py --collect-only -q",highlighted:"pytest test_this2.py --collect-only -q"}}),Ul=new y({props:{code:`test_this2.py::test_floor[integer-1-1.0] test_this2.py::test_floor[negative--1.5--2.0] test_this2.py::test_floor[large fraction-1.6-1]`,highlighted:`test_this2.py::test_floor[integer-1-1.0] test_this2.py::test_floor[negative--1.5--2.0] test_this2.py::test_floor[large fraction-1.6-1]`}}),Nl=new y({props:{code:"pytest test_this2.py::test_floor[negative--1.5--2.0] test_this2.py::test_floor[integer-1-1.0]",highlighted:"pytest test_this2.py::test_floor[negative--1.5--2.0] test_this2.py::test_floor[integer-1-1.0]"}}),Hl=new E({}),Ml=new y({props:{code:`from transformers.testing_utils import TestCasePlus class PathExampleTest(TestCasePlus): def test_something_involving_local_locations(self): data_dir = self.tests_dir / "fixtures/tests_samples/wmt_en_ro"`,highlighted:`<span class="hljs-keyword">from</span> transformers.testing_utils <span class="hljs-keyword">import</span> TestCasePlus <span class="hljs-keyword">class</span> <span class="hljs-title class_">PathExampleTest</span>(<span class="hljs-title class_ inherited__">TestCasePlus</span>): <span class="hljs-keyword">def</span> <span class="hljs-title function_">test_something_involving_local_locations</span>(<span class="hljs-params">self</span>): data_dir = self.tests_dir / <span class="hljs-string">&quot;fixtures/tests_samples/wmt_en_ro&quot;</span>`}}),Wl=new y({props:{code:`from transformers.testing_utils import TestCasePlus class 
PathExampleTest(TestCasePlus): def test_something_involving_stringified_locations(self): examples_dir = self.examples_dir_str`,highlighted:`<span class="hljs-keyword">from</span> transformers.testing_utils <span class="hljs-keyword">import</span> TestCasePlus <span class="hljs-keyword">class</span> <span class="hljs-title class_">PathExampleTest</span>(<span class="hljs-title class_ inherited__">TestCasePlus</span>): <span class="hljs-keyword">def</span> <span class="hljs-title function_">test_something_involving_stringified_locations</span>(<span class="hljs-params">self</span>): examples_dir = self.examples_dir_str`}}),Bl=new E({}),Yl=new y({props:{code:`from transformers.testing_utils import TestCasePlus class ExamplesTests(TestCasePlus): def test_whatever(self): tmp_dir = self.get_auto_remove_tmp_dir()`,highlighted:`<span class="hljs-keyword">from</span> transformers.testing_utils <span class="hljs-keyword">import</span> TestCasePlus <span class="hljs-keyword">class</span> <span class="hljs-title class_">ExamplesTests</span>(<span class="hljs-title class_ inherited__">TestCasePlus</span>): <span class="hljs-keyword">def</span> <span class="hljs-title function_">test_whatever</span>(<span class="hljs-params">self</span>): tmp_dir = self.get_auto_remove_tmp_dir()`}}),Vl=new y({props:{code:`def test_whatever(self): tmp_dir = self.get_auto_remove_tmp_dir()`,highlighted:`<span class="hljs-keyword">def</span> <span class="hljs-title function_">test_whatever</span>(<span class="hljs-params">self</span>): tmp_dir = self.get_auto_remove_tmp_dir()`}}),Jl=new y({props:{code:`def test_whatever(self): tmp_dir = self.get_auto_remove_tmp_dir("./xxx")`,highlighted:`<span class="hljs-keyword">def</span> <span class="hljs-title function_">test_whatever</span>(<span class="hljs-params">self</span>): tmp_dir = self.get_auto_remove_tmp_dir(<span class="hljs-string">&quot;./xxx&quot;</span>)`}}),Ys=new PE({props:{$$slots:{default:[nS]},$$scope:{ctx:me}}}),Vs=new PE({props:{$$slots:{default:[pS]},$$scope:{ctx:me}}}),Kl=new E({}),Ql=new y({props:{code:`import os from transformers.testing_utils import ExtendSysPath bindir = os.path.abspath(os.path.dirname(__file__)) with ExtendSysPath(f"{bindir}/.."): from test_trainer import TrainerIntegrationCommon # noqa`,highlighted:`<span class="hljs-keyword">import</span> os <span class="hljs-keyword">from</span> transformers.testing_utils <span class="hljs-keyword">import</span> ExtendSysPath bindir = os.path.abspath(os.path.dirname(__file__)) <span class="hljs-keyword">with</span> ExtendSysPath(<span class="hljs-string">f&quot;<span class="hljs-subst">{bindir}</span>/..&quot;</span>): <span class="hljs-keyword">from</span> test_trainer <span class="hljs-keyword">import</span> TrainerIntegrationCommon <span class="hljs-comment"># noqa</span>`}}),ea=new E({}),oa=new E({}),ra=new y({props:{code:`@unittest.skip("this bug needs to be fixed") def test_feature_x():`,highlighted:`<span class="hljs-meta">@unittest.skip(<span class="hljs-params"><span class="hljs-string">&quot;this bug needs to be fixed&quot;</span></span>)</span> <span class="hljs-keyword">def</span> <span class="hljs-title function_">test_feature_x</span>():`}}),la=new y({props:{code:'@pytest.mark.skip(reason="this bug needs to be fixed")',highlighted:'<span class="hljs-meta">@pytest.mark.skip(<span class="hljs-params">reason=<span class="hljs-string">&quot;this bug needs to be fixed&quot;</span></span>)</span>'}}),aa=new y({props:{code:`@pytest.mark.xfail def test_feature_x():`,highlighted:`<span 
class="hljs-meta">@pytest.mark.xfail</span> <span class="hljs-keyword">def</span> <span class="hljs-title function_">test_feature_x</span>():`}}),ia=new y({props:{code:`def test_feature_x(): if not has_something(): pytest.skip("unsupported configuration")`,highlighted:`<span class="hljs-keyword">def</span> <span class="hljs-title function_">test_feature_x</span>(): <span class="hljs-keyword">if</span> <span class="hljs-keyword">not</span> has_something(): pytest.skip(<span class="hljs-string">&quot;unsupported configuration&quot;</span>)`}}),na=new y({props:{code:`import pytest if not pytest.config.getoption("--custom-flag"): pytest.skip("--custom-flag is missing, skipping tests", allow_module_level=True)`,highlighted:`<span class="hljs-keyword">import</span> pytest <span class="hljs-keyword">if</span> <span class="hljs-keyword">not</span> pytest.config.getoption(<span class="hljs-string">&quot;--custom-flag&quot;</span>): pytest.skip(<span class="hljs-string">&quot;--custom-flag is missing, skipping tests&quot;</span>, allow_module_level=<span class="hljs-literal">True</span>)`}}),pa=new y({props:{code:`def test_feature_x(): pytest.xfail("expected to fail until bug XYZ is fixed")`,highlighted:`<span class="hljs-keyword">def</span> <span class="hljs-title function_">test_feature_x</span>(): pytest.xfail(<span class="hljs-string">&quot;expected to fail until bug XYZ is fixed&quot;</span>)`}}),fa=new y({props:{code:'docutils = pytest.importorskip("docutils", minversion="0.3")',highlighted:'docutils = pytest.importorskip(<span class="hljs-string">&quot;docutils&quot;</span>, minversion=<span class="hljs-string">&quot;0.3&quot;</span>)'}}),ha=new y({props:{code:`@pytest.mark.skipif(sys.version_info < (3,6), reason="requires python3.6 or higher") def test_feature_x():`,highlighted:`<span class="hljs-meta">@pytest.mark.skipif(<span class="hljs-params">sys.version_info &lt; (<span class="hljs-params"><span class="hljs-number">3</span>,<span class="hljs-number">6</span></span>), reason=<span class="hljs-string">&quot;requires python3.6 or higher&quot;</span></span>)</span> <span class="hljs-keyword">def</span> <span class="hljs-title function_">test_feature_x</span>():`}}),ua=new y({props:{code:`@unittest.skipIf(torch_device == "cpu", "Can't do half precision") def test_feature_x():`,highlighted:`<span class="hljs-meta">@unittest.skipIf(<span class="hljs-params">torch_device == <span class="hljs-string">&quot;cpu&quot;</span>, <span class="hljs-string">&quot;Can&#x27;t do half precision&quot;</span></span>)</span> <span class="hljs-keyword">def</span> <span class="hljs-title function_">test_feature_x</span>():`}}),da=new y({props:{code:`@pytest.mark.skipif(sys.platform == 'win32', reason="does not run on windows") class TestClass(): def test_feature_x(self):`,highlighted:`<span class="hljs-meta">@pytest.mark.skipif(<span class="hljs-params">sys.platform == <span class="hljs-string">&#x27;win32&#x27;</span>, reason=<span class="hljs-string">&quot;does not run on windows&quot;</span></span>)</span> <span class="hljs-keyword">class</span> <span class="hljs-title class_">TestClass</span>(): <span class="hljs-keyword">def</span> <span class="hljs-title function_">test_feature_x</span>(<span class="hljs-params">self</span>):`}}),ma=new E({}),_a=new y({props:{code:`from transformers.testing_utils import slow @slow def test_integration_foo():`,highlighted:`<span class="hljs-keyword">from</span> transformers.testing_utils <span class="hljs-keyword">import</span> slow <span class="hljs-meta">@slow</span> 
<span class="hljs-keyword">def</span> <span class="hljs-title function_">test_integration_foo</span>():`}}),va=new y({props:{code:"RUN_SLOW=1 pytest tests",highlighted:"RUN_SLOW=1 pytest tests"}}),wa=new y({props:{code:`@parameteriz ed.expand(...) @slow def test_integration_foo():`,highlighted:`<span class="hljs-meta">@parameteriz ed.expand(<span class="hljs-params">...</span>)</span> <span class="hljs-meta">@slow</span> <span class="hljs-keyword">def</span> <span class="hljs-title function_">test_integration_foo</span>():`}}),ya=new y({props:{code:"grep tiny tests examples",highlighted:"grep tiny tests examples"}}),ga=new E({}),ka=new y({props:{code:`import sys def print_to_stdout(s): print(s) def print_to_stderr(s): sys.stderr.write(s) def test_result_and_stdout(capsys): msg = "Hello" print_to_stdout(msg) print_to_stderr(msg) out, err = capsys.readouterr() # consume the captured output streams # optional: if you want to replay the consumed streams: sys.stdout.write(out) sys.stderr.write(err) # test: assert msg in out assert msg in err`,highlighted:`<span class="hljs-keyword">import</span> sys <span class="hljs-keyword">def</span> <span class="hljs-title function_">print_to_stdout</span>(<span class="hljs-params">s</span>): <span class="hljs-built_in">print</span>(s) <span class="hljs-keyword">def</span> <span class="hljs-title function_">print_to_stderr</span>(<span class="hljs-params">s</span>): sys.stderr.write(s) <span class="hljs-keyword">def</span> <span class="hljs-title function_">test_result_and_stdout</span>(<span class="hljs-params">capsys</span>): msg = <span class="hljs-string">&quot;Hello&quot;</span> print_to_stdout(msg) print_to_stderr(msg) out, err = capsys.readouterr() <span class="hljs-comment"># consume the captured output streams</span> <span class="hljs-comment"># optional: if you want to replay the consumed streams:</span> sys.stdout.write(out) sys.stderr.write(err) <span class="hljs-comment"># test:</span> <span class="hljs-keyword">assert</span> msg <span class="hljs-keyword">in</span> out <span class="hljs-keyword">assert</span> msg <span class="hljs-keyword">in</span> err`}}),Ca=new y({props:{code:`def raise_exception(msg): raise ValueError(msg) def test_something_exception(): msg = "Not a good value" error = "" try: raise_exception(msg) except Exception as e: error = str(e) assert msg in error, f"{msg} is in the exception:\\n{error}"`,highlighted:`<span class="hljs-keyword">def</span> <span class="hljs-title function_">raise_exception</span>(<span class="hljs-params">msg</span>): <span class="hljs-keyword">raise</span> ValueError(msg) <span class="hljs-keyword">def</span> <span class="hljs-title function_">test_something_exception</span>(): msg = <span class="hljs-string">&quot;Not a good value&quot;</span> error = <span class="hljs-string">&quot;&quot;</span> <span class="hljs-keyword">try</span>: raise_exception(msg) <span class="hljs-keyword">except</span> Exception <span class="hljs-keyword">as</span> e: error = <span class="hljs-built_in">str</span>(e) <span class="hljs-keyword">assert</span> msg <span class="hljs-keyword">in</span> error, <span class="hljs-string">f&quot;<span class="hljs-subst">{msg}</span> is in the exception:\\n<span class="hljs-subst">{error}</span>&quot;</span>`}}),ja=new y({props:{code:`from io import StringIO from contextlib import redirect_stdout def print_to_stdout(s): print(s) def test_result_and_stdout(): msg = "Hello" buffer = StringIO() with redirect_stdout(buffer): print_to_stdout(msg) out = buffer.getvalue() # optional: if 
you want to replay the consumed streams: sys.stdout.write(out) # test: assert msg in out`,highlighted:`<span class="hljs-keyword">from</span> io <span class="hljs-keyword">import</span> StringIO <span class="hljs-keyword">from</span> contextlib <span class="hljs-keyword">import</span> redirect_stdout <span class="hljs-keyword">def</span> <span class="hljs-title function_">print_to_stdout</span>(<span class="hljs-params">s</span>): <span class="hljs-built_in">print</span>(s) <span class="hljs-keyword">def</span> <span class="hljs-title function_">test_result_and_stdout</span>(): msg = <span class="hljs-string">&quot;Hello&quot;</span> buffer = StringIO() <span class="hljs-keyword">with</span> redirect_stdout(buffer): print_to_stdout(msg) out = buffer.getvalue() <span class="hljs-comment"># optional: if you want to replay the consumed streams:</span> sys.stdout.write(out) <span class="hljs-comment"># test:</span> <span class="hljs-keyword">assert</span> msg <span class="hljs-keyword">in</span> out`}}),xa=new y({props:{code:`from transformers.testing_utils import CaptureStdout with CaptureStdout() as cs: function_that_writes_to_stdout() print(cs.out)`,highlighted:`<span class="hljs-keyword">from</span> transformers.testing_utils <span class="hljs-keyword">import</span> CaptureStdout <span class="hljs-keyword">with</span> CaptureStdout() <span class="hljs-keyword">as</span> cs: function_that_writes_to_stdout() <span class="hljs-built_in">print</span>(cs.out)`}}),Pa=new y({props:{code:`from transformers.testing_utils import CaptureStdout msg = "Secret message\\r" final = "Hello World" with CaptureStdout() as cs: print(msg + final) assert cs.out == final + "\\n", f"captured: {cs.out}, expecting {final}"`,highlighted:`<span class="hljs-keyword">from</span> transformers.testing_utils <span class="hljs-keyword">import</span> CaptureStdout msg = <span class="hljs-string">&quot;Secret message\\r&quot;</span> final = <span class="hljs-string">&quot;Hello World&quot;</span> <span class="hljs-keyword">with</span> CaptureStdout() <span class="hljs-keyword">as</span> cs: <span class="hljs-built_in">print</span>(msg + final) <span class="hljs-keyword">assert</span> cs.out == final + <span class="hljs-string">&quot;\\n&quot;</span>, <span class="hljs-string">f&quot;captured: <span class="hljs-subst">{cs.out}</span>, expecting <span class="hljs-subst">{final}</span>&quot;</span>`}}),Oa=new y({props:{code:`from transformers.testing_utils import CaptureStderr with CaptureStderr() as cs: function_that_writes_to_stderr() print(cs.err)`,highlighted:`<span class="hljs-keyword">from</span> transformers.testing_utils <span class="hljs-keyword">import</span> CaptureStderr <span class="hljs-keyword">with</span> CaptureStderr() <span class="hljs-keyword">as</span> cs: function_that_writes_to_stderr() <span class="hljs-built_in">print</span>(cs.err)`}}),Ia=new y({props:{code:`from transformers.testing_utils import CaptureStd with CaptureStd() as cs: function_that_writes_to_stdout_and_stderr() print(cs.err, cs.out)`,highlighted:`<span class="hljs-keyword">from</span> transformers.testing_utils <span class="hljs-keyword">import</span> CaptureStd <span class="hljs-keyword">with</span> CaptureStd() <span class="hljs-keyword">as</span> cs: function_that_writes_to_stdout_and_stderr() <span class="hljs-built_in">print</span>(cs.err, cs.out)`}}),Ta=new E({}),Aa=new y({props:{code:`from transformers import logging from transformers.testing_utils import CaptureLogger msg = "Testing 1, 2, 3" logging.set_verbosity_info() logger = 
logging.get_logger("transformers.models.bart.tokenization_bart") with CaptureLogger(logger) as cl: logger.info(msg) assert cl.out, msg + "\\n"`,highlighted:`<span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> logging <span class="hljs-keyword">from</span> transformers.testing_utils <span class="hljs-keyword">import</span> CaptureLogger msg = <span class="hljs-string">&quot;Testing 1, 2, 3&quot;</span> logging.set_verbosity_info() logger = logging.get_logger(<span class="hljs-string">&quot;transformers.models.bart.tokenization_bart&quot;</span>) <span class="hljs-keyword">with</span> CaptureLogger(logger) <span class="hljs-keyword">as</span> cl: logger.info(msg) <span class="hljs-keyword">assert</span> cl.out, msg + <span class="hljs-string">&quot;\\n&quot;</span>`}}),Da=new E({}),Sa=new y({props:{code:`from transformers.testing_utils import mockenv class HfArgumentParserTest(unittest.TestCase): @mockenv(TRANSFORMERS_VERBOSITY="error") def test_env_override(self): env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)`,highlighted:`<span class="hljs-keyword">from</span> transformers.testing_utils <span class="hljs-keyword">import</span> mockenv <span class="hljs-keyword">class</span> <span class="hljs-title class_">HfArgumentParserTest</span>(unittest.TestCase): <span class="hljs-meta"> @mockenv(<span class="hljs-params">TRANSFORMERS_VERBOSITY=<span class="hljs-string">&quot;error&quot;</span></span>)</span> <span class="hljs-keyword">def</span> <span class="hljs-title function_">test_env_override</span>(<span class="hljs-params">self</span>): env_level_str = os.getenv(<span class="hljs-string">&quot;TRANSFORMERS_VERBOSITY&quot;</span>, <span class="hljs-literal">None</span>)`}}),La=new y({props:{code:`from transformers.testing_utils import TestCasePlus class EnvExampleTest(TestCasePlus): def test_external_prog(self): env = self.get_env() # now call the external program, passing \`env\` to it`,highlighted:`<span class="hljs-keyword">from</span> transformers.testing_utils <span class="hljs-keyword">import</span> TestCasePlus <span class="hljs-keyword">class</span> <span class="hljs-title class_">EnvExampleTest</span>(<span class="hljs-title class_ inherited__">TestCasePlus</span>): <span class="hljs-keyword">def</span> <span class="hljs-title function_">test_external_prog</span>(<span class="hljs-params">self</span>): env = self.get_env() <span class="hljs-comment"># now call the external program, passing \`env\` to it</span>`}}),Ua=new E({}),Na=new y({props:{code:`seed = 42 # python RNG import random random.seed(seed) # pytorch RNGs import torch torch.manual_seed(seed) torch.backends.cudnn.deterministic = True if torch.cuda.is_available(): torch.cuda.manual_seed_all(seed) # numpy RNG import numpy as np np.random.seed(seed) # tf RNG tf.random.set_seed(seed)`,highlighted:`seed = <span class="hljs-number">42</span> <span class="hljs-comment"># python RNG</span> <span class="hljs-keyword">import</span> random random.seed(seed) <span class="hljs-comment"># pytorch RNGs</span> <span class="hljs-keyword">import</span> torch torch.manual_seed(seed) torch.backends.cudnn.deterministic = <span class="hljs-literal">True</span> <span class="hljs-keyword">if</span> torch.cuda.is_available(): torch.cuda.manual_seed_all(seed) <span class="hljs-comment"># numpy RNG</span> <span class="hljs-keyword">import</span> numpy <span class="hljs-keyword">as</span> np np.random.seed(seed) <span class="hljs-comment"># tf RNG</span> tf.random.set_seed(seed)`}}),Ha=new 
E({}),za=new y({props:{code:"pytest tests/test_logging.py -W error::UserWarning --pdb",highlighted:"pytest tests/test_logging.py -W error::UserWarning --pdb"}}),Ra=new E({}),Ba=new E({}),Ya=new y({props:{code:`- run: name: run CI experiment command: | set +euo pipefail echo "setting run-all-despite-any-errors-mode" this_command_will_fail echo "but bash continues to run" # emulate another failure false # but the last command must be a success echo "during experiment do not remove: reporting success to CI, even if there were failures"`,highlighted:`<span class="hljs-bullet">-</span> <span class="hljs-attr">run:</span> <span class="hljs-attr">name:</span> <span class="hljs-string">run</span> <span class="hljs-string">CI</span> <span class="hljs-string">experiment</span> <span class="hljs-attr">command:</span> <span class="hljs-string">| set +euo pipefail echo &quot;setting run-all-despite-any-errors-mode&quot; this_command_will_fail echo &quot;but bash continues to run&quot; # emulate another failure false # but the last command must be a success echo &quot;during experiment do not remove: reporting success to CI, even if there were failures&quot;</span>`}}),Va=new y({props:{code:"cmd_that_may_fail || true",highlighted:'cmd_that_may_fail || <span class="hljs-literal">true</span>'}}),{c(){$=l("meta"),x=f(),g=l("h1"),b=l("a"),A=l("span"),d(j.$$.fragment),z=f(),D=l("span"),O=o("Testing"),k=f(),S=l("p"),R=o("Let\u2019s take a look at how \u{1F917} Transformers models are tested and how you can write new tests and improve the existing ones."),Nt=f(),ye=l("p"),Be=o("There are 2 test suites in the repository:"),q=f(),_e=l("ol"),Ye=l("li"),Ht=l("code"),ti=o("tests"),OE=o(" \u2014 tests for the general API"),IE=f(),si=l("li"),fp=l("code"),TE=o("examples"),AE=o(" \u2014 tests primarily for various applications that aren\u2019t part of the API"),fm=f(),Ve=l("h2"),zt=l("a"),hp=l("span"),d(Lo.$$.fragment),DE=f(),up=l("span"),qE=o("How transformers are tested"),hm=f(),Rt=l("ol"),Uo=l("li"),No=l("p"),SE=o(`Once a PR is submitted it gets tested with 9 CircleCi jobs. Every new commit to that PR gets retested. These jobs are defined in this `),Ho=l("a"),LE=o("config file"),UE=o(`, so that if needed you can reproduce the same environment on your machine.`),NE=f(),zo=l("p"),HE=o("These CI jobs don\u2019t run "),dp=l("code"),zE=o("@slow"),RE=o(" tests."),GE=f(),Ro=l("li"),Go=l("p"),FE=o("There are 3 jobs run by "),Fo=l("a"),ME=o("github actions"),WE=o(":"),BE=f(),Xe=l("ul"),cp=l("li"),oi=l("p"),Mo=l("a"),YE=o("torch hub integration"),VE=o(`: checks whether torch hub integration works.`),XE=f(),mp=l("li"),L=l("p"),Wo=l("a"),JE=o("self-hosted (push)"),ZE=o(`: runs fast tests on GPU only on commits on `),_p=l("code"),KE=o("master"),QE=o(". 
It only runs if a commit on "),vp=l("code"),eg=o("master"),tg=o(" has updated the code in one of the following folders: "),wp=l("code"),sg=o("src"),og=o(`, `),yp=l("code"),rg=o("tests"),lg=o(", "),$p=l("code"),ag=o(".github"),ig=o(" (to prevent running on added model cards, notebooks, etc.)"),ng=f(),Ep=l("li"),$e=l("p"),Bo=l("a"),pg=o("self-hosted runner"),fg=o(`: runs normal and slow tests on GPU in `),gp=l("code"),hg=o("tests"),ug=o(" and "),bp=l("code"),dg=o("examples"),cg=o(":"),um=f(),d(Yo.$$.fragment),dm=f(),Gt=l("p"),mg=o("The results can be observed "),Vo=l("a"),_g=o("here"),vg=o("."),cm=f(),Je=l("h2"),Ft=l("a"),kp=l("span"),d(Xo.$$.fragment),wg=f(),Cp=l("span"),yg=o("Running tests"),mm=f(),Ze=l("h3"),Mt=l("a"),jp=l("span"),d(Jo.$$.fragment),$g=f(),xp=l("span"),Eg=o("Choosing which tests to run"),_m=f(),Wt=l("p"),gg=o(`This document goes into many details of how tests can be run. If after reading everything, you need even more details you will find them `),Zo=l("a"),bg=o("here"),kg=o("."),vm=f(),ri=l("p"),Cg=o("Here are some most useful ways of running tests."),wm=f(),li=l("p"),jg=o("Run all:"),ym=f(),d(Ko.$$.fragment),$m=f(),ai=l("p"),xg=o("or:"),Em=f(),d(Qo.$$.fragment),gm=f(),ii=l("p"),Pg=o("Note that the latter is defined as:"),bm=f(),d(er.$$.fragment),km=f(),ni=l("p"),Og=o("which tells pytest to:"),Cm=f(),Q=l("ul"),Pp=l("li"),Ig=o("run as many test processes as they are CPU cores (which could be too many if you don\u2019t have a ton of RAM!)"),Tg=f(),Op=l("li"),Ag=o("ensure that all tests from the same file will be run by the same test process"),Dg=f(),Ip=l("li"),qg=o("do not capture output"),Sg=f(),Tp=l("li"),Lg=o("run in verbose mode"),jm=f(),Ke=l("h3"),Bt=l("a"),Ap=l("span"),d(tr.$$.fragment),Ug=f(),Dp=l("span"),Ng=o("Getting the list of all tests"),xm=f(),pi=l("p"),Hg=o("All tests of the test suite:"),Pm=f(),d(sr.$$.fragment),Om=f(),fi=l("p"),zg=o("All tests of a given test file:"),Im=f(),d(or.$$.fragment),Tm=f(),Qe=l("h3"),Yt=l("a"),qp=l("span"),d(rr.$$.fragment),Rg=f(),Sp=l("span"),Gg=o("Run a specific test module"),Am=f(),hi=l("p"),Fg=o("To run an individual test module:"),Dm=f(),d(lr.$$.fragment),qm=f(),et=l("h3"),Vt=l("a"),Lp=l("span"),d(ar.$$.fragment),Mg=f(),Up=l("span"),Wg=o("Run specific tests"),Sm=f(),ui=l("p"),Bg=o(`Since unittest is used inside most of the tests, to run specific subtests you need to know the name of the unittest class containing those tests. For example, it could be:`),Lm=f(),d(ir.$$.fragment),Um=f(),di=l("p"),Yg=o("Here:"),Nm=f(),Ee=l("ul"),ci=l("li"),Np=l("code"),Vg=o("tests/test_optimization.py"),Xg=o(" - the file with tests"),Jg=f(),mi=l("li"),Hp=l("code"),Zg=o("OptimizationTest"),Kg=o(" - the name of the class"),Qg=f(),_i=l("li"),zp=l("code"),eb=o("test_adam_w"),tb=o(" - the name of the specific test function"),Hm=f(),vi=l("p"),sb=o("If the file contains multiple classes, you can choose to run only tests of a given class. 
For example:"),zm=f(),d(nr.$$.fragment),Rm=f(),wi=l("p"),ob=o("will run all the tests inside that class."),Gm=f(),Xt=l("p"),rb=o("As mentioned earlier you can see what tests are contained inside the "),Rp=l("code"),lb=o("OptimizationTest"),ab=o(" class by running:"),Fm=f(),d(pr.$$.fragment),Mm=f(),yi=l("p"),ib=o("You can run tests by keyword expressions."),Wm=f(),Jt=l("p"),nb=o("To run only tests whose name contains "),Gp=l("code"),pb=o("adam"),fb=o(":"),Bm=f(),d(fr.$$.fragment),Ym=f(),ee=l("p"),hb=o("Logical "),Fp=l("code"),ub=o("and"),db=o(" and "),Mp=l("code"),cb=o("or"),mb=o(" can be used to indicate whether all keywords should match or either. "),Wp=l("code"),_b=o("not"),vb=o(` can be used to negate.`),Vm=f(),Zt=l("p"),wb=o("To run all tests except those whose name contains "),Bp=l("code"),yb=o("adam"),$b=o(":"),Xm=f(),d(hr.$$.fragment),Jm=f(),$i=l("p"),Eb=o("And you can combine the two patterns in one:"),Zm=f(),d(ur.$$.fragment),Km=f(),ge=l("p"),gb=o("For example to run both "),Yp=l("code"),bb=o("test_adafactor"),kb=o(" and "),Vp=l("code"),Cb=o("test_adam_w"),jb=o(" you can use:"),Qm=f(),d(dr.$$.fragment),e_=f(),Kt=l("p"),xb=o("Note that we use "),Xp=l("code"),Pb=o("or"),Ob=o(" here, since we want either of the keywords to match to include both."),t_=f(),Qt=l("p"),Ib=o("If you want to include only tests that include both patterns, "),Jp=l("code"),Tb=o("and"),Ab=o(" is to be used:"),s_=f(),d(cr.$$.fragment),o_=f(),tt=l("h3"),es=l("a"),Zp=l("span"),d(mr.$$.fragment),Db=f(),Kp=l("span"),qb=o("Run only modified tests"),r_=f(),ts=l("p"),Sb=o("You can run the tests related to the unstaged files or the current branch (according to Git) by using "),_r=l("a"),Lb=o("pytest-picked"),Ub=o(`. This is a great way of quickly testing your changes didn\u2019t break anything, since it won\u2019t run the tests related to files you didn\u2019t touch.`),l_=f(),d(vr.$$.fragment),a_=f(),d(wr.$$.fragment),i_=f(),Ei=l("p"),Nb=o("All tests will be run from files and folders which are modified, but not yet committed."),n_=f(),st=l("h3"),ss=l("a"),Qp=l("span"),d(yr.$$.fragment),Hb=f(),ef=l("span"),zb=o("Automatically rerun failed tests on source modification"),p_=f(),$r=l("p"),Er=l("a"),Rb=o("pytest-xdist"),Gb=o(` provides a very useful feature of detecting all failed tests, and then waiting for you to modify files and continuously re-rerun those failing tests until they pass while you fix them. So that you don\u2019t need to re start pytest after you made the fix. This is repeated until all tests pass after which again a full run is performed.`),f_=f(),d(gr.$$.fragment),h_=f(),ot=l("p"),Fb=o("To enter the mode: "),tf=l("code"),Mb=o("pytest -f"),Wb=o(" or "),sf=l("code"),Bb=o("pytest --looponfail"),u_=f(),be=l("p"),Yb=o("File changes are detected by looking at "),of=l("code"),Vb=o("looponfailroots"),Xb=o(` root directories and all of their contents (recursively). 
If the default for this value does not work for you, you can change it in your project by setting a configuration option in `),rf=l("code"),Jb=o("setup.cfg"),Zb=o(":"),d_=f(),d(br.$$.fragment),c_=f(),ke=l("p"),Kb=o("or "),lf=l("code"),Qb=o("pytest.ini"),e3=o("/"),af=l("code"),t3=o("tox.ini"),s3=o(" files:"),m_=f(),d(kr.$$.fragment),__=f(),gi=l("p"),o3=o(`This would lead to only looking for file changes in the respective directories, specified relatively to the ini-file\u2019s directory.`),v_=f(),Cr=l("p"),jr=l("a"),r3=o("pytest-watch"),l3=o(" is an alternative implementation of this functionality."),w_=f(),rt=l("h3"),os=l("a"),nf=l("span"),d(xr.$$.fragment),a3=f(),pf=l("span"),i3=o("Skip a test module"),y_=f(),rs=l("p"),n3=o(`If you want to run all test modules, except a few you can exclude them by giving an explicit list of tests to run. For example, to run all except `),ff=l("code"),p3=o("test_modeling_*.py"),f3=o(" tests:"),$_=f(),d(Pr.$$.fragment),E_=f(),lt=l("h3"),ls=l("a"),hf=l("span"),d(Or.$$.fragment),h3=f(),uf=l("span"),u3=o("Clearing state"),g_=f(),bi=l("p"),d3=o("CI builds and when isolation is important (against speed), cache should be cleared:"),b_=f(),d(Ir.$$.fragment),k_=f(),at=l("h3"),as=l("a"),df=l("span"),d(Tr.$$.fragment),c3=f(),cf=l("span"),m3=o("Running tests in parallel"),C_=f(),M=l("p"),_3=o("As mentioned earlier "),mf=l("code"),v3=o("make test"),w3=o(" runs tests in parallel via "),_f=l("code"),y3=o("pytest-xdist"),$3=o(" plugin ("),vf=l("code"),E3=o("-n X"),g3=o(" argument, e.g. "),wf=l("code"),b3=o("-n 2"),k3=o(` to run 2 parallel jobs).`),j_=f(),ve=l("p"),yf=l("code"),C3=o("pytest-xdist"),j3=o("\u2019s "),$f=l("code"),x3=o("--dist="),P3=o(" option allows one to control how the tests are grouped. "),Ef=l("code"),O3=o("--dist=loadfile"),I3=o(` puts the tests located in one file onto the same process.`),x_=f(),Ce=l("p"),T3=o("Since the order of executed tests is different and unpredictable, if running the test suite with "),gf=l("code"),A3=o("pytest-xdist"),D3=o(` produces failures (meaning we have some undetected coupled tests), use `),Ar=l("a"),q3=o("pytest-replay"),S3=o(` to replay the tests in the same order, which should help with then somehow reducing that failing sequence to a minimum.`),P_=f(),it=l("h3"),is=l("a"),bf=l("span"),d(Dr.$$.fragment),L3=f(),kf=l("span"),U3=o("Test order and repetition"),O_=f(),ki=l("p"),N3=o(`It\u2019s good to repeat the tests several times, in sequence, randomly, or in sets, to detect any potential inter-dependency and state-related bugs (tear down). And the straightforward multiple repetition is just good to detect some problems that get uncovered by randomness of DL.`),I_=f(),nt=l("h4"),ns=l("a"),Cf=l("span"),d(qr.$$.fragment),H3=f(),jf=l("span"),z3=o("Repeat tests"),T_=f(),Ci=l("ul"),ji=l("li"),Sr=l("a"),R3=o("pytest-flakefinder"),G3=o(":"),A_=f(),d(Lr.$$.fragment),D_=f(),xi=l("p"),F3=o("And then run every test multiple times (50 by default):"),q_=f(),d(Ur.$$.fragment),S_=f(),d(ps.$$.fragment),L_=f(),d(fs.$$.fragment),U_=f(),pt=l("h4"),hs=l("a"),xf=l("span"),d(Nr.$$.fragment),M3=f(),Pf=l("span"),W3=o("Run tests in a random order"),N_=f(),d(Hr.$$.fragment),H_=f(),us=l("p"),B3=o("Important: the presence of "),Of=l("code"),Y3=o("pytest-random-order"),V3=o(` will automatically randomize tests, no configuration change or command line options is required.`),z_=f(),ds=l("p"),X3=o(`As explained earlier this allows detection of coupled tests - where one test\u2019s state affects the state of another. 
When `),If=l("code"),J3=o("pytest-random-order"),Z3=o(" is installed it will print the random seed it used for that session, e.g:"),R_=f(),d(zr.$$.fragment),G_=f(),Pi=l("p"),K3=o("So that if the given particular sequence fails, you can reproduce it by adding that exact seed, e.g.:"),F_=f(),d(Rr.$$.fragment),M_=f(),cs=l("p"),Q3=o(`It will only reproduce the exact order if you use the exact same list of tests (or no list at all). Once you start to manually narrowing down the list you can no longer rely on the seed, but have to list them manually in the exact order they failed and tell pytest to not randomize them instead using `),Tf=l("code"),e2=o("--random-order-bucket=none"),t2=o(", e.g.:"),W_=f(),d(Gr.$$.fragment),B_=f(),Oi=l("p"),s2=o("To disable the shuffling for all tests:"),Y_=f(),d(Fr.$$.fragment),V_=f(),I=l("p"),o2=o("By default "),Af=l("code"),r2=o("--random-order-bucket=module"),l2=o(` is implied, which will shuffle the files on the module levels. It can also shuffle on `),Df=l("code"),a2=o("class"),i2=o(", "),qf=l("code"),n2=o("package"),p2=o(", "),Sf=l("code"),f2=o("global"),h2=o(" and "),Lf=l("code"),u2=o("none"),d2=o(` levels. For the complete details please see its `),Mr=l("a"),c2=o("documentation"),m2=o("."),X_=f(),je=l("p"),_2=o("Another randomization alternative is: "),Wr=l("a"),Uf=l("code"),v2=o("pytest-randomly"),w2=o(`. This module has a very similar functionality/interface, but it doesn\u2019t have the bucket modes available in `),Nf=l("code"),y2=o("pytest-random-order"),$2=o(". It has the same problem of imposing itself once installed."),J_=f(),ft=l("h3"),ms=l("a"),Hf=l("span"),d(Br.$$.fragment),E2=f(),zf=l("span"),g2=o("Look and feel variations"),Z_=f(),ht=l("h4"),_s=l("a"),Rf=l("span"),d(Yr.$$.fragment),b2=f(),Gf=l("span"),k2=o("pytest-sugar"),K_=f(),Vr=l("p"),Xr=l("a"),C2=o("pytest-sugar"),j2=o(` is a plugin that improves the look-n-feel, adds a progressbar, and show tests that fail and the assert instantly. It gets activated automatically upon installation.`),Q_=f(),d(Jr.$$.fragment),ev=f(),Ii=l("p"),x2=o("To run tests without it, run:"),tv=f(),d(Zr.$$.fragment),sv=f(),Ti=l("p"),P2=o("or uninstall it."),ov=f(),ut=l("h4"),vs=l("a"),Ff=l("span"),d(Kr.$$.fragment),O2=f(),Mf=l("span"),I2=o("Report each sub-test name and its progress"),rv=f(),xe=l("p"),T2=o("For a single or a group of tests via "),Wf=l("code"),A2=o("pytest"),D2=o(" (after "),Bf=l("code"),q2=o("pip install pytest-pspec"),S2=o("):"),lv=f(),d(Qr.$$.fragment),av=f(),dt=l("h4"),ws=l("a"),Yf=l("span"),d(el.$$.fragment),L2=f(),Vf=l("span"),U2=o("Instantly shows failed tests"),iv=f(),tl=l("p"),sl=l("a"),N2=o("pytest-instafail"),H2=o(` shows failures and errors instantly instead of waiting until the end of test session.`),nv=f(),d(ol.$$.fragment),pv=f(),d(rl.$$.fragment),fv=f(),ct=l("h3"),ys=l("a"),Xf=l("span"),d(ll.$$.fragment),z2=f(),Jf=l("span"),R2=o("To GPU or not to GPU"),hv=f(),$s=l("p"),G2=o("On a GPU-enabled setup, to test in CPU-only mode add "),Zf=l("code"),F2=o('CUDA_VISIBLE_DEVICES=""'),M2=o(":"),uv=f(),d(al.$$.fragment),dv=f(),te=l("p"),W2=o("or if you have multiple gpus, you can specify which one is to be used by "),Kf=l("code"),B2=o("pytest"),Y2=o(`. 
For example, to use only the second gpu if you have gpus `),Qf=l("code"),V2=o("0"),X2=o(" and "),eh=l("code"),J2=o("1"),Z2=o(", you can run:"),cv=f(),d(il.$$.fragment),mv=f(),Ai=l("p"),K2=o("This is handy when you want to run different tasks on different GPUs."),_v=f(),Di=l("p"),Q2=o(`Some tests must be run on CPU-only, others on either CPU or GPU or TPU, yet others on multiple-GPUs. The following skip decorators are used to set the requirements of tests CPU/GPU/TPU-wise:`),vv=f(),U=l("ul"),qi=l("li"),th=l("code"),e5=o("require_torch"),t5=o(" - this test will run only under torch"),s5=f(),Es=l("li"),sh=l("code"),o5=o("require_torch_gpu"),r5=o(" - as "),oh=l("code"),l5=o("require_torch"),a5=o(" plus requires at least 1 GPU"),i5=f(),gs=l("li"),rh=l("code"),n5=o("require_torch_multi_gpu"),p5=o(" - as "),lh=l("code"),f5=o("require_torch"),h5=o(" plus requires at least 2 GPUs"),u5=f(),bs=l("li"),ah=l("code"),d5=o("require_torch_non_multi_gpu"),c5=o(" - as "),ih=l("code"),m5=o("require_torch"),_5=o(" plus requires 0 or 1 GPUs"),v5=f(),ks=l("li"),nh=l("code"),w5=o("require_torch_up_to_2_gpus"),y5=o(" - as "),ph=l("code"),$5=o("require_torch"),E5=o(" plus requires 0 or 1 or 2 GPUs"),g5=f(),Cs=l("li"),fh=l("code"),b5=o("require_torch_tpu"),k5=o(" - as "),hh=l("code"),C5=o("require_torch"),j5=o(" plus requires at least 1 TPU"),wv=f(),Si=l("p"),x5=o("Let\u2019s depict the GPU requirements in the following table:"),yv=f(),C=l("p"),P5=o(`| n gpus | decorator | |--------+--------------------------------| | `),uh=l("code"),O5=o(">= 0"),I5=o(" | "),dh=l("code"),T5=o("@require_torch"),A5=o(` | | `),ch=l("code"),D5=o(">= 1"),q5=o(" | "),mh=l("code"),S5=o("@require_torch_gpu"),L5=o(` | | `),_h=l("code"),U5=o(">= 2"),N5=o(" | "),vh=l("code"),H5=o("@require_torch_multi_gpu"),z5=o(` | | `),wh=l("code"),R5=o("< 2"),G5=o(" | "),yh=l("code"),F5=o("@require_torch_non_multi_gpu"),M5=o(` | | `),$h=l("code"),W5=o("< 3"),B5=o(" | "),Eh=l("code"),Y5=o("@require_torch_up_to_2_gpus"),V5=o(" |"),$v=f(),Li=l("p"),X5=o("For example, here is a test that must be run only when there are 2 or more GPUs available and pytorch is installed:"),Ev=f(),d(nl.$$.fragment),gv=f(),Pe=l("p"),J5=o("If a test requires "),gh=l("code"),Z5=o("tensorflow"),K5=o(" use the "),bh=l("code"),Q5=o("require_tf"),ek=o(" decorator. For example:"),bv=f(),d(pl.$$.fragment),kv=f(),Ui=l("p"),tk=o(`These decorators can be stacked. For example, if a test is slow and requires at least one GPU under pytorch, here is how to set it up:`),Cv=f(),d(fl.$$.fragment),jv=f(),Oe=l("p"),sk=o("Some decorators like "),kh=l("code"),ok=o("@parametrized"),rk=o(" rewrite test names, therefore "),Ch=l("code"),lk=o("@require_*"),ak=o(` skip decorators have to be listed last for them to work correctly. Here is an example of the correct usage:`),xv=f(),d(hl.$$.fragment),Pv=f(),js=l("p"),ik=o("This order problem doesn\u2019t exist with "),jh=l("code"),nk=o("@pytest.mark.parametrize"),pk=o(`, you can put it first or last and it will still work. But it only works with non-unittests.`),Ov=f(),Ni=l("p"),fk=o("Inside tests:"),Iv=f(),Hi=l("ul"),xh=l("li"),hk=o("How many GPUs are available:"),Tv=f(),d(ul.$$.fragment),Av=f(),mt=l("h3"),xs=l("a"),Ph=l("span"),d(dl.$$.fragment),uk=f(),Oh=l("span"),dk=o("Distributed training"),Dv=f(),_t=l("p"),Ih=l("code"),ck=o("pytest"),mk=o(` can\u2019t deal with distributed training directly. 
If this is attempted - the sub-processes don\u2019t do the right thing and end up thinking they are `),Th=l("code"),_k=o("pytest"),vk=o(` and start running the test suite in loops. It works, however, if one spawns a normal process that then spawns off multiple workers and manages the IO pipes.`),qv=f(),zi=l("p"),wk=o("Here are some tests that use it:"),Sv=f(),Ps=l("ul"),Ah=l("li"),cl=l("a"),yk=o("test_trainer_distributed.py"),$k=f(),Dh=l("li"),ml=l("a"),Ek=o("test_deepspeed.py"),Lv=f(),Os=l("p"),gk=o("To jump right into the execution point, search for the "),qh=l("code"),bk=o("execute_subprocess_async"),kk=o(" call in those tests."),Uv=f(),Ri=l("p"),Ck=o("You will need at least 2 GPUs to see these tests in action:"),Nv=f(),d(_l.$$.fragment),Hv=f(),vt=l("h3"),Is=l("a"),Sh=l("span"),d(vl.$$.fragment),jk=f(),Lh=l("span"),xk=o("Output capture"),zv=f(),Ie=l("p"),Pk=o("During test execution any output sent to "),Uh=l("code"),Ok=o("stdout"),Ik=o(" and "),Nh=l("code"),Tk=o("stderr"),Ak=o(` is captured. If a test or a setup method fails, its according captured output will usually be shown along with the failure traceback.`),Rv=f(),W=l("p"),Dk=o("To disable output capturing and to get the "),Hh=l("code"),qk=o("stdout"),Sk=o(" and "),zh=l("code"),Lk=o("stderr"),Uk=o(" normally, use "),Rh=l("code"),Nk=o("-s"),Hk=o(" or "),Gh=l("code"),zk=o("--capture=no"),Rk=o(":"),Gv=f(),d(wl.$$.fragment),Fv=f(),Gi=l("p"),Gk=o("To send test results to JUnit format output:"),Mv=f(),d(yl.$$.fragment),Wv=f(),wt=l("h3"),Ts=l("a"),Fh=l("span"),d($l.$$.fragment),Fk=f(),Mh=l("span"),Mk=o("Color control"),Bv=f(),Fi=l("p"),Wk=o("To have no color (e.g., yellow on white background is not readable):"),Yv=f(),d(El.$$.fragment),Vv=f(),yt=l("h3"),As=l("a"),Wh=l("span"),d(gl.$$.fragment),Bk=f(),Bh=l("span"),Yk=o("Sending test report to online pastebin service"),Xv=f(),Mi=l("p"),Vk=o("Creating a URL for each test failure:"),Jv=f(),d(bl.$$.fragment),Zv=f(),Wi=l("p"),Xk=o(`This will submit test run information to a remote Paste service and provide a URL for each failure. You may select tests as usual or add for example -x if you only want to send one particular failure.`),Kv=f(),Bi=l("p"),Jk=o("Creating a URL for a whole test session log:"),Qv=f(),d(kl.$$.fragment),e1=f(),$t=l("h2"),Ds=l("a"),Yh=l("span"),d(Cl.$$.fragment),Zk=f(),Vh=l("span"),Kk=o("Writing tests"),t1=f(),Te=l("p"),Qk=o("\u{1F917} transformers tests are based on "),Xh=l("code"),e4=o("unittest"),t4=o(", but run by "),Jh=l("code"),s4=o("pytest"),o4=o(`, so most of the time features from both systems can be used.`),s1=f(),se=l("p"),r4=o("You can read "),jl=l("a"),l4=o("here"),a4=o(` which features are supported, but the important thing to remember is that most `),Zh=l("code"),i4=o("pytest"),n4=o(` fixtures don\u2019t work. Neither parametrization, but we use the module `),Kh=l("code"),p4=o("parameterized"),f4=o(" that works in a similar way."),o1=f(),Et=l("h3"),qs=l("a"),Qh=l("span"),d(xl.$$.fragment),h4=f(),eu=l("span"),u4=o("Parametrization"),r1=f(),Yi=l("p"),d4=o(`Often, there is a need to run the same test multiple times, but with different arguments. 
It could be done from within the test, but then there is no way of running that test for just one set of arguments.`),l1=f(),d(Pl.$$.fragment),a1=f(),Ss=l("p"),c4=o("Now, by default this test will be run 3 times, each time with the last 3 arguments of "),tu=l("code"),m4=o("test_floor"),_4=o(` being assigned the corresponding arguments in the parameter list.`),i1=f(),Ae=l("p"),v4=o("and you could run just the "),su=l("code"),w4=o("negative"),y4=o(" and "),ou=l("code"),$4=o("integer"),E4=o(" sets of params with:"),n1=f(),d(Ol.$$.fragment),p1=f(),Ls=l("p"),g4=o("or all but "),ru=l("code"),b4=o("negative"),k4=o(" sub-tests, with:"),f1=f(),d(Il.$$.fragment),h1=f(),Us=l("p"),C4=o("Besides using the "),lu=l("code"),j4=o("-k"),x4=o(` filter that was just mentioned, you can find out the exact name of each sub-test and run any or all of them using their exact names.`),u1=f(),d(Tl.$$.fragment),d1=f(),Vi=l("p"),P4=o("and it will list:"),c1=f(),d(Al.$$.fragment),m1=f(),Xi=l("p"),O4=o("So now you can run just 2 specific sub-tests:"),_1=f(),d(Dl.$$.fragment),v1=f(),B=l("p"),I4=o("The module "),ql=l("a"),T4=o("parameterized"),A4=o(` which is already in the developer dependencies of `),au=l("code"),D4=o("transformers"),q4=o(" works for both: "),iu=l("code"),S4=o("unittests"),L4=o(" and "),nu=l("code"),U4=o("pytest"),N4=o(" tests."),w1=f(),oe=l("p"),H4=o("If, however, the test is not a "),pu=l("code"),z4=o("unittest"),R4=o(", you may use "),fu=l("code"),G4=o("pytest.mark.parametrize"),F4=o(` (or you may see it being used in some existing tests, mostly under `),hu=l("code"),M4=o("examples"),W4=o(")."),y1=f(),De=l("p"),B4=o("Here is the same example, this time using "),uu=l("code"),Y4=o("pytest"),V4=o("\u2019s "),du=l("code"),X4=o("parametrize"),J4=o(" marker:"),$1=f(),d(Sl.$$.fragment),E1=f(),re=l("p"),Z4=o("Same as with "),cu=l("code"),K4=o("parameterized"),Q4=o(", with "),mu=l("code"),e0=o("pytest.mark.parametrize"),t0=o(` you can have a fine control over which sub-tests are run, if the `),_u=l("code"),s0=o("-k"),o0=o(` filter doesn\u2019t do the job. Except, this parametrization function creates a slightly different set of names for the sub-tests. Here is what they look like:`),g1=f(),d(Ll.$$.fragment),b1=f(),Ji=l("p"),r0=o("and it will list:"),k1=f(),d(Ul.$$.fragment),C1=f(),Zi=l("p"),l0=o("So now you can run just the specific test:"),j1=f(),d(Nl.$$.fragment),x1=f(),Ki=l("p"),a0=o("as in the previous example."),P1=f(),gt=l("h3"),Ns=l("a"),vu=l("span"),d(Hl.$$.fragment),i0=f(),wu=l("span"),n0=o("Files and directories"),O1=f(),Hs=l("p"),p0=o(`In tests often we need to know where things are relative to the current test file, and it\u2019s not trivial since the test could be invoked from more than one directory or could reside in sub-directories with different depths. A helper class `),yu=l("code"),f0=o("transformers.test_utils.TestCasePlus"),h0=o(` solves this problem by sorting out all the basic paths and provides easy accessors to them:`),I1=f(),zs=l("ul"),zl=l("li"),Qi=l("p"),$u=l("code"),u0=o("pathlib"),d0=o(" objects (all fully resolved):"),c0=f(),G=l("ul"),Rl=l("li"),Eu=l("code"),m0=o("test_file_path"),_0=o(" - the current test file path, i.e. 
"),gu=l("code"),v0=o("__file__"),w0=f(),en=l("li"),bu=l("code"),y0=o("test_file_dir"),$0=o(" - the directory containing the current test file"),E0=f(),Rs=l("li"),ku=l("code"),g0=o("tests_dir"),b0=o(" - the directory of the "),Cu=l("code"),k0=o("tests"),C0=o(" test suite"),j0=f(),Gs=l("li"),ju=l("code"),x0=o("examples_dir"),P0=o(" - the directory of the "),xu=l("code"),O0=o("examples"),I0=o(" test suite"),T0=f(),tn=l("li"),Pu=l("code"),A0=o("repo_root_dir"),D0=o(" - the directory of the repository"),q0=f(),qe=l("li"),Ou=l("code"),S0=o("src_dir"),L0=o(" - the directory of "),Iu=l("code"),U0=o("src"),N0=o(" (i.e. where the "),Tu=l("code"),H0=o("transformers"),z0=o(" sub-dir resides)"),R0=f(),Gl=l("li"),Fl=l("p"),G0=o("stringified paths---same as above but these return paths as strings, rather than "),Au=l("code"),F0=o("pathlib"),M0=o(" objects:"),W0=f(),F=l("ul"),Du=l("li"),qu=l("code"),B0=o("test_file_path_str"),Y0=f(),Su=l("li"),Lu=l("code"),V0=o("test_file_dir_str"),X0=f(),Uu=l("li"),Nu=l("code"),J0=o("tests_dir_str"),Z0=f(),Hu=l("li"),zu=l("code"),K0=o("examples_dir_str"),Q0=f(),Ru=l("li"),Gu=l("code"),e6=o("repo_root_dir_str"),t6=f(),Fu=l("li"),Mu=l("code"),s6=o("src_dir_str"),T1=f(),Fs=l("p"),o6=o(`To start using those all you need is to make sure that the test resides in a subclass of `),Wu=l("code"),r6=o("transformers.test_utils.TestCasePlus"),l6=o(". For example:"),A1=f(),d(Ml.$$.fragment),D1=f(),Y=l("p"),a6=o("If you don\u2019t need to manipulate paths via "),Bu=l("code"),i6=o("pathlib"),n6=o(` or you just need a path as a string, you can always invoked `),Yu=l("code"),p6=o("str()"),f6=o(" on the "),Vu=l("code"),h6=o("pathlib"),u6=o(" object or use the accessors ending with "),Xu=l("code"),d6=o("_str"),c6=o(". For example:"),q1=f(),d(Wl.$$.fragment),S1=f(),bt=l("h3"),Ms=l("a"),Ju=l("span"),d(Bl.$$.fragment),m6=f(),Zu=l("span"),_6=o("Temporary files and directories"),L1=f(),Ws=l("p"),v6=o(`Using unique temporary files and directories are essential for parallel test running, so that the tests won\u2019t overwrite each other\u2019s data. Also we want to get the temporary files and directories removed at the end of each test that created them. Therefore, using packages like `),Ku=l("code"),w6=o("tempfile"),y6=o(", which address these needs is essential."),U1=f(),sn=l("p"),$6=o(`However, when debugging tests, you need to be able to see what goes into the temporary file or directory and you want to know it\u2019s exact path and not having it randomized on every test re-run.`),N1=f(),Se=l("p"),E6=o("A helper class "),Qu=l("code"),g6=o("transformers.test_utils.TestCasePlus"),b6=o(` is best used for such purposes. It\u2019s a sub-class of `),ed=l("code"),k6=o("unittest.TestCase"),C6=o(", so we can easily inherit from it in the test modules."),H1=f(),on=l("p"),j6=o("Here is an example of its usage:"),z1=f(),d(Yl.$$.fragment),R1=f(),Bs=l("p"),x6=o("This code creates a unique temporary directory, and sets "),td=l("code"),P6=o("tmp_dir"),O6=o(" to its location."),G1=f(),rn=l("ul"),sd=l("li"),I6=o("Create a unique temporary dir:"),F1=f(),d(Vl.$$.fragment),M1=f(),Xl=l("p"),od=l("code"),T6=o("tmp_dir"),A6=o(` will contain the path to the created temporary dir. 
It will be automatically removed at the end of the test.`),W1=f(),ln=l("ul"),rd=l("li"),D6=o("Create a temporary dir of my choice, ensure it\u2019s empty before the test starts and don\u2019t empty it after the test."),B1=f(),d(Jl.$$.fragment),Y1=f(),an=l("p"),q6=o(`This is useful for debug when you want to monitor a specific directory and want to make sure the previous tests didn\u2019t leave any data in there.`),V1=f(),nn=l("ul"),Zl=l("li"),kt=l("p"),S6=o("You can override the default behavior by directly overriding the "),ld=l("code"),L6=o("before"),U6=o(" and "),ad=l("code"),N6=o("after"),H6=o(` args, leading to one of the following behaviors:`),z6=f(),we=l("ul"),pn=l("li"),id=l("code"),R6=o("before=True"),G6=o(": the temporary dir will always be cleared at the beginning of the test."),F6=f(),fn=l("li"),nd=l("code"),M6=o("before=False"),W6=o(": if the temporary dir already existed, any existing files will remain there."),B6=f(),hn=l("li"),pd=l("code"),Y6=o("after=True"),V6=o(": the temporary dir will always be deleted at the end of the test."),X6=f(),un=l("li"),fd=l("code"),J6=o("after=False"),Z6=o(": the temporary dir will always be left intact at the end of the test."),X1=f(),d(Ys.$$.fragment),J1=f(),d(Vs.$$.fragment),Z1=f(),Ct=l("h3"),Xs=l("a"),hd=l("span"),d(Kl.$$.fragment),K6=f(),ud=l("span"),Q6=o("Temporary sys.path override"),K1=f(),Le=l("p"),e7=o("If you need to temporary override "),dd=l("code"),t7=o("sys.path"),s7=o(` to import from another test for example, you can use the `),cd=l("code"),o7=o("ExtendSysPath"),r7=o(" context manager. Example:"),Q1=f(),d(Ql.$$.fragment),ew=f(),jt=l("h3"),Js=l("a"),md=l("span"),d(ea.$$.fragment),l7=f(),_d=l("span"),a7=o("Skipping tests"),tw=f(),Zs=l("p"),i7=o(`This is useful when a bug is found and a new test is written, yet the bug is not fixed yet. In order to be able to commit it to the main repository we need make sure it\u2019s skipped during `),vd=l("code"),n7=o("make test"),p7=o("."),sw=f(),dn=l("p"),f7=o("Methods:"),ow=f(),Ks=l("ul"),wd=l("li"),ta=l("p"),h7=o("A "),yd=l("strong"),u7=o("skip"),d7=o(` means that you expect your test to pass only if some conditions are met, otherwise pytest should skip running the test altogether. Common examples are skipping windows-only tests on non-windows platforms, or skipping tests that depend on an external resource which is not available at the moment (for example a database).`),c7=f(),$d=l("li"),sa=l("p"),m7=o("A "),Ed=l("strong"),_7=o("xfail"),v7=o(` means that you expect a test to fail for some reason. A common example is a test for a feature not yet implemented, or a bug not yet fixed. When a test passes despite being expected to fail (marked with pytest.mark.xfail), it\u2019s an xpass and will be reported in the test summary.`),rw=f(),le=l("p"),w7=o("One of the important differences between the two is that "),gd=l("code"),y7=o("skip"),$7=o(" doesn\u2019t run the test, and "),bd=l("code"),E7=o("xfail"),g7=o(` does. 
So if the code that\u2019s buggy causes some bad state that will affect other tests, do not use `),kd=l("code"),b7=o("xfail"),k7=o("."),lw=f(),xt=l("h4"),Qs=l("a"),Cd=l("span"),d(oa.$$.fragment),C7=f(),jd=l("span"),j7=o("Implementation"),aw=f(),cn=l("ul"),xd=l("li"),x7=o("Here is how to skip whole test unconditionally:"),iw=f(),d(ra.$$.fragment),nw=f(),mn=l("p"),P7=o("or via pytest:"),pw=f(),d(la.$$.fragment),fw=f(),eo=l("p"),O7=o("or the "),Pd=l("code"),I7=o("xfail"),T7=o(" way:"),hw=f(),d(aa.$$.fragment),uw=f(),_n=l("ul"),Od=l("li"),A7=o("Here is how to skip a test based on some internal check inside the test:"),dw=f(),d(ia.$$.fragment),cw=f(),vn=l("p"),D7=o("or the whole module:"),mw=f(),d(na.$$.fragment),_w=f(),to=l("p"),q7=o("or the "),Id=l("code"),S7=o("xfail"),L7=o(" way:"),vw=f(),d(pa.$$.fragment),ww=f(),wn=l("ul"),Td=l("li"),U7=o("Here is how to skip all tests in a module if some import is missing:"),yw=f(),d(fa.$$.fragment),$w=f(),yn=l("ul"),Ad=l("li"),N7=o("Skip a test based on a condition:"),Ew=f(),d(ha.$$.fragment),gw=f(),$n=l("p"),H7=o("or:"),bw=f(),d(ua.$$.fragment),kw=f(),En=l("p"),z7=o("or skip the whole module:"),Cw=f(),d(da.$$.fragment),jw=f(),so=l("p"),R7=o("More details, example and ways are "),ca=l("a"),G7=o("here"),F7=o("."),xw=f(),Pt=l("h3"),oo=l("a"),Dd=l("span"),d(ma.$$.fragment),M7=f(),qd=l("span"),W7=o("Slow tests"),Pw=f(),gn=l("p"),B7=o(`The library of tests is ever-growing, and some of the tests take minutes to run, therefore we can\u2019t afford waiting for an hour for the test suite to complete on CI. Therefore, with some exceptions for essential tests, slow tests should be marked as in the example below:`),Ow=f(),d(_a.$$.fragment),Iw=f(),Ue=l("p"),Y7=o("Once a test is marked as "),Sd=l("code"),V7=o("@slow"),X7=o(", to run such tests set "),Ld=l("code"),J7=o("RUN_SLOW=1"),Z7=o(" env var, e.g.:"),Tw=f(),d(va.$$.fragment),Aw=f(),ae=l("p"),K7=o("Some decorators like "),Ud=l("code"),Q7=o("@parameterized"),e8=o(" rewrite test names, therefore "),Nd=l("code"),t8=o("@slow"),s8=o(` and the rest of the skip decorators `),Hd=l("code"),o8=o("@require_*"),r8=o(" have to be listed last for them to work correctly. Here is an example of the correct usage:"),Dw=f(),d(wa.$$.fragment),qw=f(),bn=l("p"),l8=o(`As explained at the beginning of this document, slow tests get to run on a scheduled basis, rather than in PRs CI checks. So it\u2019s possible that some problems will be missed during a PR submission and get merged. Such problems will get caught during the next scheduled CI job. But it also means that it\u2019s important to run the slow tests on your machine before submitting the PR.`),Sw=f(),kn=l("p"),a8=o("Here is a rough decision making mechanism for choosing which tests should be marked as slow:"),Lw=f(),Cn=l("p"),i8=o(`If the test is focused on one of the library\u2019s internal components (e.g., modeling files, tokenization files, pipelines), then we should run that test in the non-slow test suite. If it\u2019s focused on an other aspect of the library, such as the documentation or the examples, then we should run these tests in the slow test suite. And then, to refine this approach we should have exceptions:`),Uw=f(),ie=l("ul"),zd=l("li"),n8=o(`All tests that need to download a heavy set of weights or a dataset that is larger than ~50MB (e.g., model or tokenizer integration tests, pipeline integration tests) should be set to slow. 
If you\u2019re adding a new model, you should create and upload to the hub a tiny version of it (with random weights) for integration tests. This is discussed in the following paragraphs.`),p8=f(),Rd=l("li"),f8=o("All tests that need to do a training not specifically optimized to be fast should be set to slow."),h8=f(),Ot=l("li"),u8=o(`We can introduce exceptions if some of these should-be-non-slow tests are excruciatingly slow, and set them to `),Gd=l("code"),d8=o("@slow"),c8=o(`. Auto-modeling tests, which save and load large files to disk, are a good example of tests that are marked as `),Fd=l("code"),m8=o("@slow"),_8=o("."),v8=f(),Md=l("li"),w8=o("If a test completes under 1 second on CI (including downloads if any) then it should be a normal test regardless."),Nw=f(),Ne=l("p"),y8=o(`Collectively, all the non-slow tests need to cover entirely the different internals, while remaining fast. For example, a significant coverage can be achieved by testing with specially created tiny models with random weights. Such models have the very minimal number of layers (e.g., 2), vocab size (e.g., 1000), etc. Then the `),Wd=l("code"),$8=o("@slow"),E8=o(` tests can use large slow models to do qualitative testing. To see the use of these simply look for `),Bd=l("em"),g8=o("tiny"),b8=o(" models with:"),Hw=f(),d(ya.$$.fragment),zw=f(),He=l("p"),k8=o("Here is a an example of a "),$a=l("a"),C8=o("script"),j8=o(` that created the tiny model `),Ea=l("a"),x8=o("stas/tiny-wmt19-en-de"),P8=o(`. You can easily adjust it to your specific model\u2019s architecture.`),Rw=f(),ro=l("p"),O8=o(`It\u2019s easy to measure the run-time incorrectly if for example there is an overheard of downloading a huge model, but if you test it locally the downloaded files would be cached and thus the download time not measured. Hence check the execution speed report in CI logs instead (the output of `),Yd=l("code"),I8=o("pytest --durations=0 tests"),T8=o(")."),Gw=f(),jn=l("p"),A8=o(`That report is also useful to find slow outliers that aren\u2019t marked as such, or which need to be re-written to be fast. If you notice that the test suite starts getting slow on CI, the top listing of this report will show the slowest tests.`),Fw=f(),It=l("h3"),lo=l("a"),Vd=l("span"),d(ga.$$.fragment),D8=f(),Xd=l("span"),q8=o("Testing the stdout/stderr output"),Mw=f(),V=l("p"),S8=o("In order to test functions that write to "),Jd=l("code"),L8=o("stdout"),U8=o(" and/or "),Zd=l("code"),N8=o("stderr"),H8=o(`, the test can access those streams using the `),Kd=l("code"),z8=o("pytest"),R8=o("\u2019s "),ba=l("a"),G8=o("capsys system"),F8=o(". Here is how this is accomplished:"),Ww=f(),d(ka.$$.fragment),Bw=f(),ao=l("p"),M8=o("And, of course, most of the time, "),Qd=l("code"),W8=o("stderr"),B8=o(` will come as a part of an exception, so try/except has to be used in such a case:`),Yw=f(),d(Ca.$$.fragment),Vw=f(),io=l("p"),Y8=o("Another approach to capturing stdout is via "),ec=l("code"),V8=o("contextlib.redirect_stdout"),X8=o(":"),Xw=f(),d(ja.$$.fragment),Jw=f(),T=l("p"),J8=o("An important potential issue with capturing stdout is that it may contain "),tc=l("code"),Z8=o("\\r"),K8=o(" characters that in normal "),sc=l("code"),Q8=o("print"),e9=o(` reset everything that has been printed so far. 
There is no problem with `),oc=l("code"),t9=o("pytest"),s9=o(", but with "),rc=l("code"),o9=o("pytest -s"),r9=o(` these characters get included in the buffer, so to be able to have the test run with and without `),lc=l("code"),l9=o("-s"),a9=o(`, you have to make an extra cleanup to the captured output, using `),ac=l("code"),i9=o("re.sub(r'~.*\\r', '', buf, 0, re.M)"),n9=o("."),Zw=f(),no=l("p"),p9=o(`But, then we have a helper context manager wrapper to automatically take care of it all, regardless of whether it has some `),ic=l("code"),f9=o("\\r"),h9=o("\u2019s in it or not, so it\u2019s a simple:"),Kw=f(),d(xa.$$.fragment),Qw=f(),xn=l("p"),u9=o("Here is a full test example:"),ey=f(),d(Pa.$$.fragment),ty=f(),ze=l("p"),d9=o("If you\u2019d like to capture "),nc=l("code"),c9=o("stderr"),m9=o(" use the "),pc=l("code"),_9=o("CaptureStderr"),v9=o(" class instead:"),sy=f(),d(Oa.$$.fragment),oy=f(),po=l("p"),w9=o("If you need to capture both streams at once, use the parent "),fc=l("code"),y9=o("CaptureStd"),$9=o(" class:"),ry=f(),d(Ia.$$.fragment),ly=f(),Pn=l("p"),E9=o(`Also, to aid debugging test issues, by default these context managers automatically replay the captured streams on exit from the context.`),ay=f(),Tt=l("h3"),fo=l("a"),hc=l("span"),d(Ta.$$.fragment),g9=f(),uc=l("span"),b9=o("Capturing logger stream"),iy=f(),ho=l("p"),k9=o("If you need to validate the output of a logger, you can use "),dc=l("code"),C9=o("CaptureLogger"),j9=o(":"),ny=f(),d(Aa.$$.fragment),py=f(),At=l("h3"),uo=l("a"),cc=l("span"),d(Da.$$.fragment),x9=f(),mc=l("span"),P9=o("Testing with environment variables"),fy=f(),qa=l("p"),O9=o(`If you want to test the impact of environment variables for a specific test you can use a helper decorator `),_c=l("code"),I9=o("transformers.testing_utils.mockenv"),hy=f(),d(Sa.$$.fragment),uy=f(),ne=l("p"),T9=o("At times an external program needs to be called, which requires setting "),vc=l("code"),A9=o("PYTHONPATH"),D9=o(" in "),wc=l("code"),q9=o("os.environ"),S9=o(` to include multiple local paths. A helper class `),yc=l("code"),L9=o("transformers.test_utils.TestCasePlus"),U9=o(" comes to help:"),dy=f(),d(La.$$.fragment),cy=f(),N=l("p"),N9=o("Depending on whether the test file was under the "),$c=l("code"),H9=o("tests"),z9=o(" test suite or "),Ec=l("code"),R9=o("examples"),G9=o(` it\u2019ll correctly set up `),gc=l("code"),F9=o("env[PYTHONPATH]"),M9=o(" to include one of these two directories, and also the "),bc=l("code"),W9=o("src"),B9=o(` directory to ensure the testing is done against the current repo, and finally with whatever `),kc=l("code"),Y9=o("env[PYTHONPATH]"),V9=o(` was already set to before the test was called if anything.`),my=f(),co=l("p"),X9=o("This helper method creates a copy of the "),Cc=l("code"),J9=o("os.environ"),Z9=o(" object, so the original remains intact."),_y=f(),Dt=l("h3"),mo=l("a"),jc=l("span"),d(Ua.$$.fragment),K9=f(),xc=l("span"),Q9=o("Getting reproducible results"),vy=f(),On=l("p"),eC=o(`In some situations you may want to remove randomness for your tests. 
To get identical reproducable results set, you will need to fix the seed:`),wy=f(),d(Na.$$.fragment),yy=f(),qt=l("h3"),_o=l("a"),Pc=l("span"),d(Ha.$$.fragment),tC=f(),Oc=l("span"),sC=o("Debugging tests"),$y=f(),In=l("p"),oC=o("To start a debugger at the point of the warning, do this:"),Ey=f(),d(za.$$.fragment),gy=f(),St=l("h2"),vo=l("a"),Ic=l("span"),d(Ra.$$.fragment),rC=f(),Tc=l("span"),lC=o("Working with github actions workflows"),by=f(),Tn=l("p"),aC=o("To trigger a self-push workflow CI job, you must:"),ky=f(),pe=l("ol"),Ga=l("li"),iC=o("Create a new branch on "),Ac=l("code"),nC=o("transformers"),pC=o(" origin (not a fork!)."),fC=f(),H=l("li"),hC=o("The branch name has to start with either "),Dc=l("code"),uC=o("ci_"),dC=o(" or "),qc=l("code"),cC=o("ci-"),mC=o(" ("),Sc=l("code"),_C=o("master"),vC=o(` triggers it too, but we can\u2019t do PRs on `),Lc=l("code"),wC=o("master"),yC=o(`). It also gets triggered only for specific paths - you can find the up-to-date definition in case it changed since this document has been written `),Fa=l("a"),$C=o("here"),EC=o(" under "),Uc=l("em"),gC=o("push:"),bC=f(),Nc=l("li"),kC=o("Create a PR from this branch."),CC=f(),Ma=l("li"),jC=o("Then you can see the job appear "),Wa=l("a"),xC=o("here"),PC=o(`. It may not run right away if there is a backlog.`),Cy=f(),Lt=l("h2"),wo=l("a"),Hc=l("span"),d(Ba.$$.fragment),OC=f(),zc=l("span"),IC=o("Testing Experimental CI Features"),jy=f(),An=l("p"),TC=o(`Testing CI features can be potentially problematic as it can interfere with the normal CI functioning. Therefore if a new CI feature is to be added, it should be done as following.`),xy=f(),fe=l("ol"),Rc=l("li"),AC=o("Create a new dedicated job that tests what needs to be tested"),DC=f(),Gc=l("li"),qC=o("The new job must always succeed so that it gives us a green \u2713 (details below)."),SC=f(),Fc=l("li"),LC=o(`Let it run for some days to see that a variety of different PR types get to run on it (user fork branches, non-forked branches, branches originating from github.com UI direct file edit, various forced pushes, etc. 
- there are so many) while monitoring the experimental job\u2019s logs (not the overall job green as it\u2019s purposefully always green)`),UC=f(),Mc=l("li"),NC=o("When it\u2019s clear that everything is solid, then merge the new changes into existing jobs."),Py=f(),Dn=l("p"),HC=o("That way experiments on CI functionality itself won\u2019t interfere with the normal workflow."),Oy=f(),qn=l("p"),zC=o("Now how can we make the job always succeed while the new CI feature is being developed?"),Iy=f(),Sn=l("p"),RC=o(`Some CIs, like TravisCI support ignore-step-failure and will report the overall job as successful, but CircleCI and Github Actions as of this writing don\u2019t support that.`),Ty=f(),Ln=l("p"),GC=o("So the following workaround can be used:"),Ay=f(),yo=l("ol"),Un=l("li"),Wc=l("code"),FC=o("set +euo pipefail"),MC=o(" at the beginning of the run command to suppress most potential failures in the bash script."),WC=f(),Ut=l("li"),BC=o("the last command must be a success: "),Bc=l("code"),YC=o('echo "done"'),VC=o(" or just "),Yc=l("code"),XC=o("true"),JC=o(" will do"),Dy=f(),Nn=l("p"),ZC=o("Here is an example:"),qy=f(),d(Ya.$$.fragment),Sy=f(),Hn=l("p"),KC=o("For simple commands you could also do:"),Ly=f(),d(Va.$$.fragment),Uy=f(),$o=l("p"),QC=o(`Of course, once satisfied with the results, integrate the experimental step or job with the rest of the normal jobs, while removing `),Vc=l("code"),ej=o("set +euo pipefail"),tj=o(` or any other things you may have added to ensure that the experimental job doesn\u2019t interfere with the normal CI functioning.`),Ny=f(),Eo=l("p"),sj=o("This whole process would have been much easier if we only could set something like "),Xc=l("code"),oj=o("allow-failure"),rj=o(` for the experimental step, and let it fail without impacting the overall status of PRs. 
But as mentioned earlier CircleCI and Github Actions don\u2019t support it at the moment.`),Hy=f(),zn=l("p"),lj=o("You can vote for this feature and see where it is at at these CI-specific threads:"),zy=f(),go=l("ul"),Jc=l("li"),Xa=l("a"),aj=o("Github Actions:"),ij=f(),Zc=l("li"),Ja=l("a"),nj=o("CircleCI:"),this.h()},l(e){const n=lS('[data-svelte="svelte-1phssyn"]',document.head);$=a(n,"META",{name:!0,content:!0}),n.forEach(s),x=h(e),g=a(e,"H1",{class:!0});var Za=i(g);b=a(Za,"A",{id:!0,class:!0,href:!0});var Kc=i(b);A=a(Kc,"SPAN",{});var Qc=i(A);c(j.$$.fragment,Qc),Qc.forEach(s),Kc.forEach(s),z=h(Za),D=a(Za,"SPAN",{});var em=i(D);O=r(em,"Testing"),em.forEach(s),Za.forEach(s),k=h(e),S=a(e,"P",{});var Tj=i(S);R=r(Tj,"Let\u2019s take a look at how \u{1F917} Transformers models are tested and how you can write new tests and improve the existing ones."),Tj.forEach(s),Nt=h(e),ye=a(e,"P",{});var Aj=i(ye);Be=r(Aj,"There are 2 test suites in the repository:"),Aj.forEach(s),q=h(e),_e=a(e,"OL",{});var Gy=i(_e);Ye=a(Gy,"LI",{});var pj=i(Ye);Ht=a(pj,"CODE",{});var Dj=i(Ht);ti=r(Dj,"tests"),Dj.forEach(s),OE=r(pj," \u2014 tests for the general API"),pj.forEach(s),IE=h(Gy),si=a(Gy,"LI",{});var fj=i(si);fp=a(fj,"CODE",{});var qj=i(fp);TE=r(qj,"examples"),qj.forEach(s),AE=r(fj," \u2014 tests primarily for various applications that aren\u2019t part of the API"),fj.forEach(s),Gy.forEach(s),fm=h(e),Ve=a(e,"H2",{class:!0});var Fy=i(Ve);zt=a(Fy,"A",{id:!0,class:!0,href:!0});var Sj=i(zt);hp=a(Sj,"SPAN",{});var Lj=i(hp);c(Lo.$$.fragment,Lj),Lj.forEach(s),Sj.forEach(s),DE=h(Fy),up=a(Fy,"SPAN",{});var Uj=i(up);qE=r(Uj,"How transformers are tested"),Uj.forEach(s),Fy.forEach(s),hm=h(e),Rt=a(e,"OL",{});var My=i(Rt);Uo=a(My,"LI",{});var Wy=i(Uo);No=a(Wy,"P",{});var By=i(No);SE=r(By,`Once a PR is submitted it gets tested with 9 CircleCi jobs. Every new commit to that PR gets retested. These jobs are defined in this `),Ho=a(By,"A",{href:!0,rel:!0});var Nj=i(Ho);LE=r(Nj,"config file"),Nj.forEach(s),UE=r(By,`, so that if needed you can reproduce the same environment on your machine.`),By.forEach(s),NE=h(Wy),zo=a(Wy,"P",{});var Yy=i(zo);HE=r(Yy,"These CI jobs don\u2019t run "),dp=a(Yy,"CODE",{});var Hj=i(dp);zE=r(Hj,"@slow"),Hj.forEach(s),RE=r(Yy," tests."),Yy.forEach(s),Wy.forEach(s),GE=h(My),Ro=a(My,"LI",{});var Vy=i(Ro);Go=a(Vy,"P",{});var Xy=i(Go);FE=r(Xy,"There are 3 jobs run by "),Fo=a(Xy,"A",{href:!0,rel:!0});var zj=i(Fo);ME=r(zj,"github actions"),zj.forEach(s),WE=r(Xy,":"),Xy.forEach(s),BE=h(Vy),Xe=a(Vy,"UL",{});var Rn=i(Xe);cp=a(Rn,"LI",{});var Rj=i(cp);oi=a(Rj,"P",{});var hj=i(oi);Mo=a(hj,"A",{href:!0,rel:!0});var Gj=i(Mo);YE=r(Gj,"torch hub integration"),Gj.forEach(s),VE=r(hj,`: checks whether torch hub integration works.`),hj.forEach(s),Rj.forEach(s),XE=h(Rn),mp=a(Rn,"LI",{});var Fj=i(mp);L=a(Fj,"P",{});var Z=i(L);Wo=a(Z,"A",{href:!0,rel:!0});var Mj=i(Wo);JE=r(Mj,"self-hosted (push)"),Mj.forEach(s),ZE=r(Z,`: runs fast tests on GPU only on commits on `),_p=a(Z,"CODE",{});var Wj=i(_p);KE=r(Wj,"master"),Wj.forEach(s),QE=r(Z,". 
It only runs if a commit on "),vp=a(Z,"CODE",{});var Bj=i(vp);eg=r(Bj,"master"),Bj.forEach(s),tg=r(Z," has updated the code in one of the following folders: "),wp=a(Z,"CODE",{});var Yj=i(wp);sg=r(Yj,"src"),Yj.forEach(s),og=r(Z,`, `),yp=a(Z,"CODE",{});var Vj=i(yp);rg=r(Vj,"tests"),Vj.forEach(s),lg=r(Z,", "),$p=a(Z,"CODE",{});var Xj=i($p);ag=r(Xj,".github"),Xj.forEach(s),ig=r(Z," (to prevent running on added model cards, notebooks, etc.)"),Z.forEach(s),Fj.forEach(s),ng=h(Rn),Ep=a(Rn,"LI",{});var Jj=i(Ep);$e=a(Jj,"P",{});var Ka=i($e);Bo=a(Ka,"A",{href:!0,rel:!0});var Zj=i(Bo);pg=r(Zj,"self-hosted runner"),Zj.forEach(s),fg=r(Ka,`: runs normal and slow tests on GPU in `),gp=a(Ka,"CODE",{});var Kj=i(gp);hg=r(Kj,"tests"),Kj.forEach(s),ug=r(Ka," and "),bp=a(Ka,"CODE",{});var Qj=i(bp);dg=r(Qj,"examples"),Qj.forEach(s),cg=r(Ka,":"),Ka.forEach(s),Jj.forEach(s),Rn.forEach(s),Vy.forEach(s),My.forEach(s),um=h(e),c(Yo.$$.fragment,e),dm=h(e),Gt=a(e,"P",{});var Jy=i(Gt);mg=r(Jy,"The results can be observed "),Vo=a(Jy,"A",{href:!0,rel:!0});var ex=i(Vo);_g=r(ex,"here"),ex.forEach(s),vg=r(Jy,"."),Jy.forEach(s),cm=h(e),Je=a(e,"H2",{class:!0});var Zy=i(Je);Ft=a(Zy,"A",{id:!0,class:!0,href:!0});var tx=i(Ft);kp=a(tx,"SPAN",{});var sx=i(kp);c(Xo.$$.fragment,sx),sx.forEach(s),tx.forEach(s),wg=h(Zy),Cp=a(Zy,"SPAN",{});var ox=i(Cp);yg=r(ox,"Running tests"),ox.forEach(s),Zy.forEach(s),mm=h(e),Ze=a(e,"H3",{class:!0});var Ky=i(Ze);Mt=a(Ky,"A",{id:!0,class:!0,href:!0});var rx=i(Mt);jp=a(rx,"SPAN",{});var lx=i(jp);c(Jo.$$.fragment,lx),lx.forEach(s),rx.forEach(s),$g=h(Ky),xp=a(Ky,"SPAN",{});var ax=i(xp);Eg=r(ax,"Choosing which tests to run"),ax.forEach(s),Ky.forEach(s),_m=h(e),Wt=a(e,"P",{});var Qy=i(Wt);gg=r(Qy,`This document goes into many details of how tests can be run. If after reading everything, you need even more details you will find them `),Zo=a(Qy,"A",{href:!0,rel:!0});var ix=i(Zo);bg=r(ix,"here"),ix.forEach(s),kg=r(Qy,"."),Qy.forEach(s),vm=h(e),ri=a(e,"P",{});var nx=i(ri);Cg=r(nx,"Here are some most useful ways of running tests."),nx.forEach(s),wm=h(e),li=a(e,"P",{});var px=i(li);jg=r(px,"Run all:"),px.forEach(s),ym=h(e),c(Ko.$$.fragment,e),$m=h(e),ai=a(e,"P",{});var fx=i(ai);xg=r(fx,"or:"),fx.forEach(s),Em=h(e),c(Qo.$$.fragment,e),gm=h(e),ii=a(e,"P",{});var hx=i(ii);Pg=r(hx,"Note that the latter is defined as:"),hx.forEach(s),bm=h(e),c(er.$$.fragment,e),km=h(e),ni=a(e,"P",{});var ux=i(ni);Og=r(ux,"which tells pytest to:"),ux.forEach(s),Cm=h(e),Q=a(e,"UL",{});var bo=i(Q);Pp=a(bo,"LI",{});var dx=i(Pp);Ig=r(dx,"run as many test processes as they are CPU cores (which could be too many if you don\u2019t have a ton of RAM!)"),dx.forEach(s),Tg=h(bo),Op=a(bo,"LI",{});var cx=i(Op);Ag=r(cx,"ensure that all tests from the same file will be run by the same test process"),cx.forEach(s),Dg=h(bo),Ip=a(bo,"LI",{});var mx=i(Ip);qg=r(mx,"do not capture output"),mx.forEach(s),Sg=h(bo),Tp=a(bo,"LI",{});var _x=i(Tp);Lg=r(_x,"run in verbose mode"),_x.forEach(s),bo.forEach(s),jm=h(e),Ke=a(e,"H3",{class:!0});var e$=i(Ke);Bt=a(e$,"A",{id:!0,class:!0,href:!0});var vx=i(Bt);Ap=a(vx,"SPAN",{});var wx=i(Ap);c(tr.$$.fragment,wx),wx.forEach(s),vx.forEach(s),Ug=h(e$),Dp=a(e$,"SPAN",{});var yx=i(Dp);Ng=r(yx,"Getting the list of all tests"),yx.forEach(s),e$.forEach(s),xm=h(e),pi=a(e,"P",{});var $x=i(pi);Hg=r($x,"All tests of the test suite:"),$x.forEach(s),Pm=h(e),c(sr.$$.fragment,e),Om=h(e),fi=a(e,"P",{});var Ex=i(fi);zg=r(Ex,"All tests of a given test file:"),Ex.forEach(s),Im=h(e),c(or.$$.fragment,e),Tm=h(e),Qe=a(e,"H3",{class:!0});var 
t$=i(Qe);Yt=a(t$,"A",{id:!0,class:!0,href:!0});var gx=i(Yt);qp=a(gx,"SPAN",{});var bx=i(qp);c(rr.$$.fragment,bx),bx.forEach(s),gx.forEach(s),Rg=h(t$),Sp=a(t$,"SPAN",{});var kx=i(Sp);Gg=r(kx,"Run a specific test module"),kx.forEach(s),t$.forEach(s),Am=h(e),hi=a(e,"P",{});var Cx=i(hi);Fg=r(Cx,"To run an individual test module:"),Cx.forEach(s),Dm=h(e),c(lr.$$.fragment,e),qm=h(e),et=a(e,"H3",{class:!0});var s$=i(et);Vt=a(s$,"A",{id:!0,class:!0,href:!0});var jx=i(Vt);Lp=a(jx,"SPAN",{});var xx=i(Lp);c(ar.$$.fragment,xx),xx.forEach(s),jx.forEach(s),Mg=h(s$),Up=a(s$,"SPAN",{});var Px=i(Up);Wg=r(Px,"Run specific tests"),Px.forEach(s),s$.forEach(s),Sm=h(e),ui=a(e,"P",{});var Ox=i(ui);Bg=r(Ox,`Since unittest is used inside most of the tests, to run specific subtests you need to know the name of the unittest class containing those tests. For example, it could be:`),Ox.forEach(s),Lm=h(e),c(ir.$$.fragment,e),Um=h(e),di=a(e,"P",{});var Ix=i(di);Yg=r(Ix,"Here:"),Ix.forEach(s),Nm=h(e),Ee=a(e,"UL",{});var Gn=i(Ee);ci=a(Gn,"LI",{});var uj=i(ci);Np=a(uj,"CODE",{});var Tx=i(Np);Vg=r(Tx,"tests/test_optimization.py"),Tx.forEach(s),Xg=r(uj," - the file with tests"),uj.forEach(s),Jg=h(Gn),mi=a(Gn,"LI",{});var dj=i(mi);Hp=a(dj,"CODE",{});var Ax=i(Hp);Zg=r(Ax,"OptimizationTest"),Ax.forEach(s),Kg=r(dj," - the name of the class"),dj.forEach(s),Qg=h(Gn),_i=a(Gn,"LI",{});var cj=i(_i);zp=a(cj,"CODE",{});var Dx=i(zp);eb=r(Dx,"test_adam_w"),Dx.forEach(s),tb=r(cj," - the name of the specific test function"),cj.forEach(s),Gn.forEach(s),Hm=h(e),vi=a(e,"P",{});var qx=i(vi);sb=r(qx,"If the file contains multiple classes, you can choose to run only tests of a given class. For example:"),qx.forEach(s),zm=h(e),c(nr.$$.fragment,e),Rm=h(e),wi=a(e,"P",{});var Sx=i(wi);ob=r(Sx,"will run all the tests inside that class."),Sx.forEach(s),Gm=h(e),Xt=a(e,"P",{});var o$=i(Xt);rb=r(o$,"As mentioned earlier you can see what tests are contained inside the "),Rp=a(o$,"CODE",{});var Lx=i(Rp);lb=r(Lx,"OptimizationTest"),Lx.forEach(s),ab=r(o$," class by running:"),o$.forEach(s),Fm=h(e),c(pr.$$.fragment,e),Mm=h(e),yi=a(e,"P",{});var Ux=i(yi);ib=r(Ux,"You can run tests by keyword expressions."),Ux.forEach(s),Wm=h(e),Jt=a(e,"P",{});var r$=i(Jt);nb=r(r$,"To run only tests whose name contains "),Gp=a(r$,"CODE",{});var Nx=i(Gp);pb=r(Nx,"adam"),Nx.forEach(s),fb=r(r$,":"),r$.forEach(s),Bm=h(e),c(fr.$$.fragment,e),Ym=h(e),ee=a(e,"P",{});var ko=i(ee);hb=r(ko,"Logical "),Fp=a(ko,"CODE",{});var Hx=i(Fp);ub=r(Hx,"and"),Hx.forEach(s),db=r(ko," and "),Mp=a(ko,"CODE",{});var zx=i(Mp);cb=r(zx,"or"),zx.forEach(s),mb=r(ko," can be used to indicate whether all keywords should match or either. 
"),Wp=a(ko,"CODE",{});var Rx=i(Wp);_b=r(Rx,"not"),Rx.forEach(s),vb=r(ko,` can be used to negate.`),ko.forEach(s),Vm=h(e),Zt=a(e,"P",{});var l$=i(Zt);wb=r(l$,"To run all tests except those whose name contains "),Bp=a(l$,"CODE",{});var Gx=i(Bp);yb=r(Gx,"adam"),Gx.forEach(s),$b=r(l$,":"),l$.forEach(s),Xm=h(e),c(hr.$$.fragment,e),Jm=h(e),$i=a(e,"P",{});var Fx=i($i);Eb=r(Fx,"And you can combine the two patterns in one:"),Fx.forEach(s),Zm=h(e),c(ur.$$.fragment,e),Km=h(e),ge=a(e,"P",{});var Fn=i(ge);gb=r(Fn,"For example to run both "),Yp=a(Fn,"CODE",{});var Mx=i(Yp);bb=r(Mx,"test_adafactor"),Mx.forEach(s),kb=r(Fn," and "),Vp=a(Fn,"CODE",{});var Wx=i(Vp);Cb=r(Wx,"test_adam_w"),Wx.forEach(s),jb=r(Fn," you can use:"),Fn.forEach(s),Qm=h(e),c(dr.$$.fragment,e),e_=h(e),Kt=a(e,"P",{});var a$=i(Kt);xb=r(a$,"Note that we use "),Xp=a(a$,"CODE",{});var Bx=i(Xp);Pb=r(Bx,"or"),Bx.forEach(s),Ob=r(a$," here, since we want either of the keywords to match to include both."),a$.forEach(s),t_=h(e),Qt=a(e,"P",{});var i$=i(Qt);Ib=r(i$,"If you want to include only tests that include both patterns, "),Jp=a(i$,"CODE",{});var Yx=i(Jp);Tb=r(Yx,"and"),Yx.forEach(s),Ab=r(i$," is to be used:"),i$.forEach(s),s_=h(e),c(cr.$$.fragment,e),o_=h(e),tt=a(e,"H3",{class:!0});var n$=i(tt);es=a(n$,"A",{id:!0,class:!0,href:!0});var Vx=i(es);Zp=a(Vx,"SPAN",{});var Xx=i(Zp);c(mr.$$.fragment,Xx),Xx.forEach(s),Vx.forEach(s),Db=h(n$),Kp=a(n$,"SPAN",{});var Jx=i(Kp);qb=r(Jx,"Run only modified tests"),Jx.forEach(s),n$.forEach(s),r_=h(e),ts=a(e,"P",{});var p$=i(ts);Sb=r(p$,"You can run the tests related to the unstaged files or the current branch (according to Git) by using "),_r=a(p$,"A",{href:!0,rel:!0});var Zx=i(_r);Lb=r(Zx,"pytest-picked"),Zx.forEach(s),Ub=r(p$,`. This is a great way of quickly testing your changes didn\u2019t break anything, since it won\u2019t run the tests related to files you didn\u2019t touch.`),p$.forEach(s),l_=h(e),c(vr.$$.fragment,e),a_=h(e),c(wr.$$.fragment,e),i_=h(e),Ei=a(e,"P",{});var Kx=i(Ei);Nb=r(Kx,"All tests will be run from files and folders which are modified, but not yet committed."),Kx.forEach(s),n_=h(e),st=a(e,"H3",{class:!0});var f$=i(st);ss=a(f$,"A",{id:!0,class:!0,href:!0});var Qx=i(ss);Qp=a(Qx,"SPAN",{});var eP=i(Qp);c(yr.$$.fragment,eP),eP.forEach(s),Qx.forEach(s),Hb=h(f$),ef=a(f$,"SPAN",{});var tP=i(ef);zb=r(tP,"Automatically rerun failed tests on source modification"),tP.forEach(s),f$.forEach(s),p_=h(e),$r=a(e,"P",{});var mj=i($r);Er=a(mj,"A",{href:!0,rel:!0});var sP=i(Er);Rb=r(sP,"pytest-xdist"),sP.forEach(s),Gb=r(mj,` provides a very useful feature of detecting all failed tests, and then waiting for you to modify files and continuously re-rerun those failing tests until they pass while you fix them. So that you don\u2019t need to re start pytest after you made the fix. This is repeated until all tests pass after which again a full run is performed.`),mj.forEach(s),f_=h(e),c(gr.$$.fragment,e),h_=h(e),ot=a(e,"P",{});var tm=i(ot);Fb=r(tm,"To enter the mode: "),tf=a(tm,"CODE",{});var oP=i(tf);Mb=r(oP,"pytest -f"),oP.forEach(s),Wb=r(tm," or "),sf=a(tm,"CODE",{});var rP=i(sf);Bb=r(rP,"pytest --looponfail"),rP.forEach(s),tm.forEach(s),u_=h(e),be=a(e,"P",{});var Mn=i(be);Yb=r(Mn,"File changes are detected by looking at "),of=a(Mn,"CODE",{});var lP=i(of);Vb=r(lP,"looponfailroots"),lP.forEach(s),Xb=r(Mn,` root directories and all of their contents (recursively). 
If the default for this value does not work for you, you can change it in your project by setting a configuration option in `),rf=a(Mn,"CODE",{});var aP=i(rf);Jb=r(aP,"setup.cfg"),aP.forEach(s),Zb=r(Mn,":"),Mn.forEach(s),d_=h(e),c(br.$$.fragment,e),c_=h(e),ke=a(e,"P",{});var Wn=i(ke);Kb=r(Wn,"or "),lf=a(Wn,"CODE",{});var iP=i(lf);Qb=r(iP,"pytest.ini"),iP.forEach(s),e3=r(Wn,"/"),af=a(Wn,"CODE",{});var nP=i(af);t3=r(nP,"tox.ini"),nP.forEach(s),s3=r(Wn," files:"),Wn.forEach(s),m_=h(e),c(kr.$$.fragment,e),__=h(e),gi=a(e,"P",{});var pP=i(gi);o3=r(pP,`This would lead to only looking for file changes in the respective directories, specified relatively to the ini-file\u2019s directory.`),pP.forEach(s),v_=h(e),Cr=a(e,"P",{});var _j=i(Cr);jr=a(_j,"A",{href:!0,rel:!0});var fP=i(jr);r3=r(fP,"pytest-watch"),fP.forEach(s),l3=r(_j," is an alternative implementation of this functionality."),_j.forEach(s),w_=h(e),rt=a(e,"H3",{class:!0});var h$=i(rt);os=a(h$,"A",{id:!0,class:!0,href:!0});var hP=i(os);nf=a(hP,"SPAN",{});var uP=i(nf);c(xr.$$.fragment,uP),uP.forEach(s),hP.forEach(s),a3=h(h$),pf=a(h$,"SPAN",{});var dP=i(pf);i3=r(dP,"Skip a test module"),dP.forEach(s),h$.forEach(s),y_=h(e),rs=a(e,"P",{});var u$=i(rs);n3=r(u$,`If you want to run all test modules, except a few you can exclude them by giving an explicit list of tests to run. For example, to run all except `),ff=a(u$,"CODE",{});var cP=i(ff);p3=r(cP,"test_modeling_*.py"),cP.forEach(s),f3=r(u$," tests:"),u$.forEach(s),$_=h(e),c(Pr.$$.fragment,e),E_=h(e),lt=a(e,"H3",{class:!0});var d$=i(lt);ls=a(d$,"A",{id:!0,class:!0,href:!0});var mP=i(ls);hf=a(mP,"SPAN",{});var _P=i(hf);c(Or.$$.fragment,_P),_P.forEach(s),mP.forEach(s),h3=h(d$),uf=a(d$,"SPAN",{});var vP=i(uf);u3=r(vP,"Clearing state"),vP.forEach(s),d$.forEach(s),g_=h(e),bi=a(e,"P",{});var wP=i(bi);d3=r(wP,"CI builds and when isolation is important (against speed), cache should be cleared:"),wP.forEach(s),b_=h(e),c(Ir.$$.fragment,e),k_=h(e),at=a(e,"H3",{class:!0});var c$=i(at);as=a(c$,"A",{id:!0,class:!0,href:!0});var yP=i(as);df=a(yP,"SPAN",{});var $P=i(df);c(Tr.$$.fragment,$P),$P.forEach(s),yP.forEach(s),c3=h(c$),cf=a(c$,"SPAN",{});var EP=i(cf);m3=r(EP,"Running tests in parallel"),EP.forEach(s),c$.forEach(s),C_=h(e),M=a(e,"P",{});var Re=i(M);_3=r(Re,"As mentioned earlier "),mf=a(Re,"CODE",{});var gP=i(mf);v3=r(gP,"make test"),gP.forEach(s),w3=r(Re," runs tests in parallel via "),_f=a(Re,"CODE",{});var bP=i(_f);y3=r(bP,"pytest-xdist"),bP.forEach(s),$3=r(Re," plugin ("),vf=a(Re,"CODE",{});var kP=i(vf);E3=r(kP,"-n X"),kP.forEach(s),g3=r(Re," argument, e.g. "),wf=a(Re,"CODE",{});var CP=i(wf);b3=r(CP,"-n 2"),CP.forEach(s),k3=r(Re,` to run 2 parallel jobs).`),Re.forEach(s),j_=h(e),ve=a(e,"P",{});var Qa=i(ve);yf=a(Qa,"CODE",{});var jP=i(yf);C3=r(jP,"pytest-xdist"),jP.forEach(s),j3=r(Qa,"\u2019s "),$f=a(Qa,"CODE",{});var xP=i($f);x3=r(xP,"--dist="),xP.forEach(s),P3=r(Qa," option allows one to control how the tests are grouped. 
"),Ef=a(Qa,"CODE",{});var PP=i(Ef);O3=r(PP,"--dist=loadfile"),PP.forEach(s),I3=r(Qa,` puts the tests located in one file onto the same process.`),Qa.forEach(s),x_=h(e),Ce=a(e,"P",{});var Bn=i(Ce);T3=r(Bn,"Since the order of executed tests is different and unpredictable, if running the test suite with "),gf=a(Bn,"CODE",{});var OP=i(gf);A3=r(OP,"pytest-xdist"),OP.forEach(s),D3=r(Bn,` produces failures (meaning we have some undetected coupled tests), use `),Ar=a(Bn,"A",{href:!0,rel:!0});var IP=i(Ar);q3=r(IP,"pytest-replay"),IP.forEach(s),S3=r(Bn,` to replay the tests in the same order, which should help with then somehow reducing that failing sequence to a minimum.`),Bn.forEach(s),P_=h(e),it=a(e,"H3",{class:!0});var m$=i(it);is=a(m$,"A",{id:!0,class:!0,href:!0});var TP=i(is);bf=a(TP,"SPAN",{});var AP=i(bf);c(Dr.$$.fragment,AP),AP.forEach(s),TP.forEach(s),L3=h(m$),kf=a(m$,"SPAN",{});var DP=i(kf);U3=r(DP,"Test order and repetition"),DP.forEach(s),m$.forEach(s),O_=h(e),ki=a(e,"P",{});var qP=i(ki);N3=r(qP,`It\u2019s good to repeat the tests several times, in sequence, randomly, or in sets, to detect any potential inter-dependency and state-related bugs (tear down). And the straightforward multiple repetition is just good to detect some problems that get uncovered by randomness of DL.`),qP.forEach(s),I_=h(e),nt=a(e,"H4",{class:!0});var _$=i(nt);ns=a(_$,"A",{id:!0,class:!0,href:!0});var SP=i(ns);Cf=a(SP,"SPAN",{});var LP=i(Cf);c(qr.$$.fragment,LP),LP.forEach(s),SP.forEach(s),H3=h(_$),jf=a(_$,"SPAN",{});var UP=i(jf);z3=r(UP,"Repeat tests"),UP.forEach(s),_$.forEach(s),T_=h(e),Ci=a(e,"UL",{});var NP=i(Ci);ji=a(NP,"LI",{});var vj=i(ji);Sr=a(vj,"A",{href:!0,rel:!0});var HP=i(Sr);R3=r(HP,"pytest-flakefinder"),HP.forEach(s),G3=r(vj,":"),vj.forEach(s),NP.forEach(s),A_=h(e),c(Lr.$$.fragment,e),D_=h(e),xi=a(e,"P",{});var zP=i(xi);F3=r(zP,"And then run every test multiple times (50 by default):"),zP.forEach(s),q_=h(e),c(Ur.$$.fragment,e),S_=h(e),c(ps.$$.fragment,e),L_=h(e),c(fs.$$.fragment,e),U_=h(e),pt=a(e,"H4",{class:!0});var v$=i(pt);hs=a(v$,"A",{id:!0,class:!0,href:!0});var RP=i(hs);xf=a(RP,"SPAN",{});var GP=i(xf);c(Nr.$$.fragment,GP),GP.forEach(s),RP.forEach(s),M3=h(v$),Pf=a(v$,"SPAN",{});var FP=i(Pf);W3=r(FP,"Run tests in a random order"),FP.forEach(s),v$.forEach(s),N_=h(e),c(Hr.$$.fragment,e),H_=h(e),us=a(e,"P",{});var w$=i(us);B3=r(w$,"Important: the presence of "),Of=a(w$,"CODE",{});var MP=i(Of);Y3=r(MP,"pytest-random-order"),MP.forEach(s),V3=r(w$,` will automatically randomize tests, no configuration change or command line options is required.`),w$.forEach(s),z_=h(e),ds=a(e,"P",{});var y$=i(ds);X3=r(y$,`As explained earlier this allows detection of coupled tests - where one test\u2019s state affects the state of another. When `),If=a(y$,"CODE",{});var WP=i(If);J3=r(WP,"pytest-random-order"),WP.forEach(s),Z3=r(y$," is installed it will print the random seed it used for that session, e.g:"),y$.forEach(s),R_=h(e),c(zr.$$.fragment,e),G_=h(e),Pi=a(e,"P",{});var BP=i(Pi);K3=r(BP,"So that if the given particular sequence fails, you can reproduce it by adding that exact seed, e.g.:"),BP.forEach(s),F_=h(e),c(Rr.$$.fragment,e),M_=h(e),cs=a(e,"P",{});var $$=i(cs);Q3=r($$,`It will only reproduce the exact order if you use the exact same list of tests (or no list at all). 
Once you start to manually narrowing down the list you can no longer rely on the seed, but have to list them manually in the exact order they failed and tell pytest to not randomize them instead using `),Tf=a($$,"CODE",{});var YP=i(Tf);e2=r(YP,"--random-order-bucket=none"),YP.forEach(s),t2=r($$,", e.g.:"),$$.forEach(s),W_=h(e),c(Gr.$$.fragment,e),B_=h(e),Oi=a(e,"P",{});var VP=i(Oi);s2=r(VP,"To disable the shuffling for all tests:"),VP.forEach(s),Y_=h(e),c(Fr.$$.fragment,e),V_=h(e),I=a(e,"P",{});var X=i(I);o2=r(X,"By default "),Af=a(X,"CODE",{});var XP=i(Af);r2=r(XP,"--random-order-bucket=module"),XP.forEach(s),l2=r(X,` is implied, which will shuffle the files on the module levels. It can also shuffle on `),Df=a(X,"CODE",{});var JP=i(Df);a2=r(JP,"class"),JP.forEach(s),i2=r(X,", "),qf=a(X,"CODE",{});var ZP=i(qf);n2=r(ZP,"package"),ZP.forEach(s),p2=r(X,", "),Sf=a(X,"CODE",{});var KP=i(Sf);f2=r(KP,"global"),KP.forEach(s),h2=r(X," and "),Lf=a(X,"CODE",{});var QP=i(Lf);u2=r(QP,"none"),QP.forEach(s),d2=r(X,` levels. For the complete details please see its `),Mr=a(X,"A",{href:!0,rel:!0});var eO=i(Mr);c2=r(eO,"documentation"),eO.forEach(s),m2=r(X,"."),X.forEach(s),X_=h(e),je=a(e,"P",{});var Yn=i(je);_2=r(Yn,"Another randomization alternative is: "),Wr=a(Yn,"A",{href:!0,rel:!0});var tO=i(Wr);Uf=a(tO,"CODE",{});var sO=i(Uf);v2=r(sO,"pytest-randomly"),sO.forEach(s),tO.forEach(s),w2=r(Yn,`. This module has a very similar functionality/interface, but it doesn\u2019t have the bucket modes available in `),Nf=a(Yn,"CODE",{});var oO=i(Nf);y2=r(oO,"pytest-random-order"),oO.forEach(s),$2=r(Yn,". It has the same problem of imposing itself once installed."),Yn.forEach(s),J_=h(e),ft=a(e,"H3",{class:!0});var E$=i(ft);ms=a(E$,"A",{id:!0,class:!0,href:!0});var rO=i(ms);Hf=a(rO,"SPAN",{});var lO=i(Hf);c(Br.$$.fragment,lO),lO.forEach(s),rO.forEach(s),E2=h(E$),zf=a(E$,"SPAN",{});var aO=i(zf);g2=r(aO,"Look and feel variations"),aO.forEach(s),E$.forEach(s),Z_=h(e),ht=a(e,"H4",{class:!0});var g$=i(ht);_s=a(g$,"A",{id:!0,class:!0,href:!0});var iO=i(_s);Rf=a(iO,"SPAN",{});var nO=i(Rf);c(Yr.$$.fragment,nO),nO.forEach(s),iO.forEach(s),b2=h(g$),Gf=a(g$,"SPAN",{});var pO=i(Gf);k2=r(pO,"pytest-sugar"),pO.forEach(s),g$.forEach(s),K_=h(e),Vr=a(e,"P",{});var wj=i(Vr);Xr=a(wj,"A",{href:!0,rel:!0});var fO=i(Xr);C2=r(fO,"pytest-sugar"),fO.forEach(s),j2=r(wj,` is a plugin that improves the look-n-feel, adds a progressbar, and show tests that fail and the assert instantly. 
It gets activated automatically upon installation.`),wj.forEach(s),Q_=h(e),c(Jr.$$.fragment,e),ev=h(e),Ii=a(e,"P",{});var hO=i(Ii);x2=r(hO,"To run tests without it, run:"),hO.forEach(s),tv=h(e),c(Zr.$$.fragment,e),sv=h(e),Ti=a(e,"P",{});var uO=i(Ti);P2=r(uO,"or uninstall it."),uO.forEach(s),ov=h(e),ut=a(e,"H4",{class:!0});var b$=i(ut);vs=a(b$,"A",{id:!0,class:!0,href:!0});var dO=i(vs);Ff=a(dO,"SPAN",{});var cO=i(Ff);c(Kr.$$.fragment,cO),cO.forEach(s),dO.forEach(s),O2=h(b$),Mf=a(b$,"SPAN",{});var mO=i(Mf);I2=r(mO,"Report each sub-test name and its progress"),mO.forEach(s),b$.forEach(s),rv=h(e),xe=a(e,"P",{});var Vn=i(xe);T2=r(Vn,"For a single or a group of tests via "),Wf=a(Vn,"CODE",{});var _O=i(Wf);A2=r(_O,"pytest"),_O.forEach(s),D2=r(Vn," (after "),Bf=a(Vn,"CODE",{});var vO=i(Bf);q2=r(vO,"pip install pytest-pspec"),vO.forEach(s),S2=r(Vn,"):"),Vn.forEach(s),lv=h(e),c(Qr.$$.fragment,e),av=h(e),dt=a(e,"H4",{class:!0});var k$=i(dt);ws=a(k$,"A",{id:!0,class:!0,href:!0});var wO=i(ws);Yf=a(wO,"SPAN",{});var yO=i(Yf);c(el.$$.fragment,yO),yO.forEach(s),wO.forEach(s),L2=h(k$),Vf=a(k$,"SPAN",{});var $O=i(Vf);U2=r($O,"Instantly shows failed tests"),$O.forEach(s),k$.forEach(s),iv=h(e),tl=a(e,"P",{});var yj=i(tl);sl=a(yj,"A",{href:!0,rel:!0});var EO=i(sl);N2=r(EO,"pytest-instafail"),EO.forEach(s),H2=r(yj,` shows failures and errors instantly instead of waiting until the end of test session.`),yj.forEach(s),nv=h(e),c(ol.$$.fragment,e),pv=h(e),c(rl.$$.fragment,e),fv=h(e),ct=a(e,"H3",{class:!0});var C$=i(ct);ys=a(C$,"A",{id:!0,class:!0,href:!0});var gO=i(ys);Xf=a(gO,"SPAN",{});var bO=i(Xf);c(ll.$$.fragment,bO),bO.forEach(s),gO.forEach(s),z2=h(C$),Jf=a(C$,"SPAN",{});var kO=i(Jf);R2=r(kO,"To GPU or not to GPU"),kO.forEach(s),C$.forEach(s),hv=h(e),$s=a(e,"P",{});var j$=i($s);G2=r(j$,"On a GPU-enabled setup, to test in CPU-only mode add "),Zf=a(j$,"CODE",{});var CO=i(Zf);F2=r(CO,'CUDA_VISIBLE_DEVICES=""'),CO.forEach(s),M2=r(j$,":"),j$.forEach(s),uv=h(e),c(al.$$.fragment,e),dv=h(e),te=a(e,"P",{});var Co=i(te);W2=r(Co,"or if you have multiple gpus, you can specify which one is to be used by "),Kf=a(Co,"CODE",{});var jO=i(Kf);B2=r(jO,"pytest"),jO.forEach(s),Y2=r(Co,`. For example, to use only the second gpu if you have gpus `),Qf=a(Co,"CODE",{});var xO=i(Qf);V2=r(xO,"0"),xO.forEach(s),X2=r(Co," and "),eh=a(Co,"CODE",{});var PO=i(eh);J2=r(PO,"1"),PO.forEach(s),Z2=r(Co,", you can run:"),Co.forEach(s),cv=h(e),c(il.$$.fragment,e),mv=h(e),Ai=a(e,"P",{});var OO=i(Ai);K2=r(OO,"This is handy when you want to run different tasks on different GPUs."),OO.forEach(s),_v=h(e),Di=a(e,"P",{});var IO=i(Di);Q2=r(IO,`Some tests must be run on CPU-only, others on either CPU or GPU or TPU, yet others on multiple-GPUs. 
The following skip decorators are used to set the requirements of tests CPU/GPU/TPU-wise:`),IO.forEach(s),vv=h(e),U=a(e,"UL",{});var he=i(U);qi=a(he,"LI",{});var $j=i(qi);th=a($j,"CODE",{});var TO=i(th);e5=r(TO,"require_torch"),TO.forEach(s),t5=r($j," - this test will run only under torch"),$j.forEach(s),s5=h(he),Es=a(he,"LI",{});var sm=i(Es);sh=a(sm,"CODE",{});var AO=i(sh);o5=r(AO,"require_torch_gpu"),AO.forEach(s),r5=r(sm," - as "),oh=a(sm,"CODE",{});var DO=i(oh);l5=r(DO,"require_torch"),DO.forEach(s),a5=r(sm," plus requires at least 1 GPU"),sm.forEach(s),i5=h(he),gs=a(he,"LI",{});var om=i(gs);rh=a(om,"CODE",{});var qO=i(rh);n5=r(qO,"require_torch_multi_gpu"),qO.forEach(s),p5=r(om," - as "),lh=a(om,"CODE",{});var SO=i(lh);f5=r(SO,"require_torch"),SO.forEach(s),h5=r(om," plus requires at least 2 GPUs"),om.forEach(s),u5=h(he),bs=a(he,"LI",{});var rm=i(bs);ah=a(rm,"CODE",{});var LO=i(ah);d5=r(LO,"require_torch_non_multi_gpu"),LO.forEach(s),c5=r(rm," - as "),ih=a(rm,"CODE",{});var UO=i(ih);m5=r(UO,"require_torch"),UO.forEach(s),_5=r(rm," plus requires 0 or 1 GPUs"),rm.forEach(s),v5=h(he),ks=a(he,"LI",{});var lm=i(ks);nh=a(lm,"CODE",{});var NO=i(nh);w5=r(NO,"require_torch_up_to_2_gpus"),NO.forEach(s),y5=r(lm," - as "),ph=a(lm,"CODE",{});var HO=i(ph);$5=r(HO,"require_torch"),HO.forEach(s),E5=r(lm," plus requires 0 or 1 or 2 GPUs"),lm.forEach(s),g5=h(he),Cs=a(he,"LI",{});var am=i(Cs);fh=a(am,"CODE",{});var zO=i(fh);b5=r(zO,"require_torch_tpu"),zO.forEach(s),k5=r(am," - as "),hh=a(am,"CODE",{});var RO=i(hh);C5=r(RO,"require_torch"),RO.forEach(s),j5=r(am," plus requires at least 1 TPU"),am.forEach(s),he.forEach(s),wv=h(e),Si=a(e,"P",{});var GO=i(Si);x5=r(GO,"Let\u2019s depict the GPU requirements in the following table:"),GO.forEach(s),yv=h(e),C=a(e,"P",{});var P=i(C);P5=r(P,`| n gpus | decorator | |--------+--------------------------------| | `),uh=a(P,"CODE",{});var FO=i(uh);O5=r(FO,">= 0"),FO.forEach(s),I5=r(P," | "),dh=a(P,"CODE",{});var MO=i(dh);T5=r(MO,"@require_torch"),MO.forEach(s),A5=r(P,` | | `),ch=a(P,"CODE",{});var WO=i(ch);D5=r(WO,">= 1"),WO.forEach(s),q5=r(P," | "),mh=a(P,"CODE",{});var BO=i(mh);S5=r(BO,"@require_torch_gpu"),BO.forEach(s),L5=r(P,` | | `),_h=a(P,"CODE",{});var YO=i(_h);U5=r(YO,">= 2"),YO.forEach(s),N5=r(P," | "),vh=a(P,"CODE",{});var VO=i(vh);H5=r(VO,"@require_torch_multi_gpu"),VO.forEach(s),z5=r(P,` | | `),wh=a(P,"CODE",{});var XO=i(wh);R5=r(XO,"< 2"),XO.forEach(s),G5=r(P," | "),yh=a(P,"CODE",{});var JO=i(yh);F5=r(JO,"@require_torch_non_multi_gpu"),JO.forEach(s),M5=r(P,` | | `),$h=a(P,"CODE",{});var ZO=i($h);W5=r(ZO,"< 3"),ZO.forEach(s),B5=r(P," | "),Eh=a(P,"CODE",{});var KO=i(Eh);Y5=r(KO,"@require_torch_up_to_2_gpus"),KO.forEach(s),V5=r(P," |"),P.forEach(s),$v=h(e),Li=a(e,"P",{});var QO=i(Li);X5=r(QO,"For example, here is a test that must be run only when there are 2 or more GPUs available and pytorch is installed:"),QO.forEach(s),Ev=h(e),c(nl.$$.fragment,e),gv=h(e),Pe=a(e,"P",{});var Xn=i(Pe);J5=r(Xn,"If a test requires "),gh=a(Xn,"CODE",{});var eI=i(gh);Z5=r(eI,"tensorflow"),eI.forEach(s),K5=r(Xn," use the "),bh=a(Xn,"CODE",{});var tI=i(bh);Q5=r(tI,"require_tf"),tI.forEach(s),ek=r(Xn," decorator. For example:"),Xn.forEach(s),bv=h(e),c(pl.$$.fragment,e),kv=h(e),Ui=a(e,"P",{});var sI=i(Ui);tk=r(sI,`These decorators can be stacked. 
For example, if a test is slow and requires at least one GPU under pytorch, here is how to set it up:`),sI.forEach(s),Cv=h(e),c(fl.$$.fragment,e),jv=h(e),Oe=a(e,"P",{});var Jn=i(Oe);sk=r(Jn,"Some decorators like "),kh=a(Jn,"CODE",{});var oI=i(kh);ok=r(oI,"@parametrized"),oI.forEach(s),rk=r(Jn," rewrite test names, therefore "),Ch=a(Jn,"CODE",{});var rI=i(Ch);lk=r(rI,"@require_*"),rI.forEach(s),ak=r(Jn,` skip decorators have to be listed last for them to work correctly. Here is an example of the correct usage:`),Jn.forEach(s),xv=h(e),c(hl.$$.fragment,e),Pv=h(e),js=a(e,"P",{});var x$=i(js);ik=r(x$,"This order problem doesn\u2019t exist with "),jh=a(x$,"CODE",{});var lI=i(jh);nk=r(lI,"@pytest.mark.parametrize"),lI.forEach(s),pk=r(x$,`, you can put it first or last and it will still work. But it only works with non-unittests.`),x$.forEach(s),Ov=h(e),Ni=a(e,"P",{});var aI=i(Ni);fk=r(aI,"Inside tests:"),aI.forEach(s),Iv=h(e),Hi=a(e,"UL",{});var iI=i(Hi);xh=a(iI,"LI",{});var nI=i(xh);hk=r(nI,"How many GPUs are available:"),nI.forEach(s),iI.forEach(s),Tv=h(e),c(ul.$$.fragment,e),Av=h(e),mt=a(e,"H3",{class:!0});var P$=i(mt);xs=a(P$,"A",{id:!0,class:!0,href:!0});var pI=i(xs);Ph=a(pI,"SPAN",{});var fI=i(Ph);c(dl.$$.fragment,fI),fI.forEach(s),pI.forEach(s),uk=h(P$),Oh=a(P$,"SPAN",{});var hI=i(Oh);dk=r(hI,"Distributed training"),hI.forEach(s),P$.forEach(s),Dv=h(e),_t=a(e,"P",{});var im=i(_t);Ih=a(im,"CODE",{});var uI=i(Ih);ck=r(uI,"pytest"),uI.forEach(s),mk=r(im,` can\u2019t deal with distributed training directly. If this is attempted - the sub-processes don\u2019t do the right thing and end up thinking they are `),Th=a(im,"CODE",{});var dI=i(Th);_k=r(dI,"pytest"),dI.forEach(s),vk=r(im,` and start running the test suite in loops. It works, however, if one spawns a normal process that then spawns off multiple workers and manages the IO pipes.`),im.forEach(s),qv=h(e),zi=a(e,"P",{});var cI=i(zi);wk=r(cI,"Here are some tests that use it:"),cI.forEach(s),Sv=h(e),Ps=a(e,"UL",{});var O$=i(Ps);Ah=a(O$,"LI",{});var mI=i(Ah);cl=a(mI,"A",{href:!0,rel:!0});var _I=i(cl);yk=r(_I,"test_trainer_distributed.py"),_I.forEach(s),mI.forEach(s),$k=h(O$),Dh=a(O$,"LI",{});var vI=i(Dh);ml=a(vI,"A",{href:!0,rel:!0});var wI=i(ml);Ek=r(wI,"test_deepspeed.py"),wI.forEach(s),vI.forEach(s),O$.forEach(s),Lv=h(e),Os=a(e,"P",{});var I$=i(Os);gk=r(I$,"To jump right into the execution point, search for the "),qh=a(I$,"CODE",{});var yI=i(qh);bk=r(yI,"execute_subprocess_async"),yI.forEach(s),kk=r(I$," call in those tests."),I$.forEach(s),Uv=h(e),Ri=a(e,"P",{});var $I=i(Ri);Ck=r($I,"You will need at least 2 GPUs to see these tests in action:"),$I.forEach(s),Nv=h(e),c(_l.$$.fragment,e),Hv=h(e),vt=a(e,"H3",{class:!0});var T$=i(vt);Is=a(T$,"A",{id:!0,class:!0,href:!0});var EI=i(Is);Sh=a(EI,"SPAN",{});var gI=i(Sh);c(vl.$$.fragment,gI),gI.forEach(s),EI.forEach(s),jk=h(T$),Lh=a(T$,"SPAN",{});var bI=i(Lh);xk=r(bI,"Output capture"),bI.forEach(s),T$.forEach(s),zv=h(e),Ie=a(e,"P",{});var Zn=i(Ie);Pk=r(Zn,"During test execution any output sent to "),Uh=a(Zn,"CODE",{});var kI=i(Uh);Ok=r(kI,"stdout"),kI.forEach(s),Ik=r(Zn," and "),Nh=a(Zn,"CODE",{});var CI=i(Nh);Tk=r(CI,"stderr"),CI.forEach(s),Ak=r(Zn,` is captured. 
If a test or a setup method fails, its according captured output will usually be shown along with the failure traceback.`),Zn.forEach(s),Rv=h(e),W=a(e,"P",{});var Ge=i(W);Dk=r(Ge,"To disable output capturing and to get the "),Hh=a(Ge,"CODE",{});var jI=i(Hh);qk=r(jI,"stdout"),jI.forEach(s),Sk=r(Ge," and "),zh=a(Ge,"CODE",{});var xI=i(zh);Lk=r(xI,"stderr"),xI.forEach(s),Uk=r(Ge," normally, use "),Rh=a(Ge,"CODE",{});var PI=i(Rh);Nk=r(PI,"-s"),PI.forEach(s),Hk=r(Ge," or "),Gh=a(Ge,"CODE",{});var OI=i(Gh);zk=r(OI,"--capture=no"),OI.forEach(s),Rk=r(Ge,":"),Ge.forEach(s),Gv=h(e),c(wl.$$.fragment,e),Fv=h(e),Gi=a(e,"P",{});var II=i(Gi);Gk=r(II,"To send test results to JUnit format output:"),II.forEach(s),Mv=h(e),c(yl.$$.fragment,e),Wv=h(e),wt=a(e,"H3",{class:!0});var A$=i(wt);Ts=a(A$,"A",{id:!0,class:!0,href:!0});var TI=i(Ts);Fh=a(TI,"SPAN",{});var AI=i(Fh);c($l.$$.fragment,AI),AI.forEach(s),TI.forEach(s),Fk=h(A$),Mh=a(A$,"SPAN",{});var DI=i(Mh);Mk=r(DI,"Color control"),DI.forEach(s),A$.forEach(s),Bv=h(e),Fi=a(e,"P",{});var qI=i(Fi);Wk=r(qI,"To have no color (e.g., yellow on white background is not readable):"),qI.forEach(s),Yv=h(e),c(El.$$.fragment,e),Vv=h(e),yt=a(e,"H3",{class:!0});var D$=i(yt);As=a(D$,"A",{id:!0,class:!0,href:!0});var SI=i(As);Wh=a(SI,"SPAN",{});var LI=i(Wh);c(gl.$$.fragment,LI),LI.forEach(s),SI.forEach(s),Bk=h(D$),Bh=a(D$,"SPAN",{});var UI=i(Bh);Yk=r(UI,"Sending test report to online pastebin service"),UI.forEach(s),D$.forEach(s),Xv=h(e),Mi=a(e,"P",{});var NI=i(Mi);Vk=r(NI,"Creating a URL for each test failure:"),NI.forEach(s),Jv=h(e),c(bl.$$.fragment,e),Zv=h(e),Wi=a(e,"P",{});var HI=i(Wi);Xk=r(HI,`This will submit test run information to a remote Paste service and provide a URL for each failure. You may select tests as usual or add for example -x if you only want to send one particular failure.`),HI.forEach(s),Kv=h(e),Bi=a(e,"P",{});var zI=i(Bi);Jk=r(zI,"Creating a URL for a whole test session log:"),zI.forEach(s),Qv=h(e),c(kl.$$.fragment,e),e1=h(e),$t=a(e,"H2",{class:!0});var q$=i($t);Ds=a(q$,"A",{id:!0,class:!0,href:!0});var RI=i(Ds);Yh=a(RI,"SPAN",{});var GI=i(Yh);c(Cl.$$.fragment,GI),GI.forEach(s),RI.forEach(s),Zk=h(q$),Vh=a(q$,"SPAN",{});var FI=i(Vh);Kk=r(FI,"Writing tests"),FI.forEach(s),q$.forEach(s),t1=h(e),Te=a(e,"P",{});var Kn=i(Te);Qk=r(Kn,"\u{1F917} transformers tests are based on "),Xh=a(Kn,"CODE",{});var MI=i(Xh);e4=r(MI,"unittest"),MI.forEach(s),t4=r(Kn,", but run by "),Jh=a(Kn,"CODE",{});var WI=i(Jh);s4=r(WI,"pytest"),WI.forEach(s),o4=r(Kn,`, so most of the time features from both systems can be used.`),Kn.forEach(s),s1=h(e),se=a(e,"P",{});var jo=i(se);r4=r(jo,"You can read "),jl=a(jo,"A",{href:!0,rel:!0});var BI=i(jl);l4=r(BI,"here"),BI.forEach(s),a4=r(jo,` which features are supported, but the important thing to remember is that most `),Zh=a(jo,"CODE",{});var YI=i(Zh);i4=r(YI,"pytest"),YI.forEach(s),n4=r(jo,` fixtures don\u2019t work. Neither parametrization, but we use the module `),Kh=a(jo,"CODE",{});var VI=i(Kh);p4=r(VI,"parameterized"),VI.forEach(s),f4=r(jo," that works in a similar way."),jo.forEach(s),o1=h(e),Et=a(e,"H3",{class:!0});var S$=i(Et);qs=a(S$,"A",{id:!0,class:!0,href:!0});var XI=i(qs);Qh=a(XI,"SPAN",{});var JI=i(Qh);c(xl.$$.fragment,JI),JI.forEach(s),XI.forEach(s),h4=h(S$),eu=a(S$,"SPAN",{});var ZI=i(eu);u4=r(ZI,"Parametrization"),ZI.forEach(s),S$.forEach(s),r1=h(e),Yi=a(e,"P",{});var KI=i(Yi);d4=r(KI,`Often, there is a need to run the same test multiple times, but with different arguments. 
It could be done from within the test, but then there is no way of running that test for just one set of arguments.`),KI.forEach(s),l1=h(e),c(Pl.$$.fragment,e),a1=h(e),Ss=a(e,"P",{});var L$=i(Ss);c4=r(L$,"Now, by default this test will be run 3 times, each time with the last 3 arguments of "),tu=a(L$,"CODE",{});var QI=i(tu);m4=r(QI,"test_floor"),QI.forEach(s),_4=r(L$,` being assigned the corresponding arguments in the parameter list.`),L$.forEach(s),i1=h(e),Ae=a(e,"P",{});var Qn=i(Ae);v4=r(Qn,"and you could run just the "),su=a(Qn,"CODE",{});var eT=i(su);w4=r(eT,"negative"),eT.forEach(s),y4=r(Qn," and "),ou=a(Qn,"CODE",{});var tT=i(ou);$4=r(tT,"integer"),tT.forEach(s),E4=r(Qn," sets of params with:"),Qn.forEach(s),n1=h(e),c(Ol.$$.fragment,e),p1=h(e),Ls=a(e,"P",{});var U$=i(Ls);g4=r(U$,"or all but "),ru=a(U$,"CODE",{});var sT=i(ru);b4=r(sT,"negative"),sT.forEach(s),k4=r(U$," sub-tests, with:"),U$.forEach(s),f1=h(e),c(Il.$$.fragment,e),h1=h(e),Us=a(e,"P",{});var N$=i(Us);C4=r(N$,"Besides using the "),lu=a(N$,"CODE",{});var oT=i(lu);j4=r(oT,"-k"),oT.forEach(s),x4=r(N$,` filter that was just mentioned, you can find out the exact name of each sub-test and run any or all of them using their exact names.`),N$.forEach(s),u1=h(e),c(Tl.$$.fragment,e),d1=h(e),Vi=a(e,"P",{});var rT=i(Vi);P4=r(rT,"and it will list:"),rT.forEach(s),c1=h(e),c(Al.$$.fragment,e),m1=h(e),Xi=a(e,"P",{});var lT=i(Xi);O4=r(lT,"So now you can run just 2 specific sub-tests:"),lT.forEach(s),_1=h(e),c(Dl.$$.fragment,e),v1=h(e),B=a(e,"P",{});var Fe=i(B);I4=r(Fe,"The module "),ql=a(Fe,"A",{href:!0,rel:!0});var aT=i(ql);T4=r(aT,"parameterized"),aT.forEach(s),A4=r(Fe,` which is already in the developer dependencies of `),au=a(Fe,"CODE",{});var iT=i(au);D4=r(iT,"transformers"),iT.forEach(s),q4=r(Fe," works for both: "),iu=a(Fe,"CODE",{});var nT=i(iu);S4=r(nT,"unittests"),nT.forEach(s),L4=r(Fe," and "),nu=a(Fe,"CODE",{});var pT=i(nu);U4=r(pT,"pytest"),pT.forEach(s),N4=r(Fe," tests."),Fe.forEach(s),w1=h(e),oe=a(e,"P",{});var xo=i(oe);H4=r(xo,"If, however, the test is not a "),pu=a(xo,"CODE",{});var fT=i(pu);z4=r(fT,"unittest"),fT.forEach(s),R4=r(xo,", you may use "),fu=a(xo,"CODE",{});var hT=i(fu);G4=r(hT,"pytest.mark.parametrize"),hT.forEach(s),F4=r(xo,` (or you may see it being used in some existing tests, mostly under `),hu=a(xo,"CODE",{});var uT=i(hu);M4=r(uT,"examples"),uT.forEach(s),W4=r(xo,")."),xo.forEach(s),y1=h(e),De=a(e,"P",{});var ep=i(De);B4=r(ep,"Here is the same example, this time using "),uu=a(ep,"CODE",{});var dT=i(uu);Y4=r(dT,"pytest"),dT.forEach(s),V4=r(ep,"\u2019s "),du=a(ep,"CODE",{});var cT=i(du);X4=r(cT,"parametrize"),cT.forEach(s),J4=r(ep," marker:"),ep.forEach(s),$1=h(e),c(Sl.$$.fragment,e),E1=h(e),re=a(e,"P",{});var Po=i(re);Z4=r(Po,"Same as with "),cu=a(Po,"CODE",{});var mT=i(cu);K4=r(mT,"parameterized"),mT.forEach(s),Q4=r(Po,", with "),mu=a(Po,"CODE",{});var _T=i(mu);e0=r(_T,"pytest.mark.parametrize"),_T.forEach(s),t0=r(Po,` you can have a fine control over which sub-tests are run, if the `),_u=a(Po,"CODE",{});var vT=i(_u);s0=r(vT,"-k"),vT.forEach(s),o0=r(Po,` filter doesn\u2019t do the job. Except, this parametrization function creates a slightly different set of names for the sub-tests. 
Here is what they look like:`),Po.forEach(s),g1=h(e),c(Ll.$$.fragment,e),b1=h(e),Ji=a(e,"P",{});var wT=i(Ji);r0=r(wT,"and it will list:"),wT.forEach(s),k1=h(e),c(Ul.$$.fragment,e),C1=h(e),Zi=a(e,"P",{});var yT=i(Zi);l0=r(yT,"So now you can run just the specific test:"),yT.forEach(s),j1=h(e),c(Nl.$$.fragment,e),x1=h(e),Ki=a(e,"P",{});var $T=i(Ki);a0=r($T,"as in the previous example."),$T.forEach(s),P1=h(e),gt=a(e,"H3",{class:!0});var H$=i(gt);Ns=a(H$,"A",{id:!0,class:!0,href:!0});var ET=i(Ns);vu=a(ET,"SPAN",{});var gT=i(vu);c(Hl.$$.fragment,gT),gT.forEach(s),ET.forEach(s),i0=h(H$),wu=a(H$,"SPAN",{});var bT=i(wu);n0=r(bT,"Files and directories"),bT.forEach(s),H$.forEach(s),O1=h(e),Hs=a(e,"P",{});var z$=i(Hs);p0=r(z$,`In tests often we need to know where things are relative to the current test file, and it\u2019s not trivial since the test could be invoked from more than one directory or could reside in sub-directories with different depths. A helper class `),yu=a(z$,"CODE",{});var kT=i(yu);f0=r(kT,"transformers.test_utils.TestCasePlus"),kT.forEach(s),h0=r(z$,` solves this problem by sorting out all the basic paths and provides easy accessors to them:`),z$.forEach(s),I1=h(e),zs=a(e,"UL",{});var R$=i(zs);zl=a(R$,"LI",{});var G$=i(zl);Qi=a(G$,"P",{});var Ej=i(Qi);$u=a(Ej,"CODE",{});var CT=i($u);u0=r(CT,"pathlib"),CT.forEach(s),d0=r(Ej," objects (all fully resolved):"),Ej.forEach(s),c0=h(G$),G=a(G$,"UL",{});var ue=i(G);Rl=a(ue,"LI",{});var F$=i(Rl);Eu=a(F$,"CODE",{});var jT=i(Eu);m0=r(jT,"test_file_path"),jT.forEach(s),_0=r(F$," - the current test file path, i.e. "),gu=a(F$,"CODE",{});var xT=i(gu);v0=r(xT,"__file__"),xT.forEach(s),F$.forEach(s),w0=h(ue),en=a(ue,"LI",{});var gj=i(en);bu=a(gj,"CODE",{});var PT=i(bu);y0=r(PT,"test_file_dir"),PT.forEach(s),$0=r(gj," - the directory containing the current test file"),gj.forEach(s),E0=h(ue),Rs=a(ue,"LI",{});var nm=i(Rs);ku=a(nm,"CODE",{});var OT=i(ku);g0=r(OT,"tests_dir"),OT.forEach(s),b0=r(nm," - the directory of the "),Cu=a(nm,"CODE",{});var IT=i(Cu);k0=r(IT,"tests"),IT.forEach(s),C0=r(nm," test suite"),nm.forEach(s),j0=h(ue),Gs=a(ue,"LI",{});var pm=i(Gs);ju=a(pm,"CODE",{});var TT=i(ju);x0=r(TT,"examples_dir"),TT.forEach(s),P0=r(pm," - the directory of the "),xu=a(pm,"CODE",{});var AT=i(xu);O0=r(AT,"examples"),AT.forEach(s),I0=r(pm," test suite"),pm.forEach(s),T0=h(ue),tn=a(ue,"LI",{});var bj=i(tn);Pu=a(bj,"CODE",{});var DT=i(Pu);A0=r(DT,"repo_root_dir"),DT.forEach(s),D0=r(bj," - the directory of the repository"),bj.forEach(s),q0=h(ue),qe=a(ue,"LI",{});var ei=i(qe);Ou=a(ei,"CODE",{});var qT=i(Ou);S0=r(qT,"src_dir"),qT.forEach(s),L0=r(ei," - the directory of "),Iu=a(ei,"CODE",{});var ST=i(Iu);U0=r(ST,"src"),ST.forEach(s),N0=r(ei," (i.e. 
where the "),Tu=a(ei,"CODE",{});var LT=i(Tu);H0=r(LT,"transformers"),LT.forEach(s),z0=r(ei," sub-dir resides)"),ei.forEach(s),ue.forEach(s),G$.forEach(s),R0=h(R$),Gl=a(R$,"LI",{});var M$=i(Gl);Fl=a(M$,"P",{});var W$=i(Fl);G0=r(W$,"stringified paths---same as above but these return paths as strings, rather than "),Au=a(W$,"CODE",{});var UT=i(Au);F0=r(UT,"pathlib"),UT.forEach(s),M0=r(W$," objects:"),W$.forEach(s),W0=h(M$),F=a(M$,"UL",{});var de=i(F);Du=a(de,"LI",{});var NT=i(Du);qu=a(NT,"CODE",{});var HT=i(qu);B0=r(HT,"test_file_path_str"),HT.forEach(s),NT.forEach(s),Y0=h(de),Su=a(de,"LI",{});var zT=i(Su);Lu=a(zT,"CODE",{});var RT=i(Lu);V0=r(RT,"test_file_dir_str"),RT.forEach(s),zT.forEach(s),X0=h(de),Uu=a(de,"LI",{});var GT=i(Uu);Nu=a(GT,"CODE",{});var FT=i(Nu);J0=r(FT,"tests_dir_str"),FT.forEach(s),GT.forEach(s),Z0=h(de),Hu=a(de,"LI",{});var MT=i(Hu);zu=a(MT,"CODE",{});var WT=i(zu);K0=r(WT,"examples_dir_str"),WT.forEach(s),MT.forEach(s),Q0=h(de),Ru=a(de,"LI",{});var BT=i(Ru);Gu=a(BT,"CODE",{});var YT=i(Gu);e6=r(YT,"repo_root_dir_str"),YT.forEach(s),BT.forEach(s),t6=h(de),Fu=a(de,"LI",{});var VT=i(Fu);Mu=a(VT,"CODE",{});var XT=i(Mu);s6=r(XT,"src_dir_str"),XT.forEach(s),VT.forEach(s),de.forEach(s),M$.forEach(s),R$.forEach(s),T1=h(e),Fs=a(e,"P",{});var B$=i(Fs);o6=r(B$,`To start using those all you need is to make sure that the test resides in a subclass of `),Wu=a(B$,"CODE",{});var JT=i(Wu);r6=r(JT,"transformers.test_utils.TestCasePlus"),JT.forEach(s),l6=r(B$,". For example:"),B$.forEach(s),A1=h(e),c(Ml.$$.fragment,e),D1=h(e),Y=a(e,"P",{});var Me=i(Y);a6=r(Me,"If you don\u2019t need to manipulate paths via "),Bu=a(Me,"CODE",{});var ZT=i(Bu);i6=r(ZT,"pathlib"),ZT.forEach(s),n6=r(Me,` or you just need a path as a string, you can always invoked `),Yu=a(Me,"CODE",{});var KT=i(Yu);p6=r(KT,"str()"),KT.forEach(s),f6=r(Me," on the "),Vu=a(Me,"CODE",{});var QT=i(Vu);h6=r(QT,"pathlib"),QT.forEach(s),u6=r(Me," object or use the accessors ending with "),Xu=a(Me,"CODE",{});var eA=i(Xu);d6=r(eA,"_str"),eA.forEach(s),c6=r(Me,". For example:"),Me.forEach(s),q1=h(e),c(Wl.$$.fragment,e),S1=h(e),bt=a(e,"H3",{class:!0});var Y$=i(bt);Ms=a(Y$,"A",{id:!0,class:!0,href:!0});var tA=i(Ms);Ju=a(tA,"SPAN",{});var sA=i(Ju);c(Bl.$$.fragment,sA),sA.forEach(s),tA.forEach(s),m6=h(Y$),Zu=a(Y$,"SPAN",{});var oA=i(Zu);_6=r(oA,"Temporary files and directories"),oA.forEach(s),Y$.forEach(s),L1=h(e),Ws=a(e,"P",{});var V$=i(Ws);v6=r(V$,`Using unique temporary files and directories are essential for parallel test running, so that the tests won\u2019t overwrite each other\u2019s data. Also we want to get the temporary files and directories removed at the end of each test that created them. Therefore, using packages like `),Ku=a(V$,"CODE",{});var rA=i(Ku);w6=r(rA,"tempfile"),rA.forEach(s),y6=r(V$,", which address these needs is essential."),V$.forEach(s),U1=h(e),sn=a(e,"P",{});var lA=i(sn);$6=r(lA,`However, when debugging tests, you need to be able to see what goes into the temporary file or directory and you want to know it\u2019s exact path and not having it randomized on every test re-run.`),lA.forEach(s),N1=h(e),Se=a(e,"P",{});var tp=i(Se);E6=r(tp,"A helper class "),Qu=a(tp,"CODE",{});var aA=i(Qu);g6=r(aA,"transformers.test_utils.TestCasePlus"),aA.forEach(s),b6=r(tp,` is best used for such purposes. 
It\u2019s a sub-class of `),ed=a(tp,"CODE",{});var iA=i(ed);k6=r(iA,"unittest.TestCase"),iA.forEach(s),C6=r(tp,", so we can easily inherit from it in the test modules."),tp.forEach(s),H1=h(e),on=a(e,"P",{});var nA=i(on);j6=r(nA,"Here is an example of its usage:"),nA.forEach(s),z1=h(e),c(Yl.$$.fragment,e),R1=h(e),Bs=a(e,"P",{});var X$=i(Bs);x6=r(X$,"This code creates a unique temporary directory, and sets "),td=a(X$,"CODE",{});var pA=i(td);P6=r(pA,"tmp_dir"),pA.forEach(s),O6=r(X$," to its location."),X$.forEach(s),G1=h(e),rn=a(e,"UL",{});var fA=i(rn);sd=a(fA,"LI",{});var hA=i(sd);I6=r(hA,"Create a unique temporary dir:"),hA.forEach(s),fA.forEach(s),F1=h(e),c(Vl.$$.fragment,e),M1=h(e),Xl=a(e,"P",{});var kj=i(Xl);od=a(kj,"CODE",{});var uA=i(od);T6=r(uA,"tmp_dir"),uA.forEach(s),A6=r(kj,` will contain the path to the created temporary dir. It will be automatically removed at the end of the test.`),kj.forEach(s),W1=h(e),ln=a(e,"UL",{});var dA=i(ln);rd=a(dA,"LI",{});var cA=i(rd);D6=r(cA,"Create a temporary dir of my choice, ensure it\u2019s empty before the test starts and don\u2019t empty it after the test."),cA.forEach(s),dA.forEach(s),B1=h(e),c(Jl.$$.fragment,e),Y1=h(e),an=a(e,"P",{});var mA=i(an);q6=r(mA,`This is useful for debug when you want to monitor a specific directory and want to make sure the previous tests didn\u2019t leave any data in there.`),mA.forEach(s),V1=h(e),nn=a(e,"UL",{});var _A=i(nn);Zl=a(_A,"LI",{});var J$=i(Zl);kt=a(J$,"P",{});var sp=i(kt);S6=r(sp,"You can override the default behavior by directly overriding the "),ld=a(sp,"CODE",{});var vA=i(ld);L6=r(vA,"before"),vA.forEach(s),U6=r(sp," and "),ad=a(sp,"CODE",{});var wA=i(ad);N6=r(wA,"after"),wA.forEach(s),H6=r(sp,` args, leading to one of the following behaviors:`),sp.forEach(s),z6=h(J$),we=a(J$,"UL",{});var Oo=i(we);pn=a(Oo,"LI",{});var Cj=i(pn);id=a(Cj,"CODE",{});var yA=i(id);R6=r(yA,"before=True"),yA.forEach(s),G6=r(Cj,": the temporary dir will always be cleared at the beginning of the test."),Cj.forEach(s),F6=h(Oo),fn=a(Oo,"LI",{});var jj=i(fn);nd=a(jj,"CODE",{});var $A=i(nd);M6=r($A,"before=False"),$A.forEach(s),W6=r(jj,": if the temporary dir already existed, any existing files will remain there."),jj.forEach(s),B6=h(Oo),hn=a(Oo,"LI",{});var xj=i(hn);pd=a(xj,"CODE",{});var EA=i(pd);Y6=r(EA,"after=True"),EA.forEach(s),V6=r(xj,": the temporary dir will always be deleted at the end of the test."),xj.forEach(s),X6=h(Oo),un=a(Oo,"LI",{});var Pj=i(un);fd=a(Pj,"CODE",{});var gA=i(fd);J6=r(gA,"after=False"),gA.forEach(s),Z6=r(Pj,": the temporary dir will always be left intact at the end of the test."),Pj.forEach(s),Oo.forEach(s),J$.forEach(s),_A.forEach(s),X1=h(e),c(Ys.$$.fragment,e),J1=h(e),c(Vs.$$.fragment,e),Z1=h(e),Ct=a(e,"H3",{class:!0});var Z$=i(Ct);Xs=a(Z$,"A",{id:!0,class:!0,href:!0});var bA=i(Xs);hd=a(bA,"SPAN",{});var kA=i(hd);c(Kl.$$.fragment,kA),kA.forEach(s),bA.forEach(s),K6=h(Z$),ud=a(Z$,"SPAN",{});var CA=i(ud);Q6=r(CA,"Temporary sys.path override"),CA.forEach(s),Z$.forEach(s),K1=h(e),Le=a(e,"P",{});var op=i(Le);e7=r(op,"If you need to temporary override "),dd=a(op,"CODE",{});var jA=i(dd);t7=r(jA,"sys.path"),jA.forEach(s),s7=r(op,` to import from another test for example, you can use the `),cd=a(op,"CODE",{});var xA=i(cd);o7=r(xA,"ExtendSysPath"),xA.forEach(s),r7=r(op," context manager. 
Example:"),op.forEach(s),Q1=h(e),c(Ql.$$.fragment,e),ew=h(e),jt=a(e,"H3",{class:!0});var K$=i(jt);Js=a(K$,"A",{id:!0,class:!0,href:!0});var PA=i(Js);md=a(PA,"SPAN",{});var OA=i(md);c(ea.$$.fragment,OA),OA.forEach(s),PA.forEach(s),l7=h(K$),_d=a(K$,"SPAN",{});var IA=i(_d);a7=r(IA,"Skipping tests"),IA.forEach(s),K$.forEach(s),tw=h(e),Zs=a(e,"P",{});var Q$=i(Zs);i7=r(Q$,`This is useful when a bug is found and a new test is written, yet the bug is not fixed yet. In order to be able to commit it to the main repository we need make sure it\u2019s skipped during `),vd=a(Q$,"CODE",{});var TA=i(vd);n7=r(TA,"make test"),TA.forEach(s),p7=r(Q$,"."),Q$.forEach(s),sw=h(e),dn=a(e,"P",{});var AA=i(dn);f7=r(AA,"Methods:"),AA.forEach(s),ow=h(e),Ks=a(e,"UL",{});var eE=i(Ks);wd=a(eE,"LI",{});var DA=i(wd);ta=a(DA,"P",{});var tE=i(ta);h7=r(tE,"A "),yd=a(tE,"STRONG",{});var qA=i(yd);u7=r(qA,"skip"),qA.forEach(s),d7=r(tE,` means that you expect your test to pass only if some conditions are met, otherwise pytest should skip running the test altogether. Common examples are skipping windows-only tests on non-windows platforms, or skipping tests that depend on an external resource which is not available at the moment (for example a database).`),tE.forEach(s),DA.forEach(s),c7=h(eE),$d=a(eE,"LI",{});var SA=i($d);sa=a(SA,"P",{});var sE=i(sa);m7=r(sE,"A "),Ed=a(sE,"STRONG",{});var LA=i(Ed);_7=r(LA,"xfail"),LA.forEach(s),v7=r(sE,` means that you expect a test to fail for some reason. A common example is a test for a feature not yet implemented, or a bug not yet fixed. When a test passes despite being expected to fail (marked with pytest.mark.xfail), it\u2019s an xpass and will be reported in the test summary.`),sE.forEach(s),SA.forEach(s),eE.forEach(s),rw=h(e),le=a(e,"P",{});var Io=i(le);w7=r(Io,"One of the important differences between the two is that "),gd=a(Io,"CODE",{});var UA=i(gd);y7=r(UA,"skip"),UA.forEach(s),$7=r(Io," doesn\u2019t run the test, and "),bd=a(Io,"CODE",{});var NA=i(bd);E7=r(NA,"xfail"),NA.forEach(s),g7=r(Io,` does. 
So if the code that\u2019s buggy causes some bad state that will affect other tests, do not use `),kd=a(Io,"CODE",{});var HA=i(kd);b7=r(HA,"xfail"),HA.forEach(s),k7=r(Io,"."),Io.forEach(s),lw=h(e),xt=a(e,"H4",{class:!0});var oE=i(xt);Qs=a(oE,"A",{id:!0,class:!0,href:!0});var zA=i(Qs);Cd=a(zA,"SPAN",{});var RA=i(Cd);c(oa.$$.fragment,RA),RA.forEach(s),zA.forEach(s),C7=h(oE),jd=a(oE,"SPAN",{});var GA=i(jd);j7=r(GA,"Implementation"),GA.forEach(s),oE.forEach(s),aw=h(e),cn=a(e,"UL",{});var FA=i(cn);xd=a(FA,"LI",{});var MA=i(xd);x7=r(MA,"Here is how to skip whole test unconditionally:"),MA.forEach(s),FA.forEach(s),iw=h(e),c(ra.$$.fragment,e),nw=h(e),mn=a(e,"P",{});var WA=i(mn);P7=r(WA,"or via pytest:"),WA.forEach(s),pw=h(e),c(la.$$.fragment,e),fw=h(e),eo=a(e,"P",{});var rE=i(eo);O7=r(rE,"or the "),Pd=a(rE,"CODE",{});var BA=i(Pd);I7=r(BA,"xfail"),BA.forEach(s),T7=r(rE," way:"),rE.forEach(s),hw=h(e),c(aa.$$.fragment,e),uw=h(e),_n=a(e,"UL",{});var YA=i(_n);Od=a(YA,"LI",{});var VA=i(Od);A7=r(VA,"Here is how to skip a test based on some internal check inside the test:"),VA.forEach(s),YA.forEach(s),dw=h(e),c(ia.$$.fragment,e),cw=h(e),vn=a(e,"P",{});var XA=i(vn);D7=r(XA,"or the whole module:"),XA.forEach(s),mw=h(e),c(na.$$.fragment,e),_w=h(e),to=a(e,"P",{});var lE=i(to);q7=r(lE,"or the "),Id=a(lE,"CODE",{});var JA=i(Id);S7=r(JA,"xfail"),JA.forEach(s),L7=r(lE," way:"),lE.forEach(s),vw=h(e),c(pa.$$.fragment,e),ww=h(e),wn=a(e,"UL",{});var ZA=i(wn);Td=a(ZA,"LI",{});var KA=i(Td);U7=r(KA,"Here is how to skip all tests in a module if some import is missing:"),KA.forEach(s),ZA.forEach(s),yw=h(e),c(fa.$$.fragment,e),$w=h(e),yn=a(e,"UL",{});var QA=i(yn);Ad=a(QA,"LI",{});var eD=i(Ad);N7=r(eD,"Skip a test based on a condition:"),eD.forEach(s),QA.forEach(s),Ew=h(e),c(ha.$$.fragment,e),gw=h(e),$n=a(e,"P",{});var tD=i($n);H7=r(tD,"or:"),tD.forEach(s),bw=h(e),c(ua.$$.fragment,e),kw=h(e),En=a(e,"P",{});var sD=i(En);z7=r(sD,"or skip the whole module:"),sD.forEach(s),Cw=h(e),c(da.$$.fragment,e),jw=h(e),so=a(e,"P",{});var aE=i(so);R7=r(aE,"More details, example and ways are "),ca=a(aE,"A",{href:!0,rel:!0});var oD=i(ca);G7=r(oD,"here"),oD.forEach(s),F7=r(aE,"."),aE.forEach(s),xw=h(e),Pt=a(e,"H3",{class:!0});var iE=i(Pt);oo=a(iE,"A",{id:!0,class:!0,href:!0});var rD=i(oo);Dd=a(rD,"SPAN",{});var lD=i(Dd);c(ma.$$.fragment,lD),lD.forEach(s),rD.forEach(s),M7=h(iE),qd=a(iE,"SPAN",{});var aD=i(qd);W7=r(aD,"Slow tests"),aD.forEach(s),iE.forEach(s),Pw=h(e),gn=a(e,"P",{});var iD=i(gn);B7=r(iD,`The library of tests is ever-growing, and some of the tests take minutes to run, therefore we can\u2019t afford waiting for an hour for the test suite to complete on CI. Therefore, with some exceptions for essential tests, slow tests should be marked as in the example below:`),iD.forEach(s),Ow=h(e),c(_a.$$.fragment,e),Iw=h(e),Ue=a(e,"P",{});var rp=i(Ue);Y7=r(rp,"Once a test is marked as "),Sd=a(rp,"CODE",{});var nD=i(Sd);V7=r(nD,"@slow"),nD.forEach(s),X7=r(rp,", to run such tests set "),Ld=a(rp,"CODE",{});var pD=i(Ld);J7=r(pD,"RUN_SLOW=1"),pD.forEach(s),Z7=r(rp," env var, e.g.:"),rp.forEach(s),Tw=h(e),c(va.$$.fragment,e),Aw=h(e),ae=a(e,"P",{});var To=i(ae);K7=r(To,"Some decorators like "),Ud=a(To,"CODE",{});var fD=i(Ud);Q7=r(fD,"@parameterized"),fD.forEach(s),e8=r(To," rewrite test names, therefore "),Nd=a(To,"CODE",{});var hD=i(Nd);t8=r(hD,"@slow"),hD.forEach(s),s8=r(To,` and the rest of the skip decorators `),Hd=a(To,"CODE",{});var uD=i(Hd);o8=r(uD,"@require_*"),uD.forEach(s),r8=r(To," have to be listed last for them to work correctly. 
Here is an example of the correct usage:"),To.forEach(s),Dw=h(e),c(wa.$$.fragment,e),qw=h(e),bn=a(e,"P",{});var dD=i(bn);l8=r(dD,`As explained at the beginning of this document, slow tests get to run on a scheduled basis, rather than in PRs CI checks. So it\u2019s possible that some problems will be missed during a PR submission and get merged. Such problems will get caught during the next scheduled CI job. But it also means that it\u2019s important to run the slow tests on your machine before submitting the PR.`),dD.forEach(s),Sw=h(e),kn=a(e,"P",{});var cD=i(kn);a8=r(cD,"Here is a rough decision making mechanism for choosing which tests should be marked as slow:"),cD.forEach(s),Lw=h(e),Cn=a(e,"P",{});var mD=i(Cn);i8=r(mD,`If the test is focused on one of the library\u2019s internal components (e.g., modeling files, tokenization files, pipelines), then we should run that test in the non-slow test suite. If it\u2019s focused on an other aspect of the library, such as the documentation or the examples, then we should run these tests in the slow test suite. And then, to refine this approach we should have exceptions:`),mD.forEach(s),Uw=h(e),ie=a(e,"UL",{});var Ao=i(ie);zd=a(Ao,"LI",{});var _D=i(zd);n8=r(_D,`All tests that need to download a heavy set of weights or a dataset that is larger than ~50MB (e.g., model or tokenizer integration tests, pipeline integration tests) should be set to slow. If you\u2019re adding a new model, you should create and upload to the hub a tiny version of it (with random weights) for integration tests. This is discussed in the following paragraphs.`),_D.forEach(s),p8=h(Ao),Rd=a(Ao,"LI",{});var vD=i(Rd);f8=r(vD,"All tests that need to do a training not specifically optimized to be fast should be set to slow."),vD.forEach(s),h8=h(Ao),Ot=a(Ao,"LI",{});var lp=i(Ot);u8=r(lp,`We can introduce exceptions if some of these should-be-non-slow tests are excruciatingly slow, and set them to `),Gd=a(lp,"CODE",{});var wD=i(Gd);d8=r(wD,"@slow"),wD.forEach(s),c8=r(lp,`. Auto-modeling tests, which save and load large files to disk, are a good example of tests that are marked as `),Fd=a(lp,"CODE",{});var yD=i(Fd);m8=r(yD,"@slow"),yD.forEach(s),_8=r(lp,"."),lp.forEach(s),v8=h(Ao),Md=a(Ao,"LI",{});var $D=i(Md);w8=r($D,"If a test completes under 1 second on CI (including downloads if any) then it should be a normal test regardless."),$D.forEach(s),Ao.forEach(s),Nw=h(e),Ne=a(e,"P",{});var ap=i(Ne);y8=r(ap,`Collectively, all the non-slow tests need to cover entirely the different internals, while remaining fast. For example, a significant coverage can be achieved by testing with specially created tiny models with random weights. Such models have the very minimal number of layers (e.g., 2), vocab size (e.g., 1000), etc. Then the `),Wd=a(ap,"CODE",{});var ED=i(Wd);$8=r(ED,"@slow"),ED.forEach(s),E8=r(ap,` tests can use large slow models to do qualitative testing. To see the use of these simply look for `),Bd=a(ap,"EM",{});var gD=i(Bd);g8=r(gD,"tiny"),gD.forEach(s),b8=r(ap," models with:"),ap.forEach(s),Hw=h(e),c(ya.$$.fragment,e),zw=h(e),He=a(e,"P",{});var ip=i(He);k8=r(ip,"Here is a an example of a "),$a=a(ip,"A",{href:!0,rel:!0});var bD=i($a);C8=r(bD,"script"),bD.forEach(s),j8=r(ip,` that created the tiny model `),Ea=a(ip,"A",{href:!0,rel:!0});var kD=i(Ea);x8=r(kD,"stas/tiny-wmt19-en-de"),kD.forEach(s),P8=r(ip,`. 
You can easily adjust it to your specific model\u2019s architecture.`),ip.forEach(s),Rw=h(e),ro=a(e,"P",{});var nE=i(ro);O8=r(nE,`It\u2019s easy to measure the run-time incorrectly if for example there is an overheard of downloading a huge model, but if you test it locally the downloaded files would be cached and thus the download time not measured. Hence check the execution speed report in CI logs instead (the output of `),Yd=a(nE,"CODE",{});var CD=i(Yd);I8=r(CD,"pytest --durations=0 tests"),CD.forEach(s),T8=r(nE,")."),nE.forEach(s),Gw=h(e),jn=a(e,"P",{});var jD=i(jn);A8=r(jD,`That report is also useful to find slow outliers that aren\u2019t marked as such, or which need to be re-written to be fast. If you notice that the test suite starts getting slow on CI, the top listing of this report will show the slowest tests.`),jD.forEach(s),Fw=h(e),It=a(e,"H3",{class:!0});var pE=i(It);lo=a(pE,"A",{id:!0,class:!0,href:!0});var xD=i(lo);Vd=a(xD,"SPAN",{});var PD=i(Vd);c(ga.$$.fragment,PD),PD.forEach(s),xD.forEach(s),D8=h(pE),Xd=a(pE,"SPAN",{});var OD=i(Xd);q8=r(OD,"Testing the stdout/stderr output"),OD.forEach(s),pE.forEach(s),Mw=h(e),V=a(e,"P",{});var We=i(V);S8=r(We,"In order to test functions that write to "),Jd=a(We,"CODE",{});var ID=i(Jd);L8=r(ID,"stdout"),ID.forEach(s),U8=r(We," and/or "),Zd=a(We,"CODE",{});var TD=i(Zd);N8=r(TD,"stderr"),TD.forEach(s),H8=r(We,`, the test can access those streams using the `),Kd=a(We,"CODE",{});var AD=i(Kd);z8=r(AD,"pytest"),AD.forEach(s),R8=r(We,"\u2019s "),ba=a(We,"A",{href:!0,rel:!0});var DD=i(ba);G8=r(DD,"capsys system"),DD.forEach(s),F8=r(We,". Here is how this is accomplished:"),We.forEach(s),Ww=h(e),c(ka.$$.fragment,e),Bw=h(e),ao=a(e,"P",{});var fE=i(ao);M8=r(fE,"And, of course, most of the time, "),Qd=a(fE,"CODE",{});var qD=i(Qd);W8=r(qD,"stderr"),qD.forEach(s),B8=r(fE,` will come as a part of an exception, so try/except has to be used in such a case:`),fE.forEach(s),Yw=h(e),c(Ca.$$.fragment,e),Vw=h(e),io=a(e,"P",{});var hE=i(io);Y8=r(hE,"Another approach to capturing stdout is via "),ec=a(hE,"CODE",{});var SD=i(ec);V8=r(SD,"contextlib.redirect_stdout"),SD.forEach(s),X8=r(hE,":"),hE.forEach(s),Xw=h(e),c(ja.$$.fragment,e),Jw=h(e),T=a(e,"P",{});var J=i(T);J8=r(J,"An important potential issue with capturing stdout is that it may contain "),tc=a(J,"CODE",{});var LD=i(tc);Z8=r(LD,"\\r"),LD.forEach(s),K8=r(J," characters that in normal "),sc=a(J,"CODE",{});var UD=i(sc);Q8=r(UD,"print"),UD.forEach(s),e9=r(J,` reset everything that has been printed so far. 
There is no problem with `),oc=a(J,"CODE",{});var ND=i(oc);t9=r(ND,"pytest"),ND.forEach(s),s9=r(J,", but with "),rc=a(J,"CODE",{});var HD=i(rc);o9=r(HD,"pytest -s"),HD.forEach(s),r9=r(J,` these characters get included in the buffer, so to be able to have the test run with and without `),lc=a(J,"CODE",{});var zD=i(lc);l9=r(zD,"-s"),zD.forEach(s),a9=r(J,`, you have to make an extra cleanup to the captured output, using `),ac=a(J,"CODE",{});var RD=i(ac);i9=r(RD,"re.sub(r'~.*\\r', '', buf, 0, re.M)"),RD.forEach(s),n9=r(J,"."),J.forEach(s),Zw=h(e),no=a(e,"P",{});var uE=i(no);p9=r(uE,`But, then we have a helper context manager wrapper to automatically take care of it all, regardless of whether it has some `),ic=a(uE,"CODE",{});var GD=i(ic);f9=r(GD,"\\r"),GD.forEach(s),h9=r(uE,"\u2019s in it or not, so it\u2019s a simple:"),uE.forEach(s),Kw=h(e),c(xa.$$.fragment,e),Qw=h(e),xn=a(e,"P",{});var FD=i(xn);u9=r(FD,"Here is a full test example:"),FD.forEach(s),ey=h(e),c(Pa.$$.fragment,e),ty=h(e),ze=a(e,"P",{});var np=i(ze);d9=r(np,"If you\u2019d like to capture "),nc=a(np,"CODE",{});var MD=i(nc);c9=r(MD,"stderr"),MD.forEach(s),m9=r(np," use the "),pc=a(np,"CODE",{});var WD=i(pc);_9=r(WD,"CaptureStderr"),WD.forEach(s),v9=r(np," class instead:"),np.forEach(s),sy=h(e),c(Oa.$$.fragment,e),oy=h(e),po=a(e,"P",{});var dE=i(po);w9=r(dE,"If you need to capture both streams at once, use the parent "),fc=a(dE,"CODE",{});var BD=i(fc);y9=r(BD,"CaptureStd"),BD.forEach(s),$9=r(dE," class:"),dE.forEach(s),ry=h(e),c(Ia.$$.fragment,e),ly=h(e),Pn=a(e,"P",{});var YD=i(Pn);E9=r(YD,`Also, to aid debugging test issues, by default these context managers automatically replay the captured streams on exit from the context.`),YD.forEach(s),ay=h(e),Tt=a(e,"H3",{class:!0});var cE=i(Tt);fo=a(cE,"A",{id:!0,class:!0,href:!0});var VD=i(fo);hc=a(VD,"SPAN",{});var XD=i(hc);c(Ta.$$.fragment,XD),XD.forEach(s),VD.forEach(s),g9=h(cE),uc=a(cE,"SPAN",{});var JD=i(uc);b9=r(JD,"Capturing logger stream"),JD.forEach(s),cE.forEach(s),iy=h(e),ho=a(e,"P",{});var mE=i(ho);k9=r(mE,"If you need to validate the output of a logger, you can use "),dc=a(mE,"CODE",{});var ZD=i(dc);C9=r(ZD,"CaptureLogger"),ZD.forEach(s),j9=r(mE,":"),mE.forEach(s),ny=h(e),c(Aa.$$.fragment,e),py=h(e),At=a(e,"H3",{class:!0});var _E=i(At);uo=a(_E,"A",{id:!0,class:!0,href:!0});var KD=i(uo);cc=a(KD,"SPAN",{});var QD=i(cc);c(Da.$$.fragment,QD),QD.forEach(s),KD.forEach(s),x9=h(_E),mc=a(_E,"SPAN",{});var eq=i(mc);P9=r(eq,"Testing with environment variables"),eq.forEach(s),_E.forEach(s),fy=h(e),qa=a(e,"P",{});var Oj=i(qa);O9=r(Oj,`If you want to test the impact of environment variables for a specific test you can use a helper decorator `),_c=a(Oj,"CODE",{});var tq=i(_c);I9=r(tq,"transformers.testing_utils.mockenv"),tq.forEach(s),Oj.forEach(s),hy=h(e),c(Sa.$$.fragment,e),uy=h(e),ne=a(e,"P",{});var Do=i(ne);T9=r(Do,"At times an external program needs to be called, which requires setting "),vc=a(Do,"CODE",{});var sq=i(vc);A9=r(sq,"PYTHONPATH"),sq.forEach(s),D9=r(Do," in "),wc=a(Do,"CODE",{});var oq=i(wc);q9=r(oq,"os.environ"),oq.forEach(s),S9=r(Do,` to include multiple local paths. 
A helper class `),yc=a(Do,"CODE",{});var rq=i(yc);L9=r(rq,"transformers.test_utils.TestCasePlus"),rq.forEach(s),U9=r(Do," comes to help:"),Do.forEach(s),dy=h(e),c(La.$$.fragment,e),cy=h(e),N=a(e,"P",{});var ce=i(N);N9=r(ce,"Depending on whether the test file was under the "),$c=a(ce,"CODE",{});var lq=i($c);H9=r(lq,"tests"),lq.forEach(s),z9=r(ce," test suite or "),Ec=a(ce,"CODE",{});var aq=i(Ec);R9=r(aq,"examples"),aq.forEach(s),G9=r(ce,` it\u2019ll correctly set up `),gc=a(ce,"CODE",{});var iq=i(gc);F9=r(iq,"env[PYTHONPATH]"),iq.forEach(s),M9=r(ce," to include one of these two directories, and also the "),bc=a(ce,"CODE",{});var nq=i(bc);W9=r(nq,"src"),nq.forEach(s),B9=r(ce,` directory to ensure the testing is done against the current repo, and finally with whatever `),kc=a(ce,"CODE",{});var pq=i(kc);Y9=r(pq,"env[PYTHONPATH]"),pq.forEach(s),V9=r(ce,` was already set to before the test was called if anything.`),ce.forEach(s),my=h(e),co=a(e,"P",{});var vE=i(co);X9=r(vE,"This helper method creates a copy of the "),Cc=a(vE,"CODE",{});var fq=i(Cc);J9=r(fq,"os.environ"),fq.forEach(s),Z9=r(vE," object, so the original remains intact."),vE.forEach(s),_y=h(e),Dt=a(e,"H3",{class:!0});var wE=i(Dt);mo=a(wE,"A",{id:!0,class:!0,href:!0});var hq=i(mo);jc=a(hq,"SPAN",{});var uq=i(jc);c(Ua.$$.fragment,uq),uq.forEach(s),hq.forEach(s),K9=h(wE),xc=a(wE,"SPAN",{});var dq=i(xc);Q9=r(dq,"Getting reproducible results"),dq.forEach(s),wE.forEach(s),vy=h(e),On=a(e,"P",{});var cq=i(On);eC=r(cq,`In some situations you may want to remove randomness for your tests. To get identical reproducable results set, you will need to fix the seed:`),cq.forEach(s),wy=h(e),c(Na.$$.fragment,e),yy=h(e),qt=a(e,"H3",{class:!0});var yE=i(qt);_o=a(yE,"A",{id:!0,class:!0,href:!0});var mq=i(_o);Pc=a(mq,"SPAN",{});var _q=i(Pc);c(Ha.$$.fragment,_q),_q.forEach(s),mq.forEach(s),tC=h(yE),Oc=a(yE,"SPAN",{});var vq=i(Oc);sC=r(vq,"Debugging tests"),vq.forEach(s),yE.forEach(s),$y=h(e),In=a(e,"P",{});var wq=i(In);oC=r(wq,"To start a debugger at the point of the warning, do this:"),wq.forEach(s),Ey=h(e),c(za.$$.fragment,e),gy=h(e),St=a(e,"H2",{class:!0});var $E=i(St);vo=a($E,"A",{id:!0,class:!0,href:!0});var yq=i(vo);Ic=a(yq,"SPAN",{});var $q=i(Ic);c(Ra.$$.fragment,$q),$q.forEach(s),yq.forEach(s),rC=h($E),Tc=a($E,"SPAN",{});var Eq=i(Tc);lC=r(Eq,"Working with github actions workflows"),Eq.forEach(s),$E.forEach(s),by=h(e),Tn=a(e,"P",{});var gq=i(Tn);aC=r(gq,"To trigger a self-push workflow CI job, you must:"),gq.forEach(s),ky=h(e),pe=a(e,"OL",{});var qo=i(pe);Ga=a(qo,"LI",{});var EE=i(Ga);iC=r(EE,"Create a new branch on "),Ac=a(EE,"CODE",{});var bq=i(Ac);nC=r(bq,"transformers"),bq.forEach(s),pC=r(EE," origin (not a fork!)."),EE.forEach(s),fC=h(qo),H=a(qo,"LI",{});var K=i(H);hC=r(K,"The branch name has to start with either "),Dc=a(K,"CODE",{});var kq=i(Dc);uC=r(kq,"ci_"),kq.forEach(s),dC=r(K," or "),qc=a(K,"CODE",{});var Cq=i(qc);cC=r(Cq,"ci-"),Cq.forEach(s),mC=r(K," ("),Sc=a(K,"CODE",{});var jq=i(Sc);_C=r(jq,"master"),jq.forEach(s),vC=r(K,` triggers it too, but we can\u2019t do PRs on `),Lc=a(K,"CODE",{});var xq=i(Lc);wC=r(xq,"master"),xq.forEach(s),yC=r(K,`). 
It also gets triggered only for specific paths - you can find the up-to-date definition in case it changed since this document has been written `),Fa=a(K,"A",{href:!0,rel:!0});var Pq=i(Fa);$C=r(Pq,"here"),Pq.forEach(s),EC=r(K," under "),Uc=a(K,"EM",{});var Oq=i(Uc);gC=r(Oq,"push:"),Oq.forEach(s),K.forEach(s),bC=h(qo),Nc=a(qo,"LI",{});var Iq=i(Nc);kC=r(Iq,"Create a PR from this branch."),Iq.forEach(s),CC=h(qo),Ma=a(qo,"LI",{});var gE=i(Ma);jC=r(gE,"Then you can see the job appear "),Wa=a(gE,"A",{href:!0,rel:!0});var Tq=i(Wa);xC=r(Tq,"here"),Tq.forEach(s),PC=r(gE,`. It may not run right away if there is a backlog.`),gE.forEach(s),qo.forEach(s),Cy=h(e),Lt=a(e,"H2",{class:!0});var bE=i(Lt);wo=a(bE,"A",{id:!0,class:!0,href:!0});var Aq=i(wo);Hc=a(Aq,"SPAN",{});var Dq=i(Hc);c(Ba.$$.fragment,Dq),Dq.forEach(s),Aq.forEach(s),OC=h(bE),zc=a(bE,"SPAN",{});var qq=i(zc);IC=r(qq,"Testing Experimental CI Features"),qq.forEach(s),bE.forEach(s),jy=h(e),An=a(e,"P",{});var Sq=i(An);TC=r(Sq,`Testing CI features can be potentially problematic as it can interfere with the normal CI functioning. Therefore if a new CI feature is to be added, it should be done as following.`),Sq.forEach(s),xy=h(e),fe=a(e,"OL",{});var So=i(fe);Rc=a(So,"LI",{});var Lq=i(Rc);AC=r(Lq,"Create a new dedicated job that tests what needs to be tested"),Lq.forEach(s),DC=h(So),Gc=a(So,"LI",{});var Uq=i(Gc);qC=r(Uq,"The new job must always succeed so that it gives us a green \u2713 (details below)."),Uq.forEach(s),SC=h(So),Fc=a(So,"LI",{});var Nq=i(Fc);LC=r(Nq,`Let it run for some days to see that a variety of different PR types get to run on it (user fork branches, non-forked branches, branches originating from github.com UI direct file edit, various forced pushes, etc. - there are so many) while monitoring the experimental job\u2019s logs (not the overall job green as it\u2019s purposefully always green)`),Nq.forEach(s),UC=h(So),Mc=a(So,"LI",{});var Hq=i(Mc);NC=r(Hq,"When it\u2019s clear that everything is solid, then merge the new changes into existing jobs."),Hq.forEach(s),So.forEach(s),Py=h(e),Dn=a(e,"P",{});var zq=i(Dn);HC=r(zq,"That way experiments on CI functionality itself won\u2019t interfere with the normal workflow."),zq.forEach(s),Oy=h(e),qn=a(e,"P",{});var Rq=i(qn);zC=r(Rq,"Now how can we make the job always succeed while the new CI feature is being developed?"),Rq.forEach(s),Iy=h(e),Sn=a(e,"P",{});var Gq=i(Sn);RC=r(Gq,`Some CIs, like TravisCI support ignore-step-failure and will report the overall job as successful, but CircleCI and Github Actions as of this writing don\u2019t support that.`),Gq.forEach(s),Ty=h(e),Ln=a(e,"P",{});var Fq=i(Ln);GC=r(Fq,"So the following workaround can be used:"),Fq.forEach(s),Ay=h(e),yo=a(e,"OL",{});var kE=i(yo);Un=a(kE,"LI",{});var Ij=i(Un);Wc=a(Ij,"CODE",{});var Mq=i(Wc);FC=r(Mq,"set +euo pipefail"),Mq.forEach(s),MC=r(Ij," at the beginning of the run command to suppress most potential failures in the bash script."),Ij.forEach(s),WC=h(kE),Ut=a(kE,"LI",{});var pp=i(Ut);BC=r(pp,"the last command must be a success: "),Bc=a(pp,"CODE",{});var Wq=i(Bc);YC=r(Wq,'echo "done"'),Wq.forEach(s),VC=r(pp," or just "),Yc=a(pp,"CODE",{});var Bq=i(Yc);XC=r(Bq,"true"),Bq.forEach(s),JC=r(pp," will do"),pp.forEach(s),kE.forEach(s),Dy=h(e),Nn=a(e,"P",{});var Yq=i(Nn);ZC=r(Yq,"Here is an example:"),Yq.forEach(s),qy=h(e),c(Ya.$$.fragment,e),Sy=h(e),Hn=a(e,"P",{});var Vq=i(Hn);KC=r(Vq,"For simple commands you could also do:"),Vq.forEach(s),Ly=h(e),c(Va.$$.fragment,e),Uy=h(e),$o=a(e,"P",{});var CE=i($o);QC=r(CE,`Of course, 
once satisfied with the results, integrate the experimental step or job with the rest of the normal jobs, while removing `),Vc=a(CE,"CODE",{});var Xq=i(Vc);ej=r(Xq,"set +euo pipefail"),Xq.forEach(s),tj=r(CE,` or any other things you may have added to ensure that the experimental job doesn\u2019t interfere with the normal CI functioning.`),CE.forEach(s),Ny=h(e),Eo=a(e,"P",{});var jE=i(Eo);sj=r(jE,"This whole process would have been much easier if we only could set something like "),Xc=a(jE,"CODE",{});var Jq=i(Xc);oj=r(Jq,"allow-failure"),Jq.forEach(s),rj=r(jE,` for the experimental step, and let it fail without impacting the overall status of PRs. But as mentioned earlier CircleCI and Github Actions don\u2019t support it at the moment.`),jE.forEach(s),Hy=h(e),zn=a(e,"P",{});var Zq=i(zn);lj=r(Zq,"You can vote for this feature and see where it is at at these CI-specific threads:"),Zq.forEach(s),zy=h(e),go=a(e,"UL",{});var xE=i(go);Jc=a(xE,"LI",{});var Kq=i(Jc);Xa=a(Kq,"A",{href:!0,rel:!0});var Qq=i(Xa);aj=r(Qq,"Github Actions:"),Qq.forEach(s),Kq.forEach(s),ij=h(xE),Zc=a(xE,"LI",{});var eS=i(Zc);Ja=a(eS,"A",{href:!0,rel:!0});var tS=i(Ja);nj=r(tS,"CircleCI:"),tS.forEach(s),eS.forEach(s),xE.forEach(s),this.h()},h(){u($,"name","hf:doc:metadata"),u($,"content",JSON.stringify(hS)),u(b,"id","testing"),u(b,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(b,"href","#testing"),u(g,"class","relative group"),u(zt,"id","how-transformers-are-tested"),u(zt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(zt,"href","#how-transformers-are-tested"),u(Ve,"class","relative group"),u(Ho,"href","https://github.com/huggingface/transformers-doc2mdx/tree/master/.circleci/config.yml"),u(Ho,"rel","nofollow"),u(Fo,"href","https://github.com/huggingface/transformers/actions"),u(Fo,"rel","nofollow"),u(Mo,"href","https://github.com/huggingface/transformers-doc2mdx/tree/master/.github/workflows/github-torch-hub.yml"),u(Mo,"rel","nofollow"),u(Wo,"href","https://github.com/huggingface/transformers-doc2mdx/tree/master/.github/workflows/self-push.yml"),u(Wo,"rel","nofollow"),u(Bo,"href","https://github.com/huggingface/transformers-doc2mdx/tree/master/.github/workflows/self-scheduled.yml"),u(Bo,"rel","nofollow"),u(Vo,"href","https://github.com/huggingface/transformers/actions"),u(Vo,"rel","nofollow"),u(Ft,"id","running-tests"),u(Ft,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(Ft,"href","#running-tests"),u(Je,"class","relative group"),u(Mt,"id","choosing-which-tests-to-run"),u(Mt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(Mt,"href","#choosing-which-tests-to-run"),u(Ze,"class","relative group"),u(Zo,"href","https://docs.pytest.org/en/latest/usage.html"),u(Zo,"rel","nofollow"),u(Bt,"id","getting-the-list-of-all-tests"),u(Bt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(Bt,"href","#getting-the-list-of-all-tests"),u(Ke,"class","relative 
group"),u(Yt,"id","run-a-specific-test-module"),u(Yt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(Yt,"href","#run-a-specific-test-module"),u(Qe,"class","relative group"),u(Vt,"id","run-specific-tests"),u(Vt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(Vt,"href","#run-specific-tests"),u(et,"class","relative group"),u(es,"id","run-only-modified-tests"),u(es,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(es,"href","#run-only-modified-tests"),u(tt,"class","relative group"),u(_r,"href","https://github.com/anapaulagomes/pytest-picked"),u(_r,"rel","nofollow"),u(ss,"id","automatically-rerun-failed-tests-on-source-modification"),u(ss,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(ss,"href","#automatically-rerun-failed-tests-on-source-modification"),u(st,"class","relative group"),u(Er,"href","https://github.com/pytest-dev/pytest-xdist"),u(Er,"rel","nofollow"),u(jr,"href","https://github.com/joeyespo/pytest-watch"),u(jr,"rel","nofollow"),u(os,"id","skip-a-test-module"),u(os,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(os,"href","#skip-a-test-module"),u(rt,"class","relative group"),u(ls,"id","clearing-state"),u(ls,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(ls,"href","#clearing-state"),u(lt,"class","relative group"),u(as,"id","running-tests-in-parallel"),u(as,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(as,"href","#running-tests-in-parallel"),u(at,"class","relative group"),u(Ar,"href","https://github.com/ESSS/pytest-replay"),u(Ar,"rel","nofollow"),u(is,"id","test-order-and-repetition"),u(is,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(is,"href","#test-order-and-repetition"),u(it,"class","relative group"),u(ns,"id","repeat-tests"),u(ns,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(ns,"href","#repeat-tests"),u(nt,"class","relative group"),u(Sr,"href","https://github.com/dropbox/pytest-flakefinder"),u(Sr,"rel","nofollow"),u(hs,"id","run-tests-in-a-random-order"),u(hs,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(hs,"href","#run-tests-in-a-random-order"),u(pt,"class","relative group"),u(Mr,"href","https://github.com/jbasko/pytest-random-order"),u(Mr,"rel","nofollow"),u(Wr,"href","https://github.com/pytest-dev/pytest-randomly"),u(Wr,"rel","nofollow"),u(ms,"id","look-and-feel-variations"),u(ms,"class","header-link 
block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(ms,"href","#look-and-feel-variations"),u(ft,"class","relative group"),u(_s,"id","pytestsugar"),u(_s,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(_s,"href","#pytestsugar"),u(ht,"class","relative group"),u(Xr,"href","https://github.com/Frozenball/pytest-sugar"),u(Xr,"rel","nofollow"),u(vs,"id","report-each-subtest-name-and-its-progress"),u(vs,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(vs,"href","#report-each-subtest-name-and-its-progress"),u(ut,"class","relative group"),u(ws,"id","instantly-shows-failed-tests"),u(ws,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(ws,"href","#instantly-shows-failed-tests"),u(dt,"class","relative group"),u(sl,"href","https://github.com/pytest-dev/pytest-instafail"),u(sl,"rel","nofollow"),u(ys,"id","to-gpu-or-not-to-gpu"),u(ys,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(ys,"href","#to-gpu-or-not-to-gpu"),u(ct,"class","relative group"),u(xs,"id","distributed-training"),u(xs,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(xs,"href","#distributed-training"),u(mt,"class","relative group"),u(cl,"href","https://github.com/huggingface/transformers-doc2mdx/tree/master/tests/test_trainer_distributed.py"),u(cl,"rel","nofollow"),u(ml,"href","https://github.com/huggingface/transformers-doc2mdx/tree/master/tests/deepspeed/test_deepspeed.py"),u(ml,"rel","nofollow"),u(Is,"id","output-capture"),u(Is,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(Is,"href","#output-capture"),u(vt,"class","relative group"),u(Ts,"id","color-control"),u(Ts,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(Ts,"href","#color-control"),u(wt,"class","relative group"),u(As,"id","sending-test-report-to-online-pastebin-service"),u(As,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(As,"href","#sending-test-report-to-online-pastebin-service"),u(yt,"class","relative group"),u(Ds,"id","writing-tests"),u(Ds,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(Ds,"href","#writing-tests"),u($t,"class","relative group"),u(jl,"href","https://docs.pytest.org/en/stable/unittest.html"),u(jl,"rel","nofollow"),u(qs,"id","parametrization"),u(qs,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full"),u(qs,"href","#parametrization"),u(Et,"class","relative group"),u(ql,"href","https://pypi.org/project/parameterized/"),u(ql,"rel","nofollow"),u(Ns,"id","files-and-directories"),u(Ns,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(Ns,"href","#files-and-directories"),u(gt,"class","relative group"),u(Ms,"id","temporary-files-and-directories"),u(Ms,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(Ms,"href","#temporary-files-and-directories"),u(bt,"class","relative group"),u(Xs,"id","temporary-syspath-override"),u(Xs,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(Xs,"href","#temporary-syspath-override"),u(Ct,"class","relative group"),u(Js,"id","skipping-tests"),u(Js,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(Js,"href","#skipping-tests"),u(jt,"class","relative group"),u(Qs,"id","implementation"),u(Qs,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(Qs,"href","#implementation"),u(xt,"class","relative group"),u(ca,"href","https://docs.pytest.org/en/latest/skipping.html"),u(ca,"rel","nofollow"),u(oo,"id","slow-tests"),u(oo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(oo,"href","#slow-tests"),u(Pt,"class","relative group"),u($a,"href","https://github.com/huggingface/transformers-doc2mdx/tree/master/scripts/fsmt/fsmt-make-tiny-model.py"),u($a,"rel","nofollow"),u(Ea,"href","https://huggingface.co/stas/tiny-wmt19-en-de"),u(Ea,"rel","nofollow"),u(lo,"id","testing-the-stdoutstderr-output"),u(lo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(lo,"href","#testing-the-stdoutstderr-output"),u(It,"class","relative group"),u(ba,"href","https://docs.pytest.org/en/latest/capture.html"),u(ba,"rel","nofollow"),u(fo,"id","capturing-logger-stream"),u(fo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(fo,"href","#capturing-logger-stream"),u(Tt,"class","relative group"),u(uo,"id","testing-with-environment-variables"),u(uo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(uo,"href","#testing-with-environment-variables"),u(At,"class","relative group"),u(mo,"id","getting-reproducible-results"),u(mo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(mo,"href","#getting-reproducible-results"),u(Dt,"class","relative group"),u(_o,"id","debugging-tests"),u(_o,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute 
with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(_o,"href","#debugging-tests"),u(qt,"class","relative group"),u(vo,"id","working-with-github-actions-workflows"),u(vo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(vo,"href","#working-with-github-actions-workflows"),u(St,"class","relative group"),u(Fa,"href","https://github.com/huggingface/transformers/blob/master/.github/workflows/self-push.yml"),u(Fa,"rel","nofollow"),u(Wa,"href","https://github.com/huggingface/transformers/actions/workflows/self-push.yml"),u(Wa,"rel","nofollow"),u(wo,"id","testing-experimental-ci-features"),u(wo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(wo,"href","#testing-experimental-ci-features"),u(Lt,"class","relative group"),u(Xa,"href","https://github.com/actions/toolkit/issues/399"),u(Xa,"rel","nofollow"),u(Ja,"href","https://ideas.circleci.com/ideas/CCI-I-344"),u(Ja,"rel","nofollow")},m(e,n){t(document.head,$),p(e,x,n),p(e,g,n),t(g,b),t(b,A),m(j,A,null),t(g,z),t(g,D),t(D,O),p(e,k,n),p(e,S,n),t(S,R),p(e,Nt,n),p(e,ye,n),t(ye,Be),p(e,q,n),p(e,_e,n),t(_e,Ye),t(Ye,Ht),t(Ht,ti),t(Ye,OE),t(_e,IE),t(_e,si),t(si,fp),t(fp,TE),t(si,AE),p(e,fm,n),p(e,Ve,n),t(Ve,zt),t(zt,hp),m(Lo,hp,null),t(Ve,DE),t(Ve,up),t(up,qE),p(e,hm,n),p(e,Rt,n),t(Rt,Uo),t(Uo,No),t(No,SE),t(No,Ho),t(Ho,LE),t(No,UE),t(Uo,NE),t(Uo,zo),t(zo,HE),t(zo,dp),t(dp,zE),t(zo,RE),t(Rt,GE),t(Rt,Ro),t(Ro,Go),t(Go,FE),t(Go,Fo),t(Fo,ME),t(Go,WE),t(Ro,BE),t(Ro,Xe),t(Xe,cp),t(cp,oi),t(oi,Mo),t(Mo,YE),t(oi,VE),t(Xe,XE),t(Xe,mp),t(mp,L),t(L,Wo),t(Wo,JE),t(L,ZE),t(L,_p),t(_p,KE),t(L,QE),t(L,vp),t(vp,eg),t(L,tg),t(L,wp),t(wp,sg),t(L,og),t(L,yp),t(yp,rg),t(L,lg),t(L,$p),t($p,ag),t(L,ig),t(Xe,ng),t(Xe,Ep),t(Ep,$e),t($e,Bo),t(Bo,pg),t($e,fg),t($e,gp),t(gp,hg),t($e,ug),t($e,bp),t(bp,dg),t($e,cg),p(e,um,n),m(Yo,e,n),p(e,dm,n),p(e,Gt,n),t(Gt,mg),t(Gt,Vo),t(Vo,_g),t(Gt,vg),p(e,cm,n),p(e,Je,n),t(Je,Ft),t(Ft,kp),m(Xo,kp,null),t(Je,wg),t(Je,Cp),t(Cp,yg),p(e,mm,n),p(e,Ze,n),t(Ze,Mt),t(Mt,jp),m(Jo,jp,null),t(Ze,$g),t(Ze,xp),t(xp,Eg),p(e,_m,n),p(e,Wt,n),t(Wt,gg),t(Wt,Zo),t(Zo,bg),t(Wt,kg),p(e,vm,n),p(e,ri,n),t(ri,Cg),p(e,wm,n),p(e,li,n),t(li,jg),p(e,ym,n),m(Ko,e,n),p(e,$m,n),p(e,ai,n),t(ai,xg),p(e,Em,n),m(Qo,e,n),p(e,gm,n),p(e,ii,n),t(ii,Pg),p(e,bm,n),m(er,e,n),p(e,km,n),p(e,ni,n),t(ni,Og),p(e,Cm,n),p(e,Q,n),t(Q,Pp),t(Pp,Ig),t(Q,Tg),t(Q,Op),t(Op,Ag),t(Q,Dg),t(Q,Ip),t(Ip,qg),t(Q,Sg),t(Q,Tp),t(Tp,Lg),p(e,jm,n),p(e,Ke,n),t(Ke,Bt),t(Bt,Ap),m(tr,Ap,null),t(Ke,Ug),t(Ke,Dp),t(Dp,Ng),p(e,xm,n),p(e,pi,n),t(pi,Hg),p(e,Pm,n),m(sr,e,n),p(e,Om,n),p(e,fi,n),t(fi,zg),p(e,Im,n),m(or,e,n),p(e,Tm,n),p(e,Qe,n),t(Qe,Yt),t(Yt,qp),m(rr,qp,null),t(Qe,Rg),t(Qe,Sp),t(Sp,Gg),p(e,Am,n),p(e,hi,n),t(hi,Fg),p(e,Dm,n),m(lr,e,n),p(e,qm,n),p(e,et,n),t(et,Vt),t(Vt,Lp),m(ar,Lp,null),t(et,Mg),t(et,Up),t(Up,Wg),p(e,Sm,n),p(e,ui,n),t(ui,Bg),p(e,Lm,n),m(ir,e,n),p(e,Um,n),p(e,di,n),t(di,Yg),p(e,Nm,n),p(e,Ee,n),t(Ee,ci),t(ci,Np),t(Np,Vg),t(ci,Xg),t(Ee,Jg),t(Ee,mi),t(mi,Hp),t(Hp,Zg),t(mi,Kg),t(Ee,Qg),t(Ee,_i),t(_i,zp),t(zp,eb),t(_i,tb),p(e,Hm,n),p(e,vi,n),t(vi,sb),p(e,zm,n),m(nr,e,n),p(e,Rm,n),p(e,wi,n),t(wi,ob),p(e,Gm,n),p(e,Xt,n),t(Xt,rb),t(Xt,Rp),t(Rp,lb),t(Xt,ab),p(e,Fm,n),m(pr,e,n),p(e,Mm,n),p(e,yi,n),t(yi,ib),p(e,Wm,n),p(e,Jt,n),t(Jt,nb),t(Jt,Gp),t(Gp,pb),t(Jt,fb),p(e,Bm,n),m(fr,e,n),p(e,Ym,n),p(e,ee,n),t(ee,hb),t(e
e,Fp),t(Fp,ub),t(ee,db),t(ee,Mp),t(Mp,cb),t(ee,mb),t(ee,Wp),t(Wp,_b),t(ee,vb),p(e,Vm,n),p(e,Zt,n),t(Zt,wb),t(Zt,Bp),t(Bp,yb),t(Zt,$b),p(e,Xm,n),m(hr,e,n),p(e,Jm,n),p(e,$i,n),t($i,Eb),p(e,Zm,n),m(ur,e,n),p(e,Km,n),p(e,ge,n),t(ge,gb),t(ge,Yp),t(Yp,bb),t(ge,kb),t(ge,Vp),t(Vp,Cb),t(ge,jb),p(e,Qm,n),m(dr,e,n),p(e,e_,n),p(e,Kt,n),t(Kt,xb),t(Kt,Xp),t(Xp,Pb),t(Kt,Ob),p(e,t_,n),p(e,Qt,n),t(Qt,Ib),t(Qt,Jp),t(Jp,Tb),t(Qt,Ab),p(e,s_,n),m(cr,e,n),p(e,o_,n),p(e,tt,n),t(tt,es),t(es,Zp),m(mr,Zp,null),t(tt,Db),t(tt,Kp),t(Kp,qb),p(e,r_,n),p(e,ts,n),t(ts,Sb),t(ts,_r),t(_r,Lb),t(ts,Ub),p(e,l_,n),m(vr,e,n),p(e,a_,n),m(wr,e,n),p(e,i_,n),p(e,Ei,n),t(Ei,Nb),p(e,n_,n),p(e,st,n),t(st,ss),t(ss,Qp),m(yr,Qp,null),t(st,Hb),t(st,ef),t(ef,zb),p(e,p_,n),p(e,$r,n),t($r,Er),t(Er,Rb),t($r,Gb),p(e,f_,n),m(gr,e,n),p(e,h_,n),p(e,ot,n),t(ot,Fb),t(ot,tf),t(tf,Mb),t(ot,Wb),t(ot,sf),t(sf,Bb),p(e,u_,n),p(e,be,n),t(be,Yb),t(be,of),t(of,Vb),t(be,Xb),t(be,rf),t(rf,Jb),t(be,Zb),p(e,d_,n),m(br,e,n),p(e,c_,n),p(e,ke,n),t(ke,Kb),t(ke,lf),t(lf,Qb),t(ke,e3),t(ke,af),t(af,t3),t(ke,s3),p(e,m_,n),m(kr,e,n),p(e,__,n),p(e,gi,n),t(gi,o3),p(e,v_,n),p(e,Cr,n),t(Cr,jr),t(jr,r3),t(Cr,l3),p(e,w_,n),p(e,rt,n),t(rt,os),t(os,nf),m(xr,nf,null),t(rt,a3),t(rt,pf),t(pf,i3),p(e,y_,n),p(e,rs,n),t(rs,n3),t(rs,ff),t(ff,p3),t(rs,f3),p(e,$_,n),m(Pr,e,n),p(e,E_,n),p(e,lt,n),t(lt,ls),t(ls,hf),m(Or,hf,null),t(lt,h3),t(lt,uf),t(uf,u3),p(e,g_,n),p(e,bi,n),t(bi,d3),p(e,b_,n),m(Ir,e,n),p(e,k_,n),p(e,at,n),t(at,as),t(as,df),m(Tr,df,null),t(at,c3),t(at,cf),t(cf,m3),p(e,C_,n),p(e,M,n),t(M,_3),t(M,mf),t(mf,v3),t(M,w3),t(M,_f),t(_f,y3),t(M,$3),t(M,vf),t(vf,E3),t(M,g3),t(M,wf),t(wf,b3),t(M,k3),p(e,j_,n),p(e,ve,n),t(ve,yf),t(yf,C3),t(ve,j3),t(ve,$f),t($f,x3),t(ve,P3),t(ve,Ef),t(Ef,O3),t(ve,I3),p(e,x_,n),p(e,Ce,n),t(Ce,T3),t(Ce,gf),t(gf,A3),t(Ce,D3),t(Ce,Ar),t(Ar,q3),t(Ce,S3),p(e,P_,n),p(e,it,n),t(it,is),t(is,bf),m(Dr,bf,null),t(it,L3),t(it,kf),t(kf,U3),p(e,O_,n),p(e,ki,n),t(ki,N3),p(e,I_,n),p(e,nt,n),t(nt,ns),t(ns,Cf),m(qr,Cf,null),t(nt,H3),t(nt,jf),t(jf,z3),p(e,T_,n),p(e,Ci,n),t(Ci,ji),t(ji,Sr),t(Sr,R3),t(ji,G3),p(e,A_,n),m(Lr,e,n),p(e,D_,n),p(e,xi,n),t(xi,F3),p(e,q_,n),m(Ur,e,n),p(e,S_,n),m(ps,e,n),p(e,L_,n),m(fs,e,n),p(e,U_,n),p(e,pt,n),t(pt,hs),t(hs,xf),m(Nr,xf,null),t(pt,M3),t(pt,Pf),t(Pf,W3),p(e,N_,n),m(Hr,e,n),p(e,H_,n),p(e,us,n),t(us,B3),t(us,Of),t(Of,Y3),t(us,V3),p(e,z_,n),p(e,ds,n),t(ds,X3),t(ds,If),t(If,J3),t(ds,Z3),p(e,R_,n),m(zr,e,n),p(e,G_,n),p(e,Pi,n),t(Pi,K3),p(e,F_,n),m(Rr,e,n),p(e,M_,n),p(e,cs,n),t(cs,Q3),t(cs,Tf),t(Tf,e2),t(cs,t2),p(e,W_,n),m(Gr,e,n),p(e,B_,n),p(e,Oi,n),t(Oi,s2),p(e,Y_,n),m(Fr,e,n),p(e,V_,n),p(e,I,n),t(I,o2),t(I,Af),t(Af,r2),t(I,l2),t(I,Df),t(Df,a2),t(I,i2),t(I,qf),t(qf,n2),t(I,p2),t(I,Sf),t(Sf,f2),t(I,h2),t(I,Lf),t(Lf,u2),t(I,d2),t(I,Mr),t(Mr,c2),t(I,m2),p(e,X_,n),p(e,je,n),t(je,_2),t(je,Wr),t(Wr,Uf),t(Uf,v2),t(je,w2),t(je,Nf),t(Nf,y2),t(je,$2),p(e,J_,n),p(e,ft,n),t(ft,ms),t(ms,Hf),m(Br,Hf,null),t(ft,E2),t(ft,zf),t(zf,g2),p(e,Z_,n),p(e,ht,n),t(ht,_s),t(_s,Rf),m(Yr,Rf,null),t(ht,b2),t(ht,Gf),t(Gf,k2),p(e,K_,n),p(e,Vr,n),t(Vr,Xr),t(Xr,C2),t(Vr,j2),p(e,Q_,n),m(Jr,e,n),p(e,ev,n),p(e,Ii,n),t(Ii,x2),p(e,tv,n),m(Zr,e,n),p(e,sv,n),p(e,Ti,n),t(Ti,P2),p(e,ov,n),p(e,ut,n),t(ut,vs),t(vs,Ff),m(Kr,Ff,null),t(ut,O2),t(ut,Mf),t(Mf,I2),p(e,rv,n),p(e,xe,n),t(xe,T2),t(xe,Wf),t(Wf,A2),t(xe,D2),t(xe,Bf),t(Bf,q2),t(xe,S2),p(e,lv,n),m(Qr,e,n),p(e,av,n),p(e,dt,n),t(dt,ws),t(ws,Yf),m(el,Yf,null),t(dt,L2),t(dt,Vf),t(Vf,U2),p(e,iv,n),p(e,tl,n),t(tl,sl),t(sl,N2),t(tl,H2),p(e,nv,n),m(ol,e,n),p(e,pv,n),m(rl,e,n),p(e,fv,n),p(e,ct,n),t(ct,ys),t(ys,Xf),m(ll,Xf,null),t(ct,z2),
t(ct,Jf),t(Jf,R2),p(e,hv,n),p(e,$s,n),t($s,G2),t($s,Zf),t(Zf,F2),t($s,M2),p(e,uv,n),m(al,e,n),p(e,dv,n),p(e,te,n),t(te,W2),t(te,Kf),t(Kf,B2),t(te,Y2),t(te,Qf),t(Qf,V2),t(te,X2),t(te,eh),t(eh,J2),t(te,Z2),p(e,cv,n),m(il,e,n),p(e,mv,n),p(e,Ai,n),t(Ai,K2),p(e,_v,n),p(e,Di,n),t(Di,Q2),p(e,vv,n),p(e,U,n),t(U,qi),t(qi,th),t(th,e5),t(qi,t5),t(U,s5),t(U,Es),t(Es,sh),t(sh,o5),t(Es,r5),t(Es,oh),t(oh,l5),t(Es,a5),t(U,i5),t(U,gs),t(gs,rh),t(rh,n5),t(gs,p5),t(gs,lh),t(lh,f5),t(gs,h5),t(U,u5),t(U,bs),t(bs,ah),t(ah,d5),t(bs,c5),t(bs,ih),t(ih,m5),t(bs,_5),t(U,v5),t(U,ks),t(ks,nh),t(nh,w5),t(ks,y5),t(ks,ph),t(ph,$5),t(ks,E5),t(U,g5),t(U,Cs),t(Cs,fh),t(fh,b5),t(Cs,k5),t(Cs,hh),t(hh,C5),t(Cs,j5),p(e,wv,n),p(e,Si,n),t(Si,x5),p(e,yv,n),p(e,C,n),t(C,P5),t(C,uh),t(uh,O5),t(C,I5),t(C,dh),t(dh,T5),t(C,A5),t(C,ch),t(ch,D5),t(C,q5),t(C,mh),t(mh,S5),t(C,L5),t(C,_h),t(_h,U5),t(C,N5),t(C,vh),t(vh,H5),t(C,z5),t(C,wh),t(wh,R5),t(C,G5),t(C,yh),t(yh,F5),t(C,M5),t(C,$h),t($h,W5),t(C,B5),t(C,Eh),t(Eh,Y5),t(C,V5),p(e,$v,n),p(e,Li,n),t(Li,X5),p(e,Ev,n),m(nl,e,n),p(e,gv,n),p(e,Pe,n),t(Pe,J5),t(Pe,gh),t(gh,Z5),t(Pe,K5),t(Pe,bh),t(bh,Q5),t(Pe,ek),p(e,bv,n),m(pl,e,n),p(e,kv,n),p(e,Ui,n),t(Ui,tk),p(e,Cv,n),m(fl,e,n),p(e,jv,n),p(e,Oe,n),t(Oe,sk),t(Oe,kh),t(kh,ok),t(Oe,rk),t(Oe,Ch),t(Ch,lk),t(Oe,ak),p(e,xv,n),m(hl,e,n),p(e,Pv,n),p(e,js,n),t(js,ik),t(js,jh),t(jh,nk),t(js,pk),p(e,Ov,n),p(e,Ni,n),t(Ni,fk),p(e,Iv,n),p(e,Hi,n),t(Hi,xh),t(xh,hk),p(e,Tv,n),m(ul,e,n),p(e,Av,n),p(e,mt,n),t(mt,xs),t(xs,Ph),m(dl,Ph,null),t(mt,uk),t(mt,Oh),t(Oh,dk),p(e,Dv,n),p(e,_t,n),t(_t,Ih),t(Ih,ck),t(_t,mk),t(_t,Th),t(Th,_k),t(_t,vk),p(e,qv,n),p(e,zi,n),t(zi,wk),p(e,Sv,n),p(e,Ps,n),t(Ps,Ah),t(Ah,cl),t(cl,yk),t(Ps,$k),t(Ps,Dh),t(Dh,ml),t(ml,Ek),p(e,Lv,n),p(e,Os,n),t(Os,gk),t(Os,qh),t(qh,bk),t(Os,kk),p(e,Uv,n),p(e,Ri,n),t(Ri,Ck),p(e,Nv,n),m(_l,e,n),p(e,Hv,n),p(e,vt,n),t(vt,Is),t(Is,Sh),m(vl,Sh,null),t(vt,jk),t(vt,Lh),t(Lh,xk),p(e,zv,n),p(e,Ie,n),t(Ie,Pk),t(Ie,Uh),t(Uh,Ok),t(Ie,Ik),t(Ie,Nh),t(Nh,Tk),t(Ie,Ak),p(e,Rv,n),p(e,W,n),t(W,Dk),t(W,Hh),t(Hh,qk),t(W,Sk),t(W,zh),t(zh,Lk),t(W,Uk),t(W,Rh),t(Rh,Nk),t(W,Hk),t(W,Gh),t(Gh,zk),t(W,Rk),p(e,Gv,n),m(wl,e,n),p(e,Fv,n),p(e,Gi,n),t(Gi,Gk),p(e,Mv,n),m(yl,e,n),p(e,Wv,n),p(e,wt,n),t(wt,Ts),t(Ts,Fh),m($l,Fh,null),t(wt,Fk),t(wt,Mh),t(Mh,Mk),p(e,Bv,n),p(e,Fi,n),t(Fi,Wk),p(e,Yv,n),m(El,e,n),p(e,Vv,n),p(e,yt,n),t(yt,As),t(As,Wh),m(gl,Wh,null),t(yt,Bk),t(yt,Bh),t(Bh,Yk),p(e,Xv,n),p(e,Mi,n),t(Mi,Vk),p(e,Jv,n),m(bl,e,n),p(e,Zv,n),p(e,Wi,n),t(Wi,Xk),p(e,Kv,n),p(e,Bi,n),t(Bi,Jk),p(e,Qv,n),m(kl,e,n),p(e,e1,n),p(e,$t,n),t($t,Ds),t(Ds,Yh),m(Cl,Yh,null),t($t,Zk),t($t,Vh),t(Vh,Kk),p(e,t1,n),p(e,Te,n),t(Te,Qk),t(Te,Xh),t(Xh,e4),t(Te,t4),t(Te,Jh),t(Jh,s4),t(Te,o4),p(e,s1,n),p(e,se,n),t(se,r4),t(se,jl),t(jl,l4),t(se,a4),t(se,Zh),t(Zh,i4),t(se,n4),t(se,Kh),t(Kh,p4),t(se,f4),p(e,o1,n),p(e,Et,n),t(Et,qs),t(qs,Qh),m(xl,Qh,null),t(Et,h4),t(Et,eu),t(eu,u4),p(e,r1,n),p(e,Yi,n),t(Yi,d4),p(e,l1,n),m(Pl,e,n),p(e,a1,n),p(e,Ss,n),t(Ss,c4),t(Ss,tu),t(tu,m4),t(Ss,_4),p(e,i1,n),p(e,Ae,n),t(Ae,v4),t(Ae,su),t(su,w4),t(Ae,y4),t(Ae,ou),t(ou,$4),t(Ae,E4),p(e,n1,n),m(Ol,e,n),p(e,p1,n),p(e,Ls,n),t(Ls,g4),t(Ls,ru),t(ru,b4),t(Ls,k4),p(e,f1,n),m(Il,e,n),p(e,h1,n),p(e,Us,n),t(Us,C4),t(Us,lu),t(lu,j4),t(Us,x4),p(e,u1,n),m(Tl,e,n),p(e,d1,n),p(e,Vi,n),t(Vi,P4),p(e,c1,n),m(Al,e,n),p(e,m1,n),p(e,Xi,n),t(Xi,O4),p(e,_1,n),m(Dl,e,n),p(e,v1,n),p(e,B,n),t(B,I4),t(B,ql),t(ql,T4),t(B,A4),t(B,au),t(au,D4),t(B,q4),t(B,iu),t(iu,S4),t(B,L4),t(B,nu),t(nu,U4),t(B,N4),p(e,w1,n),p(e,oe,n),t(oe,H4),t(oe,pu),t(pu,z4),t(oe,R4),t(oe,fu),t(fu,G4),t(oe,F4),t(oe,hu),t(hu,M4),t(oe,W4),p(e,y1,n
),p(e,De,n),t(De,B4),t(De,uu),t(uu,Y4),t(De,V4),t(De,du),t(du,X4),t(De,J4),p(e,$1,n),m(Sl,e,n),p(e,E1,n),p(e,re,n),t(re,Z4),t(re,cu),t(cu,K4),t(re,Q4),t(re,mu),t(mu,e0),t(re,t0),t(re,_u),t(_u,s0),t(re,o0),p(e,g1,n),m(Ll,e,n),p(e,b1,n),p(e,Ji,n),t(Ji,r0),p(e,k1,n),m(Ul,e,n),p(e,C1,n),p(e,Zi,n),t(Zi,l0),p(e,j1,n),m(Nl,e,n),p(e,x1,n),p(e,Ki,n),t(Ki,a0),p(e,P1,n),p(e,gt,n),t(gt,Ns),t(Ns,vu),m(Hl,vu,null),t(gt,i0),t(gt,wu),t(wu,n0),p(e,O1,n),p(e,Hs,n),t(Hs,p0),t(Hs,yu),t(yu,f0),t(Hs,h0),p(e,I1,n),p(e,zs,n),t(zs,zl),t(zl,Qi),t(Qi,$u),t($u,u0),t(Qi,d0),t(zl,c0),t(zl,G),t(G,Rl),t(Rl,Eu),t(Eu,m0),t(Rl,_0),t(Rl,gu),t(gu,v0),t(G,w0),t(G,en),t(en,bu),t(bu,y0),t(en,$0),t(G,E0),t(G,Rs),t(Rs,ku),t(ku,g0),t(Rs,b0),t(Rs,Cu),t(Cu,k0),t(Rs,C0),t(G,j0),t(G,Gs),t(Gs,ju),t(ju,x0),t(Gs,P0),t(Gs,xu),t(xu,O0),t(Gs,I0),t(G,T0),t(G,tn),t(tn,Pu),t(Pu,A0),t(tn,D0),t(G,q0),t(G,qe),t(qe,Ou),t(Ou,S0),t(qe,L0),t(qe,Iu),t(Iu,U0),t(qe,N0),t(qe,Tu),t(Tu,H0),t(qe,z0),t(zs,R0),t(zs,Gl),t(Gl,Fl),t(Fl,G0),t(Fl,Au),t(Au,F0),t(Fl,M0),t(Gl,W0),t(Gl,F),t(F,Du),t(Du,qu),t(qu,B0),t(F,Y0),t(F,Su),t(Su,Lu),t(Lu,V0),t(F,X0),t(F,Uu),t(Uu,Nu),t(Nu,J0),t(F,Z0),t(F,Hu),t(Hu,zu),t(zu,K0),t(F,Q0),t(F,Ru),t(Ru,Gu),t(Gu,e6),t(F,t6),t(F,Fu),t(Fu,Mu),t(Mu,s6),p(e,T1,n),p(e,Fs,n),t(Fs,o6),t(Fs,Wu),t(Wu,r6),t(Fs,l6),p(e,A1,n),m(Ml,e,n),p(e,D1,n),p(e,Y,n),t(Y,a6),t(Y,Bu),t(Bu,i6),t(Y,n6),t(Y,Yu),t(Yu,p6),t(Y,f6),t(Y,Vu),t(Vu,h6),t(Y,u6),t(Y,Xu),t(Xu,d6),t(Y,c6),p(e,q1,n),m(Wl,e,n),p(e,S1,n),p(e,bt,n),t(bt,Ms),t(Ms,Ju),m(Bl,Ju,null),t(bt,m6),t(bt,Zu),t(Zu,_6),p(e,L1,n),p(e,Ws,n),t(Ws,v6),t(Ws,Ku),t(Ku,w6),t(Ws,y6),p(e,U1,n),p(e,sn,n),t(sn,$6),p(e,N1,n),p(e,Se,n),t(Se,E6),t(Se,Qu),t(Qu,g6),t(Se,b6),t(Se,ed),t(ed,k6),t(Se,C6),p(e,H1,n),p(e,on,n),t(on,j6),p(e,z1,n),m(Yl,e,n),p(e,R1,n),p(e,Bs,n),t(Bs,x6),t(Bs,td),t(td,P6),t(Bs,O6),p(e,G1,n),p(e,rn,n),t(rn,sd),t(sd,I6),p(e,F1,n),m(Vl,e,n),p(e,M1,n),p(e,Xl,n),t(Xl,od),t(od,T6),t(Xl,A6),p(e,W1,n),p(e,ln,n),t(ln,rd),t(rd,D6),p(e,B1,n),m(Jl,e,n),p(e,Y1,n),p(e,an,n),t(an,q6),p(e,V1,n),p(e,nn,n),t(nn,Zl),t(Zl,kt),t(kt,S6),t(kt,ld),t(ld,L6),t(kt,U6),t(kt,ad),t(ad,N6),t(kt,H6),t(Zl,z6),t(Zl,we),t(we,pn),t(pn,id),t(id,R6),t(pn,G6),t(we,F6),t(we,fn),t(fn,nd),t(nd,M6),t(fn,W6),t(we,B6),t(we,hn),t(hn,pd),t(pd,Y6),t(hn,V6),t(we,X6),t(we,un),t(un,fd),t(fd,J6),t(un,Z6),p(e,X1,n),m(Ys,e,n),p(e,J1,n),m(Vs,e,n),p(e,Z1,n),p(e,Ct,n),t(Ct,Xs),t(Xs,hd),m(Kl,hd,null),t(Ct,K6),t(Ct,ud),t(ud,Q6),p(e,K1,n),p(e,Le,n),t(Le,e7),t(Le,dd),t(dd,t7),t(Le,s7),t(Le,cd),t(cd,o7),t(Le,r7),p(e,Q1,n),m(Ql,e,n),p(e,ew,n),p(e,jt,n),t(jt,Js),t(Js,md),m(ea,md,null),t(jt,l7),t(jt,_d),t(_d,a7),p(e,tw,n),p(e,Zs,n),t(Zs,i7),t(Zs,vd),t(vd,n7),t(Zs,p7),p(e,sw,n),p(e,dn,n),t(dn,f7),p(e,ow,n),p(e,Ks,n),t(Ks,wd),t(wd,ta),t(ta,h7),t(ta,yd),t(yd,u7),t(ta,d7),t(Ks,c7),t(Ks,$d),t($d,sa),t(sa,m7),t(sa,Ed),t(Ed,_7),t(sa,v7),p(e,rw,n),p(e,le,n),t(le,w7),t(le,gd),t(gd,y7),t(le,$7),t(le,bd),t(bd,E7),t(le,g7),t(le,kd),t(kd,b7),t(le,k7),p(e,lw,n),p(e,xt,n),t(xt,Qs),t(Qs,Cd),m(oa,Cd,null),t(xt,C7),t(xt,jd),t(jd,j7),p(e,aw,n),p(e,cn,n),t(cn,xd),t(xd,x7),p(e,iw,n),m(ra,e,n),p(e,nw,n),p(e,mn,n),t(mn,P7),p(e,pw,n),m(la,e,n),p(e,fw,n),p(e,eo,n),t(eo,O7),t(eo,Pd),t(Pd,I7),t(eo,T7),p(e,hw,n),m(aa,e,n),p(e,uw,n),p(e,_n,n),t(_n,Od),t(Od,A7),p(e,dw,n),m(ia,e,n),p(e,cw,n),p(e,vn,n),t(vn,D7),p(e,mw,n),m(na,e,n),p(e,_w,n),p(e,to,n),t(to,q7),t(to,Id),t(Id,S7),t(to,L7),p(e,vw,n),m(pa,e,n),p(e,ww,n),p(e,wn,n),t(wn,Td),t(Td,U7),p(e,yw,n),m(fa,e,n),p(e,$w,n),p(e,yn,n),t(yn,Ad),t(Ad,N7),p(e,Ew,n),m(ha,e,n),p(e,gw,n),p(e,$n,n),t($n,H7),p(e,bw,n),m(ua,e,n),p(e,kw,n),p(e,En,n),t(En,z7),p(e,C
w,n),m(da,e,n),p(e,jw,n),p(e,so,n),t(so,R7),t(so,ca),t(ca,G7),t(so,F7),p(e,xw,n),p(e,Pt,n),t(Pt,oo),t(oo,Dd),m(ma,Dd,null),t(Pt,M7),t(Pt,qd),t(qd,W7),p(e,Pw,n),p(e,gn,n),t(gn,B7),p(e,Ow,n),m(_a,e,n),p(e,Iw,n),p(e,Ue,n),t(Ue,Y7),t(Ue,Sd),t(Sd,V7),t(Ue,X7),t(Ue,Ld),t(Ld,J7),t(Ue,Z7),p(e,Tw,n),m(va,e,n),p(e,Aw,n),p(e,ae,n),t(ae,K7),t(ae,Ud),t(Ud,Q7),t(ae,e8),t(ae,Nd),t(Nd,t8),t(ae,s8),t(ae,Hd),t(Hd,o8),t(ae,r8),p(e,Dw,n),m(wa,e,n),p(e,qw,n),p(e,bn,n),t(bn,l8),p(e,Sw,n),p(e,kn,n),t(kn,a8),p(e,Lw,n),p(e,Cn,n),t(Cn,i8),p(e,Uw,n),p(e,ie,n),t(ie,zd),t(zd,n8),t(ie,p8),t(ie,Rd),t(Rd,f8),t(ie,h8),t(ie,Ot),t(Ot,u8),t(Ot,Gd),t(Gd,d8),t(Ot,c8),t(Ot,Fd),t(Fd,m8),t(Ot,_8),t(ie,v8),t(ie,Md),t(Md,w8),p(e,Nw,n),p(e,Ne,n),t(Ne,y8),t(Ne,Wd),t(Wd,$8),t(Ne,E8),t(Ne,Bd),t(Bd,g8),t(Ne,b8),p(e,Hw,n),m(ya,e,n),p(e,zw,n),p(e,He,n),t(He,k8),t(He,$a),t($a,C8),t(He,j8),t(He,Ea),t(Ea,x8),t(He,P8),p(e,Rw,n),p(e,ro,n),t(ro,O8),t(ro,Yd),t(Yd,I8),t(ro,T8),p(e,Gw,n),p(e,jn,n),t(jn,A8),p(e,Fw,n),p(e,It,n),t(It,lo),t(lo,Vd),m(ga,Vd,null),t(It,D8),t(It,Xd),t(Xd,q8),p(e,Mw,n),p(e,V,n),t(V,S8),t(V,Jd),t(Jd,L8),t(V,U8),t(V,Zd),t(Zd,N8),t(V,H8),t(V,Kd),t(Kd,z8),t(V,R8),t(V,ba),t(ba,G8),t(V,F8),p(e,Ww,n),m(ka,e,n),p(e,Bw,n),p(e,ao,n),t(ao,M8),t(ao,Qd),t(Qd,W8),t(ao,B8),p(e,Yw,n),m(Ca,e,n),p(e,Vw,n),p(e,io,n),t(io,Y8),t(io,ec),t(ec,V8),t(io,X8),p(e,Xw,n),m(ja,e,n),p(e,Jw,n),p(e,T,n),t(T,J8),t(T,tc),t(tc,Z8),t(T,K8),t(T,sc),t(sc,Q8),t(T,e9),t(T,oc),t(oc,t9),t(T,s9),t(T,rc),t(rc,o9),t(T,r9),t(T,lc),t(lc,l9),t(T,a9),t(T,ac),t(ac,i9),t(T,n9),p(e,Zw,n),p(e,no,n),t(no,p9),t(no,ic),t(ic,f9),t(no,h9),p(e,Kw,n),m(xa,e,n),p(e,Qw,n),p(e,xn,n),t(xn,u9),p(e,ey,n),m(Pa,e,n),p(e,ty,n),p(e,ze,n),t(ze,d9),t(ze,nc),t(nc,c9),t(ze,m9),t(ze,pc),t(pc,_9),t(ze,v9),p(e,sy,n),m(Oa,e,n),p(e,oy,n),p(e,po,n),t(po,w9),t(po,fc),t(fc,y9),t(po,$9),p(e,ry,n),m(Ia,e,n),p(e,ly,n),p(e,Pn,n),t(Pn,E9),p(e,ay,n),p(e,Tt,n),t(Tt,fo),t(fo,hc),m(Ta,hc,null),t(Tt,g9),t(Tt,uc),t(uc,b9),p(e,iy,n),p(e,ho,n),t(ho,k9),t(ho,dc),t(dc,C9),t(ho,j9),p(e,ny,n),m(Aa,e,n),p(e,py,n),p(e,At,n),t(At,uo),t(uo,cc),m(Da,cc,null),t(At,x9),t(At,mc),t(mc,P9),p(e,fy,n),p(e,qa,n),t(qa,O9),t(qa,_c),t(_c,I9),p(e,hy,n),m(Sa,e,n),p(e,uy,n),p(e,ne,n),t(ne,T9),t(ne,vc),t(vc,A9),t(ne,D9),t(ne,wc),t(wc,q9),t(ne,S9),t(ne,yc),t(yc,L9),t(ne,U9),p(e,dy,n),m(La,e,n),p(e,cy,n),p(e,N,n),t(N,N9),t(N,$c),t($c,H9),t(N,z9),t(N,Ec),t(Ec,R9),t(N,G9),t(N,gc),t(gc,F9),t(N,M9),t(N,bc),t(bc,W9),t(N,B9),t(N,kc),t(kc,Y9),t(N,V9),p(e,my,n),p(e,co,n),t(co,X9),t(co,Cc),t(Cc,J9),t(co,Z9),p(e,_y,n),p(e,Dt,n),t(Dt,mo),t(mo,jc),m(Ua,jc,null),t(Dt,K9),t(Dt,xc),t(xc,Q9),p(e,vy,n),p(e,On,n),t(On,eC),p(e,wy,n),m(Na,e,n),p(e,yy,n),p(e,qt,n),t(qt,_o),t(_o,Pc),m(Ha,Pc,null),t(qt,tC),t(qt,Oc),t(Oc,sC),p(e,$y,n),p(e,In,n),t(In,oC),p(e,Ey,n),m(za,e,n),p(e,gy,n),p(e,St,n),t(St,vo),t(vo,Ic),m(Ra,Ic,null),t(St,rC),t(St,Tc),t(Tc,lC),p(e,by,n),p(e,Tn,n),t(Tn,aC),p(e,ky,n),p(e,pe,n),t(pe,Ga),t(Ga,iC),t(Ga,Ac),t(Ac,nC),t(Ga,pC),t(pe,fC),t(pe,H),t(H,hC),t(H,Dc),t(Dc,uC),t(H,dC),t(H,qc),t(qc,cC),t(H,mC),t(H,Sc),t(Sc,_C),t(H,vC),t(H,Lc),t(Lc,wC),t(H,yC),t(H,Fa),t(Fa,$C),t(H,EC),t(H,Uc),t(Uc,gC),t(pe,bC),t(pe,Nc),t(Nc,kC),t(pe,CC),t(pe,Ma),t(Ma,jC),t(Ma,Wa),t(Wa,xC),t(Ma,PC),p(e,Cy,n),p(e,Lt,n),t(Lt,wo),t(wo,Hc),m(Ba,Hc,null),t(Lt,OC),t(Lt,zc),t(zc,IC),p(e,jy,n),p(e,An,n),t(An,TC),p(e,xy,n),p(e,fe,n),t(fe,Rc),t(Rc,AC),t(fe,DC),t(fe,Gc),t(Gc,qC),t(fe,SC),t(fe,Fc),t(Fc,LC),t(fe,UC),t(fe,Mc),t(Mc,NC),p(e,Py,n),p(e,Dn,n),t(Dn,HC),p(e,Oy,n),p(e,qn,n),t(qn,zC),p(e,Iy,n),p(e,Sn,n),t(Sn,RC),p(e,Ty,n),p(e,Ln,n),t(Ln,GC),p(e,Ay,n),p(e,yo,n),t(yo,Un),t(Un,Wc),t(Wc,FC
),t(Un,MC),t(yo,WC),t(yo,Ut),t(Ut,BC),t(Ut,Bc),t(Bc,YC),t(Ut,VC),t(Ut,Yc),t(Yc,XC),t(Ut,JC),p(e,Dy,n),p(e,Nn,n),t(Nn,ZC),p(e,qy,n),m(Ya,e,n),p(e,Sy,n),p(e,Hn,n),t(Hn,KC),p(e,Ly,n),m(Va,e,n),p(e,Uy,n),p(e,$o,n),t($o,QC),t($o,Vc),t(Vc,ej),t($o,tj),p(e,Ny,n),p(e,Eo,n),t(Eo,sj),t(Eo,Xc),t(Xc,oj),t(Eo,rj),p(e,Hy,n),p(e,zn,n),t(zn,lj),p(e,zy,n),p(e,go,n),t(go,Jc),t(Jc,Xa),t(Xa,aj),t(go,ij),t(go,Zc),t(Zc,Ja),t(Ja,nj),Ry=!0},p(e,[n]){const Za={};n&2&&(Za.$$scope={dirty:n,ctx:e}),ps.$set(Za);const Kc={};n&2&&(Kc.$$scope={dirty:n,ctx:e}),fs.$set(Kc);const Qc={};n&2&&(Qc.$$scope={dirty:n,ctx:e}),Ys.$set(Qc);const em={};n&2&&(em.$$scope={dirty:n,ctx:e}),Vs.$set(em)},i(e){Ry||(_(j.$$.fragment,e),_(Lo.$$.fragment,e),_(Yo.$$.fragment,e),_(Xo.$$.fragment,e),_(Jo.$$.fragment,e),_(Ko.$$.fragment,e),_(Qo.$$.fragment,e),_(er.$$.fragment,e),_(tr.$$.fragment,e),_(sr.$$.fragment,e),_(or.$$.fragment,e),_(rr.$$.fragment,e),_(lr.$$.fragment,e),_(ar.$$.fragment,e),_(ir.$$.fragment,e),_(nr.$$.fragment,e),_(pr.$$.fragment,e),_(fr.$$.fragment,e),_(hr.$$.fragment,e),_(ur.$$.fragment,e),_(dr.$$.fragment,e),_(cr.$$.fragment,e),_(mr.$$.fragment,e),_(vr.$$.fragment,e),_(wr.$$.fragment,e),_(yr.$$.fragment,e),_(gr.$$.fragment,e),_(br.$$.fragment,e),_(kr.$$.fragment,e),_(xr.$$.fragment,e),_(Pr.$$.fragment,e),_(Or.$$.fragment,e),_(Ir.$$.fragment,e),_(Tr.$$.fragment,e),_(Dr.$$.fragment,e),_(qr.$$.fragment,e),_(Lr.$$.fragment,e),_(Ur.$$.fragment,e),_(ps.$$.fragment,e),_(fs.$$.fragment,e),_(Nr.$$.fragment,e),_(Hr.$$.fragment,e),_(zr.$$.fragment,e),_(Rr.$$.fragment,e),_(Gr.$$.fragment,e),_(Fr.$$.fragment,e),_(Br.$$.fragment,e),_(Yr.$$.fragment,e),_(Jr.$$.fragment,e),_(Zr.$$.fragment,e),_(Kr.$$.fragment,e),_(Qr.$$.fragment,e),_(el.$$.fragment,e),_(ol.$$.fragment,e),_(rl.$$.fragment,e),_(ll.$$.fragment,e),_(al.$$.fragment,e),_(il.$$.fragment,e),_(nl.$$.fragment,e),_(pl.$$.fragment,e),_(fl.$$.fragment,e),_(hl.$$.fragment,e),_(ul.$$.fragment,e),_(dl.$$.fragment,e),_(_l.$$.fragment,e),_(vl.$$.fragment,e),_(wl.$$.fragment,e),_(yl.$$.fragment,e),_($l.$$.fragment,e),_(El.$$.fragment,e),_(gl.$$.fragment,e),_(bl.$$.fragment,e),_(kl.$$.fragment,e),_(Cl.$$.fragment,e),_(xl.$$.fragment,e),_(Pl.$$.fragment,e),_(Ol.$$.fragment,e),_(Il.$$.fragment,e),_(Tl.$$.fragment,e),_(Al.$$.fragment,e),_(Dl.$$.fragment,e),_(Sl.$$.fragment,e),_(Ll.$$.fragment,e),_(Ul.$$.fragment,e),_(Nl.$$.fragment,e),_(Hl.$$.fragment,e),_(Ml.$$.fragment,e),_(Wl.$$.fragment,e),_(Bl.$$.fragment,e),_(Yl.$$.fragment,e),_(Vl.$$.fragment,e),_(Jl.$$.fragment,e),_(Ys.$$.fragment,e),_(Vs.$$.fragment,e),_(Kl.$$.fragment,e),_(Ql.$$.fragment,e),_(ea.$$.fragment,e),_(oa.$$.fragment,e),_(ra.$$.fragment,e),_(la.$$.fragment,e),_(aa.$$.fragment,e),_(ia.$$.fragment,e),_(na.$$.fragment,e),_(pa.$$.fragment,e),_(fa.$$.fragment,e),_(ha.$$.fragment,e),_(ua.$$.fragment,e),_(da.$$.fragment,e),_(ma.$$.fragment,e),_(_a.$$.fragment,e),_(va.$$.fragment,e),_(wa.$$.fragment,e),_(ya.$$.fragment,e),_(ga.$$.fragment,e),_(ka.$$.fragment,e),_(Ca.$$.fragment,e),_(ja.$$.fragment,e),_(xa.$$.fragment,e),_(Pa.$$.fragment,e),_(Oa.$$.fragment,e),_(Ia.$$.fragment,e),_(Ta.$$.fragment,e),_(Aa.$$.fragment,e),_(Da.$$.fragment,e),_(Sa.$$.fragment,e),_(La.$$.fragment,e),_(Ua.$$.fragment,e),_(Na.$$.fragment,e),_(Ha.$$.fragment,e),_(za.$$.fragment,e),_(Ra.$$.fragment,e),_(Ba.$$.fragment,e),_(Ya.$$.fragment,e),_(Va.$$.fragment,e),Ry=!0)},o(e){v(j.$$.fragment,e),v(Lo.$$.fragment,e),v(Yo.$$.fragment,e),v(Xo.$$.fragment,e),v(Jo.$$.fragment,e),v(Ko.$$.fragment,e),v(Qo.$$.fragment,e),v(er.$$.fragment,e),v(tr.$$.fragment,e),v(sr.$$.fra
gment,e),v(or.$$.fragment,e),v(rr.$$.fragment,e),v(lr.$$.fragment,e),v(ar.$$.fragment,e),v(ir.$$.fragment,e),v(nr.$$.fragment,e),v(pr.$$.fragment,e),v(fr.$$.fragment,e),v(hr.$$.fragment,e),v(ur.$$.fragment,e),v(dr.$$.fragment,e),v(cr.$$.fragment,e),v(mr.$$.fragment,e),v(vr.$$.fragment,e),v(wr.$$.fragment,e),v(yr.$$.fragment,e),v(gr.$$.fragment,e),v(br.$$.fragment,e),v(kr.$$.fragment,e),v(xr.$$.fragment,e),v(Pr.$$.fragment,e),v(Or.$$.fragment,e),v(Ir.$$.fragment,e),v(Tr.$$.fragment,e),v(Dr.$$.fragment,e),v(qr.$$.fragment,e),v(Lr.$$.fragment,e),v(Ur.$$.fragment,e),v(ps.$$.fragment,e),v(fs.$$.fragment,e),v(Nr.$$.fragment,e),v(Hr.$$.fragment,e),v(zr.$$.fragment,e),v(Rr.$$.fragment,e),v(Gr.$$.fragment,e),v(Fr.$$.fragment,e),v(Br.$$.fragment,e),v(Yr.$$.fragment,e),v(Jr.$$.fragment,e),v(Zr.$$.fragment,e),v(Kr.$$.fragment,e),v(Qr.$$.fragment,e),v(el.$$.fragment,e),v(ol.$$.fragment,e),v(rl.$$.fragment,e),v(ll.$$.fragment,e),v(al.$$.fragment,e),v(il.$$.fragment,e),v(nl.$$.fragment,e),v(pl.$$.fragment,e),v(fl.$$.fragment,e),v(hl.$$.fragment,e),v(ul.$$.fragment,e),v(dl.$$.fragment,e),v(_l.$$.fragment,e),v(vl.$$.fragment,e),v(wl.$$.fragment,e),v(yl.$$.fragment,e),v($l.$$.fragment,e),v(El.$$.fragment,e),v(gl.$$.fragment,e),v(bl.$$.fragment,e),v(kl.$$.fragment,e),v(Cl.$$.fragment,e),v(xl.$$.fragment,e),v(Pl.$$.fragment,e),v(Ol.$$.fragment,e),v(Il.$$.fragment,e),v(Tl.$$.fragment,e),v(Al.$$.fragment,e),v(Dl.$$.fragment,e),v(Sl.$$.fragment,e),v(Ll.$$.fragment,e),v(Ul.$$.fragment,e),v(Nl.$$.fragment,e),v(Hl.$$.fragment,e),v(Ml.$$.fragment,e),v(Wl.$$.fragment,e),v(Bl.$$.fragment,e),v(Yl.$$.fragment,e),v(Vl.$$.fragment,e),v(Jl.$$.fragment,e),v(Ys.$$.fragment,e),v(Vs.$$.fragment,e),v(Kl.$$.fragment,e),v(Ql.$$.fragment,e),v(ea.$$.fragment,e),v(oa.$$.fragment,e),v(ra.$$.fragment,e),v(la.$$.fragment,e),v(aa.$$.fragment,e),v(ia.$$.fragment,e),v(na.$$.fragment,e),v(pa.$$.fragment,e),v(fa.$$.fragment,e),v(ha.$$.fragment,e),v(ua.$$.fragment,e),v(da.$$.fragment,e),v(ma.$$.fragment,e),v(_a.$$.fragment,e),v(va.$$.fragment,e),v(wa.$$.fragment,e),v(ya.$$.fragment,e),v(ga.$$.fragment,e),v(ka.$$.fragment,e),v(Ca.$$.fragment,e),v(ja.$$.fragment,e),v(xa.$$.fragment,e),v(Pa.$$.fragment,e),v(Oa.$$.fragment,e),v(Ia.$$.fragment,e),v(Ta.$$.fragment,e),v(Aa.$$.fragment,e),v(Da.$$.fragment,e),v(Sa.$$.fragment,e),v(La.$$.fragment,e),v(Ua.$$.fragment,e),v(Na.$$.fragment,e),v(Ha.$$.fragment,e),v(za.$$.fragment,e),v(Ra.$$.fragment,e),v(Ba.$$.fragment,e),v(Ya.$$.fragment,e),v(Va.$$.fragment,e),Ry=!1},d(e){s($),e&&s(x),e&&s(g),w(j),e&&s(k),e&&s(S),e&&s(Nt),e&&s(ye),e&&s(q),e&&s(_e),e&&s(fm),e&&s(Ve),w(Lo),e&&s(hm),e&&s(Rt),e&&s(um),w(Yo,e),e&&s(dm),e&&s(Gt),e&&s(cm),e&&s(Je),w(Xo),e&&s(mm),e&&s(Ze),w(Jo),e&&s(_m),e&&s(Wt),e&&s(vm),e&&s(ri),e&&s(wm),e&&s(li),e&&s(ym),w(Ko,e),e&&s($m),e&&s(ai),e&&s(Em),w(Qo,e),e&&s(gm),e&&s(ii),e&&s(bm),w(er,e),e&&s(km),e&&s(ni),e&&s(Cm),e&&s(Q),e&&s(jm),e&&s(Ke),w(tr),e&&s(xm),e&&s(pi),e&&s(Pm),w(sr,e),e&&s(Om),e&&s(fi),e&&s(Im),w(or,e),e&&s(Tm),e&&s(Qe),w(rr),e&&s(Am),e&&s(hi),e&&s(Dm),w(lr,e),e&&s(qm),e&&s(et),w(ar),e&&s(Sm),e&&s(ui),e&&s(Lm),w(ir,e),e&&s(Um),e&&s(di),e&&s(Nm),e&&s(Ee),e&&s(Hm),e&&s(vi),e&&s(zm),w(nr,e),e&&s(Rm),e&&s(wi),e&&s(Gm),e&&s(Xt),e&&s(Fm),w(pr,e),e&&s(Mm),e&&s(yi),e&&s(Wm),e&&s(Jt),e&&s(Bm),w(fr,e),e&&s(Ym),e&&s(ee),e&&s(Vm),e&&s(Zt),e&&s(Xm),w(hr,e),e&&s(Jm),e&&s($i),e&&s(Zm),w(ur,e),e&&s(Km),e&&s(ge),e&&s(Qm),w(dr,e),e&&s(e_),e&&s(Kt),e&&s(t_),e&&s(Qt),e&&s(s_),w(cr,e),e&&s(o_),e&&s(tt),w(mr),e&&s(r_),e&&s(ts),e&&s(l_),w(vr,e),e&&s(a_),w(wr,e),e&&s(i_),e&&s(Ei),e&&s(n_),e&&s(st)
,w(yr),e&&s(p_),e&&s($r),e&&s(f_),w(gr,e),e&&s(h_),e&&s(ot),e&&s(u_),e&&s(be),e&&s(d_),w(br,e),e&&s(c_),e&&s(ke),e&&s(m_),w(kr,e),e&&s(__),e&&s(gi),e&&s(v_),e&&s(Cr),e&&s(w_),e&&s(rt),w(xr),e&&s(y_),e&&s(rs),e&&s($_),w(Pr,e),e&&s(E_),e&&s(lt),w(Or),e&&s(g_),e&&s(bi),e&&s(b_),w(Ir,e),e&&s(k_),e&&s(at),w(Tr),e&&s(C_),e&&s(M),e&&s(j_),e&&s(ve),e&&s(x_),e&&s(Ce),e&&s(P_),e&&s(it),w(Dr),e&&s(O_),e&&s(ki),e&&s(I_),e&&s(nt),w(qr),e&&s(T_),e&&s(Ci),e&&s(A_),w(Lr,e),e&&s(D_),e&&s(xi),e&&s(q_),w(Ur,e),e&&s(S_),w(ps,e),e&&s(L_),w(fs,e),e&&s(U_),e&&s(pt),w(Nr),e&&s(N_),w(Hr,e),e&&s(H_),e&&s(us),e&&s(z_),e&&s(ds),e&&s(R_),w(zr,e),e&&s(G_),e&&s(Pi),e&&s(F_),w(Rr,e),e&&s(M_),e&&s(cs),e&&s(W_),w(Gr,e),e&&s(B_),e&&s(Oi),e&&s(Y_),w(Fr,e),e&&s(V_),e&&s(I),e&&s(X_),e&&s(je),e&&s(J_),e&&s(ft),w(Br),e&&s(Z_),e&&s(ht),w(Yr),e&&s(K_),e&&s(Vr),e&&s(Q_),w(Jr,e),e&&s(ev),e&&s(Ii),e&&s(tv),w(Zr,e),e&&s(sv),e&&s(Ti),e&&s(ov),e&&s(ut),w(Kr),e&&s(rv),e&&s(xe),e&&s(lv),w(Qr,e),e&&s(av),e&&s(dt),w(el),e&&s(iv),e&&s(tl),e&&s(nv),w(ol,e),e&&s(pv),w(rl,e),e&&s(fv),e&&s(ct),w(ll),e&&s(hv),e&&s($s),e&&s(uv),w(al,e),e&&s(dv),e&&s(te),e&&s(cv),w(il,e),e&&s(mv),e&&s(Ai),e&&s(_v),e&&s(Di),e&&s(vv),e&&s(U),e&&s(wv),e&&s(Si),e&&s(yv),e&&s(C),e&&s($v),e&&s(Li),e&&s(Ev),w(nl,e),e&&s(gv),e&&s(Pe),e&&s(bv),w(pl,e),e&&s(kv),e&&s(Ui),e&&s(Cv),w(fl,e),e&&s(jv),e&&s(Oe),e&&s(xv),w(hl,e),e&&s(Pv),e&&s(js),e&&s(Ov),e&&s(Ni),e&&s(Iv),e&&s(Hi),e&&s(Tv),w(ul,e),e&&s(Av),e&&s(mt),w(dl),e&&s(Dv),e&&s(_t),e&&s(qv),e&&s(zi),e&&s(Sv),e&&s(Ps),e&&s(Lv),e&&s(Os),e&&s(Uv),e&&s(Ri),e&&s(Nv),w(_l,e),e&&s(Hv),e&&s(vt),w(vl),e&&s(zv),e&&s(Ie),e&&s(Rv),e&&s(W),e&&s(Gv),w(wl,e),e&&s(Fv),e&&s(Gi),e&&s(Mv),w(yl,e),e&&s(Wv),e&&s(wt),w($l),e&&s(Bv),e&&s(Fi),e&&s(Yv),w(El,e),e&&s(Vv),e&&s(yt),w(gl),e&&s(Xv),e&&s(Mi),e&&s(Jv),w(bl,e),e&&s(Zv),e&&s(Wi),e&&s(Kv),e&&s(Bi),e&&s(Qv),w(kl,e),e&&s(e1),e&&s($t),w(Cl),e&&s(t1),e&&s(Te),e&&s(s1),e&&s(se),e&&s(o1),e&&s(Et),w(xl),e&&s(r1),e&&s(Yi),e&&s(l1),w(Pl,e),e&&s(a1),e&&s(Ss),e&&s(i1),e&&s(Ae),e&&s(n1),w(Ol,e),e&&s(p1),e&&s(Ls),e&&s(f1),w(Il,e),e&&s(h1),e&&s(Us),e&&s(u1),w(Tl,e),e&&s(d1),e&&s(Vi),e&&s(c1),w(Al,e),e&&s(m1),e&&s(Xi),e&&s(_1),w(Dl,e),e&&s(v1),e&&s(B),e&&s(w1),e&&s(oe),e&&s(y1),e&&s(De),e&&s($1),w(Sl,e),e&&s(E1),e&&s(re),e&&s(g1),w(Ll,e),e&&s(b1),e&&s(Ji),e&&s(k1),w(Ul,e),e&&s(C1),e&&s(Zi),e&&s(j1),w(Nl,e),e&&s(x1),e&&s(Ki),e&&s(P1),e&&s(gt),w(Hl),e&&s(O1),e&&s(Hs),e&&s(I1),e&&s(zs),e&&s(T1),e&&s(Fs),e&&s(A1),w(Ml,e),e&&s(D1),e&&s(Y),e&&s(q1),w(Wl,e),e&&s(S1),e&&s(bt),w(Bl),e&&s(L1),e&&s(Ws),e&&s(U1),e&&s(sn),e&&s(N1),e&&s(Se),e&&s(H1),e&&s(on),e&&s(z1),w(Yl,e),e&&s(R1),e&&s(Bs),e&&s(G1),e&&s(rn),e&&s(F1),w(Vl,e),e&&s(M1),e&&s(Xl),e&&s(W1),e&&s(ln),e&&s(B1),w(Jl,e),e&&s(Y1),e&&s(an),e&&s(V1),e&&s(nn),e&&s(X1),w(Ys,e),e&&s(J1),w(Vs,e),e&&s(Z1),e&&s(Ct),w(Kl),e&&s(K1),e&&s(Le),e&&s(Q1),w(Ql,e),e&&s(ew),e&&s(jt),w(ea),e&&s(tw),e&&s(Zs),e&&s(sw),e&&s(dn),e&&s(ow),e&&s(Ks),e&&s(rw),e&&s(le),e&&s(lw),e&&s(xt),w(oa),e&&s(aw),e&&s(cn),e&&s(iw),w(ra,e),e&&s(nw),e&&s(mn),e&&s(pw),w(la,e),e&&s(fw),e&&s(eo),e&&s(hw),w(aa,e),e&&s(uw),e&&s(_n),e&&s(dw),w(ia,e),e&&s(cw),e&&s(vn),e&&s(mw),w(na,e),e&&s(_w),e&&s(to),e&&s(vw),w(pa,e),e&&s(ww),e&&s(wn),e&&s(yw),w(fa,e),e&&s($w),e&&s(yn),e&&s(Ew),w(ha,e),e&&s(gw),e&&s($n),e&&s(bw),w(ua,e),e&&s(kw),e&&s(En),e&&s(Cw),w(da,e),e&&s(jw),e&&s(so),e&&s(xw),e&&s(Pt),w(ma),e&&s(Pw),e&&s(gn),e&&s(Ow),w(_a,e),e&&s(Iw),e&&s(Ue),e&&s(Tw),w(va,e),e&&s(Aw),e&&s(ae),e&&s(Dw),w(wa,e),e&&s(qw),e&&s(bn),e&&s(Sw),e&&s(kn),e&&s(Lw),e&&s(Cn),e&&s(Uw),e&&s(ie),e&&s(Nw),e&&s(Ne),e&&s(Hw),w(ya,e),e&&s(zw
),e&&s(He),e&&s(Rw),e&&s(ro),e&&s(Gw),e&&s(jn),e&&s(Fw),e&&s(It),w(ga),e&&s(Mw),e&&s(V),e&&s(Ww),w(ka,e),e&&s(Bw),e&&s(ao),e&&s(Yw),w(Ca,e),e&&s(Vw),e&&s(io),e&&s(Xw),w(ja,e),e&&s(Jw),e&&s(T),e&&s(Zw),e&&s(no),e&&s(Kw),w(xa,e),e&&s(Qw),e&&s(xn),e&&s(ey),w(Pa,e),e&&s(ty),e&&s(ze),e&&s(sy),w(Oa,e),e&&s(oy),e&&s(po),e&&s(ry),w(Ia,e),e&&s(ly),e&&s(Pn),e&&s(ay),e&&s(Tt),w(Ta),e&&s(iy),e&&s(ho),e&&s(ny),w(Aa,e),e&&s(py),e&&s(At),w(Da),e&&s(fy),e&&s(qa),e&&s(hy),w(Sa,e),e&&s(uy),e&&s(ne),e&&s(dy),w(La,e),e&&s(cy),e&&s(N),e&&s(my),e&&s(co),e&&s(_y),e&&s(Dt),w(Ua),e&&s(vy),e&&s(On),e&&s(wy),w(Na,e),e&&s(yy),e&&s(qt),w(Ha),e&&s($y),e&&s(In),e&&s(Ey),w(za,e),e&&s(gy),e&&s(St),w(Ra),e&&s(by),e&&s(Tn),e&&s(ky),e&&s(pe),e&&s(Cy),e&&s(Lt),w(Ba),e&&s(jy),e&&s(An),e&&s(xy),e&&s(fe),e&&s(Py),e&&s(Dn),e&&s(Oy),e&&s(qn),e&&s(Iy),e&&s(Sn),e&&s(Ty),e&&s(Ln),e&&s(Ay),e&&s(yo),e&&s(Dy),e&&s(Nn),e&&s(qy),w(Ya,e),e&&s(Sy),e&&s(Hn),e&&s(Ly),w(Va,e),e&&s(Uy),e&&s($o),e&&s(Ny),e&&s(Eo),e&&s(Hy),e&&s(zn),e&&s(zy),e&&s(go)}}}const hS={local:"testing",sections:[{local:"how-transformers-are-tested",title:"How transformers are tested"},{local:"running-tests",sections:[{local:"choosing-which-tests-to-run",title:"Choosing which tests to run"},{local:"getting-the-list-of-all-tests",title:"Getting the list of all tests"},{local:"run-a-specific-test-module",title:"Run a specific test module"},{local:"run-specific-tests",title:"Run specific tests"},{local:"run-only-modified-tests",title:"Run only modified tests"},{local:"automatically-rerun-failed-tests-on-source-modification",title:"Automatically rerun failed tests on source modification"},{local:"skip-a-test-module",title:"Skip a test module"},{local:"clearing-state",title:"Clearing state"},{local:"running-tests-in-parallel",title:"Running tests in parallel"},{local:"test-order-and-repetition",sections:[{local:"repeat-tests",title:"Repeat tests"},{local:"run-tests-in-a-random-order",title:"Run tests in a random order"}],title:"Test order and repetition"},{local:"look-and-feel-variations",sections:[{local:"pytestsugar",title:"pytest-sugar"},{local:"report-each-subtest-name-and-its-progress",title:"Report each sub-test name and its progress"},{local:"instantly-shows-failed-tests",title:"Instantly shows failed tests"}],title:"Look and feel variations"},{local:"to-gpu-or-not-to-gpu",title:"To GPU or not to GPU"},{local:"distributed-training",title:"Distributed training"},{local:"output-capture",title:"Output capture"},{local:"color-control",title:"Color control"},{local:"sending-test-report-to-online-pastebin-service",title:"Sending test report to online pastebin service"}],title:"Running tests"},{local:"writing-tests",sections:[{local:"parametrization",title:"Parametrization"},{local:"files-and-directories",title:"Files and directories"},{local:"temporary-files-and-directories",title:"Temporary files and directories"},{local:"temporary-syspath-override",title:"Temporary sys.path override"},{local:"skipping-tests",sections:[{local:"implementation",title:"Implementation"}],title:"Skipping tests"},{local:"slow-tests",title:"Slow tests"},{local:"testing-the-stdoutstderr-output",title:"Testing the stdout/stderr output"},{local:"capturing-logger-stream",title:"Capturing logger stream"},{local:"testing-with-environment-variables",title:"Testing with environment variables"},{local:"getting-reproducible-results",title:"Getting reproducible results"},{local:"debugging-tests",title:"Debugging tests"}],title:"Writing tests"},{local:"working-with-github-actions-workflows",title:"Working with 
github actions workflows"},{local:"testing-experimental-ci-features",title:"Testing Experimental CI Features"}],title:"Testing"};function uS(me,$,x){let{fw:g}=$;return me.$$set=b=>{"fw"in b&&x(0,g=b.fw)},[g]}class wS extends sS{constructor($){super();oS(this,$,uS,fS,rS,{fw:0})}}export{wS as default,hS as metadata};
273
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages/add_new_model.mdx-07427b7e.js
import{S as l9,i as d9,s as h9,e as a,k as h,w as p,t as r,M as f9,c as s,d as o,m as f,a as i,x as u,h as n,b as m,N as m9,F as t,g as d,y as c,q as y,o as g,B as w}from"../chunks/vendor-4833417e.js";import{T as p9}from"../chunks/Tip-fffd6df1.js";import{I as je}from"../chunks/IconCopyLink-4b81c553.js";import{C as E}from"../chunks/CodeBlock-6a3d1b46.js";import"../chunks/CopyButton-dacfbfaf.js";function u9(Ca){let P,ue,I,L,Oe,D,ao;return{c(){P=a("p"),ue=r("In case you are using Windows, you should replace "),I=a("code"),L=r("RUN_SLOW=1"),Oe=r(" with "),D=a("code"),ao=r("SET RUN_SLOW=1")},l(le){P=s(le,"P",{});var de=i(P);ue=n(de,"In case you are using Windows, you should replace "),I=s(de,"CODE",{});var so=i(I);L=n(so,"RUN_SLOW=1"),so.forEach(o),Oe=n(de," with "),D=s(de,"CODE",{});var z=i(D);ao=n(z,"SET RUN_SLOW=1"),z.forEach(o),de.forEach(o)},m(le,de){d(le,P,de),t(P,ue),t(P,I),t(I,L),t(P,Oe),t(P,D),t(D,ao)},d(le){le&&o(P)}}}function c9(Ca){let P,ue,I,L,Oe,D,ao,le,de,so,z,xu,qa,Cu,qu,Sa,Su,Ru,wh,et,Du,io,zu,Fu,vh,Cr,Hu,bh,W,Ra,Wu,Gu,Da,Uu,Yu,za,Ju,Xu,he,Zu,Fa,Ku,Qu,Ha,Vu,ec,Wa,tc,oc,_h,tt,rc,lo,nc,ac,Eh,qr,sc,kh,xe,ot,Ga,ho,ic,Ua,lc,Th,Sr,dc,$h,rt,hc,Rr,fc,mc,Ph,ce,Ya,pc,uc,Ja,cc,yc,fo,gc,Xa,wc,vc,Ih,nt,bc,Za,_c,Ec,Ah,Dr,kc,Nh,Ce,at,Ka,mo,Tc,Qa,$c,Mh,G,Pc,zr,Ic,Ac,Fr,Nc,Mc,Va,Bc,Lc,Bh,Hr,jc,Lh,Wr,h3,jh,v,Oc,es,xc,Cc,ts,qc,Sc,Gr,Rc,Dc,Ur,zc,Fc,Yr,Hc,Wc,Jr,Gc,Uc,os,Yc,Jc,rs,Xc,Zc,ns,Kc,Qc,as,Vc,ey,ss,ty,oy,is,ry,ny,Xr,ay,sy,ls,iy,ly,ds,dy,hy,Oh,po,xh,A,fy,Zr,my,py,hs,uy,cy,fs,yy,gy,Kr,wy,vy,Qr,by,_y,Ch,qe,st,ms,uo,Ey,ps,ky,qh,Vr,Ty,Sh,Se,it,us,co,$y,cs,Py,Rh,en,Iy,Dh,lt,yo,go,Ay,Ny,wo,My,By,vo,bo,Ly,jy,_o,Oy,zh,tn,xy,Fh,ye,fe,Cy,Eo,qy,Sy,ko,Ry,Dy,ys,zy,Fy,Hy,gs,Wy,Gy,ws,Uy,Hh,on,Yy,Wh,rn,Jy,Gh,b,vs,bs,_s,Xy,Zy,Es,nn,ks,Ky,Qy,Ts,an,$s,Vy,eg,Ps,sn,Is,tg,og,As,ln,Ns,rg,ng,Ms,dn,Bs,ag,sg,Ls,hn,js,ig,lg,Os,fn,xs,dg,hg,Cs,mn,qs,fg,mg,Ss,pn,Rs,pg,ug,Ds,un,zs,cg,yg,Fs,cn,Hs,gg,wg,Ws,yn,Gs,vg,bg,Us,gn,Ys,_g,Uh,O,Eg,Js,kg,Tg,Xs,$g,Pg,Zs,Ig,Ag,Ks,Ng,Mg,Yh,Re,dt,Qs,To,Bg,Vs,Lg,Jh,ht,jg,ei,Og,xg,Xh,x,De,Cg,ti,qg,Sg,wn,Rg,Dg,zg,ze,Fg,oi,Hg,Wg,ri,Gg,Ug,Yg,ni,Jg,Xg,Fe,Zg,$o,Kg,Qg,ai,Vg,ew,tw,si,ow,Zh,vn,rw,Kh,He,ft,ii,Po,nw,li,aw,Qh,mt,di,Io,sw,Ao,iw,lw,dw,hi,No,hw,fi,fw,mw,Vh,Mo,ef,Bo,mi,pw,tf,Lo,of,bn,uw,rf,jo,nf,Oo,We,cw,pi,yw,gw,xo,ww,vw,af,Co,ui,bw,_w,sf,qo,So,Ew,ci,kw,Tw,lf,Ro,df,pt,$w,yi,Pw,Iw,hf,Ge,ut,gi,Do,Aw,wi,Nw,ff,N,Mw,vi,Bw,Lw,bi,jw,Ow,_i,xw,Cw,Ei,qw,Sw,ki,Rw,Dw,mf,_n,zw,pf,ct,Fw,Ti,Hw,Ww,uf,M,$i,Gw,Uw,Pi,Yw,Jw,Ii,Xw,Zw,Ai,Kw,Qw,F,Vw,Ni,ev,tv,Mi,ov,rv,Bi,nv,av,Li,sv,iv,lv,Ue,dv,ji,hv,fv,Oi,mv,pv,cf,yt,uv,xi,cv,yv,yf,En,gv,gf,kn,wv,wf,gt,zo,Fo,vv,bv,Ho,_v,Ev,Ci,kv,vf,Tn,Tv,bf,wt,$v,qi,Pv,Iv,_f,vt,Av,Si,Nv,Mv,Ef,Wo,kf,$n,Bv,Tf,bt,Ri,Lv,jv,Ye,Ov,Di,xv,Cv,zi,qv,Sv,$f,Pn,Rv,Pf,_t,Dv,Fi,zv,Fv,If,U,Hi,Hv,Wv,Wi,Gv,Uv,Gi,Yv,Jv,Ui,Xv,Af,Go,Uo,Zv,Kv,Nf,Et,Qv,Yo,Vv,eb,Mf,In,tb,Bf,An,ob,Lf,B,Yi,rb,nb,Ji,ab,sb,Xi,ib,lb,Zi,db,hb,Ki,fb,mb,Qi,pb,jf,Je,ub,Vi,cb,yb,el,gb,Of,Nn,wb,xf,Jo,Cf,ge,vb,tl,bb,_b,ol,Eb,kb,qf,C,H,Tb,Xo,$b,Pb,rl,Ib,Ab,nl,Nb,Mb,Zo,Bb,Lb,jb,al,Ob,xb,T,Cb,sl,qb,Sb,il,Rb,Db,ll,zb,Fb,dl,Hb,Wb,hl,Gb,Ub,fl,Yb,Jb,ml,Xb,Zb,pl,Kb,Qb,ul,Vb,e_,cl,t_,o_,r_,Ko,n_,yl,a_,s_,i_,me,l_,gl,d_,h_,wl,f_,m_,vl,p_,u_,Sf,kt,c_,bl,y_,g_,Rf,Xe,Tt,_l,Qo,w_,El,v_,Df,Mn,b_,zf,Vo,Ff,$t,__,Bn,E_,k_,Hf,Ln,T_,Wf,jn,kl,$_,Gf,q,P_,er,I_,A_,Tl,N_,M_,$l,B_,L_,tr,j_,O_,Uf,On,Pl,x_,Yf,we,C_,Il,q_,S_,Al,R_,D_,Jf,xn,z_,Xf,Cn,Nl,F_,Zf,or,Kf,rr,Ml,H_,Qf,nr,Vf,ar,Bl,W_,em,sr,tm,ir,Ll,G_,om,lr,rm,Ze,jl,Ol,U_,Y_,xl,Cl,J_,nm,qn,X_,am,dr,sm,Sn,Z_,im,Rn,K_,lm,Dn,Q_,dm,zn,ql,V_,hm,ve,e1,Sl,t1,o1,Rl,r1,n1,fm,Y,a1,Dl,s1,i1,zl,l1,d1,Fl,h1,f1,mm,j
,Hl,m1,p1,Wl,u1,c1,Gl,y1,g1,Ul,w1,v1,Yl,b1,_1,pm,hr,um,be,E1,Jl,k1,T1,Xl,$1,P1,cm,Fn,Zl,I1,ym,J,A1,Kl,N1,M1,Ql,B1,L1,Vl,j1,O1,gm,Pt,Hn,x1,fr,C1,q1,Wn,S1,mr,R1,wm,It,D1,ed,z1,F1,vm,pr,bm,X,H1,td,W1,G1,od,U1,Y1,rd,J1,X1,_m,ur,Em,Gn,Z1,km,cr,Tm,Un,K1,$m,yr,Pm,Yn,Q1,Im,gr,Am,wr,V1,nd,e0,Nm,vr,Mm,_e,t0,ad,o0,r0,sd,n0,a0,Bm,br,Lm,_r,s0,id,i0,jm,Er,Om,Jn,l0,xm,At,d0,ld,h0,f0,Cm,Z,m0,dd,p0,u0,hd,c0,y0,fd,g0,w0,qm,K,v0,md,b0,_0,pd,E0,k0,ud,T0,$0,Sm,kr,Rm,Xn,cd,P0,Dm,Nt,I0,Zn,A0,N0,zm,Tr,Fm,S,M0,yd,B0,L0,gd,j0,O0,wd,x0,C0,vd,q0,S0,Hm,Q,R0,bd,D0,z0,_d,F0,H0,Ed,W0,G0,Wm,V,Ke,U0,kd,Y0,J0,Td,X0,Z0,K0,$d,Q0,V0,Pd,e2,t2,ee,o2,Id,r2,n2,Ad,a2,s2,Nd,i2,l2,$r,d2,Gm,Ee,h2,Md,f2,m2,Bd,p2,u2,Um,Mt,c2,Ld,y2,g2,Ym,Kn,jd,w2,Jm,Bt,v2,Od,b2,_2,Xm,Pr,Zm,Qn,E2,Km,Lt,Vn,k2,xd,T2,$2,Cd,P2,Qm,jt,I2,qd,A2,N2,Vm,Ir,ep,Ot,tp,te,M2,Sd,B2,L2,Rd,j2,O2,Dd,x2,C2,op,xt,Ar,q2,zd,S2,R2,D2,Fd,z2,rp,ea,Hd,F2,np,Ct,H2,Wd,W2,G2,ap,ta,U2,sp,oa,Y2,ip,Nr,lp,qt,J2,Gd,X2,Z2,dp,Mr,hp,St,K2,Ud,Q2,V2,fp,ke,eE,Yd,tE,oE,Jd,rE,nE,mp,ra,Xd,aE,pp,oe,sE,Zd,iE,lE,Kd,dE,hE,Qd,fE,mE,up,na,Vd,pE,cp,re,uE,eh,cE,yE,th,gE,wE,oh,vE,bE,yp,Te,_E,rh,EE,kE,aa,TE,$E,gp,sa,nh,PE,wp,Rt,IE,ah,AE,NE,vp,Br,bp,ia,ME,_p,Lr,Ep,la,BE,kp,da,LE,Tp,ha,jE,$p,fa,sh,OE,Pp,R,xE,ma,CE,qE,ih,SE,RE,lh,DE,zE,dh,FE,HE,Ip,jr,Ap,Dt,WE,hh,GE,UE,Np,pa,fh,YE,Mp,zt,JE,mh,XE,ZE,Bp,ua,ph,KE,Lp,ca,QE,jp,Qe,Ft,uh,Or,VE,ch,e3,Op,ya,t3,xp,ga,yh,o3,Cp;return D=new je({}),ho=new je({}),mo=new je({}),po=new E({props:{code:`model = BrandNewBertModel.from_pretrained("brandy/brand_new_bert") model.config # model has access to its config`,highlighted:`model = BrandNewBertModel.from_pretrained(<span class="hljs-string">&quot;brandy/brand_new_bert&quot;</span>) model.config <span class="hljs-comment"># model has access to its config</span>`}}),uo=new je({}),co=new je({}),To=new je({}),Po=new je({}),Mo=new E({props:{code:`git clone https://github.com/[your Github handle]/transformers.git cd transformers git remote add upstream https://github.com/huggingface/transformers.git`,highlighted:`git <span class="hljs-built_in">clone</span> https://github.com/[your Github handle]/transformers.git <span class="hljs-built_in">cd</span> transformers git remote add upstream https://github.com/huggingface/transformers.git`}}),Lo=new E({props:{code:`python -m venv .env source .env/bin/activate pip install -e ".[dev]"`,highlighted:`python -m venv .<span class="hljs-built_in">env</span> <span class="hljs-built_in">source</span> .<span class="hljs-built_in">env</span>/bin/activate pip install -e <span class="hljs-string">&quot;.[dev]&quot;</span>`}}),jo=new E({props:{code:"cd ..",highlighted:'<span class="hljs-built_in">cd</span> ..'}}),Ro=new E({props:{code:`git clone https://github.com/org_that_created_brand_new_bert_org/brand_new_bert.git cd brand_new_bert pip install -e .`,highlighted:`git <span class="hljs-built_in">clone</span> https://github.com/org_that_created_brand_new_bert_org/brand_new_bert.git <span class="hljs-built_in">cd</span> brand_new_bert pip install -e .`}}),Do=new je({}),Wo=new E({props:{code:`model = BrandNewBertModel.load_pretrained_checkpoint("/path/to/checkpoint/") input_ids = [0, 4, 5, 2, 3, 7, 9] # vector of input ids original_output = model.predict(input_ids)`,highlighted:`model = BrandNewBertModel.load_pretrained_checkpoint(<span class="hljs-string">&quot;/path/to/checkpoint/&quot;</span>) input_ids = [<span class="hljs-number">0</span>, <span class="hljs-number">4</span>, <span class="hljs-number">5</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>, <span 
class="hljs-number">7</span>, <span class="hljs-number">9</span>] <span class="hljs-comment"># vector of input ids</span> original_output = model.predict(input_ids)`}}),Jo=new E({props:{code:`[[ [-0.1465, -0.6501, 0.1993, ..., 0.1451, 0.3430, 0.6024], [-0.4417, -0.5920, 0.3450, ..., -0.3062, 0.6182, 0.7132], [-0.5009, -0.7122, 0.4548, ..., -0.3662, 0.6091, 0.7648], ..., [-0.5613, -0.6332, 0.4324, ..., -0.3792, 0.7372, 0.9288], [-0.5416, -0.6345, 0.4180, ..., -0.3564, 0.6992, 0.9191], [-0.5334, -0.6403, 0.4271, ..., -0.3339, 0.6533, 0.8694]]],`,highlighted:`<span class="hljs-comment">[<span class="hljs-comment">[ <span class="hljs-comment">[-0.1465, -0.6501, 0.1993, ..., 0.1451, 0.3430, 0.6024]</span>, <span class="hljs-comment">[-0.4417, -0.5920, 0.3450, ..., -0.3062, 0.6182, 0.7132]</span>, <span class="hljs-comment">[-0.5009, -0.7122, 0.4548, ..., -0.3662, 0.6091, 0.7648]</span>, ..., <span class="hljs-comment">[-0.5613, -0.6332, 0.4324, ..., -0.3792, 0.7372, 0.9288]</span>, <span class="hljs-comment">[-0.5416, -0.6345, 0.4180, ..., -0.3564, 0.6992, 0.9191]</span>, <span class="hljs-comment">[-0.5334, -0.6403, 0.4271, ..., -0.3339, 0.6533, 0.8694]</span>]</span>]</span>,`}}),Qo=new je({}),Vo=new E({props:{code:"cd transformers",highlighted:'<span class="hljs-built_in">cd</span> transformers'}}),or=new E({props:{code:"git checkout -b add_brand_new_bert",highlighted:"git checkout -b add_brand_new_bert"}}),nr=new E({props:{code:`git add . git commit`,highlighted:`git add . git commit`}}),sr=new E({props:{code:`git fetch upstream git rebase upstream/master`,highlighted:`git fetch upstream git rebase upstream/master`}}),lr=new E({props:{code:"git push -u origin a-descriptive-name-for-my-changes",highlighted:"git push -u origin a-descriptive-name-for-my-changes"}}),dr=new E({props:{code:`git fetch upstream git merge upstream/master`,highlighted:`git fetch upstream git merge upstream/master`}}),hr=new E({props:{code:`from transformers import BrandNewBertModel, BrandNewBertConfig model = BrandNewBertModel(BrandNewBertConfig())`,highlighted:`<span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BrandNewBertModel, BrandNewBertConfig model = BrandNewBertModel(BrandNewBertConfig())`}}),pr=new E({props:{code:`from torch import nn class SimpleModel(nn.Module): def __init__(self): super().__init__() self.dense = nn.Linear(10, 10) self.intermediate = nn.Linear(10, 10) self.layer_norm = nn.LayerNorm(10)`,highlighted:`<span class="hljs-keyword">from</span> torch <span class="hljs-keyword">import</span> nn <span class="hljs-keyword">class</span> <span class="hljs-title class_">SimpleModel</span>(nn.Module): <span class="hljs-keyword">def</span> <span class="hljs-title function_">__init__</span>(<span class="hljs-params">self</span>): <span class="hljs-built_in">super</span>().__init__() self.dense = nn.Linear(<span class="hljs-number">10</span>, <span class="hljs-number">10</span>) self.intermediate = nn.Linear(<span class="hljs-number">10</span>, <span class="hljs-number">10</span>) self.layer_norm = nn.LayerNorm(<span class="hljs-number">10</span>)`}}),ur=new E({props:{code:`model = SimpleModel() print(model)`,highlighted:`model = SimpleModel() <span class="hljs-built_in">print</span>(model)`}}),cr=new E({props:{code:`SimpleModel( (dense): Linear(in_features=10, out_features=10, bias=True) (intermediate): Linear(in_features=10, out_features=10, bias=True) (layer_norm): LayerNorm((10,), eps=1e-05, elementwise_affine=True) )`,highlighted:`SimpleModel( (dense): 
Linear(<span class="hljs-attribute">in_features</span>=10, <span class="hljs-attribute">out_features</span>=10, <span class="hljs-attribute">bias</span>=<span class="hljs-literal">True</span>) (intermediate): Linear(<span class="hljs-attribute">in_features</span>=10, <span class="hljs-attribute">out_features</span>=10, <span class="hljs-attribute">bias</span>=<span class="hljs-literal">True</span>) (layer_norm): LayerNorm((10,), <span class="hljs-attribute">eps</span>=1e-05, <span class="hljs-attribute">elementwise_affine</span>=<span class="hljs-literal">True</span>) )`}}),yr=new E({props:{code:"print(model.dense.weight.data)",highlighted:'<span class="hljs-built_in">print</span>(model.dense.weight.data)'}}),gr=new E({props:{code:`tensor([[-0.0818, 0.2207, -0.0749, -0.0030, 0.0045, -0.1569, -0.1598, 0.0212, -0.2077, 0.2157], [ 0.1044, 0.0201, 0.0990, 0.2482, 0.3116, 0.2509, 0.2866, -0.2190, 0.2166, -0.0212], [-0.2000, 0.1107, -0.1999, -0.3119, 0.1559, 0.0993, 0.1776, -0.1950, -0.1023, -0.0447], [-0.0888, -0.1092, 0.2281, 0.0336, 0.1817, -0.0115, 0.2096, 0.1415, -0.1876, -0.2467], [ 0.2208, -0.2352, -0.1426, -0.2636, -0.2889, -0.2061, -0.2849, -0.0465, 0.2577, 0.0402], [ 0.1502, 0.2465, 0.2566, 0.0693, 0.2352, -0.0530, 0.1859, -0.0604, 0.2132, 0.1680], [ 0.1733, -0.2407, -0.1721, 0.1484, 0.0358, -0.0633, -0.0721, -0.0090, 0.2707, -0.2509], [-0.1173, 0.1561, 0.2945, 0.0595, -0.1996, 0.2988, -0.0802, 0.0407, 0.1829, -0.1568], [-0.1164, -0.2228, -0.0403, 0.0428, 0.1339, 0.0047, 0.1967, 0.2923, 0.0333, -0.0536], [-0.1492, -0.1616, 0.1057, 0.1950, -0.2807, -0.2710, -0.1586, 0.0739, 0.2220, 0.2358]]).`,highlighted:`tensor([[<span class="hljs-string">-0</span>.0818, 0.2207, <span class="hljs-string">-0</span>.0749, <span class="hljs-string">-0</span>.0030, 0.0045, <span class="hljs-string">-0</span>.1569, <span class="hljs-string">-0</span>.1598, 0.0212, <span class="hljs-string">-0</span>.2077, 0.2157], [ 0.1044, 0.0201, 0.0990, 0.2482, 0.3116, 0.2509, 0.2866, <span class="hljs-string">-0</span>.2190, 0.2166, <span class="hljs-string">-0</span>.0212], [<span class="hljs-string">-0</span>.2000, 0.1107, <span class="hljs-string">-0</span>.1999, <span class="hljs-string">-0</span>.3119, 0.1559, 0.0993, 0.1776, <span class="hljs-string">-0</span>.1950, <span class="hljs-string">-0</span>.1023, <span class="hljs-string">-0</span>.0447], [<span class="hljs-string">-0</span>.0888, <span class="hljs-string">-0</span>.1092, 0.2281, 0.0336, 0.1817, <span class="hljs-string">-0</span>.0115, 0.2096, 0.1415, <span class="hljs-string">-0</span>.1876, <span class="hljs-string">-0</span>.2467], [ 0.2208, <span class="hljs-string">-0</span>.2352, <span class="hljs-string">-0</span>.1426, <span class="hljs-string">-0</span>.2636, <span class="hljs-string">-0</span>.2889, <span class="hljs-string">-0</span>.2061, <span class="hljs-string">-0</span>.2849, <span class="hljs-string">-0</span>.0465, 0.2577, 0.0402], [ 0.1502, 0.2465, 0.2566, 0.0693, 0.2352, <span class="hljs-string">-0</span>.0530, 0.1859, <span class="hljs-string">-0</span>.0604, 0.2132, 0.1680], [ 0.1733, <span class="hljs-string">-0</span>.2407, <span class="hljs-string">-0</span>.1721, 0.1484, 0.0358, <span class="hljs-string">-0</span>.0633, <span class="hljs-string">-0</span>.0721, <span class="hljs-string">-0</span>.0090, 0.2707, <span class="hljs-string">-0</span>.2509], [<span class="hljs-string">-0</span>.1173, 0.1561, 0.2945, 0.0595, <span class="hljs-string">-0</span>.1996, 0.2988, <span class="hljs-string">-0</span>.0802, 0.0407, 0.1829, 
<span class="hljs-string">-0</span>.1568], [<span class="hljs-string">-0</span>.1164, <span class="hljs-string">-0</span>.2228, <span class="hljs-string">-0</span>.0403, 0.0428, 0.1339, 0.0047, 0.1967, 0.2923, 0.0333, <span class="hljs-string">-0</span>.0536], [<span class="hljs-string">-0</span>.1492, <span class="hljs-string">-0</span>.1616, 0.1057, 0.1950, <span class="hljs-string">-0</span>.2807, <span class="hljs-string">-0</span>.2710, <span class="hljs-string">-0</span>.1586, 0.0739, 0.2220, 0.2358]]).`}}),vr=new E({props:{code:`# retrieve matching layer weights, e.g. by # recursive algorithm layer_name = "dense" pretrained_weight = array_of_dense_layer model_pointer = getattr(model, "dense") model_pointer.weight.data = torch.from_numpy(pretrained_weight)`,highlighted:`<span class="hljs-comment"># retrieve matching layer weights, e.g. by</span> <span class="hljs-comment"># recursive algorithm</span> layer_name = <span class="hljs-string">&quot;dense&quot;</span> pretrained_weight = array_of_dense_layer model_pointer = <span class="hljs-built_in">getattr</span>(model, <span class="hljs-string">&quot;dense&quot;</span>) model_pointer.weight.data = torch.from_numpy(pretrained_weight)`}}),br=new E({props:{code:`assert ( model_pointer.weight.shape == pretrained_weight.shape ), f"Pointer shape of random weight {model_pointer.shape} and array shape of checkpoint weight {pretrained_weight.shape} mismatched"`,highlighted:`<span class="hljs-keyword">assert</span> ( model_pointer.weight.shape == pretrained_weight.shape ), <span class="hljs-string">f&quot;Pointer shape of random weight <span class="hljs-subst">{model_pointer.shape}</span> and array shape of checkpoint weight <span class="hljs-subst">{pretrained_weight.shape}</span> mismatched&quot;</span>`}}),Er=new E({props:{code:'logger.info(f"Initialize PyTorch weight {layer_name} from {pretrained_weight.name}")',highlighted:'logger.info(<span class="hljs-string">f&quot;Initialize PyTorch weight <span class="hljs-subst">{layer_name}</span> from <span class="hljs-subst">{pretrained_weight.name}</span>&quot;</span>)'}}),kr=new E({props:{code:'model.save_pretrained("/path/to/converted/checkpoint/folder")',highlighted:'model.save_pretrained(<span class="hljs-string">&quot;/path/to/converted/checkpoint/folder&quot;</span>)'}}),Tr=new E({props:{code:`model = BrandNewBertModel.from_pretrained("/path/to/converted/checkpoint/folder") input_ids = [0, 4, 4, 3, 2, 4, 1, 7, 19] output = model(input_ids).last_hidden_states`,highlighted:`model = BrandNewBertModel.from_pretrained(<span class="hljs-string">&quot;/path/to/converted/checkpoint/folder&quot;</span>) input_ids = [<span class="hljs-number">0</span>, <span class="hljs-number">4</span>, <span class="hljs-number">4</span>, <span class="hljs-number">3</span>, <span class="hljs-number">2</span>, <span class="hljs-number">4</span>, <span class="hljs-number">1</span>, <span class="hljs-number">7</span>, <span class="hljs-number">19</span>] output = model(input_ids).last_hidden_states`}}),Pr=new E({props:{code:"pytest tests/test_modeling_brand_new_bert.py",highlighted:"pytest tests/test_modeling_brand_new_bert.py"}}),Ir=new E({props:{code:"RUN_SLOW=1 pytest -sv tests/test_modeling_brand_new_bert.py::BrandNewBertModelIntegrationTests",highlighted:"RUN_SLOW=1 pytest -sv tests/test_modeling_brand_new_bert.py::BrandNewBertModelIntegrationTests"}}),Ot=new p9({props:{$$slots:{default:[u9]},$$scope:{ctx:Ca}}}),Nr=new E({props:{code:`input_str = "This is a long example input string containing special characters 
.$?-, numbers 2872 234 12 and words." model = BrandNewBertModel.load_pretrained_checkpoint("/path/to/checkpoint/") input_ids = model.tokenize(input_str)`,highlighted:`input_str = <span class="hljs-string">&quot;This is a long example input string containing special characters .$?-, numbers 2872 234 12 and words.&quot;</span> model = BrandNewBertModel.load_pretrained_checkpoint(<span class="hljs-string">&quot;/path/to/checkpoint/&quot;</span>) input_ids = model.tokenize(input_str)`}}),Mr=new E({props:{code:`from transformers import BrandNewBertTokenizer input_str = "This is a long example input string containing special characters .$?-, numbers 2872 234 12 and words." tokenizer = BrandNewBertTokenizer.from_pretrained("/path/to/tokenizer/folder/") input_ids = tokenizer(input_str).input_ids`,highlighted:`<span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BrandNewBertTokenizer input_str = <span class="hljs-string">&quot;This is a long example input string containing special characters .$?-, numbers 2872 234 12 and words.&quot;</span> tokenizer = BrandNewBertTokenizer.from_pretrained(<span class="hljs-string">&quot;/path/to/tokenizer/folder/&quot;</span>) input_ids = tokenizer(input_str).input_ids`}}),Br=new E({props:{code:"make style",highlighted:"make style"}}),Lr=new E({props:{code:"make quality",highlighted:"make quality"}}),jr=new E({props:{code:`brand_new_bert.push_to_hub( repo_path_or_name="brand_new_bert", # Uncomment the following line to push to an organization # organization="<ORGANIZATION>", commit_message="Add model", use_temp_dir=True, )`,highlighted:`brand_new_bert.push_to_hub( repo_path_or_name=<span class="hljs-string">&quot;brand_new_bert&quot;</span>, <span class="hljs-comment"># Uncomment the following line to push to an organization</span> <span class="hljs-comment"># organization=&quot;&lt;ORGANIZATION&gt;&quot;,</span> commit_message=<span class="hljs-string">&quot;Add model&quot;</span>, use_temp_dir=<span class="hljs-literal">True</span>, )`}}),Or=new je({}),{c(){P=a("meta"),ue=h(),I=a("h1"),L=a("a"),Oe=a("span"),p(D.$$.fragment),ao=h(),le=a("span"),de=r("How to add a model to \u{1F917} Transformers?"),so=h(),z=a("p"),xu=r(`Adding a new model is often difficult and requires an in-depth knowledge of the \u{1F917} Transformers library and ideally also of the model\u2019s original repository. At Hugging Face, we are trying to empower the community more and more to add models independently. Thus, for some new models that the community wants to be added to \u{1F917} Transformers, we create a customized `),qa=a("em"),Cu=r("call-for-model-addition"),qu=r(` that explains step-by-step how to add the requested model. With this `),Sa=a("em"),Su=r("call-for-model-addition"),Ru=r(`, we want to teach a motivated and experienced contributor of the community how to port a model to \u{1F917} Transformers.`),wh=h(),et=a("p"),Du=r(`If this sounds like something you would be interested in, feel free to check out the currently open \u201Ccalls-for-model-addition\u201D `),io=a("a"),zu=r("here"),Fu=r(` and to contact us.`),vh=h(),Cr=a("p"),Hu=r(`If selected, you will then work closely with one member of the Hugging Face team to integrate the model into \u{1F917} Transformers. By doing so, you will both gain a theoretical and deep practical understanding of the proposed model. But more importantly, you will have made a major open-source contribution to \u{1F917} Transformers. 
Along the way, you will:`),bh=h(),W=a("ul"),Ra=a("li"),Wu=r("get insights into open-source best practices"),Gu=h(),Da=a("li"),Uu=r("understand the design principles of one of the most popular NLP libraries"),Yu=h(),za=a("li"),Ju=r("learn how to do efficiently test large NLP models"),Xu=h(),he=a("li"),Zu=r("learn how to integrate Python utilities like "),Fa=a("code"),Ku=r("black"),Qu=r(", "),Ha=a("code"),Vu=r("isort"),ec=r(", "),Wa=a("code"),tc=r("make fix-copies"),oc=r(` into a library to always ensure clean and readable code`),_h=h(),tt=a("p"),rc=r(`We are also more than happy if you want to add a model that cannot be found in the \u201Ccalls-for-model-addition\u201D folder. The following sections explain in detail how to add a new model. It might also be very helpful to check out already added models to see if those resemble the model you would like to add `),lo=a("a"),nc=r("here"),ac=r("."),Eh=h(),qr=a("p"),sc=r("To start, let\u2019s try to get a general overview of the Transformers library."),kh=h(),xe=a("h2"),ot=a("a"),Ga=a("span"),p(ho.$$.fragment),ic=h(),Ua=a("span"),lc=r("General overview of \u{1F917} Transformers"),Th=h(),Sr=a("p"),dc=r(`First, you should get a general overview of \u{1F917} Transformers. \u{1F917} Transformers is a very opinionated library, so there is a chance that you don\u2019t agree with some of the library\u2019s philosophies or design choices. From our experience, however, we found that the fundamental design choices and philosophies of the library are crucial to efficiently scale \u{1F917} Transformers while keeping maintenance costs at a reasonable level.`),$h=h(),rt=a("p"),hc=r("A good first starting point to better understand the library is to read the "),Rr=a("a"),fc=r("documentation of our philosophy"),mc=r(". As a result of our way of working, there are some choices that we try to apply to all models:"),Ph=h(),ce=a("ul"),Ya=a("li"),pc=r("Composition is generally favored over-abstraction"),uc=h(),Ja=a("li"),cc=r("Duplicating code is not always bad if it strongly improves the readability or accessibility of a model"),yc=h(),fo=a("li"),gc=r(`Model files are as self-contained as possible so that when you read the code of a specific model, you ideally only have to look into the respective `),Xa=a("code"),wc=r("modeling_....py"),vc=r(" file."),Ih=h(),nt=a("p"),bc=r("In our opinion, the library\u2019s code is not just a means to provide a product, "),Za=a("em"),_c=r("e.g."),Ec=r(` the ability to use BERT for inference, but also as the very product that we want to improve. Hence, when adding a model, the user is not only the person that will use your model, but also everybody that will read, try to understand, and possibly tweak your code.`),Ah=h(),Dr=a("p"),kc=r("With this in mind, let\u2019s go a bit deeper into the general library design."),Nh=h(),Ce=a("h3"),at=a("a"),Ka=a("span"),p(mo.$$.fragment),Tc=h(),Qa=a("span"),$c=r("Overview of models"),Mh=h(),G=a("p"),Pc=r(`To successfully add a model, it is important to understand the interaction between your model and its config, `),zr=a("a"),Ic=r("PreTrainedModel"),Ac=r(", and "),Fr=a("a"),Nc=r("PretrainedConfig"),Mc=r(`. For exemplary purposes, we will call the model to be added to \u{1F917} Transformers `),Va=a("code"),Bc=r("BrandNewBert"),Lc=r("."),Bh=h(),Hr=a("p"),jc=r("Let\u2019s take a look:"),Lh=h(),Wr=a("img"),jh=h(),v=a("p"),Oc=r(`As you can see, we do make use of inheritance in \u{1F917} Transformers, but we keep the level of abstraction to an absolute minimum. 
There are never more than two levels of abstraction for any model in the library. `),es=a("code"),xc=r("BrandNewBertModel"),Cc=r(` inherits from `),ts=a("code"),qc=r("BrandNewBertPreTrainedModel"),Sc=r(" which in turn inherits from "),Gr=a("a"),Rc=r("PreTrainedModel"),Dc=r(` and that\u2019s it. As a general rule, we want to make sure that a new model only depends on `),Ur=a("a"),zc=r("PreTrainedModel"),Fc=r(`. The important functionalities that are automatically provided to every new model are `),Yr=a("a"),Hc=r("from_pretrained()"),Wc=r(` and `),Jr=a("a"),Gc=r("save_pretrained()"),Uc=r(`, which are used for serialization and deserialization. All of the other important functionalities, such as `),os=a("code"),Yc=r("BrandNewBertModel.forward"),Jc=r(` should be completely defined in the new `),rs=a("code"),Xc=r("modeling_brand_new_bert.py"),Zc=r(` script. Next, we want to make sure that a model with a specific head layer, such as `),ns=a("code"),Kc=r("BrandNewBertForMaskedLM"),Qc=r(" does not inherit from "),as=a("code"),Vc=r("BrandNewBertModel"),ey=r(", but rather uses "),ss=a("code"),ty=r("BrandNewBertModel"),oy=r(` as a component that can be called in its forward pass to keep the level of abstraction low. Every new model requires a configuration class, called `),is=a("code"),ry=r("BrandNewBertConfig"),ny=r(`. This configuration is always stored as an attribute in `),Xr=a("a"),ay=r("PreTrainedModel"),sy=r(", and thus can be accessed via the "),ls=a("code"),iy=r("config"),ly=r(` attribute for all classes inheriting from `),ds=a("code"),dy=r("BrandNewBertPreTrainedModel"),hy=r(":"),Oh=h(),p(po.$$.fragment),xh=h(),A=a("p"),fy=r(`Similar to the model, the configuration inherits basic serialization and deserialization functionalities from `),Zr=a("a"),my=r("PretrainedConfig"),py=r(`. Note that the configuration and the model are always serialized into two different formats - the model to a `),hs=a("em"),uy=r("pytorch_model.bin"),cy=r(" file and the configuration to a "),fs=a("em"),yy=r("config.json"),gy=r(` file. Calling `),Kr=a("a"),wy=r("save_pretrained()"),vy=r(` will automatically call `),Qr=a("a"),by=r("save_pretrained()"),_y=r(", so that both model and configuration are saved."),Ch=h(),qe=a("h3"),st=a("a"),ms=a("span"),p(uo.$$.fragment),Ey=h(),ps=a("span"),ky=r("Overview of tokenizers"),qh=h(),Vr=a("p"),Ty=r("Not quite ready yet :-( This section will be added soon!"),Sh=h(),Se=a("h2"),it=a("a"),us=a("span"),p(co.$$.fragment),$y=h(),cs=a("span"),Py=r("Step-by-step recipe to add a model to \u{1F917} Transformers"),Rh=h(),en=a("p"),Iy=r(`Everyone has different preferences of how to port a model so it can be very helpful for you to take a look at summaries of how other contributors ported models to Hugging Face. Here is a list of community blog posts on how to port a model:`),Dh=h(),lt=a("ol"),yo=a("li"),go=a("a"),Ay=r("Porting GPT2 Model"),Ny=r(" by "),wo=a("a"),My=r("Thomas"),By=h(),vo=a("li"),bo=a("a"),Ly=r("Porting WMT19 MT Model"),jy=r(" by "),_o=a("a"),Oy=r("Stas"),zh=h(),tn=a("p"),xy=r("From experience, we can tell you that the most important things to keep in mind when adding a model are:"),Fh=h(),ye=a("ul"),fe=a("li"),Cy=r(`Don\u2019t reinvent the wheel! Most parts of the code you will add for the new \u{1F917} Transformers model already exist somewhere in \u{1F917} Transformers. Take some time to find similar, already existing models and tokenizers you can copy from. `),Eo=a("a"),qy=r("grep"),Sy=r(" and "),ko=a("a"),Ry=r("rg"),Dy=r(` are your friends. 
Note that it might very well happen that your model\u2019s tokenizer is based on one model implementation, and your model\u2019s modeling code on another one. `),ys=a("em"),zy=r("E.g."),Fy=r(` FSMT\u2019s modeling code is based on BART, while FSMT\u2019s tokenizer code is based on XLM.`),Hy=h(),gs=a("li"),Wy=r(`It\u2019s more of an engineering challenge than a scientific challenge. You should spend more time on creating an efficient debugging environment than trying to understand all theoretical aspects of the model in the paper.`),Gy=h(),ws=a("li"),Uy=r(`Ask for help, when you\u2019re stuck! Models are the core component of \u{1F917} Transformers so that we at Hugging Face are more than happy to help you at every step to add your model. Don\u2019t hesitate to ask if you notice you are not making progress.`),Hh=h(),on=a("p"),Yy=r("In the following, we try to give you a general recipe that we found most useful when porting a model to \u{1F917} Transformers."),Wh=h(),rn=a("p"),Jy=r(`The following list is a summary of everything that has to be done to add a model and can be used by you as a To-Do List:`),Gh=h(),b=a("ul"),vs=a("li"),bs=a("ol"),_s=a("li"),Xy=r("\u2610 (Optional) Understood theoretical aspects"),Zy=h(),Es=a("li"),nn=a("ol"),ks=a("li"),Ky=r("\u2610 Prepared transformers dev environment"),Qy=h(),Ts=a("li"),an=a("ol"),$s=a("li"),Vy=r("\u2610 Set up debugging environment of the original repository"),eg=h(),Ps=a("li"),sn=a("ol"),Is=a("li"),tg=r("\u2610 Created script that successfully runs forward pass using original repository and checkpoint"),og=h(),As=a("li"),ln=a("ol"),Ns=a("li"),rg=r("\u2610 Successfully added the model skeleton to Transformers"),ng=h(),Ms=a("li"),dn=a("ol"),Bs=a("li"),ag=r("\u2610 Successfully converted original checkpoint to Transformers checkpoint"),sg=h(),Ls=a("li"),hn=a("ol"),js=a("li"),ig=r("\u2610 Successfully ran forward pass in Transformers that gives identical output to original checkpoint"),lg=h(),Os=a("li"),fn=a("ol"),xs=a("li"),dg=r("\u2610 Finished model tests in Transformers"),hg=h(),Cs=a("li"),mn=a("ol"),qs=a("li"),fg=r("\u2610 Successfully added Tokenizer in Transformers"),mg=h(),Ss=a("li"),pn=a("ol"),Rs=a("li"),pg=r("\u2610 Run end-to-end integration tests"),ug=h(),Ds=a("li"),un=a("ol"),zs=a("li"),cg=r("\u2610 Finished docs"),yg=h(),Fs=a("li"),cn=a("ol"),Hs=a("li"),gg=r("\u2610 Uploaded model weights to the hub"),wg=h(),Ws=a("li"),yn=a("ol"),Gs=a("li"),vg=r("\u2610 Submitted the pull request"),bg=h(),Us=a("li"),gn=a("ol"),Ys=a("li"),_g=r("\u2610 (Optional) Added a demo notebook"),Uh=h(),O=a("p"),Eg=r("To begin with, we usually recommend to start by getting a good theoretical understanding of "),Js=a("code"),kg=r("BrandNewBert"),Tg=r(`. However, if you prefer to understand the theoretical aspects of the model `),Xs=a("em"),$g=r("on-the-job"),Pg=r(`, then it is totally fine to directly dive into the `),Zs=a("code"),Ig=r("BrandNewBert"),Ag=r(`\u2019s code-base. This option might suit you better, if your engineering skills are better than your theoretical skill, if you have trouble understanding `),Ks=a("code"),Ng=r("BrandNewBert"),Mg=r(`\u2019s paper, or if you just enjoy programming much more than reading scientific papers.`),Yh=h(),Re=a("h3"),dt=a("a"),Qs=a("span"),p(To.$$.fragment),Bg=h(),Vs=a("span"),Lg=r("1. (Optional) Theoretical aspects of BrandNewBert"),Jh=h(),ht=a("p"),jg=r("You should take some time to read "),ei=a("em"),Og=r("BrandNewBert\u2019s"),xg=r(` paper, if such descriptive work exists. 
There might be large sections of the paper that are difficult to understand. If this is the case, this is fine - don\u2019t worry! The goal is not to get a deep theoretical understanding of the paper, but to extract the necessary information required to effectively re-implement the model in \u{1F917} Transformers. That being said, you don\u2019t have to spend too much time on the theoretical aspects, but rather focus on the practical ones, namely:`),Xh=h(),x=a("ul"),De=a("li"),Cg=r("What type of model is "),ti=a("em"),qg=r("brand_new_bert"),Sg=r(`? BERT-like encoder-only model? GPT2-like decoder-only model? BART-like encoder-decoder model? Look at the `),wn=a("a"),Rg=r("model_summary"),Dg=r(" if you\u2019re not familiar with the differences between those."),zg=h(),ze=a("li"),Fg=r("What are the applications of "),oi=a("em"),Hg=r("brand_new_bert"),Wg=r("? Text classification? Text generation? Seq2Seq tasks, "),ri=a("em"),Gg=r("e.g.,"),Ug=r(` summarization?`),Yg=h(),ni=a("li"),Jg=r("What is the novel feature of the model making it different from BERT/GPT-2/BART?"),Xg=h(),Fe=a("li"),Zg=r("Which of the already existing "),$o=a("a"),Kg=r("\u{1F917} Transformers models"),Qg=r(` is most similar to `),ai=a("em"),Vg=r("brand_new_bert"),ew=r("?"),tw=h(),si=a("li"),ow=r(`What type of tokenizer is used? A sentencepiece tokenizer? Word piece tokenizer? Is it the same tokenizer as used for BERT or BART?`),Zh=h(),vn=a("p"),rw=r(`After you feel like you have gotten a good overview of the architecture of the model, you might want to write to the Hugging Face team with any questions you might have. This might include questions regarding the model\u2019s architecture, its attention layer, etc. We will be more than happy to help you.`),Kh=h(),He=a("h3"),ft=a("a"),ii=a("span"),p(Po.$$.fragment),nw=h(),li=a("span"),aw=r("2. Next prepare your environment"),Qh=h(),mt=a("ol"),di=a("li"),Io=a("p"),sw=r("Fork the "),Ao=a("a"),iw=r("repository"),lw=r(` by clicking on the \u2018Fork\u2019 button on the repository\u2019s page. This creates a copy of the code under your GitHub user account.`),dw=h(),hi=a("li"),No=a("p"),hw=r("Clone your "),fi=a("code"),fw=r("transformers"),mw=r(" fork to your local disk, and add the base repository as a remote:"),Vh=h(),p(Mo.$$.fragment),ef=h(),Bo=a("ol"),mi=a("li"),pw=r("Set up a development environment, for instance by running the following command:"),tf=h(),p(Lo.$$.fragment),of=h(),bn=a("p"),uw=r("and return to the parent directory"),rf=h(),p(jo.$$.fragment),nf=h(),Oo=a("ol"),We=a("li"),cw=r("We recommend adding the PyTorch version of "),pi=a("em"),yw=r("brand_new_bert"),gw=r(` to Transformers. To install PyTorch, please follow the instructions on `),xo=a("a"),ww=r("https://pytorch.org/get-started/locally/"),vw=r("."),af=h(),Co=a("p"),ui=a("strong"),bw=r("Note:"),_w=r(" You don\u2019t need to have CUDA installed. Making the new model work on CPU is sufficient."),sf=h(),qo=a("ol"),So=a("li"),Ew=r("To port "),ci=a("em"),kw=r("brand_new_bert"),Tw=r(", you will also need access to its original repository:"),lf=h(),p(Ro.$$.fragment),df=h(),pt=a("p"),$w=r("Now you have set up a development environment to port "),yi=a("em"),Pw=r("brand_new_bert"),Iw=r(" to \u{1F917} Transformers."),hf=h(),Ge=a("h3"),ut=a("a"),gi=a("span"),p(Do.$$.fragment),Aw=h(),wi=a("span"),Nw=r("3.-4. Run a pretrained checkpoint using the original repository"),ff=h(),N=a("p"),Mw=r("At first, you will work on the original "),vi=a("em"),Bw=r("brand_new_bert"),Lw=r(` repository. 
Often, the original implementation is very \u201Cresearchy\u201D. Meaning that documentation might be lacking and the code can be difficult to understand. But this should be exactly your motivation to reimplement `),bi=a("em"),jw=r("brand_new_bert"),Ow=r(". At Hugging Face, one of our main goals is to "),_i=a("em"),xw=r(`make people stand on the shoulders of giants`),Cw=r(` which translates here very well into taking a working model and rewriting it to make it as `),Ei=a("strong"),qw=r("accessible, user-friendly, and beautiful"),Sw=r(` as possible. This is the number-one motivation to re-implement models into \u{1F917} Transformers - trying to make complex new NLP technology accessible to `),ki=a("strong"),Rw=r("everybody"),Dw=r("."),mf=h(),_n=a("p"),zw=r("You should start thereby by diving into the original repository."),pf=h(),ct=a("p"),Fw=r("Successfully running the official pretrained model in the original repository is often "),Ti=a("strong"),Hw=r("the most difficult"),Ww=r(` step. From our experience, it is very important to spend some time getting familiar with the original code-base. You need to figure out the following:`),uf=h(),M=a("ul"),$i=a("li"),Gw=r("Where to find the pretrained weights?"),Uw=h(),Pi=a("li"),Yw=r("How to load the pretrained weights into the corresponding model?"),Jw=h(),Ii=a("li"),Xw=r("How to run the tokenizer independently from the model?"),Zw=h(),Ai=a("li"),Kw=r(`Trace one forward pass so that you know which classes and functions are required for a simple forward pass. Usually, you only have to reimplement those functions.`),Qw=h(),F=a("li"),Vw=r(`Be able to locate the important components of the model: Where is the model\u2019s class? Are there model sub-classes, `),Ni=a("em"),ev=r("e.g."),tv=r(` EncoderModel, DecoderModel? Where is the self-attention layer? Are there multiple different attention layers, `),Mi=a("em"),ov=r("e.g."),rv=h(),Bi=a("em"),nv=r("self-attention"),av=r(", "),Li=a("em"),sv=r("cross-attention"),iv=r("\u2026?"),lv=h(),Ue=a("li"),dv=r("How can you debug the model in the original environment of the repo? Do you have to add "),ji=a("em"),hv=r("print"),fv=r(` statements, can you work with an interactive debugger like `),Oi=a("em"),mv=r("ipdb"),pv=r(", or should you use an efficient IDE to debug the model, like PyCharm?"),cf=h(),yt=a("p"),uv=r("It is very important that before you start the porting process, that you can "),xi=a("strong"),cv=r("efficiently"),yv=r(` debug code in the original repository! Also, remember that you are working with an open-source library, so do not hesitate to open an issue, or even a pull request in the original repository. The maintainers of this repository are most likely very happy about someone looking into their code!`),yf=h(),En=a("p"),gv=r(`At this point, it is really up to you which debugging environment and strategy you prefer to use to debug the original model. We strongly advise against setting up a costly GPU environment, but simply work on a CPU both when starting to dive into the original repository and also when starting to write the \u{1F917} Transformers implementation of the model. 
Only at the very end, when the model has already been successfully ported to \u{1F917} Transformers, one should verify that the model also works as expected on GPU.`),gf=h(),kn=a("p"),wv=r("In general, there are two possible debugging environments for running the original model"),wf=h(),gt=a("ul"),zo=a("li"),Fo=a("a"),vv=r("Jupyter notebooks"),bv=r(" / "),Ho=a("a"),_v=r("google colab"),Ev=h(),Ci=a("li"),kv=r("Local python scripts."),vf=h(),Tn=a("p"),Tv=r(`Jupyter notebooks have the advantage that they allow for cell-by-cell execution which can be helpful to better split logical components from one another and to have faster debugging cycles as intermediate results can be stored. Also, notebooks are often easier to share with other contributors, which might be very helpful if you want to ask the Hugging Face team for help. If you are familiar with Jupiter notebooks, we strongly recommend you to work with them.`),bf=h(),wt=a("p"),$v=r(`The obvious disadvantage of Jupyter notebooks is that if you are not used to working with them you will have to spend some time adjusting to the new programming environment and that you might not be able to use your known debugging tools anymore, like `),qi=a("code"),Pv=r("ipdb"),Iv=r("."),_f=h(),vt=a("p"),Av=r("For each code-base, a good first step is always to load a "),Si=a("strong"),Nv=r("small"),Mv=r(` pretrained checkpoint and to be able to reproduce a single forward pass using a dummy integer vector of input IDs as an input. Such a script could look like this (in pseudocode):`),Ef=h(),p(Wo.$$.fragment),kf=h(),$n=a("p"),Bv=r("Next, regarding the debugging strategy, there are generally a few from which to choose from:"),Tf=h(),bt=a("ul"),Ri=a("li"),Lv=r(`Decompose the original model into many small testable components and run a forward pass on each of those for verification`),jv=h(),Ye=a("li"),Ov=r("Decompose the original model only into the original "),Di=a("em"),xv=r("tokenizer"),Cv=r(" and the original "),zi=a("em"),qv=r("model"),Sv=r(`, run a forward pass on those, and use intermediate print statements or breakpoints for verification`),$f=h(),Pn=a("p"),Rv=r(`Again, it is up to you which strategy to choose. Often, one or the other is advantageous depending on the original code base.`),Pf=h(),_t=a("p"),Dv=r("If the original code-base allows you to decompose the model into smaller sub-components, "),Fi=a("em"),zv=r("e.g."),Fv=r(` if the original code-base can easily be run in eager mode, it is usually worth the effort to do so. 
There are some important advantages to taking the more difficult road in the beginning:`),If=h(),U=a("ul"),Hi=a("li"),Hv=r(`at a later stage when comparing the original model to the Hugging Face implementation, you can verify automatically for each component individually that the corresponding component of the \u{1F917} Transformers implementation matches instead of relying on visual comparison via print statements`),Wv=h(),Wi=a("li"),Gv=r(`it can give you some rope to decompose the big problem of porting a model into smaller problems of just porting individual components and thus structure your work better`),Uv=h(),Gi=a("li"),Yv=r(`separating the model into logical meaningful components will help you to get a better overview of the model\u2019s design and thus to better understand the model`),Jv=h(),Ui=a("li"),Xv=r(`at a later stage those component-by-component tests help you to ensure that no regression occurs as you continue changing your code`),Af=h(),Go=a("p"),Uo=a("a"),Zv=r("Lysandre\u2019s"),Kv=r(` integration checks for ELECTRA gives a nice example of how this can be done.`),Nf=h(),Et=a("p"),Qv=r(`However, if the original code-base is very complex or only allows intermediate components to be run in a compiled mode, it might be too time-consuming or even impossible to separate the model into smaller testable sub-components. A good example is `),Yo=a("a"),Vv=r("T5\u2019s MeshTensorFlow"),eb=r(` library which is very complex and does not offer a simple way to decompose the model into its sub-components. For such libraries, one often relies on verifying print statements.`),Mf=h(),In=a("p"),tb=r(`No matter which strategy you choose, the recommended procedure is often the same in that you should start to debug the starting layers first and the ending layers last.`),Bf=h(),An=a("p"),ob=r(`It is recommended that you retrieve the output, either by print statements or sub-component functions, of the following layers in the following order:`),Lf=h(),B=a("ol"),Yi=a("li"),rb=r("Retrieve the input IDs passed to the model"),nb=h(),Ji=a("li"),ab=r("Retrieve the word embeddings"),sb=h(),Xi=a("li"),ib=r("Retrieve the input of the first Transformer layer"),lb=h(),Zi=a("li"),db=r("Retrieve the output of the first Transformer layer"),hb=h(),Ki=a("li"),fb=r("Retrieve the output of the following n - 1 Transformer layers"),mb=h(),Qi=a("li"),pb=r("Retrieve the output of the whole BrandNewBert Model"),jf=h(),Je=a("p"),ub=r("Input IDs should thereby consists of an array of integers, "),Vi=a("em"),cb=r("e.g."),yb=h(),el=a("code"),gb=r("input_ids = [0, 4, 4, 3, 2, 4, 1, 7, 19]"),Of=h(),Nn=a("p"),wb=r("The outputs of the following layers often consist of multi-dimensional float arrays and can look like this:"),xf=h(),p(Jo.$$.fragment),Cf=h(),ge=a("p"),vb=r(`We expect that every model added to \u{1F917} Transformers passes a couple of integration tests, meaning that the original model and the reimplemented version in \u{1F917} Transformers have to give the exact same output up to a precision of 0.001! Since it is normal that the exact same model written in different libraries can give a slightly different output depending on the library framework, we accept an error tolerance of 1e-3 (0.001). It is not enough if the model gives nearly the same output, they have to be the almost identical. 
Therefore, you will certainly compare the intermediate outputs of the \u{1F917} Transformers version multiple times against the intermediate outputs of the original implementation of `),tl=a("em"),bb=r("brand_new_bert"),_b=r(" in which case an "),ol=a("strong"),Eb=r("efficient"),kb=r(` debugging environment of the original repository is absolutely important. Here is some advice is to make your debugging environment as efficient as possible.`),qf=h(),C=a("ul"),H=a("li"),Tb=r(`Find the best way of debugging intermediate results. Is the original repository written in PyTorch? Then you should probably take the time to write a longer script that decomposes the original model into smaller sub-components to retrieve intermediate values. Is the original repository written in Tensorflow 1? Then you might have to rely on TensorFlow print operations like `),Xo=a("a"),$b=r("tf.print"),Pb=r(` to output intermediate values. Is the original repository written in Jax? Then make sure that the model is `),rl=a("strong"),Ib=r("not jitted"),Ab=r(` when running the forward pass, `),nl=a("em"),Nb=r("e.g."),Mb=r(" check-out "),Zo=a("a"),Bb=r("this link"),Lb=r("."),jb=h(),al=a("li"),Ob=r(`Use the smallest pretrained checkpoint you can find. The smaller the checkpoint, the faster your debug cycle becomes. It is not efficient if your pretrained model is so big that your forward pass takes more than 10 seconds. In case only very large checkpoints are available, it might make more sense to create a dummy model in the new environment with randomly initialized weights and save those weights for comparison with the \u{1F917} Transformers version of your model`),xb=h(),T=a("li"),Cb=r(`Make sure you are using the easiest way of calling a forward pass in the original repository. Ideally, you want to find the function in the original repository that `),sl=a("strong"),qb=r("only"),Sb=r(" calls a single forward pass, "),il=a("em"),Rb=r("i.e."),Db=r(` that is often called `),ll=a("code"),zb=r("predict"),Fb=r(", "),dl=a("code"),Hb=r("evaluate"),Wb=r(", "),hl=a("code"),Gb=r("forward"),Ub=r(" or "),fl=a("code"),Yb=r("__call__"),Jb=r(". You don\u2019t want to debug a function that calls "),ml=a("code"),Xb=r("forward"),Zb=r(` multiple times, `),pl=a("em"),Kb=r("e.g."),Qb=r(" to generate text, like "),ul=a("code"),Vb=r("autoregressive_sample"),e_=r(", "),cl=a("code"),t_=r("generate"),o_=r("."),r_=h(),Ko=a("li"),n_=r("Try to separate the tokenization from the model\u2019s "),yl=a("em"),a_=r("forward"),s_=r(` pass. If the original repository shows examples where you have to input a string, then try to find out where in the forward call the string input is changed to input ids and start from this point. This might mean that you have to possibly write a small script yourself or change the original code so that you can directly input the ids instead of an input string.`),i_=h(),me=a("li"),l_=r("Make sure that the model in your debugging setup is "),gl=a("strong"),d_=r("not"),h_=r(` in training mode, which often causes the model to yield random outputs due to multiple dropout layers in the model. Make sure that the forward pass in your debugging environment is `),wl=a("strong"),f_=r("deterministic"),m_=r(" so that the dropout layers are not used. 
Or use "),vl=a("em"),p_=r("transformers.file_utils.set_seed"),u_=r(` if the old and new implementations are in the same framework.`),Sf=h(),kt=a("p"),c_=r("The following section gives you more specific details/tips on how you can do this for "),bl=a("em"),y_=r("brand_new_bert"),g_=r("."),Rf=h(),Xe=a("h3"),Tt=a("a"),_l=a("span"),p(Qo.$$.fragment),w_=h(),El=a("span"),v_=r("5.-14. Port BrandNewBert to \u{1F917} Transformers"),Df=h(),Mn=a("p"),b_=r("Next, you can finally start adding new code to \u{1F917} Transformers. Go into the clone of your \u{1F917} Transformers\u2019 fork:"),zf=h(),p(Vo.$$.fragment),Ff=h(),$t=a("p"),__=r(`In the special case that you are adding a model whose architecture exactly matches the model architecture of an existing model you only have to add a conversion script as described in `),Bn=a("a"),E_=r("this section"),k_=r(`. In this case, you can just re-use the whole model architecture of the already existing model.`),Hf=h(),Ln=a("p"),T_=r("Otherwise, let\u2019s start generating a new model with the amazing Cookiecutter!"),Wf=h(),jn=a("p"),kl=a("strong"),$_=r("Use the Cookiecutter to automatically generate the model\u2019s code"),Gf=h(),q=a("p"),P_=r("To begin with head over to the "),er=a("a"),I_=r("\u{1F917} Transformers templates"),A_=r(` to make use of our `),Tl=a("code"),N_=r("cookiecutter"),M_=r(` implementation to automatically generate all the relevant files for your model. Again, we recommend only adding the PyTorch version of the model at first. Make sure you follow the instructions of the `),$l=a("code"),B_=r("README.md"),L_=r(` on the `),tr=a("a"),j_=r("\u{1F917} Transformers templates"),O_=r(` carefully.`),Uf=h(),On=a("p"),Pl=a("strong"),x_=r("Open a Pull Request on the main huggingface/transformers repo"),Yf=h(),we=a("p"),C_=r(`Before starting to adapt the automatically generated code, now is the time to open a \u201CWork in progress (WIP)\u201D pull request, `),Il=a("em"),q_=r("e.g."),S_=r(" \u201C[WIP] Add "),Al=a("em"),R_=r("brand_new_bert"),D_=r(`\u201D, in \u{1F917} Transformers so that you and the Hugging Face team can work side-by-side on integrating the model into \u{1F917} Transformers.`),Jf=h(),xn=a("p"),z_=r("You should do the following:"),Xf=h(),Cn=a("ol"),Nl=a("li"),F_=r("Create a branch with a descriptive name from your master branch"),Zf=h(),p(or.$$.fragment),Kf=h(),rr=a("ol"),Ml=a("li"),H_=r("Commit the automatically generated code:"),Qf=h(),p(nr.$$.fragment),Vf=h(),ar=a("ol"),Bl=a("li"),W_=r("Fetch and rebase to current master"),em=h(),p(sr.$$.fragment),tm=h(),ir=a("ol"),Ll=a("li"),G_=r("Push the changes to your account using:"),om=h(),p(lr.$$.fragment),rm=h(),Ze=a("ol"),jl=a("li"),Ol=a("p"),U_=r(`Once you are satisfied, go to the webpage of your fork on GitHub. Click on \u201CPull request\u201D. Make sure to add the GitHub handle of some members of the Hugging Face team as reviewers, so that the Hugging Face team gets notified for future changes.`),Y_=h(),xl=a("li"),Cl=a("p"),J_=r("Change the PR into a draft by clicking on \u201CConvert to draft\u201D on the right of the GitHub pull request web page."),nm=h(),qn=a("p"),X_=r(`In the following, whenever you have done some progress, don\u2019t forget to commit your work and push it to your account so that it shows in the pull request. 
Additionally, you should make sure to update your work with the current master from time to time by doing:`),am=h(),p(dr.$$.fragment),sm=h(),Sn=a("p"),Z_=r(`In general, all questions you might have regarding the model or your implementation should be asked in your PR and discussed/solved in the PR. This way, the Hugging Face team will always be notified when you are committing new code or if you have a question. It is often very helpful to point the Hugging Face team to your added code so that the Hugging Face team can efficiently understand your problem or question.`),im=h(),Rn=a("p"),K_=r(`To do so, you can go to the \u201CFiles changed\u201D tab where you see all of your changes, go to a line regarding which you want to ask a question, and click on the \u201C+\u201D symbol to add a comment. Whenever a question or problem has been solved, you can click on the \u201CResolve\u201D button of the created comment.`),lm=h(),Dn=a("p"),Q_=r(`In the same way, the Hugging Face team will open comments when reviewing your code. We recommend asking most questions on GitHub on your PR. For some very general questions that are not very useful for the public, feel free to ping the Hugging Face team by Slack or email.`),dm=h(),zn=a("p"),ql=a("strong"),V_=r("5. Adapt the generated models code for brand_new_bert"),hm=h(),ve=a("p"),e1=r(`At first, we will focus only on the model itself and not care about the tokenizer. All the relevant code should be found in the generated files `),Sl=a("code"),t1=r("src/transformers/models/brand_new_bert/modeling_brand_new_bert.py"),o1=r(` and `),Rl=a("code"),r1=r("src/transformers/models/brand_new_bert/configuration_brand_new_bert.py"),n1=r("."),fm=h(),Y=a("p"),a1=r(`Now you can finally start coding :). The generated code in `),Dl=a("code"),s1=r("src/transformers/models/brand_new_bert/modeling_brand_new_bert.py"),i1=r(` will either have the same architecture as BERT if it\u2019s an encoder-only model or BART if it\u2019s an encoder-decoder model. At this point, you should remind yourself what you\u2019ve learned in the beginning about the theoretical aspects of the model: `),zl=a("em"),l1=r(`How is the model different from BERT or BART?`),d1=r("\u201D. Implement those changes which often means to change the "),Fl=a("em"),h1=r("self-attention"),f1=r(` layer, the order of the normalization layer, etc\u2026 Again, it is often useful to look at the similar architecture of already existing models in Transformers to get a better feeling of how your model should be implemented.`),mm=h(),j=a("p"),Hl=a("strong"),m1=r("Note"),p1=r(` that at this point, you don\u2019t have to be very sure that your code is fully correct or clean. Rather, it is advised to add a first `),Wl=a("em"),u1=r("unclean"),c1=r(`, copy-pasted version of the original code to `),Gl=a("code"),y1=r("src/transformers/models/brand_new_bert/modeling_brand_new_bert.py"),g1=r(` until you feel like all the necessary code is added. From our experience, it is much more efficient to quickly add a first version of the required code and improve/correct the code iteratively with the conversion script as described in the next section. 
The only thing that has to work at this point is that you can instantiate the \u{1F917} Transformers implementation of `),Ul=a("em"),w1=r("brand_new_bert"),v1=r(", "),Yl=a("em"),b1=r("i.e."),_1=r(` the following command should work:`),pm=h(),p(hr.$$.fragment),um=h(),be=a("p"),E1=r("The above command will create a model according to the default parameters as defined in "),Jl=a("code"),k1=r("BrandNewBertConfig()"),T1=r(` with random weights, thus making sure that the `),Xl=a("code"),$1=r("init()"),P1=r(" methods of all components works."),cm=h(),Fn=a("p"),Zl=a("strong"),I1=r("6. Write a conversion script"),ym=h(),J=a("p"),A1=r("Next, you should write a conversion script that lets you convert the checkpoint you used to debug "),Kl=a("em"),N1=r("brand_new_bert"),M1=r(` in the original repository to a checkpoint compatible with your just created \u{1F917} Transformers implementation of `),Ql=a("em"),B1=r("brand_new_bert"),L1=r(`. It is not advised to write the conversion script from scratch, but rather to look through already existing conversion scripts in \u{1F917} Transformers for one that has been used to convert a similar model that was written in the same framework as `),Vl=a("em"),j1=r("brand_new_bert"),O1=r(`. Usually, it is enough to copy an already existing conversion script and slightly adapt it for your use case. Don\u2019t hesitate to ask the Hugging Face team to point you to a similar already existing conversion script for your model.`),gm=h(),Pt=a("ul"),Hn=a("li"),x1=r("If you are porting a model from TensorFlow to PyTorch, a good starting point might be BERT\u2019s conversion script "),fr=a("a"),C1=r("here"),q1=h(),Wn=a("li"),S1=r("If you are porting a model from PyTorch to PyTorch, a good starting point might be BART\u2019s conversion script "),mr=a("a"),R1=r("here"),wm=h(),It=a("p"),D1=r(`In the following, we\u2019ll quickly explain how PyTorch models store layer weights and define layer names. In PyTorch, the name of a layer is defined by the name of the class attribute you give the layer. Let\u2019s define a dummy model in PyTorch, called `),ed=a("code"),z1=r("SimpleModel"),F1=r(" as follows:"),vm=h(),p(pr.$$.fragment),bm=h(),X=a("p"),H1=r("Now we can create an instance of this model definition which will fill all weights: "),td=a("code"),W1=r("dense"),G1=r(", "),od=a("code"),U1=r("intermediate"),Y1=r(`, `),rd=a("code"),J1=r("layer_norm"),X1=r(" with random weights. We can print the model to see its architecture"),_m=h(),p(ur.$$.fragment),Em=h(),Gn=a("p"),Z1=r("This will print out the following:"),km=h(),p(cr.$$.fragment),Tm=h(),Un=a("p"),K1=r(`We can see that the layer names are defined by the name of the class attribute in PyTorch. You can print out the weight values of a specific layer:`),$m=h(),p(yr.$$.fragment),Pm=h(),Yn=a("p"),Q1=r("to see that the weights were randomly initialized"),Im=h(),p(gr.$$.fragment),Am=h(),wr=a("p"),V1=r(`In the conversion script, you should fill those randomly initialized weights with the exact weights of the corresponding layer in the checkpoint. `),nd=a("em"),e0=r("E.g."),Nm=h(),p(vr.$$.fragment),Mm=h(),_e=a("p"),t0=r(`While doing so, you must verify that each randomly initialized weight of your PyTorch model and its corresponding pretrained checkpoint weight exactly match in both `),ad=a("strong"),o0=r("shape and name"),r0=r(". To do so, it is "),sd=a("strong"),n0=r("necessary"),a0=r(` to add assert statements for the shape and print out the names of the checkpoints weights. E.g. 
you should add statements like:`),Bm=h(),p(br.$$.fragment),Lm=h(),_r=a("p"),s0=r("Besides, you should also print out the names of both weights to make sure they match, "),id=a("em"),i0=r("e.g."),jm=h(),p(Er.$$.fragment),Om=h(),Jn=a("p"),l0=r(`If either the shape or the name doesn\u2019t match, you probably assigned the wrong checkpoint weight to a randomly initialized layer of the \u{1F917} Transformers implementation.`),xm=h(),At=a("p"),d0=r("An incorrect shape is most likely due to an incorrect setting of the config parameters in "),ld=a("code"),h0=r("BrandNewBertConfig()"),f0=r(` that do not exactly match those that were used for the checkpoint you want to convert. However, it could also be that PyTorch\u2019s implementation of a layer requires the weight to be transposed beforehand.`),Cm=h(),Z=a("p"),m0=r("Finally, you should also check that "),dd=a("strong"),p0=r("all"),u0=r(` required weights are initialized and print out all checkpoint weights that were not used for initialization to make sure the model is correctly converted. It is completely normal, that the conversion trials fail with either a wrong shape statement or wrong name assignment. This is most likely because either you used incorrect parameters in `),hd=a("code"),c0=r("BrandNewBertConfig()"),y0=r(`, have a wrong architecture in the \u{1F917} Transformers implementation, you have a bug in the `),fd=a("code"),g0=r("init()"),w0=r(` functions of one of the components of the \u{1F917} Transformers implementation or you need to transpose one of the checkpoint weights.`),qm=h(),K=a("p"),v0=r(`This step should be iterated with the previous step until all weights of the checkpoint are correctly loaded in the Transformers model. Having correctly loaded the checkpoint into the \u{1F917} Transformers implementation, you can then save the model under a folder of your choice `),md=a("code"),b0=r("/path/to/converted/checkpoint/folder"),_0=r(` that should then contain both a `),pd=a("code"),E0=r("pytorch_model.bin"),k0=r(" file and a "),ud=a("code"),T0=r("config.json"),$0=r(" file:"),Sm=h(),p(kr.$$.fragment),Rm=h(),Xn=a("p"),cd=a("strong"),P0=r("7. Implement the forward pass"),Dm=h(),Nt=a("p"),I0=r(`Having managed to correctly load the pretrained weights into the \u{1F917} Transformers implementation, you should now make sure that the forward pass is correctly implemented. In `),Zn=a("a"),A0=r("Get familiar with the original repository"),N0=r(`, you have already created a script that runs a forward pass of the model using the original repository. Now you should write an analogous script using the \u{1F917} Transformers implementation instead of the original one. It should look as follows:`),zm=h(),p(Tr.$$.fragment),Fm=h(),S=a("p"),M0=r(`It is very likely that the \u{1F917} Transformers implementation and the original model implementation don\u2019t give the exact same output the very first time or that the forward pass throws an error. Don\u2019t be disappointed - it\u2019s expected! First, you should make sure that the forward pass doesn\u2019t throw any errors. It often happens that the wrong dimensions are used leading to a `),yd=a("em"),B0=r("Dimensionality mismatch"),L0=r(" error or that the wrong data type object is used, "),gd=a("em"),j0=r("e.g."),O0=h(),wd=a("code"),x0=r("torch.long"),C0=r(` instead of `),vd=a("code"),q0=r("torch.float32"),S0=r(`. 
Don\u2019t hesitate to ask the Hugging Face team for help, if you don\u2019t manage to solve certain errors.`),Hm=h(),Q=a("p"),R0=r(`The final part to make sure the \u{1F917} Transformers implementation works correctly is to ensure that the outputs are equivalent to a precision of `),bd=a("code"),D0=r("1e-3"),z0=r(". First, you should ensure that the output shapes are identical, "),_d=a("em"),F0=r("i.e."),H0=h(),Ed=a("code"),W0=r("outputs.shape"),G0=r(` should yield the same value for the script of the \u{1F917} Transformers implementation and the original implementation. Next, you should make sure that the output values are identical as well. This one of the most difficult parts of adding a new model. Common mistakes why the outputs are not identical are:`),Wm=h(),V=a("ul"),Ke=a("li"),U0=r("Some layers were not added, "),kd=a("em"),Y0=r("i.e."),J0=r(" an "),Td=a("em"),X0=r("activation"),Z0=r(" layer was not added, or the residual connection was forgotten"),K0=h(),$d=a("li"),Q0=r("The word embedding matrix was not tied"),V0=h(),Pd=a("li"),e2=r("The wrong positional embeddings are used because the original implementation uses on offset"),t2=h(),ee=a("li"),o2=r("Dropout is applied during the forward pass. To fix this make sure "),Id=a("em"),r2=r("model.training is False"),n2=r(` and that no dropout layer is falsely activated during the forward pass, `),Ad=a("em"),a2=r("i.e."),s2=r(" pass "),Nd=a("em"),i2=r("self.training"),l2=r(" to "),$r=a("a"),d2=r("PyTorch\u2019s functional dropout"),Gm=h(),Ee=a("p"),h2=r(`The best way to fix the problem is usually to look at the forward pass of the original implementation and the \u{1F917} Transformers implementation side-by-side and check if there are any differences. Ideally, you should debug/print out intermediate outputs of both implementations of the forward pass to find the exact position in the network where the \u{1F917} Transformers implementation shows a different output than the original implementation. First, make sure that the hard-coded `),Md=a("code"),f2=r("input_ids"),m2=r(` in both scripts are identical. Next, verify that the outputs of the first transformation of the `),Bd=a("code"),p2=r("input_ids"),u2=r(` (usually the word embeddings) are identical. And then work your way up to the very last layer of the network. At some point, you will notice a difference between the two implementations, which should point you to the bug in the \u{1F917} Transformers implementation. From our experience, a simple and efficient way is to add many print statements in both the original implementation and \u{1F917} Transformers implementation, at the same positions in the network respectively, and to successively remove print statements showing the same values for intermediate presentations.`),Um=h(),Mt=a("p"),c2=r(`When you\u2019re confident that both implementations yield the same output, verifying the outputs with `),Ld=a("code"),y2=r("torch.allclose(original_output, output, atol=1e-3)"),g2=r(`, you\u2019re done with the most difficult part! Congratulations - the work left to be done should be a cakewalk \u{1F60A}.`),Ym=h(),Kn=a("p"),jd=a("strong"),w2=r("8. Adding all necessary model tests"),Jm=h(),Bt=a("p"),v2=r(`At this point, you have successfully added a new model. However, it is very much possible that the model does not yet fully comply with the required design. To make sure, the implementation is fully compatible with \u{1F917} Transformers, all common tests should pass. 
The Cookiecutter should have automatically added a test file for your model, probably under the same `),Od=a("code"),b2=r("tests/test_modeling_brand_new_bert.py"),_2=r(". Run this test file to verify that all common tests pass:"),Xm=h(),p(Pr.$$.fragment),Zm=h(),Qn=a("p"),E2=r("Having fixed all common tests, it is now crucial to ensure that all the nice work you have done is well tested, so that"),Km=h(),Lt=a("ul"),Vn=a("li"),k2=r("a) The community can easily understand your work by looking at specific tests of "),xd=a("em"),T2=r("brand_new_bert"),$2=h(),Cd=a("li"),P2=r("b) Future changes to your model will not break any important feature of the model."),Qm=h(),jt=a("p"),I2=r(`At first, integration tests should be added. Those integration tests essentially do the same as the debugging scripts you used earlier to implement the model to \u{1F917} Transformers. A template of those model tests is already added by the Cookiecutter, called `),qd=a("code"),A2=r("BrandNewBertModelIntegrationTests"),N2=r(` and only has to be filled out by you. To ensure that those tests are passing, run`),Vm=h(),p(Ir.$$.fragment),ep=h(),p(Ot.$$.fragment),tp=h(),te=a("p"),M2=r("Second, all features that are special to "),Sd=a("em"),B2=r("brand_new_bert"),L2=r(` should be tested additionally in a separate test under `),Rd=a("code"),j2=r("BrandNewBertModelTester"),O2=r("/`"),Dd=a("code"),x2=r("BrandNewBertModelTest"),C2=r(`. This part is often forgotten but is extremely useful in two ways:`),op=h(),xt=a("ul"),Ar=a("li"),q2=r(`It helps to transfer the knowledge you have acquired during the model addition to the community by showing how the special features of `),zd=a("em"),S2=r("brand_new_bert"),R2=r(" should work."),D2=h(),Fd=a("li"),z2=r("Future contributors can quickly test changes to the model by running those special tests."),rp=h(),ea=a("p"),Hd=a("strong"),F2=r("9. Implement the tokenizer"),np=h(),Ct=a("p"),H2=r("Next, we should add the tokenizer of "),Wd=a("em"),W2=r("brand_new_bert"),G2=r(`. Usually, the tokenizer is equivalent or very similar to an already existing tokenizer of \u{1F917} Transformers.`),ap=h(),ta=a("p"),U2=r(`It is very important to find/extract the original tokenizer file and to manage to load this file into the \u{1F917} Transformers\u2019 implementation of the tokenizer.`),sp=h(),oa=a("p"),Y2=r("To ensure that the tokenizer works correctly, it is recommended to first create a script in the original repository\nthat inputs a string and returns the `input_ids\u201C. It could look similar to this (in pseudo-code):"),ip=h(),p(Nr.$$.fragment),lp=h(),qt=a("p"),J2=r(`You might have to take a deeper look again into the original repository to find the correct tokenizer function or you might even have to do changes to your clone of the original repository to only output the `),Gd=a("code"),X2=r("input_ids"),Z2=r(`. Having written a functional tokenization script that uses the original repository, an analogous script for \u{1F917} Transformers should be created. It should look similar to this:`),dp=h(),p(Mr.$$.fragment),hp=h(),St=a("p"),K2=r("When both "),Ud=a("code"),Q2=r("input_ids"),V2=r(" yield the same values, as a final step a tokenizer test file should also be added."),fp=h(),ke=a("p"),eE=r("Analogous to the modeling test files of "),Yd=a("em"),tE=r("brand_new_bert"),oE=r(", the tokenization test files of "),Jd=a("em"),rE=r("brand_new_bert"),nE=r(` should contain a couple of hard-coded integration tests.`),mp=h(),ra=a("p"),Xd=a("strong"),aE=r("10. 
Run End-to-end integration tests"),pp=h(),oe=a("p"),sE=r(`Having added the tokenizer, you should also add a couple of end-to-end integration tests using both the model and the tokenizer to `),Zd=a("code"),iE=r("tests/test_modeling_brand_new_bert.py"),lE=r(` in \u{1F917} Transformers. Such a test should show on a meaningful text-to-text sample that the \u{1F917} Transformers implementation works as expected. A meaningful text-to-text sample can include `),Kd=a("em"),dE=r("e.g."),hE=r(` a source-to-target-translation pair, an article-to-summary pair, a question-to-answer pair, etc\u2026 If none of the ported checkpoints has been fine-tuned on a downstream task it is enough to simply rely on the model tests. In a final step to ensure that the model is fully functional, it is advised that you also run all tests on GPU. It can happen that you forgot to add some `),Qd=a("code"),fE=r(".to(self.device)"),mE=r(` statements to internal tensors of the model, which in such a test would show in an error. In case you have no access to a GPU, the Hugging Face team can take care of running those tests for you.`),up=h(),na=a("p"),Vd=a("strong"),pE=r("11. Add Docstring"),cp=h(),re=a("p"),uE=r("Now, all the necessary functionality for "),eh=a("em"),cE=r("brand_new_bert"),yE=r(` is added - you\u2019re almost done! The only thing left to add is a nice docstring and a doc page. The Cookiecutter should have added a template file called `),th=a("code"),gE=r("docs/source/model_doc/brand_new_bert.rst"),wE=r(` that you should fill out. Users of your model will usually first look at this page before using your model. Hence, the documentation must be understandable and concise. It is very useful for the community to add some `),oh=a("em"),vE=r("Tips"),bE=r(` to show how the model should be used. Don\u2019t hesitate to ping the Hugging Face team regarding the docstrings.`),yp=h(),Te=a("p"),_E=r("Next, make sure that the docstring added to "),rh=a("code"),EE=r("src/transformers/models/brand_new_bert/modeling_brand_new_bert.py"),kE=r(` is correct and included all necessary inputs and outputs. We have a detailed guide about writing documentation and our docstring format `),aa=a("a"),TE=r("here"),$E=r(`. It is always to good to remind oneself that documentation should be treated at least as carefully as the code in \u{1F917} Transformers since the documentation is usually the first contact point of the community with the model.`),gp=h(),sa=a("p"),nh=a("strong"),PE=r("Code refactor"),wp=h(),Rt=a("p"),IE=r("Great, now you have added all the necessary code for "),ah=a("em"),AE=r("brand_new_bert"),NE=r(`. At this point, you should correct some potential incorrect code style by running:`),vp=h(),p(Br.$$.fragment),bp=h(),ia=a("p"),ME=r("and verify that your coding style passes the quality check:"),_p=h(),p(Lr.$$.fragment),Ep=h(),la=a("p"),BE=r(`There are a couple of other very strict design tests in \u{1F917} Transformers that might still be failing, which shows up in the tests of your pull request. This is often because of some missing information in the docstring or some incorrect naming. The Hugging Face team will surely help you if you\u2019re stuck here.`),kp=h(),da=a("p"),LE=r(`Lastly, it is always a good idea to refactor one\u2019s code after having ensured that the code works correctly. With all tests passing, now it\u2019s a good time to go over the added code again and do some refactoring.`),Tp=h(),ha=a("p"),jE=r("You have now finished the coding part, congratulation! \u{1F389} You are Awesome! 
\u{1F60E}"),$p=h(),fa=a("p"),sh=a("strong"),OE=r("12. Upload the models to the model hub"),Pp=h(),R=a("p"),xE=r(`In this final part, you should convert and upload all checkpoints to the model hub and add a model card for each uploaded model checkpoint. You can get familiar with the hub functionalities by reading our `),ma=a("a"),CE=r("Model sharing and uploading Page"),qE=r(`. You should work alongside the Hugging Face team here to decide on a fitting name for each checkpoint and to get the required access rights to be able to upload the model under the author\u2019s organization of `),ih=a("em"),SE=r("brand_new_bert"),RE=r(". The "),lh=a("code"),DE=r("push_to_hub"),zE=r(" method, present in all models in "),dh=a("code"),FE=r("transformers"),HE=r(", is a quick and efficient way to push your checkpoint to the hub. A little snippet is pasted below:"),Ip=h(),p(jr.$$.fragment),Ap=h(),Dt=a("p"),WE=r(`It is worth spending some time to create fitting model cards for each checkpoint. The model cards should highlight the specific characteristics of this particular checkpoint, `),hh=a("em"),GE=r("e.g."),UE=r(` On which dataset was the checkpoint pretrained/fine-tuned on? On what down-stream task should the model be used? And also include some code on how to correctly use the model.`),Np=h(),pa=a("p"),fh=a("strong"),YE=r("13. (Optional) Add notebook"),Mp=h(),zt=a("p"),JE=r("It is very helpful to add a notebook that showcases in-detail how "),mh=a("em"),XE=r("brand_new_bert"),ZE=r(` can be used for inference and/or fine-tuned on a downstream task. This is not mandatory to merge your PR, but very useful for the community.`),Bp=h(),ua=a("p"),ph=a("strong"),KE=r("14. Submit your finished PR"),Lp=h(),ca=a("p"),QE=r(`You\u2019re done programming now and can move to the last step, which is getting your PR merged into master. Usually, the Hugging Face team should have helped you already at this point, but it is worth taking some time to give your finished PR a nice description and eventually add comments to your code, if you want to point out certain design choices to your reviewer.`),jp=h(),Qe=a("h3"),Ft=a("a"),uh=a("span"),p(Or.$$.fragment),VE=h(),ch=a("span"),e3=r("Share your work!!"),Op=h(),ya=a("p"),t3=r(`Now, it\u2019s time to get some credit from the community for your work! Having completed a model addition is a major contribution to Transformers and the whole NLP community. Your code and the ported pre-trained models will certainly be used by hundreds and possibly even thousands of developers and researchers. You should be proud of your work and share your achievement with the community.`),xp=h(),ga=a("p"),yh=a("strong"),o3=r("You have made another model that is super easy to access for everyone in the community! \u{1F92F}"),this.h()},l(e){const l=f9('[data-svelte="svelte-1phssyn"]',document.head);P=s(l,"META",{name:!0,content:!0}),l.forEach(o),ue=f(e),I=s(e,"H1",{class:!0});var xr=i(I);L=s(xr,"A",{id:!0,class:!0,href:!0});var f3=i(L);Oe=s(f3,"SPAN",{});var m3=i(Oe);u(D.$$.fragment,m3),m3.forEach(o),f3.forEach(o),ao=f(xr),le=s(xr,"SPAN",{});var p3=i(le);de=n(p3,"How to add a model to \u{1F917} Transformers?"),p3.forEach(o),xr.forEach(o),so=f(e),z=s(e,"P",{});var wa=i(z);xu=n(wa,`Adding a new model is often difficult and requires an in-depth knowledge of the \u{1F917} Transformers library and ideally also of the model\u2019s original repository. At Hugging Face, we are trying to empower the community more and more to add models independently. 
Thus, for some new models that the community wants to be added to \u{1F917} Transformers, we create a customized `),qa=s(wa,"EM",{});var u3=i(qa);Cu=n(u3,"call-for-model-addition"),u3.forEach(o),qu=n(wa,` that explains step-by-step how to add the requested model. With this `),Sa=s(wa,"EM",{});var c3=i(Sa);Su=n(c3,"call-for-model-addition"),c3.forEach(o),Ru=n(wa,`, we want to teach a motivated and experienced contributor of the community how to port a model to \u{1F917} Transformers.`),wa.forEach(o),wh=f(e),et=s(e,"P",{});var qp=i(et);Du=n(qp,`If this sounds like something you would be interested in, feel free to check out the currently open \u201Ccalls-for-model-addition\u201D `),io=s(qp,"A",{href:!0,rel:!0});var y3=i(io);zu=n(y3,"here"),y3.forEach(o),Fu=n(qp,` and to contact us.`),qp.forEach(o),vh=f(e),Cr=s(e,"P",{});var g3=i(Cr);Hu=n(g3,`If selected, you will then work closely with one member of the Hugging Face team to integrate the model into \u{1F917} Transformers. By doing so, you will both gain a theoretical and deep practical understanding of the proposed model. But more importantly, you will have made a major open-source contribution to \u{1F917} Transformers. Along the way, you will:`),g3.forEach(o),bh=f(e),W=s(e,"UL",{});var Ht=i(W);Ra=s(Ht,"LI",{});var w3=i(Ra);Wu=n(w3,"get insights into open-source best practices"),w3.forEach(o),Gu=f(Ht),Da=s(Ht,"LI",{});var v3=i(Da);Uu=n(v3,"understand the design principles of one of the most popular NLP libraries"),v3.forEach(o),Yu=f(Ht),za=s(Ht,"LI",{});var b3=i(za);Ju=n(b3,"learn how to do efficiently test large NLP models"),b3.forEach(o),Xu=f(Ht),he=s(Ht,"LI",{});var Wt=i(he);Zu=n(Wt,"learn how to integrate Python utilities like "),Fa=s(Wt,"CODE",{});var _3=i(Fa);Ku=n(_3,"black"),_3.forEach(o),Qu=n(Wt,", "),Ha=s(Wt,"CODE",{});var E3=i(Ha);Vu=n(E3,"isort"),E3.forEach(o),ec=n(Wt,", "),Wa=s(Wt,"CODE",{});var k3=i(Wa);tc=n(k3,"make fix-copies"),k3.forEach(o),oc=n(Wt,` into a library to always ensure clean and readable code`),Wt.forEach(o),Ht.forEach(o),_h=f(e),tt=s(e,"P",{});var Sp=i(tt);rc=n(Sp,`We are also more than happy if you want to add a model that cannot be found in the \u201Ccalls-for-model-addition\u201D folder. The following sections explain in detail how to add a new model. It might also be very helpful to check out already added models to see if those resemble the model you would like to add `),lo=s(Sp,"A",{href:!0,rel:!0});var T3=i(lo);nc=n(T3,"here"),T3.forEach(o),ac=n(Sp,"."),Sp.forEach(o),Eh=f(e),qr=s(e,"P",{});var $3=i(qr);sc=n($3,"To start, let\u2019s try to get a general overview of the Transformers library."),$3.forEach(o),kh=f(e),xe=s(e,"H2",{class:!0});var Rp=i(xe);ot=s(Rp,"A",{id:!0,class:!0,href:!0});var P3=i(ot);Ga=s(P3,"SPAN",{});var I3=i(Ga);u(ho.$$.fragment,I3),I3.forEach(o),P3.forEach(o),ic=f(Rp),Ua=s(Rp,"SPAN",{});var A3=i(Ua);lc=n(A3,"General overview of \u{1F917} Transformers"),A3.forEach(o),Rp.forEach(o),Th=f(e),Sr=s(e,"P",{});var N3=i(Sr);dc=n(N3,`First, you should get a general overview of \u{1F917} Transformers. \u{1F917} Transformers is a very opinionated library, so there is a chance that you don\u2019t agree with some of the library\u2019s philosophies or design choices. 
From our experience, however, we found that the fundamental design choices and philosophies of the library are crucial to efficiently scale \u{1F917} Transformers while keeping maintenance costs at a reasonable level.`),N3.forEach(o),$h=f(e),rt=s(e,"P",{});var Dp=i(rt);hc=n(Dp,"A good first starting point to better understand the library is to read the "),Rr=s(Dp,"A",{href:!0});var M3=i(Rr);fc=n(M3,"documentation of our philosophy"),M3.forEach(o),mc=n(Dp,". As a result of our way of working, there are some choices that we try to apply to all models:"),Dp.forEach(o),Ph=f(e),ce=s(e,"UL",{});var va=i(ce);Ya=s(va,"LI",{});var B3=i(Ya);pc=n(B3,"Composition is generally favored over-abstraction"),B3.forEach(o),uc=f(va),Ja=s(va,"LI",{});var L3=i(Ja);cc=n(L3,"Duplicating code is not always bad if it strongly improves the readability or accessibility of a model"),L3.forEach(o),yc=f(va),fo=s(va,"LI",{});var zp=i(fo);gc=n(zp,`Model files are as self-contained as possible so that when you read the code of a specific model, you ideally only have to look into the respective `),Xa=s(zp,"CODE",{});var j3=i(Xa);wc=n(j3,"modeling_....py"),j3.forEach(o),vc=n(zp," file."),zp.forEach(o),va.forEach(o),Ih=f(e),nt=s(e,"P",{});var Fp=i(nt);bc=n(Fp,"In our opinion, the library\u2019s code is not just a means to provide a product, "),Za=s(Fp,"EM",{});var O3=i(Za);_c=n(O3,"e.g."),O3.forEach(o),Ec=n(Fp,` the ability to use BERT for inference, but also as the very product that we want to improve. Hence, when adding a model, the user is not only the person that will use your model, but also everybody that will read, try to understand, and possibly tweak your code.`),Fp.forEach(o),Ah=f(e),Dr=s(e,"P",{});var x3=i(Dr);kc=n(x3,"With this in mind, let\u2019s go a bit deeper into the general library design."),x3.forEach(o),Nh=f(e),Ce=s(e,"H3",{class:!0});var Hp=i(Ce);at=s(Hp,"A",{id:!0,class:!0,href:!0});var C3=i(at);Ka=s(C3,"SPAN",{});var q3=i(Ka);u(mo.$$.fragment,q3),q3.forEach(o),C3.forEach(o),Tc=f(Hp),Qa=s(Hp,"SPAN",{});var S3=i(Qa);$c=n(S3,"Overview of models"),S3.forEach(o),Hp.forEach(o),Mh=f(e),G=s(e,"P",{});var Gt=i(G);Pc=n(Gt,`To successfully add a model, it is important to understand the interaction between your model and its config, `),zr=s(Gt,"A",{href:!0});var R3=i(zr);Ic=n(R3,"PreTrainedModel"),R3.forEach(o),Ac=n(Gt,", and "),Fr=s(Gt,"A",{href:!0});var D3=i(Fr);Nc=n(D3,"PretrainedConfig"),D3.forEach(o),Mc=n(Gt,`. For exemplary purposes, we will call the model to be added to \u{1F917} Transformers `),Va=s(Gt,"CODE",{});var z3=i(Va);Bc=n(z3,"BrandNewBert"),z3.forEach(o),Lc=n(Gt,"."),Gt.forEach(o),Bh=f(e),Hr=s(e,"P",{});var F3=i(Hr);jc=n(F3,"Let\u2019s take a look:"),F3.forEach(o),Lh=f(e),Wr=s(e,"IMG",{src:!0}),jh=f(e),v=s(e,"P",{});var _=i(v);Oc=n(_,`As you can see, we do make use of inheritance in \u{1F917} Transformers, but we keep the level of abstraction to an absolute minimum. There are never more than two levels of abstraction for any model in the library. `),es=s(_,"CODE",{});var H3=i(es);xc=n(H3,"BrandNewBertModel"),H3.forEach(o),Cc=n(_,` inherits from `),ts=s(_,"CODE",{});var W3=i(ts);qc=n(W3,"BrandNewBertPreTrainedModel"),W3.forEach(o),Sc=n(_," which in turn inherits from "),Gr=s(_,"A",{href:!0});var G3=i(Gr);Rc=n(G3,"PreTrainedModel"),G3.forEach(o),Dc=n(_,` and that\u2019s it. As a general rule, we want to make sure that a new model only depends on `),Ur=s(_,"A",{href:!0});var U3=i(Ur);zc=n(U3,"PreTrainedModel"),U3.forEach(o),Fc=n(_,`. 
The important functionalities that are automatically provided to every new model are `),Yr=s(_,"A",{href:!0});var Y3=i(Yr);Hc=n(Y3,"from_pretrained()"),Y3.forEach(o),Wc=n(_,` and `),Jr=s(_,"A",{href:!0});var J3=i(Jr);Gc=n(J3,"save_pretrained()"),J3.forEach(o),Uc=n(_,`, which are used for serialization and deserialization. All of the other important functionalities, such as `),os=s(_,"CODE",{});var X3=i(os);Yc=n(X3,"BrandNewBertModel.forward"),X3.forEach(o),Jc=n(_,` should be completely defined in the new `),rs=s(_,"CODE",{});var Z3=i(rs);Xc=n(Z3,"modeling_brand_new_bert.py"),Z3.forEach(o),Zc=n(_,` script. Next, we want to make sure that a model with a specific head layer, such as `),ns=s(_,"CODE",{});var K3=i(ns);Kc=n(K3,"BrandNewBertForMaskedLM"),K3.forEach(o),Qc=n(_," does not inherit from "),as=s(_,"CODE",{});var Q3=i(as);Vc=n(Q3,"BrandNewBertModel"),Q3.forEach(o),ey=n(_,", but rather uses "),ss=s(_,"CODE",{});var V3=i(ss);ty=n(V3,"BrandNewBertModel"),V3.forEach(o),oy=n(_,` as a component that can be called in its forward pass to keep the level of abstraction low. Every new model requires a configuration class, called `),is=s(_,"CODE",{});var ek=i(is);ry=n(ek,"BrandNewBertConfig"),ek.forEach(o),ny=n(_,`. This configuration is always stored as an attribute in `),Xr=s(_,"A",{href:!0});var tk=i(Xr);ay=n(tk,"PreTrainedModel"),tk.forEach(o),sy=n(_,", and thus can be accessed via the "),ls=s(_,"CODE",{});var ok=i(ls);iy=n(ok,"config"),ok.forEach(o),ly=n(_,` attribute for all classes inheriting from `),ds=s(_,"CODE",{});var rk=i(ds);dy=n(rk,"BrandNewBertPreTrainedModel"),rk.forEach(o),hy=n(_,":"),_.forEach(o),Oh=f(e),u(po.$$.fragment,e),xh=f(e),A=s(e,"P",{});var ne=i(A);fy=n(ne,`Similar to the model, the configuration inherits basic serialization and deserialization functionalities from `),Zr=s(ne,"A",{href:!0});var nk=i(Zr);my=n(nk,"PretrainedConfig"),nk.forEach(o),py=n(ne,`. Note that the configuration and the model are always serialized into two different formats - the model to a `),hs=s(ne,"EM",{});var ak=i(hs);uy=n(ak,"pytorch_model.bin"),ak.forEach(o),cy=n(ne," file and the configuration to a "),fs=s(ne,"EM",{});var sk=i(fs);yy=n(sk,"config.json"),sk.forEach(o),gy=n(ne,` file. Calling `),Kr=s(ne,"A",{href:!0});var ik=i(Kr);wy=n(ik,"save_pretrained()"),ik.forEach(o),vy=n(ne,` will automatically call `),Qr=s(ne,"A",{href:!0});var lk=i(Qr);by=n(lk,"save_pretrained()"),lk.forEach(o),_y=n(ne,", so that both model and configuration are saved."),ne.forEach(o),Ch=f(e),qe=s(e,"H3",{class:!0});var Wp=i(qe);st=s(Wp,"A",{id:!0,class:!0,href:!0});var dk=i(st);ms=s(dk,"SPAN",{});var hk=i(ms);u(uo.$$.fragment,hk),hk.forEach(o),dk.forEach(o),Ey=f(Wp),ps=s(Wp,"SPAN",{});var fk=i(ps);ky=n(fk,"Overview of tokenizers"),fk.forEach(o),Wp.forEach(o),qh=f(e),Vr=s(e,"P",{});var mk=i(Vr);Ty=n(mk,"Not quite ready yet :-( This section will be added soon!"),mk.forEach(o),Sh=f(e),Se=s(e,"H2",{class:!0});var Gp=i(Se);it=s(Gp,"A",{id:!0,class:!0,href:!0});var pk=i(it);us=s(pk,"SPAN",{});var uk=i(us);u(co.$$.fragment,uk),uk.forEach(o),pk.forEach(o),$y=f(Gp),cs=s(Gp,"SPAN",{});var ck=i(cs);Py=n(ck,"Step-by-step recipe to add a model to \u{1F917} Transformers"),ck.forEach(o),Gp.forEach(o),Rh=f(e),en=s(e,"P",{});var yk=i(en);Iy=n(yk,`Everyone has different preferences of how to port a model so it can be very helpful for you to take a look at summaries of how other contributors ported models to Hugging Face. 
Here is a list of community blog posts on how to port a model:`),yk.forEach(o),Dh=f(e),lt=s(e,"OL",{});var Up=i(lt);yo=s(Up,"LI",{});var Yp=i(yo);go=s(Yp,"A",{href:!0,rel:!0});var gk=i(go);Ay=n(gk,"Porting GPT2 Model"),gk.forEach(o),Ny=n(Yp," by "),wo=s(Yp,"A",{href:!0,rel:!0});var wk=i(wo);My=n(wk,"Thomas"),wk.forEach(o),Yp.forEach(o),By=f(Up),vo=s(Up,"LI",{});var Jp=i(vo);bo=s(Jp,"A",{href:!0,rel:!0});var vk=i(bo);Ly=n(vk,"Porting WMT19 MT Model"),vk.forEach(o),jy=n(Jp," by "),_o=s(Jp,"A",{href:!0,rel:!0});var bk=i(_o);Oy=n(bk,"Stas"),bk.forEach(o),Jp.forEach(o),Up.forEach(o),zh=f(e),tn=s(e,"P",{});var _k=i(tn);xy=n(_k,"From experience, we can tell you that the most important things to keep in mind when adding a model are:"),_k.forEach(o),Fh=f(e),ye=s(e,"UL",{});var ba=i(ye);fe=s(ba,"LI",{});var Ut=i(fe);Cy=n(Ut,`Don\u2019t reinvent the wheel! Most parts of the code you will add for the new \u{1F917} Transformers model already exist somewhere in \u{1F917} Transformers. Take some time to find similar, already existing models and tokenizers you can copy from. `),Eo=s(Ut,"A",{href:!0,rel:!0});var Ek=i(Eo);qy=n(Ek,"grep"),Ek.forEach(o),Sy=n(Ut," and "),ko=s(Ut,"A",{href:!0,rel:!0});var kk=i(ko);Ry=n(kk,"rg"),kk.forEach(o),Dy=n(Ut,` are your friends. Note that it might very well happen that your model\u2019s tokenizer is based on one model implementation, and your model\u2019s modeling code on another one. `),ys=s(Ut,"EM",{});var Tk=i(ys);zy=n(Tk,"E.g."),Tk.forEach(o),Fy=n(Ut,` FSMT\u2019s modeling code is based on BART, while FSMT\u2019s tokenizer code is based on XLM.`),Ut.forEach(o),Hy=f(ba),gs=s(ba,"LI",{});var $k=i(gs);Wy=n($k,`It\u2019s more of an engineering challenge than a scientific challenge. You should spend more time on creating an efficient debugging environment than trying to understand all theoretical aspects of the model in the paper.`),$k.forEach(o),Gy=f(ba),ws=s(ba,"LI",{});var Pk=i(ws);Uy=n(Pk,`Ask for help, when you\u2019re stuck! Models are the core component of \u{1F917} Transformers so that we at Hugging Face are more than happy to help you at every step to add your model. 
Don\u2019t hesitate to ask if you notice you are not making progress.`),Pk.forEach(o),ba.forEach(o),Hh=f(e),on=s(e,"P",{});var Ik=i(on);Yy=n(Ik,"In the following, we try to give you a general recipe that we found most useful when porting a model to \u{1F917} Transformers."),Ik.forEach(o),Wh=f(e),rn=s(e,"P",{});var Ak=i(rn);Jy=n(Ak,`The following list is a summary of everything that has to be done to add a model and can be used by you as a To-Do List:`),Ak.forEach(o),Gh=f(e),b=s(e,"UL",{});var k=i(b);vs=s(k,"LI",{});var Nk=i(vs);bs=s(Nk,"OL",{});var Mk=i(bs);_s=s(Mk,"LI",{});var Bk=i(_s);Xy=n(Bk,"\u2610 (Optional) Understood theoretical aspects"),Bk.forEach(o),Mk.forEach(o),Nk.forEach(o),Zy=f(k),Es=s(k,"LI",{});var Lk=i(Es);nn=s(Lk,"OL",{start:!0});var jk=i(nn);ks=s(jk,"LI",{});var Ok=i(ks);Ky=n(Ok,"\u2610 Prepared transformers dev environment"),Ok.forEach(o),jk.forEach(o),Lk.forEach(o),Qy=f(k),Ts=s(k,"LI",{});var xk=i(Ts);an=s(xk,"OL",{start:!0});var Ck=i(an);$s=s(Ck,"LI",{});var qk=i($s);Vy=n(qk,"\u2610 Set up debugging environment of the original repository"),qk.forEach(o),Ck.forEach(o),xk.forEach(o),eg=f(k),Ps=s(k,"LI",{});var Sk=i(Ps);sn=s(Sk,"OL",{start:!0});var Rk=i(sn);Is=s(Rk,"LI",{});var Dk=i(Is);tg=n(Dk,"\u2610 Created script that successfully runs forward pass using original repository and checkpoint"),Dk.forEach(o),Rk.forEach(o),Sk.forEach(o),og=f(k),As=s(k,"LI",{});var zk=i(As);ln=s(zk,"OL",{start:!0});var Fk=i(ln);Ns=s(Fk,"LI",{});var Hk=i(Ns);rg=n(Hk,"\u2610 Successfully added the model skeleton to Transformers"),Hk.forEach(o),Fk.forEach(o),zk.forEach(o),ng=f(k),Ms=s(k,"LI",{});var Wk=i(Ms);dn=s(Wk,"OL",{start:!0});var Gk=i(dn);Bs=s(Gk,"LI",{});var Uk=i(Bs);ag=n(Uk,"\u2610 Successfully converted original checkpoint to Transformers checkpoint"),Uk.forEach(o),Gk.forEach(o),Wk.forEach(o),sg=f(k),Ls=s(k,"LI",{});var Yk=i(Ls);hn=s(Yk,"OL",{start:!0});var Jk=i(hn);js=s(Jk,"LI",{});var Xk=i(js);ig=n(Xk,"\u2610 Successfully ran forward pass in Transformers that gives identical output to original checkpoint"),Xk.forEach(o),Jk.forEach(o),Yk.forEach(o),lg=f(k),Os=s(k,"LI",{});var Zk=i(Os);fn=s(Zk,"OL",{start:!0});var Kk=i(fn);xs=s(Kk,"LI",{});var Qk=i(xs);dg=n(Qk,"\u2610 Finished model tests in Transformers"),Qk.forEach(o),Kk.forEach(o),Zk.forEach(o),hg=f(k),Cs=s(k,"LI",{});var Vk=i(Cs);mn=s(Vk,"OL",{start:!0});var e4=i(mn);qs=s(e4,"LI",{});var t4=i(qs);fg=n(t4,"\u2610 Successfully added Tokenizer in Transformers"),t4.forEach(o),e4.forEach(o),Vk.forEach(o),mg=f(k),Ss=s(k,"LI",{});var o4=i(Ss);pn=s(o4,"OL",{start:!0});var r4=i(pn);Rs=s(r4,"LI",{});var n4=i(Rs);pg=n(n4,"\u2610 Run end-to-end integration tests"),n4.forEach(o),r4.forEach(o),o4.forEach(o),ug=f(k),Ds=s(k,"LI",{});var a4=i(Ds);un=s(a4,"OL",{start:!0});var s4=i(un);zs=s(s4,"LI",{});var i4=i(zs);cg=n(i4,"\u2610 Finished docs"),i4.forEach(o),s4.forEach(o),a4.forEach(o),yg=f(k),Fs=s(k,"LI",{});var l4=i(Fs);cn=s(l4,"OL",{start:!0});var d4=i(cn);Hs=s(d4,"LI",{});var h4=i(Hs);gg=n(h4,"\u2610 Uploaded model weights to the hub"),h4.forEach(o),d4.forEach(o),l4.forEach(o),wg=f(k),Ws=s(k,"LI",{});var f4=i(Ws);yn=s(f4,"OL",{start:!0});var m4=i(yn);Gs=s(m4,"LI",{});var p4=i(Gs);vg=n(p4,"\u2610 Submitted the pull request"),p4.forEach(o),m4.forEach(o),f4.forEach(o),bg=f(k),Us=s(k,"LI",{});var u4=i(Us);gn=s(u4,"OL",{start:!0});var c4=i(gn);Ys=s(c4,"LI",{});var y4=i(Ys);_g=n(y4,"\u2610 (Optional) Added a demo notebook"),y4.forEach(o),c4.forEach(o),u4.forEach(o),k.forEach(o),Uh=f(e),O=s(e,"P",{});var $e=i(O);Eg=n($e,"To begin with, we usually 
recommend to start by getting a good theoretical understanding of "),Js=s($e,"CODE",{});var g4=i(Js);kg=n(g4,"BrandNewBert"),g4.forEach(o),Tg=n($e,`. However, if you prefer to understand the theoretical aspects of the model `),Xs=s($e,"EM",{});var w4=i(Xs);$g=n(w4,"on-the-job"),w4.forEach(o),Pg=n($e,`, then it is totally fine to directly dive into the `),Zs=s($e,"CODE",{});var v4=i(Zs);Ig=n(v4,"BrandNewBert"),v4.forEach(o),Ag=n($e,`\u2019s code-base. This option might suit you better, if your engineering skills are better than your theoretical skill, if you have trouble understanding `),Ks=s($e,"CODE",{});var b4=i(Ks);Ng=n(b4,"BrandNewBert"),b4.forEach(o),Mg=n($e,`\u2019s paper, or if you just enjoy programming much more than reading scientific papers.`),$e.forEach(o),Yh=f(e),Re=s(e,"H3",{class:!0});var Xp=i(Re);dt=s(Xp,"A",{id:!0,class:!0,href:!0});var _4=i(dt);Qs=s(_4,"SPAN",{});var E4=i(Qs);u(To.$$.fragment,E4),E4.forEach(o),_4.forEach(o),Bg=f(Xp),Vs=s(Xp,"SPAN",{});var k4=i(Vs);Lg=n(k4,"1. (Optional) Theoretical aspects of BrandNewBert"),k4.forEach(o),Xp.forEach(o),Jh=f(e),ht=s(e,"P",{});var Zp=i(ht);jg=n(Zp,"You should take some time to read "),ei=s(Zp,"EM",{});var T4=i(ei);Og=n(T4,"BrandNewBert\u2019s"),T4.forEach(o),xg=n(Zp,` paper, if such descriptive work exists. There might be large sections of the paper that are difficult to understand. If this is the case, this is fine - don\u2019t worry! The goal is not to get a deep theoretical understanding of the paper, but to extract the necessary information required to effectively re-implement the model in \u{1F917} Transformers. That being said, you don\u2019t have to spend too much time on the theoretical aspects, but rather focus on the practical ones, namely:`),Zp.forEach(o),Xh=f(e),x=s(e,"UL",{});var Pe=i(x);De=s(Pe,"LI",{});var _a=i(De);Cg=n(_a,"What type of model is "),ti=s(_a,"EM",{});var $4=i(ti);qg=n($4,"brand_new_bert"),$4.forEach(o),Sg=n(_a,`? BERT-like encoder-only model? GPT2-like decoder-only model? BART-like encoder-decoder model? Look at the `),wn=s(_a,"A",{href:!0});var P4=i(wn);Rg=n(P4,"model_summary"),P4.forEach(o),Dg=n(_a," if you\u2019re not familiar with the differences between those."),_a.forEach(o),zg=f(Pe),ze=s(Pe,"LI",{});var Ea=i(ze);Fg=n(Ea,"What are the applications of "),oi=s(Ea,"EM",{});var I4=i(oi);Hg=n(I4,"brand_new_bert"),I4.forEach(o),Wg=n(Ea,"? Text classification? Text generation? Seq2Seq tasks, "),ri=s(Ea,"EM",{});var A4=i(ri);Gg=n(A4,"e.g.,"),A4.forEach(o),Ug=n(Ea,` summarization?`),Ea.forEach(o),Yg=f(Pe),ni=s(Pe,"LI",{});var N4=i(ni);Jg=n(N4,"What is the novel feature of the model making it different from BERT/GPT-2/BART?"),N4.forEach(o),Xg=f(Pe),Fe=s(Pe,"LI",{});var ka=i(Fe);Zg=n(ka,"Which of the already existing "),$o=s(ka,"A",{href:!0,rel:!0});var M4=i($o);Kg=n(M4,"\u{1F917} Transformers models"),M4.forEach(o),Qg=n(ka,` is most similar to `),ai=s(ka,"EM",{});var B4=i(ai);Vg=n(B4,"brand_new_bert"),B4.forEach(o),ew=n(ka,"?"),ka.forEach(o),tw=f(Pe),si=s(Pe,"LI",{});var L4=i(si);ow=n(L4,`What type of tokenizer is used? A sentencepiece tokenizer? Word piece tokenizer? Is it the same tokenizer as used for BERT or BART?`),L4.forEach(o),Pe.forEach(o),Zh=f(e),vn=s(e,"P",{});var j4=i(vn);rw=n(j4,`After you feel like you have gotten a good overview of the architecture of the model, you might want to write to the Hugging Face team with any questions you might have. This might include questions regarding the model\u2019s architecture, its attention layer, etc. 
We will be more than happy to help you.`),j4.forEach(o),Kh=f(e),He=s(e,"H3",{class:!0});var Kp=i(He);ft=s(Kp,"A",{id:!0,class:!0,href:!0});var O4=i(ft);ii=s(O4,"SPAN",{});var x4=i(ii);u(Po.$$.fragment,x4),x4.forEach(o),O4.forEach(o),nw=f(Kp),li=s(Kp,"SPAN",{});var C4=i(li);aw=n(C4,"2. Next prepare your environment"),C4.forEach(o),Kp.forEach(o),Qh=f(e),mt=s(e,"OL",{});var Qp=i(mt);di=s(Qp,"LI",{});var q4=i(di);Io=s(q4,"P",{});var Vp=i(Io);sw=n(Vp,"Fork the "),Ao=s(Vp,"A",{href:!0,rel:!0});var S4=i(Ao);iw=n(S4,"repository"),S4.forEach(o),lw=n(Vp,` by clicking on the \u2018Fork\u2019 button on the repository\u2019s page. This creates a copy of the code under your GitHub user account.`),Vp.forEach(o),q4.forEach(o),dw=f(Qp),hi=s(Qp,"LI",{});var R4=i(hi);No=s(R4,"P",{});var eu=i(No);hw=n(eu,"Clone your "),fi=s(eu,"CODE",{});var D4=i(fi);fw=n(D4,"transformers"),D4.forEach(o),mw=n(eu," fork to your local disk, and add the base repository as a remote:"),eu.forEach(o),R4.forEach(o),Qp.forEach(o),Vh=f(e),u(Mo.$$.fragment,e),ef=f(e),Bo=s(e,"OL",{start:!0});var z4=i(Bo);mi=s(z4,"LI",{});var F4=i(mi);pw=n(F4,"Set up a development environment, for instance by running the following command:"),F4.forEach(o),z4.forEach(o),tf=f(e),u(Lo.$$.fragment,e),of=f(e),bn=s(e,"P",{});var H4=i(bn);uw=n(H4,"and return to the parent directory"),H4.forEach(o),rf=f(e),u(jo.$$.fragment,e),nf=f(e),Oo=s(e,"OL",{start:!0});var W4=i(Oo);We=s(W4,"LI",{});var Ta=i(We);cw=n(Ta,"We recommend adding the PyTorch version of "),pi=s(Ta,"EM",{});var G4=i(pi);yw=n(G4,"brand_new_bert"),G4.forEach(o),gw=n(Ta,` to Transformers. To install PyTorch, please follow the instructions on `),xo=s(Ta,"A",{href:!0,rel:!0});var U4=i(xo);ww=n(U4,"https://pytorch.org/get-started/locally/"),U4.forEach(o),vw=n(Ta,"."),Ta.forEach(o),W4.forEach(o),af=f(e),Co=s(e,"P",{});var r3=i(Co);ui=s(r3,"STRONG",{});var Y4=i(ui);bw=n(Y4,"Note:"),Y4.forEach(o),_w=n(r3," You don\u2019t need to have CUDA installed. Making the new model work on CPU is sufficient."),r3.forEach(o),sf=f(e),qo=s(e,"OL",{start:!0});var J4=i(qo);So=s(J4,"LI",{});var tu=i(So);Ew=n(tu,"To port "),ci=s(tu,"EM",{});var X4=i(ci);kw=n(X4,"brand_new_bert"),X4.forEach(o),Tw=n(tu,", you will also need access to its original repository:"),tu.forEach(o),J4.forEach(o),lf=f(e),u(Ro.$$.fragment,e),df=f(e),pt=s(e,"P",{});var ou=i(pt);$w=n(ou,"Now you have set up a development environment to port "),yi=s(ou,"EM",{});var Z4=i(yi);Pw=n(Z4,"brand_new_bert"),Z4.forEach(o),Iw=n(ou," to \u{1F917} Transformers."),ou.forEach(o),hf=f(e),Ge=s(e,"H3",{class:!0});var ru=i(Ge);ut=s(ru,"A",{id:!0,class:!0,href:!0});var K4=i(ut);gi=s(K4,"SPAN",{});var Q4=i(gi);u(Do.$$.fragment,Q4),Q4.forEach(o),K4.forEach(o),Aw=f(ru),wi=s(ru,"SPAN",{});var V4=i(wi);Nw=n(V4,"3.-4. Run a pretrained checkpoint using the original repository"),V4.forEach(o),ru.forEach(o),ff=f(e),N=s(e,"P",{});var ae=i(N);Mw=n(ae,"At first, you will work on the original "),vi=s(ae,"EM",{});var e5=i(vi);Bw=n(e5,"brand_new_bert"),e5.forEach(o),Lw=n(ae,` repository. Often, the original implementation is very \u201Cresearchy\u201D. Meaning that documentation might be lacking and the code can be difficult to understand. But this should be exactly your motivation to reimplement `),bi=s(ae,"EM",{});var t5=i(bi);jw=n(t5,"brand_new_bert"),t5.forEach(o),Ow=n(ae,". 
At Hugging Face, one of our main goals is to "),_i=s(ae,"EM",{});var o5=i(_i);xw=n(o5,`make people stand on the shoulders of giants`),o5.forEach(o),Cw=n(ae,` which translates here very well into taking a working model and rewriting it to make it as `),Ei=s(ae,"STRONG",{});var r5=i(Ei);qw=n(r5,"accessible, user-friendly, and beautiful"),r5.forEach(o),Sw=n(ae,` as possible. This is the number-one motivation to re-implement models into \u{1F917} Transformers - trying to make complex new NLP technology accessible to `),ki=s(ae,"STRONG",{});var n5=i(ki);Rw=n(n5,"everybody"),n5.forEach(o),Dw=n(ae,"."),ae.forEach(o),mf=f(e),_n=s(e,"P",{});var a5=i(_n);zw=n(a5,"You should start thereby by diving into the original repository."),a5.forEach(o),pf=f(e),ct=s(e,"P",{});var nu=i(ct);Fw=n(nu,"Successfully running the official pretrained model in the original repository is often "),Ti=s(nu,"STRONG",{});var s5=i(Ti);Hw=n(s5,"the most difficult"),s5.forEach(o),Ww=n(nu,` step. From our experience, it is very important to spend some time getting familiar with the original code-base. You need to figure out the following:`),nu.forEach(o),uf=f(e),M=s(e,"UL",{});var se=i(M);$i=s(se,"LI",{});var i5=i($i);Gw=n(i5,"Where to find the pretrained weights?"),i5.forEach(o),Uw=f(se),Pi=s(se,"LI",{});var l5=i(Pi);Yw=n(l5,"How to load the pretrained weights into the corresponding model?"),l5.forEach(o),Jw=f(se),Ii=s(se,"LI",{});var d5=i(Ii);Xw=n(d5,"How to run the tokenizer independently from the model?"),d5.forEach(o),Zw=f(se),Ai=s(se,"LI",{});var h5=i(Ai);Kw=n(h5,`Trace one forward pass so that you know which classes and functions are required for a simple forward pass. Usually, you only have to reimplement those functions.`),h5.forEach(o),Qw=f(se),F=s(se,"LI",{});var Ie=i(F);Vw=n(Ie,`Be able to locate the important components of the model: Where is the model\u2019s class? Are there model sub-classes, `),Ni=s(Ie,"EM",{});var f5=i(Ni);ev=n(f5,"e.g."),f5.forEach(o),tv=n(Ie,` EncoderModel, DecoderModel? Where is the self-attention layer? Are there multiple different attention layers, `),Mi=s(Ie,"EM",{});var m5=i(Mi);ov=n(m5,"e.g."),m5.forEach(o),rv=f(Ie),Bi=s(Ie,"EM",{});var p5=i(Bi);nv=n(p5,"self-attention"),p5.forEach(o),av=n(Ie,", "),Li=s(Ie,"EM",{});var u5=i(Li);sv=n(u5,"cross-attention"),u5.forEach(o),iv=n(Ie,"\u2026?"),Ie.forEach(o),lv=f(se),Ue=s(se,"LI",{});var $a=i(Ue);dv=n($a,"How can you debug the model in the original environment of the repo? Do you have to add "),ji=s($a,"EM",{});var c5=i(ji);hv=n(c5,"print"),c5.forEach(o),fv=n($a,` statements, can you work with an interactive debugger like `),Oi=s($a,"EM",{});var y5=i(Oi);mv=n(y5,"ipdb"),y5.forEach(o),pv=n($a,", or should you use an efficient IDE to debug the model, like PyCharm?"),$a.forEach(o),se.forEach(o),cf=f(e),yt=s(e,"P",{});var au=i(yt);uv=n(au,"It is very important that before you start the porting process, that you can "),xi=s(au,"STRONG",{});var g5=i(xi);cv=n(g5,"efficiently"),g5.forEach(o),yv=n(au,` debug code in the original repository! Also, remember that you are working with an open-source library, so do not hesitate to open an issue, or even a pull request in the original repository. The maintainers of this repository are most likely very happy about someone looking into their code!`),au.forEach(o),yf=f(e),En=s(e,"P",{});var w5=i(En);gv=n(w5,`At this point, it is really up to you which debugging environment and strategy you prefer to use to debug the original model. 
We strongly advise against setting up a costly GPU environment, but simply work on a CPU both when starting to dive into the original repository and also when starting to write the \u{1F917} Transformers implementation of the model. Only at the very end, when the model has already been successfully ported to \u{1F917} Transformers, one should verify that the model also works as expected on GPU.`),w5.forEach(o),gf=f(e),kn=s(e,"P",{});var v5=i(kn);wv=n(v5,"In general, there are two possible debugging environments for running the original model"),v5.forEach(o),wf=f(e),gt=s(e,"UL",{});var su=i(gt);zo=s(su,"LI",{});var iu=i(zo);Fo=s(iu,"A",{href:!0,rel:!0});var b5=i(Fo);vv=n(b5,"Jupyter notebooks"),b5.forEach(o),bv=n(iu," / "),Ho=s(iu,"A",{href:!0,rel:!0});var _5=i(Ho);_v=n(_5,"google colab"),_5.forEach(o),iu.forEach(o),Ev=f(su),Ci=s(su,"LI",{});var E5=i(Ci);kv=n(E5,"Local python scripts."),E5.forEach(o),su.forEach(o),vf=f(e),Tn=s(e,"P",{});var k5=i(Tn);Tv=n(k5,`Jupyter notebooks have the advantage that they allow for cell-by-cell execution which can be helpful to better split logical components from one another and to have faster debugging cycles as intermediate results can be stored. Also, notebooks are often easier to share with other contributors, which might be very helpful if you want to ask the Hugging Face team for help. If you are familiar with Jupiter notebooks, we strongly recommend you to work with them.`),k5.forEach(o),bf=f(e),wt=s(e,"P",{});var lu=i(wt);$v=n(lu,`The obvious disadvantage of Jupyter notebooks is that if you are not used to working with them you will have to spend some time adjusting to the new programming environment and that you might not be able to use your known debugging tools anymore, like `),qi=s(lu,"CODE",{});var T5=i(qi);Pv=n(T5,"ipdb"),T5.forEach(o),Iv=n(lu,"."),lu.forEach(o),_f=f(e),vt=s(e,"P",{});var du=i(vt);Av=n(du,"For each code-base, a good first step is always to load a "),Si=s(du,"STRONG",{});var $5=i(Si);Nv=n($5,"small"),$5.forEach(o),Mv=n(du,` pretrained checkpoint and to be able to reproduce a single forward pass using a dummy integer vector of input IDs as an input. Such a script could look like this (in pseudocode):`),du.forEach(o),Ef=f(e),u(Wo.$$.fragment,e),kf=f(e),$n=s(e,"P",{});var P5=i($n);Bv=n(P5,"Next, regarding the debugging strategy, there are generally a few from which to choose from:"),P5.forEach(o),Tf=f(e),bt=s(e,"UL",{});var hu=i(bt);Ri=s(hu,"LI",{});var I5=i(Ri);Lv=n(I5,`Decompose the original model into many small testable components and run a forward pass on each of those for verification`),I5.forEach(o),jv=f(hu),Ye=s(hu,"LI",{});var Pa=i(Ye);Ov=n(Pa,"Decompose the original model only into the original "),Di=s(Pa,"EM",{});var A5=i(Di);xv=n(A5,"tokenizer"),A5.forEach(o),Cv=n(Pa," and the original "),zi=s(Pa,"EM",{});var N5=i(zi);qv=n(N5,"model"),N5.forEach(o),Sv=n(Pa,`, run a forward pass on those, and use intermediate print statements or breakpoints for verification`),Pa.forEach(o),hu.forEach(o),$f=f(e),Pn=s(e,"P",{});var M5=i(Pn);Rv=n(M5,`Again, it is up to you which strategy to choose. Often, one or the other is advantageous depending on the original code base.`),M5.forEach(o),Pf=f(e),_t=s(e,"P",{});var fu=i(_t);Dv=n(fu,"If the original code-base allows you to decompose the model into smaller sub-components, "),Fi=s(fu,"EM",{});var B5=i(Fi);zv=n(B5,"e.g."),B5.forEach(o),Fv=n(fu,` if the original code-base can easily be run in eager mode, it is usually worth the effort to do so. 
There are some important advantages to taking the more difficult road in the beginning:`),fu.forEach(o),If=f(e),U=s(e,"UL",{});var Yt=i(U);Hi=s(Yt,"LI",{});var L5=i(Hi);Hv=n(L5,`at a later stage when comparing the original model to the Hugging Face implementation, you can verify automatically for each component individually that the corresponding component of the \u{1F917} Transformers implementation matches instead of relying on visual comparison via print statements`),L5.forEach(o),Wv=f(Yt),Wi=s(Yt,"LI",{});var j5=i(Wi);Gv=n(j5,`it can give you some rope to decompose the big problem of porting a model into smaller problems of just porting individual components and thus structure your work better`),j5.forEach(o),Uv=f(Yt),Gi=s(Yt,"LI",{});var O5=i(Gi);Yv=n(O5,`separating the model into logical meaningful components will help you to get a better overview of the model\u2019s design and thus to better understand the model`),O5.forEach(o),Jv=f(Yt),Ui=s(Yt,"LI",{});var x5=i(Ui);Xv=n(x5,`at a later stage those component-by-component tests help you to ensure that no regression occurs as you continue changing your code`),x5.forEach(o),Yt.forEach(o),Af=f(e),Go=s(e,"P",{});var n3=i(Go);Uo=s(n3,"A",{href:!0,rel:!0});var C5=i(Uo);Zv=n(C5,"Lysandre\u2019s"),C5.forEach(o),Kv=n(n3,` integration checks for ELECTRA gives a nice example of how this can be done.`),n3.forEach(o),Nf=f(e),Et=s(e,"P",{});var mu=i(Et);Qv=n(mu,`However, if the original code-base is very complex or only allows intermediate components to be run in a compiled mode, it might be too time-consuming or even impossible to separate the model into smaller testable sub-components. A good example is `),Yo=s(mu,"A",{href:!0,rel:!0});var q5=i(Yo);Vv=n(q5,"T5\u2019s MeshTensorFlow"),q5.forEach(o),eb=n(mu,` library which is very complex and does not offer a simple way to decompose the model into its sub-components. 
For such libraries, one often relies on verifying print statements.`),mu.forEach(o),Mf=f(e),In=s(e,"P",{});var S5=i(In);tb=n(S5,`No matter which strategy you choose, the recommended procedure is often the same in that you should start to debug the starting layers first and the ending layers last.`),S5.forEach(o),Bf=f(e),An=s(e,"P",{});var R5=i(An);ob=n(R5,`It is recommended that you retrieve the output, either by print statements or sub-component functions, of the following layers in the following order:`),R5.forEach(o),Lf=f(e),B=s(e,"OL",{});var ie=i(B);Yi=s(ie,"LI",{});var D5=i(Yi);rb=n(D5,"Retrieve the input IDs passed to the model"),D5.forEach(o),nb=f(ie),Ji=s(ie,"LI",{});var z5=i(Ji);ab=n(z5,"Retrieve the word embeddings"),z5.forEach(o),sb=f(ie),Xi=s(ie,"LI",{});var F5=i(Xi);ib=n(F5,"Retrieve the input of the first Transformer layer"),F5.forEach(o),lb=f(ie),Zi=s(ie,"LI",{});var H5=i(Zi);db=n(H5,"Retrieve the output of the first Transformer layer"),H5.forEach(o),hb=f(ie),Ki=s(ie,"LI",{});var W5=i(Ki);fb=n(W5,"Retrieve the output of the following n - 1 Transformer layers"),W5.forEach(o),mb=f(ie),Qi=s(ie,"LI",{});var G5=i(Qi);pb=n(G5,"Retrieve the output of the whole BrandNewBert Model"),G5.forEach(o),ie.forEach(o),jf=f(e),Je=s(e,"P",{});var gh=i(Je);ub=n(gh,"Input IDs should thereby consists of an array of integers, "),Vi=s(gh,"EM",{});var U5=i(Vi);cb=n(U5,"e.g."),U5.forEach(o),yb=f(gh),el=s(gh,"CODE",{});var Y5=i(el);gb=n(Y5,"input_ids = [0, 4, 4, 3, 2, 4, 1, 7, 19]"),Y5.forEach(o),gh.forEach(o),Of=f(e),Nn=s(e,"P",{});var J5=i(Nn);wb=n(J5,"The outputs of the following layers often consist of multi-dimensional float arrays and can look like this:"),J5.forEach(o),xf=f(e),u(Jo.$$.fragment,e),Cf=f(e),ge=s(e,"P",{});var Ia=i(ge);vb=n(Ia,`We expect that every model added to \u{1F917} Transformers passes a couple of integration tests, meaning that the original model and the reimplemented version in \u{1F917} Transformers have to give the exact same output up to a precision of 0.001! Since it is normal that the exact same model written in different libraries can give a slightly different output depending on the library framework, we accept an error tolerance of 1e-3 (0.001). It is not enough if the model gives nearly the same output, they have to be the almost identical. Therefore, you will certainly compare the intermediate outputs of the \u{1F917} Transformers version multiple times against the intermediate outputs of the original implementation of `),tl=s(Ia,"EM",{});var X5=i(tl);bb=n(X5,"brand_new_bert"),X5.forEach(o),_b=n(Ia," in which case an "),ol=s(Ia,"STRONG",{});var Z5=i(ol);Eb=n(Z5,"efficient"),Z5.forEach(o),kb=n(Ia,` debugging environment of the original repository is absolutely important. Here is some advice is to make your debugging environment as efficient as possible.`),Ia.forEach(o),qf=f(e),C=s(e,"UL",{});var Ae=i(C);H=s(Ae,"LI",{});var Ne=i(H);Tb=n(Ne,`Find the best way of debugging intermediate results. Is the original repository written in PyTorch? Then you should probably take the time to write a longer script that decomposes the original model into smaller sub-components to retrieve intermediate values. Is the original repository written in Tensorflow 1? Then you might have to rely on TensorFlow print operations like `),Xo=s(Ne,"A",{href:!0,rel:!0});var K5=i(Xo);$b=n(K5,"tf.print"),K5.forEach(o),Pb=n(Ne,` to output intermediate values. Is the original repository written in Jax? 
Then make sure that the model is `),rl=s(Ne,"STRONG",{});var Q5=i(rl);Ib=n(Q5,"not jitted"),Q5.forEach(o),Ab=n(Ne,` when running the forward pass, `),nl=s(Ne,"EM",{});var V5=i(nl);Nb=n(V5,"e.g."),V5.forEach(o),Mb=n(Ne," check-out "),Zo=s(Ne,"A",{href:!0,rel:!0});var e6=i(Zo);Bb=n(e6,"this link"),e6.forEach(o),Lb=n(Ne,"."),Ne.forEach(o),jb=f(Ae),al=s(Ae,"LI",{});var t6=i(al);Ob=n(t6,`Use the smallest pretrained checkpoint you can find. The smaller the checkpoint, the faster your debug cycle becomes. It is not efficient if your pretrained model is so big that your forward pass takes more than 10 seconds. In case only very large checkpoints are available, it might make more sense to create a dummy model in the new environment with randomly initialized weights and save those weights for comparison with the \u{1F917} Transformers version of your model`),t6.forEach(o),xb=f(Ae),T=s(Ae,"LI",{});var $=i(T);Cb=n($,`Make sure you are using the easiest way of calling a forward pass in the original repository. Ideally, you want to find the function in the original repository that `),sl=s($,"STRONG",{});var o6=i(sl);qb=n(o6,"only"),o6.forEach(o),Sb=n($," calls a single forward pass, "),il=s($,"EM",{});var r6=i(il);Rb=n(r6,"i.e."),r6.forEach(o),Db=n($,` that is often called `),ll=s($,"CODE",{});var n6=i(ll);zb=n(n6,"predict"),n6.forEach(o),Fb=n($,", "),dl=s($,"CODE",{});var a6=i(dl);Hb=n(a6,"evaluate"),a6.forEach(o),Wb=n($,", "),hl=s($,"CODE",{});var s6=i(hl);Gb=n(s6,"forward"),s6.forEach(o),Ub=n($," or "),fl=s($,"CODE",{});var i6=i(fl);Yb=n(i6,"__call__"),i6.forEach(o),Jb=n($,". You don\u2019t want to debug a function that calls "),ml=s($,"CODE",{});var l6=i(ml);Xb=n(l6,"forward"),l6.forEach(o),Zb=n($,` multiple times, `),pl=s($,"EM",{});var d6=i(pl);Kb=n(d6,"e.g."),d6.forEach(o),Qb=n($," to generate text, like "),ul=s($,"CODE",{});var h6=i(ul);Vb=n(h6,"autoregressive_sample"),h6.forEach(o),e_=n($,", "),cl=s($,"CODE",{});var f6=i(cl);t_=n(f6,"generate"),f6.forEach(o),o_=n($,"."),$.forEach(o),r_=f(Ae),Ko=s(Ae,"LI",{});var pu=i(Ko);n_=n(pu,"Try to separate the tokenization from the model\u2019s "),yl=s(pu,"EM",{});var m6=i(yl);a_=n(m6,"forward"),m6.forEach(o),s_=n(pu,` pass. If the original repository shows examples where you have to input a string, then try to find out where in the forward call the string input is changed to input ids and start from this point. This might mean that you have to possibly write a small script yourself or change the original code so that you can directly input the ids instead of an input string.`),pu.forEach(o),i_=f(Ae),me=s(Ae,"LI",{});var Jt=i(me);l_=n(Jt,"Make sure that the model in your debugging setup is "),gl=s(Jt,"STRONG",{});var p6=i(gl);d_=n(p6,"not"),p6.forEach(o),h_=n(Jt,` in training mode, which often causes the model to yield random outputs due to multiple dropout layers in the model. Make sure that the forward pass in your debugging environment is `),wl=s(Jt,"STRONG",{});var u6=i(wl);f_=n(u6,"deterministic"),u6.forEach(o),m_=n(Jt," so that the dropout layers are not used. 
Or use "),vl=s(Jt,"EM",{});var c6=i(vl);p_=n(c6,"transformers.file_utils.set_seed"),c6.forEach(o),u_=n(Jt,` if the old and new implementations are in the same framework.`),Jt.forEach(o),Ae.forEach(o),Sf=f(e),kt=s(e,"P",{});var uu=i(kt);c_=n(uu,"The following section gives you more specific details/tips on how you can do this for "),bl=s(uu,"EM",{});var y6=i(bl);y_=n(y6,"brand_new_bert"),y6.forEach(o),g_=n(uu,"."),uu.forEach(o),Rf=f(e),Xe=s(e,"H3",{class:!0});var cu=i(Xe);Tt=s(cu,"A",{id:!0,class:!0,href:!0});var g6=i(Tt);_l=s(g6,"SPAN",{});var w6=i(_l);u(Qo.$$.fragment,w6),w6.forEach(o),g6.forEach(o),w_=f(cu),El=s(cu,"SPAN",{});var v6=i(El);v_=n(v6,"5.-14. Port BrandNewBert to \u{1F917} Transformers"),v6.forEach(o),cu.forEach(o),Df=f(e),Mn=s(e,"P",{});var b6=i(Mn);b_=n(b6,"Next, you can finally start adding new code to \u{1F917} Transformers. Go into the clone of your \u{1F917} Transformers\u2019 fork:"),b6.forEach(o),zf=f(e),u(Vo.$$.fragment,e),Ff=f(e),$t=s(e,"P",{});var yu=i($t);__=n(yu,`In the special case that you are adding a model whose architecture exactly matches the model architecture of an existing model you only have to add a conversion script as described in `),Bn=s(yu,"A",{href:!0});var _6=i(Bn);E_=n(_6,"this section"),_6.forEach(o),k_=n(yu,`. In this case, you can just re-use the whole model architecture of the already existing model.`),yu.forEach(o),Hf=f(e),Ln=s(e,"P",{});var E6=i(Ln);T_=n(E6,"Otherwise, let\u2019s start generating a new model with the amazing Cookiecutter!"),E6.forEach(o),Wf=f(e),jn=s(e,"P",{});var k6=i(jn);kl=s(k6,"STRONG",{});var T6=i(kl);$_=n(T6,"Use the Cookiecutter to automatically generate the model\u2019s code"),T6.forEach(o),k6.forEach(o),Gf=f(e),q=s(e,"P",{});var Me=i(q);P_=n(Me,"To begin with head over to the "),er=s(Me,"A",{href:!0,rel:!0});var $6=i(er);I_=n($6,"\u{1F917} Transformers templates"),$6.forEach(o),A_=n(Me,` to make use of our `),Tl=s(Me,"CODE",{});var P6=i(Tl);N_=n(P6,"cookiecutter"),P6.forEach(o),M_=n(Me,` implementation to automatically generate all the relevant files for your model. Again, we recommend only adding the PyTorch version of the model at first. 
Make sure you follow the instructions of the `),$l=s(Me,"CODE",{});var I6=i($l);B_=n(I6,"README.md"),I6.forEach(o),L_=n(Me,` on the `),tr=s(Me,"A",{href:!0,rel:!0});var A6=i(tr);j_=n(A6,"\u{1F917} Transformers templates"),A6.forEach(o),O_=n(Me,` carefully.`),Me.forEach(o),Uf=f(e),On=s(e,"P",{});var N6=i(On);Pl=s(N6,"STRONG",{});var M6=i(Pl);x_=n(M6,"Open a Pull Request on the main huggingface/transformers repo"),M6.forEach(o),N6.forEach(o),Yf=f(e),we=s(e,"P",{});var Aa=i(we);C_=n(Aa,`Before starting to adapt the automatically generated code, now is the time to open a \u201CWork in progress (WIP)\u201D pull request, `),Il=s(Aa,"EM",{});var B6=i(Il);q_=n(B6,"e.g."),B6.forEach(o),S_=n(Aa," \u201C[WIP] Add "),Al=s(Aa,"EM",{});var L6=i(Al);R_=n(L6,"brand_new_bert"),L6.forEach(o),D_=n(Aa,`\u201D, in \u{1F917} Transformers so that you and the Hugging Face team can work side-by-side on integrating the model into \u{1F917} Transformers.`),Aa.forEach(o),Jf=f(e),xn=s(e,"P",{});var j6=i(xn);z_=n(j6,"You should do the following:"),j6.forEach(o),Xf=f(e),Cn=s(e,"OL",{});var O6=i(Cn);Nl=s(O6,"LI",{});var x6=i(Nl);F_=n(x6,"Create a branch with a descriptive name from your master branch"),x6.forEach(o),O6.forEach(o),Zf=f(e),u(or.$$.fragment,e),Kf=f(e),rr=s(e,"OL",{start:!0});var C6=i(rr);Ml=s(C6,"LI",{});var q6=i(Ml);H_=n(q6,"Commit the automatically generated code:"),q6.forEach(o),C6.forEach(o),Qf=f(e),u(nr.$$.fragment,e),Vf=f(e),ar=s(e,"OL",{start:!0});var S6=i(ar);Bl=s(S6,"LI",{});var R6=i(Bl);W_=n(R6,"Fetch and rebase to current master"),R6.forEach(o),S6.forEach(o),em=f(e),u(sr.$$.fragment,e),tm=f(e),ir=s(e,"OL",{start:!0});var D6=i(ir);Ll=s(D6,"LI",{});var z6=i(Ll);G_=n(z6,"Push the changes to your account using:"),z6.forEach(o),D6.forEach(o),om=f(e),u(lr.$$.fragment,e),rm=f(e),Ze=s(e,"OL",{start:!0});var gu=i(Ze);jl=s(gu,"LI",{});var F6=i(jl);Ol=s(F6,"P",{});var H6=i(Ol);U_=n(H6,`Once you are satisfied, go to the webpage of your fork on GitHub. Click on \u201CPull request\u201D. Make sure to add the GitHub handle of some members of the Hugging Face team as reviewers, so that the Hugging Face team gets notified for future changes.`),H6.forEach(o),F6.forEach(o),Y_=f(gu),xl=s(gu,"LI",{});var W6=i(xl);Cl=s(W6,"P",{});var G6=i(Cl);J_=n(G6,"Change the PR into a draft by clicking on \u201CConvert to draft\u201D on the right of the GitHub pull request web page."),G6.forEach(o),W6.forEach(o),gu.forEach(o),nm=f(e),qn=s(e,"P",{});var U6=i(qn);X_=n(U6,`In the following, whenever you have done some progress, don\u2019t forget to commit your work and push it to your account so that it shows in the pull request. Additionally, you should make sure to update your work with the current master from time to time by doing:`),U6.forEach(o),am=f(e),u(dr.$$.fragment,e),sm=f(e),Sn=s(e,"P",{});var Y6=i(Sn);Z_=n(Y6,`In general, all questions you might have regarding the model or your implementation should be asked in your PR and discussed/solved in the PR. This way, the Hugging Face team will always be notified when you are committing new code or if you have a question. It is often very helpful to point the Hugging Face team to your added code so that the Hugging Face team can efficiently understand your problem or question.`),Y6.forEach(o),im=f(e),Rn=s(e,"P",{});var J6=i(Rn);K_=n(J6,`To do so, you can go to the \u201CFiles changed\u201D tab where you see all of your changes, go to a line regarding which you want to ask a question, and click on the \u201C+\u201D symbol to add a comment. 
Whenever a question or problem has been solved, you can click on the \u201CResolve\u201D button of the created comment.`),J6.forEach(o),lm=f(e),Dn=s(e,"P",{});var X6=i(Dn);Q_=n(X6,`In the same way, the Hugging Face team will open comments when reviewing your code. We recommend asking most questions on GitHub on your PR. For some very general questions that are not very useful for the public, feel free to ping the Hugging Face team by Slack or email.`),X6.forEach(o),dm=f(e),zn=s(e,"P",{});var Z6=i(zn);ql=s(Z6,"STRONG",{});var K6=i(ql);V_=n(K6,"5. Adapt the generated models code for brand_new_bert"),K6.forEach(o),Z6.forEach(o),hm=f(e),ve=s(e,"P",{});var Na=i(ve);e1=n(Na,`At first, we will focus only on the model itself and not care about the tokenizer. All the relevant code should be found in the generated files `),Sl=s(Na,"CODE",{});var Q6=i(Sl);t1=n(Q6,"src/transformers/models/brand_new_bert/modeling_brand_new_bert.py"),Q6.forEach(o),o1=n(Na,` and `),Rl=s(Na,"CODE",{});var V6=i(Rl);r1=n(V6,"src/transformers/models/brand_new_bert/configuration_brand_new_bert.py"),V6.forEach(o),n1=n(Na,"."),Na.forEach(o),fm=f(e),Y=s(e,"P",{});var Xt=i(Y);a1=n(Xt,`Now you can finally start coding :). The generated code in `),Dl=s(Xt,"CODE",{});var e7=i(Dl);s1=n(e7,"src/transformers/models/brand_new_bert/modeling_brand_new_bert.py"),e7.forEach(o),i1=n(Xt,` will either have the same architecture as BERT if it\u2019s an encoder-only model or BART if it\u2019s an encoder-decoder model. At this point, you should remind yourself what you\u2019ve learned in the beginning about the theoretical aspects of the model: `),zl=s(Xt,"EM",{});var t7=i(zl);l1=n(t7,`How is the model different from BERT or BART?`),t7.forEach(o),d1=n(Xt,"\u201D. Implement those changes which often means to change the "),Fl=s(Xt,"EM",{});var o7=i(Fl);h1=n(o7,"self-attention"),o7.forEach(o),f1=n(Xt,` layer, the order of the normalization layer, etc\u2026 Again, it is often useful to look at the similar architecture of already existing models in Transformers to get a better feeling of how your model should be implemented.`),Xt.forEach(o),mm=f(e),j=s(e,"P",{});var pe=i(j);Hl=s(pe,"STRONG",{});var r7=i(Hl);m1=n(r7,"Note"),r7.forEach(o),p1=n(pe,` that at this point, you don\u2019t have to be very sure that your code is fully correct or clean. Rather, it is advised to add a first `),Wl=s(pe,"EM",{});var n7=i(Wl);u1=n(n7,"unclean"),n7.forEach(o),c1=n(pe,`, copy-pasted version of the original code to `),Gl=s(pe,"CODE",{});var a7=i(Gl);y1=n(a7,"src/transformers/models/brand_new_bert/modeling_brand_new_bert.py"),a7.forEach(o),g1=n(pe,` until you feel like all the necessary code is added. From our experience, it is much more efficient to quickly add a first version of the required code and improve/correct the code iteratively with the conversion script as described in the next section. 
The only thing that has to work at this point is that you can instantiate the \u{1F917} Transformers implementation of `),Ul=s(pe,"EM",{});var s7=i(Ul);w1=n(s7,"brand_new_bert"),s7.forEach(o),v1=n(pe,", "),Yl=s(pe,"EM",{});var i7=i(Yl);b1=n(i7,"i.e."),i7.forEach(o),_1=n(pe,` the following command should work:`),pe.forEach(o),pm=f(e),u(hr.$$.fragment,e),um=f(e),be=s(e,"P",{});var Ma=i(be);E1=n(Ma,"The above command will create a model according to the default parameters as defined in "),Jl=s(Ma,"CODE",{});var l7=i(Jl);k1=n(l7,"BrandNewBertConfig()"),l7.forEach(o),T1=n(Ma,` with random weights, thus making sure that the `),Xl=s(Ma,"CODE",{});var d7=i(Xl);$1=n(d7,"init()"),d7.forEach(o),P1=n(Ma," methods of all components works."),Ma.forEach(o),cm=f(e),Fn=s(e,"P",{});var h7=i(Fn);Zl=s(h7,"STRONG",{});var f7=i(Zl);I1=n(f7,"6. Write a conversion script"),f7.forEach(o),h7.forEach(o),ym=f(e),J=s(e,"P",{});var Zt=i(J);A1=n(Zt,"Next, you should write a conversion script that lets you convert the checkpoint you used to debug "),Kl=s(Zt,"EM",{});var m7=i(Kl);N1=n(m7,"brand_new_bert"),m7.forEach(o),M1=n(Zt,` in the original repository to a checkpoint compatible with your just created \u{1F917} Transformers implementation of `),Ql=s(Zt,"EM",{});var p7=i(Ql);B1=n(p7,"brand_new_bert"),p7.forEach(o),L1=n(Zt,`. It is not advised to write the conversion script from scratch, but rather to look through already existing conversion scripts in \u{1F917} Transformers for one that has been used to convert a similar model that was written in the same framework as `),Vl=s(Zt,"EM",{});var u7=i(Vl);j1=n(u7,"brand_new_bert"),u7.forEach(o),O1=n(Zt,`. Usually, it is enough to copy an already existing conversion script and slightly adapt it for your use case. Don\u2019t hesitate to ask the Hugging Face team to point you to a similar already existing conversion script for your model.`),Zt.forEach(o),gm=f(e),Pt=s(e,"UL",{});var wu=i(Pt);Hn=s(wu,"LI",{});var a3=i(Hn);x1=n(a3,"If you are porting a model from TensorFlow to PyTorch, a good starting point might be BERT\u2019s conversion script "),fr=s(a3,"A",{href:!0,rel:!0});var c7=i(fr);C1=n(c7,"here"),c7.forEach(o),a3.forEach(o),q1=f(wu),Wn=s(wu,"LI",{});var s3=i(Wn);S1=n(s3,"If you are porting a model from PyTorch to PyTorch, a good starting point might be BART\u2019s conversion script "),mr=s(s3,"A",{href:!0,rel:!0});var y7=i(mr);R1=n(y7,"here"),y7.forEach(o),s3.forEach(o),wu.forEach(o),wm=f(e),It=s(e,"P",{});var vu=i(It);D1=n(vu,`In the following, we\u2019ll quickly explain how PyTorch models store layer weights and define layer names. In PyTorch, the name of a layer is defined by the name of the class attribute you give the layer. Let\u2019s define a dummy model in PyTorch, called `),ed=s(vu,"CODE",{});var g7=i(ed);z1=n(g7,"SimpleModel"),g7.forEach(o),F1=n(vu," as follows:"),vu.forEach(o),vm=f(e),u(pr.$$.fragment,e),bm=f(e),X=s(e,"P",{});var Kt=i(X);H1=n(Kt,"Now we can create an instance of this model definition which will fill all weights: "),td=s(Kt,"CODE",{});var w7=i(td);W1=n(w7,"dense"),w7.forEach(o),G1=n(Kt,", "),od=s(Kt,"CODE",{});var v7=i(od);U1=n(v7,"intermediate"),v7.forEach(o),Y1=n(Kt,`, `),rd=s(Kt,"CODE",{});var b7=i(rd);J1=n(b7,"layer_norm"),b7.forEach(o),X1=n(Kt," with random weights. 
We can print the model to see its architecture"),Kt.forEach(o),_m=f(e),u(ur.$$.fragment,e),Em=f(e),Gn=s(e,"P",{});var _7=i(Gn);Z1=n(_7,"This will print out the following:"),_7.forEach(o),km=f(e),u(cr.$$.fragment,e),Tm=f(e),Un=s(e,"P",{});var E7=i(Un);K1=n(E7,`We can see that the layer names are defined by the name of the class attribute in PyTorch. You can print out the weight values of a specific layer:`),E7.forEach(o),$m=f(e),u(yr.$$.fragment,e),Pm=f(e),Yn=s(e,"P",{});var k7=i(Yn);Q1=n(k7,"to see that the weights were randomly initialized"),k7.forEach(o),Im=f(e),u(gr.$$.fragment,e),Am=f(e),wr=s(e,"P",{});var i3=i(wr);V1=n(i3,`In the conversion script, you should fill those randomly initialized weights with the exact weights of the corresponding layer in the checkpoint. `),nd=s(i3,"EM",{});var T7=i(nd);e0=n(T7,"E.g."),T7.forEach(o),i3.forEach(o),Nm=f(e),u(vr.$$.fragment,e),Mm=f(e),_e=s(e,"P",{});var Ba=i(_e);t0=n(Ba,`While doing so, you must verify that each randomly initialized weight of your PyTorch model and its corresponding pretrained checkpoint weight exactly match in both `),ad=s(Ba,"STRONG",{});var $7=i(ad);o0=n($7,"shape and name"),$7.forEach(o),r0=n(Ba,". To do so, it is "),sd=s(Ba,"STRONG",{});var P7=i(sd);n0=n(P7,"necessary"),P7.forEach(o),a0=n(Ba,` to add assert statements for the shape and print out the names of the checkpoints weights. E.g. you should add statements like:`),Ba.forEach(o),Bm=f(e),u(br.$$.fragment,e),Lm=f(e),_r=s(e,"P",{});var l3=i(_r);s0=n(l3,"Besides, you should also print out the names of both weights to make sure they match, "),id=s(l3,"EM",{});var I7=i(id);i0=n(I7,"e.g."),I7.forEach(o),l3.forEach(o),jm=f(e),u(Er.$$.fragment,e),Om=f(e),Jn=s(e,"P",{});var A7=i(Jn);l0=n(A7,`If either the shape or the name doesn\u2019t match, you probably assigned the wrong checkpoint weight to a randomly initialized layer of the \u{1F917} Transformers implementation.`),A7.forEach(o),xm=f(e),At=s(e,"P",{});var bu=i(At);d0=n(bu,"An incorrect shape is most likely due to an incorrect setting of the config parameters in "),ld=s(bu,"CODE",{});var N7=i(ld);h0=n(N7,"BrandNewBertConfig()"),N7.forEach(o),f0=n(bu,` that do not exactly match those that were used for the checkpoint you want to convert. However, it could also be that PyTorch\u2019s implementation of a layer requires the weight to be transposed beforehand.`),bu.forEach(o),Cm=f(e),Z=s(e,"P",{});var Qt=i(Z);m0=n(Qt,"Finally, you should also check that "),dd=s(Qt,"STRONG",{});var M7=i(dd);p0=n(M7,"all"),M7.forEach(o),u0=n(Qt,` required weights are initialized and print out all checkpoint weights that were not used for initialization to make sure the model is correctly converted. It is completely normal, that the conversion trials fail with either a wrong shape statement or wrong name assignment. This is most likely because either you used incorrect parameters in `),hd=s(Qt,"CODE",{});var B7=i(hd);c0=n(B7,"BrandNewBertConfig()"),B7.forEach(o),y0=n(Qt,`, have a wrong architecture in the \u{1F917} Transformers implementation, you have a bug in the `),fd=s(Qt,"CODE",{});var L7=i(fd);g0=n(L7,"init()"),L7.forEach(o),w0=n(Qt,` functions of one of the components of the \u{1F917} Transformers implementation or you need to transpose one of the checkpoint weights.`),Qt.forEach(o),qm=f(e),K=s(e,"P",{});var Vt=i(K);v0=n(Vt,`This step should be iterated with the previous step until all weights of the checkpoint are correctly loaded in the Transformers model. 
Having correctly loaded the checkpoint into the \u{1F917} Transformers implementation, you can then save the model under a folder of your choice `),md=s(Vt,"CODE",{});var j7=i(md);b0=n(j7,"/path/to/converted/checkpoint/folder"),j7.forEach(o),_0=n(Vt,` that should then contain both a `),pd=s(Vt,"CODE",{});var O7=i(pd);E0=n(O7,"pytorch_model.bin"),O7.forEach(o),k0=n(Vt," file and a "),ud=s(Vt,"CODE",{});var x7=i(ud);T0=n(x7,"config.json"),x7.forEach(o),$0=n(Vt," file:"),Vt.forEach(o),Sm=f(e),u(kr.$$.fragment,e),Rm=f(e),Xn=s(e,"P",{});var C7=i(Xn);cd=s(C7,"STRONG",{});var q7=i(cd);P0=n(q7,"7. Implement the forward pass"),q7.forEach(o),C7.forEach(o),Dm=f(e),Nt=s(e,"P",{});var _u=i(Nt);I0=n(_u,`Having managed to correctly load the pretrained weights into the \u{1F917} Transformers implementation, you should now make sure that the forward pass is correctly implemented. In `),Zn=s(_u,"A",{href:!0});var S7=i(Zn);A0=n(S7,"Get familiar with the original repository"),S7.forEach(o),N0=n(_u,`, you have already created a script that runs a forward pass of the model using the original repository. Now you should write an analogous script using the \u{1F917} Transformers implementation instead of the original one. It should look as follows:`),_u.forEach(o),zm=f(e),u(Tr.$$.fragment,e),Fm=f(e),S=s(e,"P",{});var Be=i(S);M0=n(Be,`It is very likely that the \u{1F917} Transformers implementation and the original model implementation don\u2019t give the exact same output the very first time or that the forward pass throws an error. Don\u2019t be disappointed - it\u2019s expected! First, you should make sure that the forward pass doesn\u2019t throw any errors. It often happens that the wrong dimensions are used leading to a `),yd=s(Be,"EM",{});var R7=i(yd);B0=n(R7,"Dimensionality mismatch"),R7.forEach(o),L0=n(Be," error or that the wrong data type object is used, "),gd=s(Be,"EM",{});var D7=i(gd);j0=n(D7,"e.g."),D7.forEach(o),O0=f(Be),wd=s(Be,"CODE",{});var z7=i(wd);x0=n(z7,"torch.long"),z7.forEach(o),C0=n(Be,` instead of `),vd=s(Be,"CODE",{});var F7=i(vd);q0=n(F7,"torch.float32"),F7.forEach(o),S0=n(Be,`. Don\u2019t hesitate to ask the Hugging Face team for help, if you don\u2019t manage to solve certain errors.`),Be.forEach(o),Hm=f(e),Q=s(e,"P",{});var eo=i(Q);R0=n(eo,`The final part to make sure the \u{1F917} Transformers implementation works correctly is to ensure that the outputs are equivalent to a precision of `),bd=s(eo,"CODE",{});var H7=i(bd);D0=n(H7,"1e-3"),H7.forEach(o),z0=n(eo,". First, you should ensure that the output shapes are identical, "),_d=s(eo,"EM",{});var W7=i(_d);F0=n(W7,"i.e."),W7.forEach(o),H0=f(eo),Ed=s(eo,"CODE",{});var G7=i(Ed);W0=n(G7,"outputs.shape"),G7.forEach(o),G0=n(eo,` should yield the same value for the script of the \u{1F917} Transformers implementation and the original implementation. Next, you should make sure that the output values are identical as well. This one of the most difficult parts of adding a new model. 
Common mistakes why the outputs are not identical are:`),eo.forEach(o),Wm=f(e),V=s(e,"UL",{});var to=i(V);Ke=s(to,"LI",{});var La=i(Ke);U0=n(La,"Some layers were not added, "),kd=s(La,"EM",{});var U7=i(kd);Y0=n(U7,"i.e."),U7.forEach(o),J0=n(La," an "),Td=s(La,"EM",{});var Y7=i(Td);X0=n(Y7,"activation"),Y7.forEach(o),Z0=n(La," layer was not added, or the residual connection was forgotten"),La.forEach(o),K0=f(to),$d=s(to,"LI",{});var J7=i($d);Q0=n(J7,"The word embedding matrix was not tied"),J7.forEach(o),V0=f(to),Pd=s(to,"LI",{});var X7=i(Pd);e2=n(X7,"The wrong positional embeddings are used because the original implementation uses on offset"),X7.forEach(o),t2=f(to),ee=s(to,"LI",{});var Ve=i(ee);o2=n(Ve,"Dropout is applied during the forward pass. To fix this make sure "),Id=s(Ve,"EM",{});var Z7=i(Id);r2=n(Z7,"model.training is False"),Z7.forEach(o),n2=n(Ve,` and that no dropout layer is falsely activated during the forward pass, `),Ad=s(Ve,"EM",{});var K7=i(Ad);a2=n(K7,"i.e."),K7.forEach(o),s2=n(Ve," pass "),Nd=s(Ve,"EM",{});var Q7=i(Nd);i2=n(Q7,"self.training"),Q7.forEach(o),l2=n(Ve," to "),$r=s(Ve,"A",{href:!0,rel:!0});var V7=i($r);d2=n(V7,"PyTorch\u2019s functional dropout"),V7.forEach(o),Ve.forEach(o),to.forEach(o),Gm=f(e),Ee=s(e,"P",{});var ja=i(Ee);h2=n(ja,`The best way to fix the problem is usually to look at the forward pass of the original implementation and the \u{1F917} Transformers implementation side-by-side and check if there are any differences. Ideally, you should debug/print out intermediate outputs of both implementations of the forward pass to find the exact position in the network where the \u{1F917} Transformers implementation shows a different output than the original implementation. First, make sure that the hard-coded `),Md=s(ja,"CODE",{});var eT=i(Md);f2=n(eT,"input_ids"),eT.forEach(o),m2=n(ja,` in both scripts are identical. Next, verify that the outputs of the first transformation of the `),Bd=s(ja,"CODE",{});var tT=i(Bd);p2=n(tT,"input_ids"),tT.forEach(o),u2=n(ja,` (usually the word embeddings) are identical. And then work your way up to the very last layer of the network. At some point, you will notice a difference between the two implementations, which should point you to the bug in the \u{1F917} Transformers implementation. From our experience, a simple and efficient way is to add many print statements in both the original implementation and \u{1F917} Transformers implementation, at the same positions in the network respectively, and to successively remove print statements showing the same values for intermediate presentations.`),ja.forEach(o),Um=f(e),Mt=s(e,"P",{});var Eu=i(Mt);c2=n(Eu,`When you\u2019re confident that both implementations yield the same output, verifying the outputs with `),Ld=s(Eu,"CODE",{});var oT=i(Ld);y2=n(oT,"torch.allclose(original_output, output, atol=1e-3)"),oT.forEach(o),g2=n(Eu,`, you\u2019re done with the most difficult part! Congratulations - the work left to be done should be a cakewalk \u{1F60A}.`),Eu.forEach(o),Ym=f(e),Kn=s(e,"P",{});var rT=i(Kn);jd=s(rT,"STRONG",{});var nT=i(jd);w2=n(nT,"8. Adding all necessary model tests"),nT.forEach(o),rT.forEach(o),Jm=f(e),Bt=s(e,"P",{});var ku=i(Bt);v2=n(ku,`At this point, you have successfully added a new model. However, it is very much possible that the model does not yet fully comply with the required design. To make sure, the implementation is fully compatible with \u{1F917} Transformers, all common tests should pass. 
The Cookiecutter should have automatically added a test file for your model, probably under the same `),Od=s(ku,"CODE",{});var aT=i(Od);b2=n(aT,"tests/test_modeling_brand_new_bert.py"),aT.forEach(o),_2=n(ku,". Run this test file to verify that all common tests pass:"),ku.forEach(o),Xm=f(e),u(Pr.$$.fragment,e),Zm=f(e),Qn=s(e,"P",{});var sT=i(Qn);E2=n(sT,"Having fixed all common tests, it is now crucial to ensure that all the nice work you have done is well tested, so that"),sT.forEach(o),Km=f(e),Lt=s(e,"UL",{});var Tu=i(Lt);Vn=s(Tu,"LI",{});var d3=i(Vn);k2=n(d3,"a) The community can easily understand your work by looking at specific tests of "),xd=s(d3,"EM",{});var iT=i(xd);T2=n(iT,"brand_new_bert"),iT.forEach(o),d3.forEach(o),$2=f(Tu),Cd=s(Tu,"LI",{});var lT=i(Cd);P2=n(lT,"b) Future changes to your model will not break any important feature of the model."),lT.forEach(o),Tu.forEach(o),Qm=f(e),jt=s(e,"P",{});var $u=i(jt);I2=n($u,`At first, integration tests should be added. Those integration tests essentially do the same as the debugging scripts you used earlier to implement the model to \u{1F917} Transformers. A template of those model tests is already added by the Cookiecutter, called `),qd=s($u,"CODE",{});var dT=i(qd);A2=n(dT,"BrandNewBertModelIntegrationTests"),dT.forEach(o),N2=n($u,` and only has to be filled out by you. To ensure that those tests are passing, run`),$u.forEach(o),Vm=f(e),u(Ir.$$.fragment,e),ep=f(e),u(Ot.$$.fragment,e),tp=f(e),te=s(e,"P",{});var oo=i(te);M2=n(oo,"Second, all features that are special to "),Sd=s(oo,"EM",{});var hT=i(Sd);B2=n(hT,"brand_new_bert"),hT.forEach(o),L2=n(oo,` should be tested additionally in a separate test under `),Rd=s(oo,"CODE",{});var fT=i(Rd);j2=n(fT,"BrandNewBertModelTester"),fT.forEach(o),O2=n(oo,"/`"),Dd=s(oo,"CODE",{});var mT=i(Dd);x2=n(mT,"BrandNewBertModelTest"),mT.forEach(o),C2=n(oo,`. This part is often forgotten but is extremely useful in two ways:`),oo.forEach(o),op=f(e),xt=s(e,"UL",{});var Pu=i(xt);Ar=s(Pu,"LI",{});var Iu=i(Ar);q2=n(Iu,`It helps to transfer the knowledge you have acquired during the model addition to the community by showing how the special features of `),zd=s(Iu,"EM",{});var pT=i(zd);S2=n(pT,"brand_new_bert"),pT.forEach(o),R2=n(Iu," should work."),Iu.forEach(o),D2=f(Pu),Fd=s(Pu,"LI",{});var uT=i(Fd);z2=n(uT,"Future contributors can quickly test changes to the model by running those special tests."),uT.forEach(o),Pu.forEach(o),rp=f(e),ea=s(e,"P",{});var cT=i(ea);Hd=s(cT,"STRONG",{});var yT=i(Hd);F2=n(yT,"9. Implement the tokenizer"),yT.forEach(o),cT.forEach(o),np=f(e),Ct=s(e,"P",{});var Au=i(Ct);H2=n(Au,"Next, we should add the tokenizer of "),Wd=s(Au,"EM",{});var gT=i(Wd);W2=n(gT,"brand_new_bert"),gT.forEach(o),G2=n(Au,`. Usually, the tokenizer is equivalent or very similar to an already existing tokenizer of \u{1F917} Transformers.`),Au.forEach(o),ap=f(e),ta=s(e,"P",{});var wT=i(ta);U2=n(wT,`It is very important to find/extract the original tokenizer file and to manage to load this file into the \u{1F917} Transformers\u2019 implementation of the tokenizer.`),wT.forEach(o),sp=f(e),oa=s(e,"P",{});var vT=i(oa);Y2=n(vT,"To ensure that the tokenizer works correctly, it is recommended to first create a script in the original repository\nthat inputs a string and returns the `input_ids\u201C. 
It could look similar to this (in pseudo-code):"),vT.forEach(o),ip=f(e),u(Nr.$$.fragment,e),lp=f(e),qt=s(e,"P",{});var Nu=i(qt);J2=n(Nu,`You might have to take a deeper look again into the original repository to find the correct tokenizer function or you might even have to do changes to your clone of the original repository to only output the `),Gd=s(Nu,"CODE",{});var bT=i(Gd);X2=n(bT,"input_ids"),bT.forEach(o),Z2=n(Nu,`. Having written a functional tokenization script that uses the original repository, an analogous script for \u{1F917} Transformers should be created. It should look similar to this:`),Nu.forEach(o),dp=f(e),u(Mr.$$.fragment,e),hp=f(e),St=s(e,"P",{});var Mu=i(St);K2=n(Mu,"When both "),Ud=s(Mu,"CODE",{});var _T=i(Ud);Q2=n(_T,"input_ids"),_T.forEach(o),V2=n(Mu," yield the same values, as a final step a tokenizer test file should also be added."),Mu.forEach(o),fp=f(e),ke=s(e,"P",{});var Oa=i(ke);eE=n(Oa,"Analogous to the modeling test files of "),Yd=s(Oa,"EM",{});var ET=i(Yd);tE=n(ET,"brand_new_bert"),ET.forEach(o),oE=n(Oa,", the tokenization test files of "),Jd=s(Oa,"EM",{});var kT=i(Jd);rE=n(kT,"brand_new_bert"),kT.forEach(o),nE=n(Oa,` should contain a couple of hard-coded integration tests.`),Oa.forEach(o),mp=f(e),ra=s(e,"P",{});var TT=i(ra);Xd=s(TT,"STRONG",{});var $T=i(Xd);aE=n($T,"10. Run End-to-end integration tests"),$T.forEach(o),TT.forEach(o),pp=f(e),oe=s(e,"P",{});var ro=i(oe);sE=n(ro,`Having added the tokenizer, you should also add a couple of end-to-end integration tests using both the model and the tokenizer to `),Zd=s(ro,"CODE",{});var PT=i(Zd);iE=n(PT,"tests/test_modeling_brand_new_bert.py"),PT.forEach(o),lE=n(ro,` in \u{1F917} Transformers. Such a test should show on a meaningful text-to-text sample that the \u{1F917} Transformers implementation works as expected. A meaningful text-to-text sample can include `),Kd=s(ro,"EM",{});var IT=i(Kd);dE=n(IT,"e.g."),IT.forEach(o),hE=n(ro,` a source-to-target-translation pair, an article-to-summary pair, a question-to-answer pair, etc\u2026 If none of the ported checkpoints has been fine-tuned on a downstream task it is enough to simply rely on the model tests. In a final step to ensure that the model is fully functional, it is advised that you also run all tests on GPU. It can happen that you forgot to add some `),Qd=s(ro,"CODE",{});var AT=i(Qd);fE=n(AT,".to(self.device)"),AT.forEach(o),mE=n(ro,` statements to internal tensors of the model, which in such a test would show in an error. In case you have no access to a GPU, the Hugging Face team can take care of running those tests for you.`),ro.forEach(o),up=f(e),na=s(e,"P",{});var NT=i(na);Vd=s(NT,"STRONG",{});var MT=i(Vd);pE=n(MT,"11. Add Docstring"),MT.forEach(o),NT.forEach(o),cp=f(e),re=s(e,"P",{});var no=i(re);uE=n(no,"Now, all the necessary functionality for "),eh=s(no,"EM",{});var BT=i(eh);cE=n(BT,"brand_new_bert"),BT.forEach(o),yE=n(no,` is added - you\u2019re almost done! The only thing left to add is a nice docstring and a doc page. The Cookiecutter should have added a template file called `),th=s(no,"CODE",{});var LT=i(th);gE=n(LT,"docs/source/model_doc/brand_new_bert.rst"),LT.forEach(o),wE=n(no,` that you should fill out. Users of your model will usually first look at this page before using your model. Hence, the documentation must be understandable and concise. It is very useful for the community to add some `),oh=s(no,"EM",{});var jT=i(oh);vE=n(jT,"Tips"),jT.forEach(o),bE=n(no,` to show how the model should be used. 
Don\u2019t hesitate to ping the Hugging Face team regarding the docstrings.`),no.forEach(o),yp=f(e),Te=s(e,"P",{});var xa=i(Te);_E=n(xa,"Next, make sure that the docstring added to "),rh=s(xa,"CODE",{});var OT=i(rh);EE=n(OT,"src/transformers/models/brand_new_bert/modeling_brand_new_bert.py"),OT.forEach(o),kE=n(xa,` is correct and included all necessary inputs and outputs. We have a detailed guide about writing documentation and our docstring format `),aa=s(xa,"A",{href:!0});var xT=i(aa);TE=n(xT,"here"),xT.forEach(o),$E=n(xa,`. It is always to good to remind oneself that documentation should be treated at least as carefully as the code in \u{1F917} Transformers since the documentation is usually the first contact point of the community with the model.`),xa.forEach(o),gp=f(e),sa=s(e,"P",{});var CT=i(sa);nh=s(CT,"STRONG",{});var qT=i(nh);PE=n(qT,"Code refactor"),qT.forEach(o),CT.forEach(o),wp=f(e),Rt=s(e,"P",{});var Bu=i(Rt);IE=n(Bu,"Great, now you have added all the necessary code for "),ah=s(Bu,"EM",{});var ST=i(ah);AE=n(ST,"brand_new_bert"),ST.forEach(o),NE=n(Bu,`. At this point, you should correct some potential incorrect code style by running:`),Bu.forEach(o),vp=f(e),u(Br.$$.fragment,e),bp=f(e),ia=s(e,"P",{});var RT=i(ia);ME=n(RT,"and verify that your coding style passes the quality check:"),RT.forEach(o),_p=f(e),u(Lr.$$.fragment,e),Ep=f(e),la=s(e,"P",{});var DT=i(la);BE=n(DT,`There are a couple of other very strict design tests in \u{1F917} Transformers that might still be failing, which shows up in the tests of your pull request. This is often because of some missing information in the docstring or some incorrect naming. The Hugging Face team will surely help you if you\u2019re stuck here.`),DT.forEach(o),kp=f(e),da=s(e,"P",{});var zT=i(da);LE=n(zT,`Lastly, it is always a good idea to refactor one\u2019s code after having ensured that the code works correctly. With all tests passing, now it\u2019s a good time to go over the added code again and do some refactoring.`),zT.forEach(o),Tp=f(e),ha=s(e,"P",{});var FT=i(ha);jE=n(FT,"You have now finished the coding part, congratulation! \u{1F389} You are Awesome! \u{1F60E}"),FT.forEach(o),$p=f(e),fa=s(e,"P",{});var HT=i(fa);sh=s(HT,"STRONG",{});var WT=i(sh);OE=n(WT,"12. Upload the models to the model hub"),WT.forEach(o),HT.forEach(o),Pp=f(e),R=s(e,"P",{});var Le=i(R);xE=n(Le,`In this final part, you should convert and upload all checkpoints to the model hub and add a model card for each uploaded model checkpoint. You can get familiar with the hub functionalities by reading our `),ma=s(Le,"A",{href:!0});var GT=i(ma);CE=n(GT,"Model sharing and uploading Page"),GT.forEach(o),qE=n(Le,`. You should work alongside the Hugging Face team here to decide on a fitting name for each checkpoint and to get the required access rights to be able to upload the model under the author\u2019s organization of `),ih=s(Le,"EM",{});var UT=i(ih);SE=n(UT,"brand_new_bert"),UT.forEach(o),RE=n(Le,". The "),lh=s(Le,"CODE",{});var YT=i(lh);DE=n(YT,"push_to_hub"),YT.forEach(o),zE=n(Le," method, present in all models in "),dh=s(Le,"CODE",{});var JT=i(dh);FE=n(JT,"transformers"),JT.forEach(o),HE=n(Le,", is a quick and efficient way to push your checkpoint to the hub. A little snippet is pasted below:"),Le.forEach(o),Ip=f(e),u(jr.$$.fragment,e),Ap=f(e),Dt=s(e,"P",{});var Lu=i(Dt);WE=n(Lu,`It is worth spending some time to create fitting model cards for each checkpoint. 
The model cards should highlight the specific characteristics of this particular checkpoint, `),hh=s(Lu,"EM",{});var XT=i(hh);GE=n(XT,"e.g."),XT.forEach(o),UE=n(Lu,` On which dataset was the checkpoint pretrained/fine-tuned on? On what down-stream task should the model be used? And also include some code on how to correctly use the model.`),Lu.forEach(o),Np=f(e),pa=s(e,"P",{});var ZT=i(pa);fh=s(ZT,"STRONG",{});var KT=i(fh);YE=n(KT,"13. (Optional) Add notebook"),KT.forEach(o),ZT.forEach(o),Mp=f(e),zt=s(e,"P",{});var ju=i(zt);JE=n(ju,"It is very helpful to add a notebook that showcases in-detail how "),mh=s(ju,"EM",{});var QT=i(mh);XE=n(QT,"brand_new_bert"),QT.forEach(o),ZE=n(ju,` can be used for inference and/or fine-tuned on a downstream task. This is not mandatory to merge your PR, but very useful for the community.`),ju.forEach(o),Bp=f(e),ua=s(e,"P",{});var VT=i(ua);ph=s(VT,"STRONG",{});var e9=i(ph);KE=n(e9,"14. Submit your finished PR"),e9.forEach(o),VT.forEach(o),Lp=f(e),ca=s(e,"P",{});var t9=i(ca);QE=n(t9,`You\u2019re done programming now and can move to the last step, which is getting your PR merged into master. Usually, the Hugging Face team should have helped you already at this point, but it is worth taking some time to give your finished PR a nice description and eventually add comments to your code, if you want to point out certain design choices to your reviewer.`),t9.forEach(o),jp=f(e),Qe=s(e,"H3",{class:!0});var Ou=i(Qe);Ft=s(Ou,"A",{id:!0,class:!0,href:!0});var o9=i(Ft);uh=s(o9,"SPAN",{});var r9=i(uh);u(Or.$$.fragment,r9),r9.forEach(o),o9.forEach(o),VE=f(Ou),ch=s(Ou,"SPAN",{});var n9=i(ch);e3=n(n9,"Share your work!!"),n9.forEach(o),Ou.forEach(o),Op=f(e),ya=s(e,"P",{});var a9=i(ya);t3=n(a9,`Now, it\u2019s time to get some credit from the community for your work! Having completed a model addition is a major contribution to Transformers and the whole NLP community. Your code and the ported pre-trained models will certainly be used by hundreds and possibly even thousands of developers and researchers. You should be proud of your work and share your achievement with the community.`),a9.forEach(o),xp=f(e),ga=s(e,"P",{});var s9=i(ga);yh=s(s9,"STRONG",{});var i9=i(yh);o3=n(i9,"You have made another model that is super easy to access for everyone in the community! 
\u{1F92F}"),i9.forEach(o),s9.forEach(o),this.h()},h(){m(P,"name","hf:doc:metadata"),m(P,"content",JSON.stringify(y9)),m(L,"id","how-to-add-a-model-to-transformers"),m(L,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(L,"href","#how-to-add-a-model-to-transformers"),m(I,"class","relative group"),m(io,"href","https://github.com/huggingface/transformers/tree/master/templates/adding_a_new_model/open_model_proposals/README.md"),m(io,"rel","nofollow"),m(lo,"href","https://github.com/huggingface/transformers/pulls?q=is%3Apr+label%3A%22PR+for+Model+Addition%22+is%3Aclosed"),m(lo,"rel","nofollow"),m(ot,"id","general-overview-of-transformers"),m(ot,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(ot,"href","#general-overview-of-transformers"),m(xe,"class","relative group"),m(Rr,"href","philosophy"),m(at,"id","overview-of-models"),m(at,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(at,"href","#overview-of-models"),m(Ce,"class","relative group"),m(zr,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel"),m(Fr,"href","/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig"),m9(Wr.src,h3="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers_overview.png")||m(Wr,"src",h3),m(Gr,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel"),m(Ur,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel"),m(Yr,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained"),m(Jr,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.save_pretrained"),m(Xr,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel"),m(Zr,"href","/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig"),m(Kr,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.save_pretrained"),m(Qr,"href","/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig.save_pretrained"),m(st,"id","overview-of-tokenizers"),m(st,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(st,"href","#overview-of-tokenizers"),m(qe,"class","relative group"),m(it,"id","stepbystep-recipe-to-add-a-model-to-transformers"),m(it,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(it,"href","#stepbystep-recipe-to-add-a-model-to-transformers"),m(Se,"class","relative 
group"),m(go,"href","https://medium.com/huggingface/from-tensorflow-to-pytorch-265f40ef2a28"),m(go,"rel","nofollow"),m(wo,"href","https://huggingface.co/thomwolf"),m(wo,"rel","nofollow"),m(bo,"href","https://huggingface.co/blog/porting-fsmt"),m(bo,"rel","nofollow"),m(_o,"href","https://huggingface.co/stas"),m(_o,"rel","nofollow"),m(Eo,"href","https://www.gnu.org/software/grep/"),m(Eo,"rel","nofollow"),m(ko,"href","https://github.com/BurntSushi/ripgrep"),m(ko,"rel","nofollow"),m(nn,"start","2"),m(an,"start","3"),m(sn,"start","4"),m(ln,"start","5"),m(dn,"start","6"),m(hn,"start","7"),m(fn,"start","8"),m(mn,"start","9"),m(pn,"start","10"),m(un,"start","11"),m(cn,"start","12"),m(yn,"start","13"),m(gn,"start","14"),m(dt,"id","1-optional-theoretical-aspects-of-brandnewbert"),m(dt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(dt,"href","#1-optional-theoretical-aspects-of-brandnewbert"),m(Re,"class","relative group"),m(wn,"href","model_summary"),m($o,"href","https://huggingface.co/transformers/#contents"),m($o,"rel","nofollow"),m(ft,"id","2-next-prepare-your-environment"),m(ft,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(ft,"href","#2-next-prepare-your-environment"),m(He,"class","relative group"),m(Ao,"href","https://github.com/huggingface/transformers"),m(Ao,"rel","nofollow"),m(Bo,"start","3"),m(xo,"href","https://pytorch.org/get-started/locally/"),m(xo,"rel","nofollow"),m(Oo,"start","4"),m(qo,"start","5"),m(ut,"id","34-run-a-pretrained-checkpoint-using-the-original-repository"),m(ut,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(ut,"href","#34-run-a-pretrained-checkpoint-using-the-original-repository"),m(Ge,"class","relative group"),m(Fo,"href","https://jupyter.org/"),m(Fo,"rel","nofollow"),m(Ho,"href","https://colab.research.google.com/notebooks/intro.ipynb"),m(Ho,"rel","nofollow"),m(Uo,"href","https://gist.github.com/LysandreJik/db4c948f6b4483960de5cbac598ad4ed"),m(Uo,"rel","nofollow"),m(Yo,"href","https://github.com/tensorflow/mesh/tree/master/mesh_tensorflow"),m(Yo,"rel","nofollow"),m(Xo,"href","https://www.tensorflow.org/api_docs/python/tf/print"),m(Xo,"rel","nofollow"),m(Zo,"href","https://github.com/google/jax/issues/196"),m(Zo,"rel","nofollow"),m(Tt,"id","514-port-brandnewbert-to-transformers"),m(Tt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(Tt,"href","#514-port-brandnewbert-to-transformers"),m(Xe,"class","relative 
group"),m(Bn,"href","#write-a-conversion-script"),m(er,"href","https://github.com/huggingface/transformers/tree/master/templates/adding_a_new_model"),m(er,"rel","nofollow"),m(tr,"href","https://github.com/huggingface/transformers/tree/master/templates/adding_a_new_model"),m(tr,"rel","nofollow"),m(rr,"start","2"),m(ar,"start","3"),m(ir,"start","4"),m(Ze,"start","5"),m(fr,"href","https://github.com/huggingface/transformers/blob/7acfa95afb8194f8f9c1f4d2c6028224dbed35a2/src/transformers/models/bert/modeling_bert.py#L91"),m(fr,"rel","nofollow"),m(mr,"href","https://github.com/huggingface/transformers/blob/master/src/transformers/models/bart/convert_bart_original_pytorch_checkpoint_to_pytorch.py"),m(mr,"rel","nofollow"),m(Zn,"href","#run-a-pretrained-checkpoint-using-the-original-repository"),m($r,"href","https://pytorch.org/docs/stable/nn.functional.html?highlight=dropout#torch.nn.functional.dropout"),m($r,"rel","nofollow"),m(aa,"href","writing-documentation"),m(ma,"href","model_sharing"),m(Ft,"id","share-your-work"),m(Ft,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(Ft,"href","#share-your-work"),m(Qe,"class","relative group")},m(e,l){t(document.head,P),d(e,ue,l),d(e,I,l),t(I,L),t(L,Oe),c(D,Oe,null),t(I,ao),t(I,le),t(le,de),d(e,so,l),d(e,z,l),t(z,xu),t(z,qa),t(qa,Cu),t(z,qu),t(z,Sa),t(Sa,Su),t(z,Ru),d(e,wh,l),d(e,et,l),t(et,Du),t(et,io),t(io,zu),t(et,Fu),d(e,vh,l),d(e,Cr,l),t(Cr,Hu),d(e,bh,l),d(e,W,l),t(W,Ra),t(Ra,Wu),t(W,Gu),t(W,Da),t(Da,Uu),t(W,Yu),t(W,za),t(za,Ju),t(W,Xu),t(W,he),t(he,Zu),t(he,Fa),t(Fa,Ku),t(he,Qu),t(he,Ha),t(Ha,Vu),t(he,ec),t(he,Wa),t(Wa,tc),t(he,oc),d(e,_h,l),d(e,tt,l),t(tt,rc),t(tt,lo),t(lo,nc),t(tt,ac),d(e,Eh,l),d(e,qr,l),t(qr,sc),d(e,kh,l),d(e,xe,l),t(xe,ot),t(ot,Ga),c(ho,Ga,null),t(xe,ic),t(xe,Ua),t(Ua,lc),d(e,Th,l),d(e,Sr,l),t(Sr,dc),d(e,$h,l),d(e,rt,l),t(rt,hc),t(rt,Rr),t(Rr,fc),t(rt,mc),d(e,Ph,l),d(e,ce,l),t(ce,Ya),t(Ya,pc),t(ce,uc),t(ce,Ja),t(Ja,cc),t(ce,yc),t(ce,fo),t(fo,gc),t(fo,Xa),t(Xa,wc),t(fo,vc),d(e,Ih,l),d(e,nt,l),t(nt,bc),t(nt,Za),t(Za,_c),t(nt,Ec),d(e,Ah,l),d(e,Dr,l),t(Dr,kc),d(e,Nh,l),d(e,Ce,l),t(Ce,at),t(at,Ka),c(mo,Ka,null),t(Ce,Tc),t(Ce,Qa),t(Qa,$c),d(e,Mh,l),d(e,G,l),t(G,Pc),t(G,zr),t(zr,Ic),t(G,Ac),t(G,Fr),t(Fr,Nc),t(G,Mc),t(G,Va),t(Va,Bc),t(G,Lc),d(e,Bh,l),d(e,Hr,l),t(Hr,jc),d(e,Lh,l),d(e,Wr,l),d(e,jh,l),d(e,v,l),t(v,Oc),t(v,es),t(es,xc),t(v,Cc),t(v,ts),t(ts,qc),t(v,Sc),t(v,Gr),t(Gr,Rc),t(v,Dc),t(v,Ur),t(Ur,zc),t(v,Fc),t(v,Yr),t(Yr,Hc),t(v,Wc),t(v,Jr),t(Jr,Gc),t(v,Uc),t(v,os),t(os,Yc),t(v,Jc),t(v,rs),t(rs,Xc),t(v,Zc),t(v,ns),t(ns,Kc),t(v,Qc),t(v,as),t(as,Vc),t(v,ey),t(v,ss),t(ss,ty),t(v,oy),t(v,is),t(is,ry),t(v,ny),t(v,Xr),t(Xr,ay),t(v,sy),t(v,ls),t(ls,iy),t(v,ly),t(v,ds),t(ds,dy),t(v,hy),d(e,Oh,l),c(po,e,l),d(e,xh,l),d(e,A,l),t(A,fy),t(A,Zr),t(Zr,my),t(A,py),t(A,hs),t(hs,uy),t(A,cy),t(A,fs),t(fs,yy),t(A,gy),t(A,Kr),t(Kr,wy),t(A,vy),t(A,Qr),t(Qr,by),t(A,_y),d(e,Ch,l),d(e,qe,l),t(qe,st),t(st,ms),c(uo,ms,null),t(qe,Ey),t(qe,ps),t(ps,ky),d(e,qh,l),d(e,Vr,l),t(Vr,Ty),d(e,Sh,l),d(e,Se,l),t(Se,it),t(it,us),c(co,us,null),t(Se,$y),t(Se,cs),t(cs,Py),d(e,Rh,l),d(e,en,l),t(en,Iy),d(e,Dh,l),d(e,lt,l),t(lt,yo),t(yo,go),t(go,Ay),t(yo,Ny),t(yo,wo),t(wo,My),t(lt,By),t(lt,vo),t(vo,bo),t(bo,Ly),t(vo,jy),t(vo,_o),t(_o,Oy),d(e,zh,l),d(e,tn,l),t(tn,xy),d(e,Fh,l),d(e,ye,l),t(ye,fe),t(fe,Cy),t(fe,Eo),t(Eo,qy),t(fe,Sy),t(fe,ko),t(ko,Ry),t(fe,Dy),t(fe,ys),t(ys,zy),t(fe,Fy),t(ye,Hy),t(ye,gs),t(gs,Wy),t(ye,Gy),t(ye,ws),t(ws,Uy),d(e,Hh,l),d(e,on,l)
,t(on,Yy),d(e,Wh,l),d(e,rn,l),t(rn,Jy),d(e,Gh,l),d(e,b,l),t(b,vs),t(vs,bs),t(bs,_s),t(_s,Xy),t(b,Zy),t(b,Es),t(Es,nn),t(nn,ks),t(ks,Ky),t(b,Qy),t(b,Ts),t(Ts,an),t(an,$s),t($s,Vy),t(b,eg),t(b,Ps),t(Ps,sn),t(sn,Is),t(Is,tg),t(b,og),t(b,As),t(As,ln),t(ln,Ns),t(Ns,rg),t(b,ng),t(b,Ms),t(Ms,dn),t(dn,Bs),t(Bs,ag),t(b,sg),t(b,Ls),t(Ls,hn),t(hn,js),t(js,ig),t(b,lg),t(b,Os),t(Os,fn),t(fn,xs),t(xs,dg),t(b,hg),t(b,Cs),t(Cs,mn),t(mn,qs),t(qs,fg),t(b,mg),t(b,Ss),t(Ss,pn),t(pn,Rs),t(Rs,pg),t(b,ug),t(b,Ds),t(Ds,un),t(un,zs),t(zs,cg),t(b,yg),t(b,Fs),t(Fs,cn),t(cn,Hs),t(Hs,gg),t(b,wg),t(b,Ws),t(Ws,yn),t(yn,Gs),t(Gs,vg),t(b,bg),t(b,Us),t(Us,gn),t(gn,Ys),t(Ys,_g),d(e,Uh,l),d(e,O,l),t(O,Eg),t(O,Js),t(Js,kg),t(O,Tg),t(O,Xs),t(Xs,$g),t(O,Pg),t(O,Zs),t(Zs,Ig),t(O,Ag),t(O,Ks),t(Ks,Ng),t(O,Mg),d(e,Yh,l),d(e,Re,l),t(Re,dt),t(dt,Qs),c(To,Qs,null),t(Re,Bg),t(Re,Vs),t(Vs,Lg),d(e,Jh,l),d(e,ht,l),t(ht,jg),t(ht,ei),t(ei,Og),t(ht,xg),d(e,Xh,l),d(e,x,l),t(x,De),t(De,Cg),t(De,ti),t(ti,qg),t(De,Sg),t(De,wn),t(wn,Rg),t(De,Dg),t(x,zg),t(x,ze),t(ze,Fg),t(ze,oi),t(oi,Hg),t(ze,Wg),t(ze,ri),t(ri,Gg),t(ze,Ug),t(x,Yg),t(x,ni),t(ni,Jg),t(x,Xg),t(x,Fe),t(Fe,Zg),t(Fe,$o),t($o,Kg),t(Fe,Qg),t(Fe,ai),t(ai,Vg),t(Fe,ew),t(x,tw),t(x,si),t(si,ow),d(e,Zh,l),d(e,vn,l),t(vn,rw),d(e,Kh,l),d(e,He,l),t(He,ft),t(ft,ii),c(Po,ii,null),t(He,nw),t(He,li),t(li,aw),d(e,Qh,l),d(e,mt,l),t(mt,di),t(di,Io),t(Io,sw),t(Io,Ao),t(Ao,iw),t(Io,lw),t(mt,dw),t(mt,hi),t(hi,No),t(No,hw),t(No,fi),t(fi,fw),t(No,mw),d(e,Vh,l),c(Mo,e,l),d(e,ef,l),d(e,Bo,l),t(Bo,mi),t(mi,pw),d(e,tf,l),c(Lo,e,l),d(e,of,l),d(e,bn,l),t(bn,uw),d(e,rf,l),c(jo,e,l),d(e,nf,l),d(e,Oo,l),t(Oo,We),t(We,cw),t(We,pi),t(pi,yw),t(We,gw),t(We,xo),t(xo,ww),t(We,vw),d(e,af,l),d(e,Co,l),t(Co,ui),t(ui,bw),t(Co,_w),d(e,sf,l),d(e,qo,l),t(qo,So),t(So,Ew),t(So,ci),t(ci,kw),t(So,Tw),d(e,lf,l),c(Ro,e,l),d(e,df,l),d(e,pt,l),t(pt,$w),t(pt,yi),t(yi,Pw),t(pt,Iw),d(e,hf,l),d(e,Ge,l),t(Ge,ut),t(ut,gi),c(Do,gi,null),t(Ge,Aw),t(Ge,wi),t(wi,Nw),d(e,ff,l),d(e,N,l),t(N,Mw),t(N,vi),t(vi,Bw),t(N,Lw),t(N,bi),t(bi,jw),t(N,Ow),t(N,_i),t(_i,xw),t(N,Cw),t(N,Ei),t(Ei,qw),t(N,Sw),t(N,ki),t(ki,Rw),t(N,Dw),d(e,mf,l),d(e,_n,l),t(_n,zw),d(e,pf,l),d(e,ct,l),t(ct,Fw),t(ct,Ti),t(Ti,Hw),t(ct,Ww),d(e,uf,l),d(e,M,l),t(M,$i),t($i,Gw),t(M,Uw),t(M,Pi),t(Pi,Yw),t(M,Jw),t(M,Ii),t(Ii,Xw),t(M,Zw),t(M,Ai),t(Ai,Kw),t(M,Qw),t(M,F),t(F,Vw),t(F,Ni),t(Ni,ev),t(F,tv),t(F,Mi),t(Mi,ov),t(F,rv),t(F,Bi),t(Bi,nv),t(F,av),t(F,Li),t(Li,sv),t(F,iv),t(M,lv),t(M,Ue),t(Ue,dv),t(Ue,ji),t(ji,hv),t(Ue,fv),t(Ue,Oi),t(Oi,mv),t(Ue,pv),d(e,cf,l),d(e,yt,l),t(yt,uv),t(yt,xi),t(xi,cv),t(yt,yv),d(e,yf,l),d(e,En,l),t(En,gv),d(e,gf,l),d(e,kn,l),t(kn,wv),d(e,wf,l),d(e,gt,l),t(gt,zo),t(zo,Fo),t(Fo,vv),t(zo,bv),t(zo,Ho),t(Ho,_v),t(gt,Ev),t(gt,Ci),t(Ci,kv),d(e,vf,l),d(e,Tn,l),t(Tn,Tv),d(e,bf,l),d(e,wt,l),t(wt,$v),t(wt,qi),t(qi,Pv),t(wt,Iv),d(e,_f,l),d(e,vt,l),t(vt,Av),t(vt,Si),t(Si,Nv),t(vt,Mv),d(e,Ef,l),c(Wo,e,l),d(e,kf,l),d(e,$n,l),t($n,Bv),d(e,Tf,l),d(e,bt,l),t(bt,Ri),t(Ri,Lv),t(bt,jv),t(bt,Ye),t(Ye,Ov),t(Ye,Di),t(Di,xv),t(Ye,Cv),t(Ye,zi),t(zi,qv),t(Ye,Sv),d(e,$f,l),d(e,Pn,l),t(Pn,Rv),d(e,Pf,l),d(e,_t,l),t(_t,Dv),t(_t,Fi),t(Fi,zv),t(_t,Fv),d(e,If,l),d(e,U,l),t(U,Hi),t(Hi,Hv),t(U,Wv),t(U,Wi),t(Wi,Gv),t(U,Uv),t(U,Gi),t(Gi,Yv),t(U,Jv),t(U,Ui),t(Ui,Xv),d(e,Af,l),d(e,Go,l),t(Go,Uo),t(Uo,Zv),t(Go,Kv),d(e,Nf,l),d(e,Et,l),t(Et,Qv),t(Et,Yo),t(Yo,Vv),t(Et,eb),d(e,Mf,l),d(e,In,l),t(In,tb),d(e,Bf,l),d(e,An,l),t(An,ob),d(e,Lf,l),d(e,B,l),t(B,Yi),t(Yi,rb),t(B,nb),t(B,Ji),t(Ji,ab),t(B,sb),t(B,Xi),t(Xi,ib),t(B,lb),t(B,Zi),t(Zi,db),t(B,hb),t(B,Ki),t(Ki,fb),t(B,mb),t(B,Qi),t(Qi,pb),d(e,jf,l),d(e,Je,l),
t(Je,ub),t(Je,Vi),t(Vi,cb),t(Je,yb),t(Je,el),t(el,gb),d(e,Of,l),d(e,Nn,l),t(Nn,wb),d(e,xf,l),c(Jo,e,l),d(e,Cf,l),d(e,ge,l),t(ge,vb),t(ge,tl),t(tl,bb),t(ge,_b),t(ge,ol),t(ol,Eb),t(ge,kb),d(e,qf,l),d(e,C,l),t(C,H),t(H,Tb),t(H,Xo),t(Xo,$b),t(H,Pb),t(H,rl),t(rl,Ib),t(H,Ab),t(H,nl),t(nl,Nb),t(H,Mb),t(H,Zo),t(Zo,Bb),t(H,Lb),t(C,jb),t(C,al),t(al,Ob),t(C,xb),t(C,T),t(T,Cb),t(T,sl),t(sl,qb),t(T,Sb),t(T,il),t(il,Rb),t(T,Db),t(T,ll),t(ll,zb),t(T,Fb),t(T,dl),t(dl,Hb),t(T,Wb),t(T,hl),t(hl,Gb),t(T,Ub),t(T,fl),t(fl,Yb),t(T,Jb),t(T,ml),t(ml,Xb),t(T,Zb),t(T,pl),t(pl,Kb),t(T,Qb),t(T,ul),t(ul,Vb),t(T,e_),t(T,cl),t(cl,t_),t(T,o_),t(C,r_),t(C,Ko),t(Ko,n_),t(Ko,yl),t(yl,a_),t(Ko,s_),t(C,i_),t(C,me),t(me,l_),t(me,gl),t(gl,d_),t(me,h_),t(me,wl),t(wl,f_),t(me,m_),t(me,vl),t(vl,p_),t(me,u_),d(e,Sf,l),d(e,kt,l),t(kt,c_),t(kt,bl),t(bl,y_),t(kt,g_),d(e,Rf,l),d(e,Xe,l),t(Xe,Tt),t(Tt,_l),c(Qo,_l,null),t(Xe,w_),t(Xe,El),t(El,v_),d(e,Df,l),d(e,Mn,l),t(Mn,b_),d(e,zf,l),c(Vo,e,l),d(e,Ff,l),d(e,$t,l),t($t,__),t($t,Bn),t(Bn,E_),t($t,k_),d(e,Hf,l),d(e,Ln,l),t(Ln,T_),d(e,Wf,l),d(e,jn,l),t(jn,kl),t(kl,$_),d(e,Gf,l),d(e,q,l),t(q,P_),t(q,er),t(er,I_),t(q,A_),t(q,Tl),t(Tl,N_),t(q,M_),t(q,$l),t($l,B_),t(q,L_),t(q,tr),t(tr,j_),t(q,O_),d(e,Uf,l),d(e,On,l),t(On,Pl),t(Pl,x_),d(e,Yf,l),d(e,we,l),t(we,C_),t(we,Il),t(Il,q_),t(we,S_),t(we,Al),t(Al,R_),t(we,D_),d(e,Jf,l),d(e,xn,l),t(xn,z_),d(e,Xf,l),d(e,Cn,l),t(Cn,Nl),t(Nl,F_),d(e,Zf,l),c(or,e,l),d(e,Kf,l),d(e,rr,l),t(rr,Ml),t(Ml,H_),d(e,Qf,l),c(nr,e,l),d(e,Vf,l),d(e,ar,l),t(ar,Bl),t(Bl,W_),d(e,em,l),c(sr,e,l),d(e,tm,l),d(e,ir,l),t(ir,Ll),t(Ll,G_),d(e,om,l),c(lr,e,l),d(e,rm,l),d(e,Ze,l),t(Ze,jl),t(jl,Ol),t(Ol,U_),t(Ze,Y_),t(Ze,xl),t(xl,Cl),t(Cl,J_),d(e,nm,l),d(e,qn,l),t(qn,X_),d(e,am,l),c(dr,e,l),d(e,sm,l),d(e,Sn,l),t(Sn,Z_),d(e,im,l),d(e,Rn,l),t(Rn,K_),d(e,lm,l),d(e,Dn,l),t(Dn,Q_),d(e,dm,l),d(e,zn,l),t(zn,ql),t(ql,V_),d(e,hm,l),d(e,ve,l),t(ve,e1),t(ve,Sl),t(Sl,t1),t(ve,o1),t(ve,Rl),t(Rl,r1),t(ve,n1),d(e,fm,l),d(e,Y,l),t(Y,a1),t(Y,Dl),t(Dl,s1),t(Y,i1),t(Y,zl),t(zl,l1),t(Y,d1),t(Y,Fl),t(Fl,h1),t(Y,f1),d(e,mm,l),d(e,j,l),t(j,Hl),t(Hl,m1),t(j,p1),t(j,Wl),t(Wl,u1),t(j,c1),t(j,Gl),t(Gl,y1),t(j,g1),t(j,Ul),t(Ul,w1),t(j,v1),t(j,Yl),t(Yl,b1),t(j,_1),d(e,pm,l),c(hr,e,l),d(e,um,l),d(e,be,l),t(be,E1),t(be,Jl),t(Jl,k1),t(be,T1),t(be,Xl),t(Xl,$1),t(be,P1),d(e,cm,l),d(e,Fn,l),t(Fn,Zl),t(Zl,I1),d(e,ym,l),d(e,J,l),t(J,A1),t(J,Kl),t(Kl,N1),t(J,M1),t(J,Ql),t(Ql,B1),t(J,L1),t(J,Vl),t(Vl,j1),t(J,O1),d(e,gm,l),d(e,Pt,l),t(Pt,Hn),t(Hn,x1),t(Hn,fr),t(fr,C1),t(Pt,q1),t(Pt,Wn),t(Wn,S1),t(Wn,mr),t(mr,R1),d(e,wm,l),d(e,It,l),t(It,D1),t(It,ed),t(ed,z1),t(It,F1),d(e,vm,l),c(pr,e,l),d(e,bm,l),d(e,X,l),t(X,H1),t(X,td),t(td,W1),t(X,G1),t(X,od),t(od,U1),t(X,Y1),t(X,rd),t(rd,J1),t(X,X1),d(e,_m,l),c(ur,e,l),d(e,Em,l),d(e,Gn,l),t(Gn,Z1),d(e,km,l),c(cr,e,l),d(e,Tm,l),d(e,Un,l),t(Un,K1),d(e,$m,l),c(yr,e,l),d(e,Pm,l),d(e,Yn,l),t(Yn,Q1),d(e,Im,l),c(gr,e,l),d(e,Am,l),d(e,wr,l),t(wr,V1),t(wr,nd),t(nd,e0),d(e,Nm,l),c(vr,e,l),d(e,Mm,l),d(e,_e,l),t(_e,t0),t(_e,ad),t(ad,o0),t(_e,r0),t(_e,sd),t(sd,n0),t(_e,a0),d(e,Bm,l),c(br,e,l),d(e,Lm,l),d(e,_r,l),t(_r,s0),t(_r,id),t(id,i0),d(e,jm,l),c(Er,e,l),d(e,Om,l),d(e,Jn,l),t(Jn,l0),d(e,xm,l),d(e,At,l),t(At,d0),t(At,ld),t(ld,h0),t(At,f0),d(e,Cm,l),d(e,Z,l),t(Z,m0),t(Z,dd),t(dd,p0),t(Z,u0),t(Z,hd),t(hd,c0),t(Z,y0),t(Z,fd),t(fd,g0),t(Z,w0),d(e,qm,l),d(e,K,l),t(K,v0),t(K,md),t(md,b0),t(K,_0),t(K,pd),t(pd,E0),t(K,k0),t(K,ud),t(ud,T0),t(K,$0),d(e,Sm,l),c(kr,e,l),d(e,Rm,l),d(e,Xn,l),t(Xn,cd),t(cd,P0),d(e,Dm,l),d(e,Nt,l),t(Nt,I0),t(Nt,Zn),t(Zn,A0),t(Nt,N0),d(e,zm,l),c(Tr,e,l),d(e,Fm,l),d(e,S,l),t(S,M0),t
(S,yd),t(yd,B0),t(S,L0),t(S,gd),t(gd,j0),t(S,O0),t(S,wd),t(wd,x0),t(S,C0),t(S,vd),t(vd,q0),t(S,S0),d(e,Hm,l),d(e,Q,l),t(Q,R0),t(Q,bd),t(bd,D0),t(Q,z0),t(Q,_d),t(_d,F0),t(Q,H0),t(Q,Ed),t(Ed,W0),t(Q,G0),d(e,Wm,l),d(e,V,l),t(V,Ke),t(Ke,U0),t(Ke,kd),t(kd,Y0),t(Ke,J0),t(Ke,Td),t(Td,X0),t(Ke,Z0),t(V,K0),t(V,$d),t($d,Q0),t(V,V0),t(V,Pd),t(Pd,e2),t(V,t2),t(V,ee),t(ee,o2),t(ee,Id),t(Id,r2),t(ee,n2),t(ee,Ad),t(Ad,a2),t(ee,s2),t(ee,Nd),t(Nd,i2),t(ee,l2),t(ee,$r),t($r,d2),d(e,Gm,l),d(e,Ee,l),t(Ee,h2),t(Ee,Md),t(Md,f2),t(Ee,m2),t(Ee,Bd),t(Bd,p2),t(Ee,u2),d(e,Um,l),d(e,Mt,l),t(Mt,c2),t(Mt,Ld),t(Ld,y2),t(Mt,g2),d(e,Ym,l),d(e,Kn,l),t(Kn,jd),t(jd,w2),d(e,Jm,l),d(e,Bt,l),t(Bt,v2),t(Bt,Od),t(Od,b2),t(Bt,_2),d(e,Xm,l),c(Pr,e,l),d(e,Zm,l),d(e,Qn,l),t(Qn,E2),d(e,Km,l),d(e,Lt,l),t(Lt,Vn),t(Vn,k2),t(Vn,xd),t(xd,T2),t(Lt,$2),t(Lt,Cd),t(Cd,P2),d(e,Qm,l),d(e,jt,l),t(jt,I2),t(jt,qd),t(qd,A2),t(jt,N2),d(e,Vm,l),c(Ir,e,l),d(e,ep,l),c(Ot,e,l),d(e,tp,l),d(e,te,l),t(te,M2),t(te,Sd),t(Sd,B2),t(te,L2),t(te,Rd),t(Rd,j2),t(te,O2),t(te,Dd),t(Dd,x2),t(te,C2),d(e,op,l),d(e,xt,l),t(xt,Ar),t(Ar,q2),t(Ar,zd),t(zd,S2),t(Ar,R2),t(xt,D2),t(xt,Fd),t(Fd,z2),d(e,rp,l),d(e,ea,l),t(ea,Hd),t(Hd,F2),d(e,np,l),d(e,Ct,l),t(Ct,H2),t(Ct,Wd),t(Wd,W2),t(Ct,G2),d(e,ap,l),d(e,ta,l),t(ta,U2),d(e,sp,l),d(e,oa,l),t(oa,Y2),d(e,ip,l),c(Nr,e,l),d(e,lp,l),d(e,qt,l),t(qt,J2),t(qt,Gd),t(Gd,X2),t(qt,Z2),d(e,dp,l),c(Mr,e,l),d(e,hp,l),d(e,St,l),t(St,K2),t(St,Ud),t(Ud,Q2),t(St,V2),d(e,fp,l),d(e,ke,l),t(ke,eE),t(ke,Yd),t(Yd,tE),t(ke,oE),t(ke,Jd),t(Jd,rE),t(ke,nE),d(e,mp,l),d(e,ra,l),t(ra,Xd),t(Xd,aE),d(e,pp,l),d(e,oe,l),t(oe,sE),t(oe,Zd),t(Zd,iE),t(oe,lE),t(oe,Kd),t(Kd,dE),t(oe,hE),t(oe,Qd),t(Qd,fE),t(oe,mE),d(e,up,l),d(e,na,l),t(na,Vd),t(Vd,pE),d(e,cp,l),d(e,re,l),t(re,uE),t(re,eh),t(eh,cE),t(re,yE),t(re,th),t(th,gE),t(re,wE),t(re,oh),t(oh,vE),t(re,bE),d(e,yp,l),d(e,Te,l),t(Te,_E),t(Te,rh),t(rh,EE),t(Te,kE),t(Te,aa),t(aa,TE),t(Te,$E),d(e,gp,l),d(e,sa,l),t(sa,nh),t(nh,PE),d(e,wp,l),d(e,Rt,l),t(Rt,IE),t(Rt,ah),t(ah,AE),t(Rt,NE),d(e,vp,l),c(Br,e,l),d(e,bp,l),d(e,ia,l),t(ia,ME),d(e,_p,l),c(Lr,e,l),d(e,Ep,l),d(e,la,l),t(la,BE),d(e,kp,l),d(e,da,l),t(da,LE),d(e,Tp,l),d(e,ha,l),t(ha,jE),d(e,$p,l),d(e,fa,l),t(fa,sh),t(sh,OE),d(e,Pp,l),d(e,R,l),t(R,xE),t(R,ma),t(ma,CE),t(R,qE),t(R,ih),t(ih,SE),t(R,RE),t(R,lh),t(lh,DE),t(R,zE),t(R,dh),t(dh,FE),t(R,HE),d(e,Ip,l),c(jr,e,l),d(e,Ap,l),d(e,Dt,l),t(Dt,WE),t(Dt,hh),t(hh,GE),t(Dt,UE),d(e,Np,l),d(e,pa,l),t(pa,fh),t(fh,YE),d(e,Mp,l),d(e,zt,l),t(zt,JE),t(zt,mh),t(mh,XE),t(zt,ZE),d(e,Bp,l),d(e,ua,l),t(ua,ph),t(ph,KE),d(e,Lp,l),d(e,ca,l),t(ca,QE),d(e,jp,l),d(e,Qe,l),t(Qe,Ft),t(Ft,uh),c(Or,uh,null),t(Qe,VE),t(Qe,ch),t(ch,e3),d(e,Op,l),d(e,ya,l),t(ya,t3),d(e,xp,l),d(e,ga,l),t(ga,yh),t(yh,o3),Cp=!0},p(e,[l]){const 
xr={};l&2&&(xr.$$scope={dirty:l,ctx:e}),Ot.$set(xr)},i(e){Cp||(y(D.$$.fragment,e),y(ho.$$.fragment,e),y(mo.$$.fragment,e),y(po.$$.fragment,e),y(uo.$$.fragment,e),y(co.$$.fragment,e),y(To.$$.fragment,e),y(Po.$$.fragment,e),y(Mo.$$.fragment,e),y(Lo.$$.fragment,e),y(jo.$$.fragment,e),y(Ro.$$.fragment,e),y(Do.$$.fragment,e),y(Wo.$$.fragment,e),y(Jo.$$.fragment,e),y(Qo.$$.fragment,e),y(Vo.$$.fragment,e),y(or.$$.fragment,e),y(nr.$$.fragment,e),y(sr.$$.fragment,e),y(lr.$$.fragment,e),y(dr.$$.fragment,e),y(hr.$$.fragment,e),y(pr.$$.fragment,e),y(ur.$$.fragment,e),y(cr.$$.fragment,e),y(yr.$$.fragment,e),y(gr.$$.fragment,e),y(vr.$$.fragment,e),y(br.$$.fragment,e),y(Er.$$.fragment,e),y(kr.$$.fragment,e),y(Tr.$$.fragment,e),y(Pr.$$.fragment,e),y(Ir.$$.fragment,e),y(Ot.$$.fragment,e),y(Nr.$$.fragment,e),y(Mr.$$.fragment,e),y(Br.$$.fragment,e),y(Lr.$$.fragment,e),y(jr.$$.fragment,e),y(Or.$$.fragment,e),Cp=!0)},o(e){g(D.$$.fragment,e),g(ho.$$.fragment,e),g(mo.$$.fragment,e),g(po.$$.fragment,e),g(uo.$$.fragment,e),g(co.$$.fragment,e),g(To.$$.fragment,e),g(Po.$$.fragment,e),g(Mo.$$.fragment,e),g(Lo.$$.fragment,e),g(jo.$$.fragment,e),g(Ro.$$.fragment,e),g(Do.$$.fragment,e),g(Wo.$$.fragment,e),g(Jo.$$.fragment,e),g(Qo.$$.fragment,e),g(Vo.$$.fragment,e),g(or.$$.fragment,e),g(nr.$$.fragment,e),g(sr.$$.fragment,e),g(lr.$$.fragment,e),g(dr.$$.fragment,e),g(hr.$$.fragment,e),g(pr.$$.fragment,e),g(ur.$$.fragment,e),g(cr.$$.fragment,e),g(yr.$$.fragment,e),g(gr.$$.fragment,e),g(vr.$$.fragment,e),g(br.$$.fragment,e),g(Er.$$.fragment,e),g(kr.$$.fragment,e),g(Tr.$$.fragment,e),g(Pr.$$.fragment,e),g(Ir.$$.fragment,e),g(Ot.$$.fragment,e),g(Nr.$$.fragment,e),g(Mr.$$.fragment,e),g(Br.$$.fragment,e),g(Lr.$$.fragment,e),g(jr.$$.fragment,e),g(Or.$$.fragment,e),Cp=!1},d(e){o(P),e&&o(ue),e&&o(I),w(D),e&&o(so),e&&o(z),e&&o(wh),e&&o(et),e&&o(vh),e&&o(Cr),e&&o(bh),e&&o(W),e&&o(_h),e&&o(tt),e&&o(Eh),e&&o(qr),e&&o(kh),e&&o(xe),w(ho),e&&o(Th),e&&o(Sr),e&&o($h),e&&o(rt),e&&o(Ph),e&&o(ce),e&&o(Ih),e&&o(nt),e&&o(Ah),e&&o(Dr),e&&o(Nh),e&&o(Ce),w(mo),e&&o(Mh),e&&o(G),e&&o(Bh),e&&o(Hr),e&&o(Lh),e&&o(Wr),e&&o(jh),e&&o(v),e&&o(Oh),w(po,e),e&&o(xh),e&&o(A),e&&o(Ch),e&&o(qe),w(uo),e&&o(qh),e&&o(Vr),e&&o(Sh),e&&o(Se),w(co),e&&o(Rh),e&&o(en),e&&o(Dh),e&&o(lt),e&&o(zh),e&&o(tn),e&&o(Fh),e&&o(ye),e&&o(Hh),e&&o(on),e&&o(Wh),e&&o(rn),e&&o(Gh),e&&o(b),e&&o(Uh),e&&o(O),e&&o(Yh),e&&o(Re),w(To),e&&o(Jh),e&&o(ht),e&&o(Xh),e&&o(x),e&&o(Zh),e&&o(vn),e&&o(Kh),e&&o(He),w(Po),e&&o(Qh),e&&o(mt),e&&o(Vh),w(Mo,e),e&&o(ef),e&&o(Bo),e&&o(tf),w(Lo,e),e&&o(of),e&&o(bn),e&&o(rf),w(jo,e),e&&o(nf),e&&o(Oo),e&&o(af),e&&o(Co),e&&o(sf),e&&o(qo),e&&o(lf),w(Ro,e),e&&o(df),e&&o(pt),e&&o(hf),e&&o(Ge),w(Do),e&&o(ff),e&&o(N),e&&o(mf),e&&o(_n),e&&o(pf),e&&o(ct),e&&o(uf),e&&o(M),e&&o(cf),e&&o(yt),e&&o(yf),e&&o(En),e&&o(gf),e&&o(kn),e&&o(wf),e&&o(gt),e&&o(vf),e&&o(Tn),e&&o(bf),e&&o(wt),e&&o(_f),e&&o(vt),e&&o(Ef),w(Wo,e),e&&o(kf),e&&o($n),e&&o(Tf),e&&o(bt),e&&o($f),e&&o(Pn),e&&o(Pf),e&&o(_t),e&&o(If),e&&o(U),e&&o(Af),e&&o(Go),e&&o(Nf),e&&o(Et),e&&o(Mf),e&&o(In),e&&o(Bf),e&&o(An),e&&o(Lf),e&&o(B),e&&o(jf),e&&o(Je),e&&o(Of),e&&o(Nn),e&&o(xf),w(Jo,e),e&&o(Cf),e&&o(ge),e&&o(qf),e&&o(C),e&&o(Sf),e&&o(kt),e&&o(Rf),e&&o(Xe),w(Qo),e&&o(Df),e&&o(Mn),e&&o(zf),w(Vo,e),e&&o(Ff),e&&o($t),e&&o(Hf),e&&o(Ln),e&&o(Wf),e&&o(jn),e&&o(Gf),e&&o(q),e&&o(Uf),e&&o(On),e&&o(Yf),e&&o(we),e&&o(Jf),e&&o(xn),e&&o(Xf),e&&o(Cn),e&&o(Zf),w(or,e),e&&o(Kf),e&&o(rr),e&&o(Qf),w(nr,e),e&&o(Vf),e&&o(ar),e&&o(em),w(sr,e),e&&o(tm),e&&o(ir),e&&o(om),w(lr,e),e&&o(rm),e&&o(Ze),e&&o(nm),e&&o(qn),e&&o(am),w(dr,e),e&&o(sm),e&&o
(Sn),e&&o(im),e&&o(Rn),e&&o(lm),e&&o(Dn),e&&o(dm),e&&o(zn),e&&o(hm),e&&o(ve),e&&o(fm),e&&o(Y),e&&o(mm),e&&o(j),e&&o(pm),w(hr,e),e&&o(um),e&&o(be),e&&o(cm),e&&o(Fn),e&&o(ym),e&&o(J),e&&o(gm),e&&o(Pt),e&&o(wm),e&&o(It),e&&o(vm),w(pr,e),e&&o(bm),e&&o(X),e&&o(_m),w(ur,e),e&&o(Em),e&&o(Gn),e&&o(km),w(cr,e),e&&o(Tm),e&&o(Un),e&&o($m),w(yr,e),e&&o(Pm),e&&o(Yn),e&&o(Im),w(gr,e),e&&o(Am),e&&o(wr),e&&o(Nm),w(vr,e),e&&o(Mm),e&&o(_e),e&&o(Bm),w(br,e),e&&o(Lm),e&&o(_r),e&&o(jm),w(Er,e),e&&o(Om),e&&o(Jn),e&&o(xm),e&&o(At),e&&o(Cm),e&&o(Z),e&&o(qm),e&&o(K),e&&o(Sm),w(kr,e),e&&o(Rm),e&&o(Xn),e&&o(Dm),e&&o(Nt),e&&o(zm),w(Tr,e),e&&o(Fm),e&&o(S),e&&o(Hm),e&&o(Q),e&&o(Wm),e&&o(V),e&&o(Gm),e&&o(Ee),e&&o(Um),e&&o(Mt),e&&o(Ym),e&&o(Kn),e&&o(Jm),e&&o(Bt),e&&o(Xm),w(Pr,e),e&&o(Zm),e&&o(Qn),e&&o(Km),e&&o(Lt),e&&o(Qm),e&&o(jt),e&&o(Vm),w(Ir,e),e&&o(ep),w(Ot,e),e&&o(tp),e&&o(te),e&&o(op),e&&o(xt),e&&o(rp),e&&o(ea),e&&o(np),e&&o(Ct),e&&o(ap),e&&o(ta),e&&o(sp),e&&o(oa),e&&o(ip),w(Nr,e),e&&o(lp),e&&o(qt),e&&o(dp),w(Mr,e),e&&o(hp),e&&o(St),e&&o(fp),e&&o(ke),e&&o(mp),e&&o(ra),e&&o(pp),e&&o(oe),e&&o(up),e&&o(na),e&&o(cp),e&&o(re),e&&o(yp),e&&o(Te),e&&o(gp),e&&o(sa),e&&o(wp),e&&o(Rt),e&&o(vp),w(Br,e),e&&o(bp),e&&o(ia),e&&o(_p),w(Lr,e),e&&o(Ep),e&&o(la),e&&o(kp),e&&o(da),e&&o(Tp),e&&o(ha),e&&o($p),e&&o(fa),e&&o(Pp),e&&o(R),e&&o(Ip),w(jr,e),e&&o(Ap),e&&o(Dt),e&&o(Np),e&&o(pa),e&&o(Mp),e&&o(zt),e&&o(Bp),e&&o(ua),e&&o(Lp),e&&o(ca),e&&o(jp),e&&o(Qe),w(Or),e&&o(Op),e&&o(ya),e&&o(xp),e&&o(ga)}}}const y9={local:"how-to-add-a-model-to-transformers",sections:[{local:"general-overview-of-transformers",sections:[{local:"overview-of-models",title:"Overview of models"},{local:"overview-of-tokenizers",title:"Overview of tokenizers"}],title:"General overview of \u{1F917} Transformers"},{local:"stepbystep-recipe-to-add-a-model-to-transformers",sections:[{local:"1-optional-theoretical-aspects-of-brandnewbert",title:"1. (Optional) Theoretical aspects of BrandNewBert"},{local:"2-next-prepare-your-environment",title:"2. Next prepare your environment"},{local:"34-run-a-pretrained-checkpoint-using-the-original-repository",title:"3.-4. Run a pretrained checkpoint using the original repository"},{local:"514-port-brandnewbert-to-transformers",title:"5.-14. Port BrandNewBert to \u{1F917} Transformers"},{local:"share-your-work",title:"Share your work!!"}],title:"Step-by-step recipe to add a model to \u{1F917} Transformers"}],title:"How to add a model to \u{1F917} Transformers?"};function g9(Ca,P,ue){let{fw:I}=P;return Ca.$$set=L=>{"fw"in L&&ue(0,I=L.fw)},[I]}class k9 extends l9{constructor(P){super();d9(this,P,g9,c9,h9,{fw:0})}}export{k9 as default,y9 as metadata};
274
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages/quicktour.mdx-8a4295b9.js
import{S as Yf,i as Gf,s as Qf,e as n,k as f,w as d,t as a,M as Jf,c as l,d as s,m as u,a as i,x as _,h as r,b as h,F as t,g as p,y as g,q as v,o as y,B as $}from"../chunks/vendor-4833417e.js";import{T as ba}from"../chunks/Tip-fffd6df1.js";import{Y as Bf}from"../chunks/Youtube-27813aed.js";import{I as Ne}from"../chunks/IconCopyLink-4b81c553.js";import{C as D}from"../chunks/CodeBlock-6a3d1b46.js";import{C as pe}from"../chunks/CodeBlockFw-27a176a0.js";import{D as Kf}from"../chunks/DocNotebookDropdown-ecff2a90.js";import"../chunks/CopyButton-dacfbfaf.js";function Vf(N){let m,k;return{c(){m=n("p"),k=a(`All code examples presented in the documentation have a toggle on the top left for PyTorch and TensorFlow. If not, the code is expected to work for both backends without any change.`)},l(c){m=l(c,"P",{});var w=i(m);k=r(w,`All code examples presented in the documentation have a toggle on the top left for PyTorch and TensorFlow. If not, the code is expected to work for both backends without any change.`),w.forEach(s)},m(c,w){p(c,m,w),t(m,k)},d(c){c&&s(m)}}}function Zf(N){let m,k,c,w,A,b,j,x;return{c(){m=n("p"),k=a("For more details about the "),c=n("a"),w=a("pipeline()"),A=a(" and associated tasks, refer to the documentation "),b=n("a"),j=a("here"),x=a("."),this.h()},l(S){m=l(S,"P",{});var E=i(m);k=r(E,"For more details about the "),c=l(E,"A",{href:!0});var O=i(c);w=r(O,"pipeline()"),O.forEach(s),A=r(E," and associated tasks, refer to the documentation "),b=l(E,"A",{href:!0});var R=i(b);j=r(R,"here"),R.forEach(s),x=r(E,"."),E.forEach(s),this.h()},h(){h(c,"href","/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline"),h(b,"href","./main_classes/pipelines")},m(S,E){p(S,m,E),t(m,k),t(m,c),t(c,w),t(m,A),t(m,b),t(b,j),t(m,x)},d(S){S&&s(m)}}}function Xf(N){let m,k,c,w,A,b,j,x;return{c(){m=n("p"),k=a("See the "),c=n("a"),w=a("task summary"),A=a(" for which "),b=n("a"),j=a("AutoModel"),x=a(" class to use for which task."),this.h()},l(S){m=l(S,"P",{});var E=i(m);k=r(E,"See the "),c=l(E,"A",{href:!0});var O=i(c);w=r(O,"task summary"),O.forEach(s),A=r(E," for which "),b=l(E,"A",{href:!0});var R=i(b);j=r(R,"AutoModel"),R.forEach(s),x=r(E," class to use for which task."),E.forEach(s),this.h()},h(){h(c,"href","./task_summary"),h(b,"href","/docs/transformers/pr_16143/en/model_doc/auto#transformers.AutoModel")},m(S,E){p(S,m,E),t(m,k),t(m,c),t(c,w),t(m,A),t(m,b),t(b,j),t(m,x)},d(S){S&&s(m)}}}function eu(N){let m,k,c,w,A;return{c(){m=n("p"),k=a("All \u{1F917} Transformers models (PyTorch or TensorFlow) outputs the tensors "),c=n("em"),w=a("before"),A=a(` the final activation function (like softmax) because the final activation function is often fused with the loss.`)},l(b){m=l(b,"P",{});var j=i(m);k=r(j,"All \u{1F917} Transformers models (PyTorch or TensorFlow) outputs the tensors "),c=l(j,"EM",{});var x=i(c);w=r(x,"before"),x.forEach(s),A=r(j,` the final activation function (like softmax) because the final activation function is often fused with the loss.`),j.forEach(s)},m(b,j){p(b,m,j),t(m,k),t(m,c),t(c,w),t(m,A)},d(b){b&&s(m)}}}function tu(N){let m,k,c,w,A;return{c(){m=n("p"),k=a(`\u{1F917} Transformers model outputs are special dataclasses so their attributes are autocompleted in an IDE. 
The model outputs also behave like a tuple or a dictionary (e.g., you can index with an integer, a slice or a string) in which case the attributes that are `),c=n("code"),w=a("None"),A=a(" are ignored.")},l(b){m=l(b,"P",{});var j=i(m);k=r(j,`\u{1F917} Transformers model outputs are special dataclasses so their attributes are autocompleted in an IDE. The model outputs also behave like a tuple or a dictionary (e.g., you can index with an integer, a slice or a string) in which case the attributes that are `),c=l(j,"CODE",{});var x=i(c);w=r(x,"None"),x.forEach(s),A=r(j," are ignored."),j.forEach(s)},m(b,j){p(b,m,j),t(m,k),t(m,c),t(c,w),t(m,A)},d(b){b&&s(m)}}}function su(N){let m,k,c,w,A,b,j,x,S,E,O,R,H,fo,Tt,uo,ho,xt,mo,co,wa,fe,ka,te,ue,Ts,Oe,_o,xs,go,ja,Le,qt,vo,yo,Ea,De,Aa,he,$o,zt,bo,wo,Ta,Re,qs,ko,jo,xa,T,zs,Eo,Ao,Fs,To,xo,Ps,qo,zo,Ss,Fo,Po,Ms,So,Mo,Cs,Co,Io,Is,No,Oo,Ns,Lo,qa,He,Os,Do,Ro,za,U,Ls,Ho,Uo,Ds,Wo,Bo,Rs,Yo,Fa,Ue,Hs,Go,Qo,Pa,me,Us,Jo,Ko,Ws,Vo,Sa,ce,Ma,se,de,Bs,We,Zo,Ys,Xo,Ca,_e,en,Ft,tn,sn,Ia,Pt,an,Na,Be,Oa,ge,rn,St,on,nn,La,Ye,Da,W,ln,Ge,pn,fn,Gs,un,hn,Ra,Qe,Ha,ve,mn,Mt,cn,dn,Ua,Je,Wa,B,_n,Ct,gn,vn,Ke,yn,$n,Ba,Ve,Ya,ye,bn,It,wn,kn,Ga,Ze,Qa,Y,jn,Xe,En,An,et,Tn,xn,Ja,tt,Ka,Nt,qn,Va,st,Za,$e,zn,Ot,Fn,Pn,Xa,ae,be,Qs,at,Sn,Js,Mn,er,M,Cn,Lt,In,Nn,rt,On,Ln,Dt,Dn,Rn,ot,Hn,Un,tr,nt,sr,G,Wn,Rt,Bn,Yn,Ks,Gn,Qn,ar,lt,rr,Q,Jn,Ht,Kn,Vn,Vs,Zn,Xn,or,it,nr,J,el,Ut,tl,sl,Wt,al,rl,lr,re,we,Zs,pt,ol,Xs,nl,ir,ft,pr,q,ll,Bt,il,pl,Yt,fl,ul,Gt,hl,ml,Qt,cl,dl,ea,_l,gl,Jt,vl,yl,fr,K,$l,ta,bl,wl,Kt,kl,jl,ur,oe,ke,sa,ut,El,aa,Al,hr,V,Tl,ra,xl,ql,Vt,zl,Fl,mr,je,Pl,Zt,Sl,Ml,cr,ht,dr,Ee,Cl,oa,Il,Nl,_r,Xt,Ol,gr,mt,vr,es,Ll,yr,Ae,ts,ss,Dl,Rl,Hl,as,rs,Ul,Wl,$r,Te,Bl,os,Yl,Gl,br,ct,wr,xe,Ql,ns,Jl,Kl,kr,ne,qe,na,dt,Vl,la,Zl,jr,F,Xl,ls,ei,ti,is,si,ai,ps,ri,oi,fs,ni,li,us,ii,pi,Er,_t,Ar,ze,Tr,Fe,fi,ia,ui,hi,xr,gt,qr,Z,mi,pa,ci,di,fa,_i,gi,zr,vt,Fr,Pe,Pr,z,vi,yt,ua,yi,$i,$t,ha,bi,wi,hs,ki,ji,ma,Ei,Ai,bt,Ti,xi,ms,qi,zi,Sr,Se,Mr,le,Me,ca,wt,Fi,da,Pi,Cr,Ce,Si,cs,Mi,Ci,Ir,kt,Nr,Ie,Ii,ds,Ni,Oi,Or,jt,Lr,X,Li,_a,Di,Ri,ga,Hi,Ui,Dr,Et,Rr;return b=new Ne({}),O=new Kf({props:{classNames:"absolute z-10 right-0 top-0",options:[{label:"Mixed",value:"https://colab.research.google.com/github/huggingface/notebooks/blob/master/transformers_doc/quicktour.ipynb"},{label:"PyTorch",value:"https://colab.research.google.com/github/huggingface/notebooks/blob/master/transformers_doc/pytorch/quicktour.ipynb"},{label:"TensorFlow",value:"https://colab.research.google.com/github/huggingface/notebooks/blob/master/transformers_doc/tensorflow/quicktour.ipynb"},{label:"Mixed",value:"https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/transformers_doc/quicktour.ipynb"},{label:"PyTorch",value:"https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/transformers_doc/pytorch/quicktour.ipynb"},{label:"TensorFlow",value:"https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/transformers_doc/tensorflow/quicktour.ipynb"}]}}),fe=new ba({props:{$$slots:{default:[Vf]},$$scope:{ctx:N}}}),Oe=new Ne({}),De=new Bf({props:{id:"tiZFewofSLM"}}),ce=new ba({props:{$$slots:{default:[Zf]},$$scope:{ctx:N}}}),We=new Ne({}),Be=new pe({props:{group1:{id:"pt",code:"pip install torch",highlighted:"pip install torch"},group2:{id:"tf",code:"pip install tensorflow",highlighted:"pip install tensorflow"}}}),Ye=new D({props:{code:`from transformers import pipeline classifier = pipeline("sentiment-analysis")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> 
transformers <span class="hljs-keyword">import</span> pipeline <span class="hljs-meta">&gt;&gt;&gt; </span>classifier = pipeline(<span class="hljs-string">&quot;sentiment-analysis&quot;</span>)`}}),Qe=new D({props:{code:'classifier("We are very happy to show you the \u{1F917} Transformers library.")',highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>classifier(<span class="hljs-string">&quot;We are very happy to show you the \u{1F917} Transformers library.&quot;</span>) [{<span class="hljs-string">&#x27;label&#x27;</span>: <span class="hljs-string">&#x27;POSITIVE&#x27;</span>, <span class="hljs-string">&#x27;score&#x27;</span>: <span class="hljs-number">0.9998</span>}]`}}),Je=new D({props:{code:`results = classifier(["We are very happy to show you the \u{1F917} Transformers library.", "We hope you don't hate it."]) for result in results: print(f"label: {result['label']}, with score: {round(result['score'], 4)}")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>results = classifier([<span class="hljs-string">&quot;We are very happy to show you the \u{1F917} Transformers library.&quot;</span>, <span class="hljs-string">&quot;We hope you don&#x27;t hate it.&quot;</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">for</span> result <span class="hljs-keyword">in</span> results: <span class="hljs-meta">... </span> <span class="hljs-built_in">print</span>(<span class="hljs-string">f&quot;label: <span class="hljs-subst">{result[<span class="hljs-string">&#x27;label&#x27;</span>]}</span>, with score: <span class="hljs-subst">{<span class="hljs-built_in">round</span>(result[<span class="hljs-string">&#x27;score&#x27;</span>], <span class="hljs-number">4</span>)}</span>&quot;</span>) label: POSITIVE, <span class="hljs-keyword">with</span> score: <span class="hljs-number">0.9998</span> label: NEGATIVE, <span class="hljs-keyword">with</span> score: <span class="hljs-number">0.5309</span>`}}),Ve=new D({props:{code:"pip install datasets ",highlighted:"pip install datasets "}}),Ze=new D({props:{code:`import torch from transformers import pipeline speech_recognizer = pipeline("automatic-speech-recognition", model="facebook/wav2vec2-base-960h")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> pipeline <span class="hljs-meta">&gt;&gt;&gt; </span>speech_recognizer = pipeline(<span class="hljs-string">&quot;automatic-speech-recognition&quot;</span>, model=<span class="hljs-string">&quot;facebook/wav2vec2-base-960h&quot;</span>)`}}),tt=new D({props:{code:`import datasets dataset = datasets.load_dataset("superb", name="asr", split="test")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> datasets <span class="hljs-meta">&gt;&gt;&gt; </span>dataset = datasets.load_dataset(<span class="hljs-string">&quot;superb&quot;</span>, name=<span class="hljs-string">&quot;asr&quot;</span>, split=<span class="hljs-string">&quot;test&quot;</span>)`}}),st=new D({props:{code:`files = dataset["file"] speech_recognizer(files[:4])`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>files = dataset[<span class="hljs-string">&quot;file&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>speech_recognizer(files[:<span class="hljs-number">4</span>]) [{<span class="hljs-string">&#x27;text&#x27;</span>: <span class="hljs-string">&#x27;HE HOPED 
THERE WOULD BE STEW FOR DINNER TURNIPS AND CARROTS AND BRUISED POTATOES AND FAT MUTTON PIECES TO BE LADLED OUT IN THICK PEPPERED FLOWER FAT AND SAUCE&#x27;</span>}, {<span class="hljs-string">&#x27;text&#x27;</span>: <span class="hljs-string">&#x27;STUFFERED INTO YOU HIS BELLY COUNSELLED HIM&#x27;</span>}, {<span class="hljs-string">&#x27;text&#x27;</span>: <span class="hljs-string">&#x27;AFTER EARLY NIGHTFALL THE YELLOW LAMPS WOULD LIGHT UP HERE AND THERE THE SQUALID QUARTER OF THE BROTHELS&#x27;</span>}, {<span class="hljs-string">&#x27;text&#x27;</span>: <span class="hljs-string">&#x27;HO BERTIE ANY GOOD IN YOUR MIND&#x27;</span>}]`}}),at=new Ne({}),nt=new D({props:{code:'model_name = "nlptown/bert-base-multilingual-uncased-sentiment"',highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>model_name = <span class="hljs-string">&quot;nlptown/bert-base-multilingual-uncased-sentiment&quot;</span>'}}),lt=new pe({props:{group1:{id:"pt",code:`from transformers import AutoTokenizer, AutoModelForSequenceClassification model = AutoModelForSequenceClassification.from_pretrained(model_name) tokenizer = AutoTokenizer.from_pretrained(model_name)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer, AutoModelForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForSequenceClassification.from_pretrained(model_name) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(model_name)`},group2:{id:"tf",code:`from transformers import AutoTokenizer, TFAutoModelForSequenceClassification model = TFAutoModelForSequenceClassification.from_pretrained(model_name) tokenizer = AutoTokenizer.from_pretrained(model_name)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer, TFAutoModelForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModelForSequenceClassification.from_pretrained(model_name) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(model_name)`}}}),it=new D({props:{code:`classifier = pipeline("sentiment-analysis", model=model, tokenizer=tokenizer) classifier("Nous sommes tr\xE8s heureux de vous pr\xE9senter la biblioth\xE8que \u{1F917} Transformers.")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>classifier = pipeline(<span class="hljs-string">&quot;sentiment-analysis&quot;</span>, model=model, tokenizer=tokenizer) <span class="hljs-meta">&gt;&gt;&gt; </span>classifier(<span class="hljs-string">&quot;Nous sommes tr\xE8s heureux de vous pr\xE9senter la biblioth\xE8que \u{1F917} Transformers.&quot;</span>) [{<span class="hljs-string">&#x27;label&#x27;</span>: <span class="hljs-string">&#x27;5 stars&#x27;</span>, <span class="hljs-string">&#x27;score&#x27;</span>: <span class="hljs-number">0.7273</span>}]`}}),pt=new Ne({}),ft=new Bf({props:{id:"AhChOFRegn4"}}),ut=new Ne({}),ht=new D({props:{code:`from transformers import AutoTokenizer model_name = "nlptown/bert-base-multilingual-uncased-sentiment" tokenizer = AutoTokenizer.from_pretrained(model_name)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>model_name = <span 
class="hljs-string">&quot;nlptown/bert-base-multilingual-uncased-sentiment&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(model_name)`}}),mt=new D({props:{code:`encoding = tokenizer("We are very happy to show you the \u{1F917} Transformers library.") print(encoding)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer(<span class="hljs-string">&quot;We are very happy to show you the \u{1F917} Transformers library.&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(encoding) {<span class="hljs-string">&#x27;input_ids&#x27;</span>: [<span class="hljs-number">101</span>, <span class="hljs-number">11312</span>, <span class="hljs-number">10320</span>, <span class="hljs-number">12495</span>, <span class="hljs-number">19308</span>, <span class="hljs-number">10114</span>, <span class="hljs-number">11391</span>, <span class="hljs-number">10855</span>, <span class="hljs-number">10103</span>, <span class="hljs-number">100</span>, <span class="hljs-number">58263</span>, <span class="hljs-number">13299</span>, <span class="hljs-number">119</span>, <span class="hljs-number">102</span>], <span class="hljs-string">&#x27;token_type_ids&#x27;</span>: [<span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>], <span class="hljs-string">&#x27;attention_mask&#x27;</span>: [<span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>]}`}}),ct=new pe({props:{group1:{id:"pt",code:`pt_batch = tokenizer( ["We are very happy to show you the \u{1F917} Transformers library.", "We hope you don't hate it."], padding=True, truncation=True, max_length=512, return_tensors="pt", )`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>pt_batch = tokenizer( <span class="hljs-meta">... </span> [<span class="hljs-string">&quot;We are very happy to show you the \u{1F917} Transformers library.&quot;</span>, <span class="hljs-string">&quot;We hope you don&#x27;t hate it.&quot;</span>], <span class="hljs-meta">... </span> padding=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> truncation=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> max_length=<span class="hljs-number">512</span>, <span class="hljs-meta">... </span> return_tensors=<span class="hljs-string">&quot;pt&quot;</span>, <span class="hljs-meta">... 
</span>)`},group2:{id:"tf",code:`tf_batch = tokenizer( ["We are very happy to show you the \u{1F917} Transformers library.", "We hope you don't hate it."], padding=True, truncation=True, max_length=512, return_tensors="tf", )`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>tf_batch = tokenizer( <span class="hljs-meta">... </span> [<span class="hljs-string">&quot;We are very happy to show you the \u{1F917} Transformers library.&quot;</span>, <span class="hljs-string">&quot;We hope you don&#x27;t hate it.&quot;</span>], <span class="hljs-meta">... </span> padding=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> truncation=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> max_length=<span class="hljs-number">512</span>, <span class="hljs-meta">... </span> return_tensors=<span class="hljs-string">&quot;tf&quot;</span>, <span class="hljs-meta">... </span>)`}}}),dt=new Ne({}),_t=new pe({props:{group1:{id:"pt",code:`from transformers import AutoModelForSequenceClassification model_name = "nlptown/bert-base-multilingual-uncased-sentiment" pt_model = AutoModelForSequenceClassification.from_pretrained(model_name)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModelForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span>model_name = <span class="hljs-string">&quot;nlptown/bert-base-multilingual-uncased-sentiment&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>pt_model = AutoModelForSequenceClassification.from_pretrained(model_name)`},group2:{id:"tf",code:`from transformers import TFAutoModelForSequenceClassification model_name = "nlptown/bert-base-multilingual-uncased-sentiment" tf_model = TFAutoModelForSequenceClassification.from_pretrained(model_name)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TFAutoModelForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span>model_name = <span class="hljs-string">&quot;nlptown/bert-base-multilingual-uncased-sentiment&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>tf_model = TFAutoModelForSequenceClassification.from_pretrained(model_name)`}}}),ze=new ba({props:{$$slots:{default:[Xf]},$$scope:{ctx:N}}}),gt=new pe({props:{group1:{id:"pt",code:"pt_outputs = pt_model(**pt_batch)",highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>pt_outputs = pt_model(**pt_batch)'},group2:{id:"tf",code:"tf_outputs = tf_model(tf_batch)",highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>tf_outputs = tf_model(tf_batch)'}}}),vt=new pe({props:{group1:{id:"pt",code:`from torch import nn pt_predictions = nn.functional.softmax(pt_outputs.logits, dim=-1) print(pt_predictions)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> torch <span class="hljs-keyword">import</span> nn <span class="hljs-meta">&gt;&gt;&gt; </span>pt_predictions = nn.functional.softmax(pt_outputs.logits, dim=-<span class="hljs-number">1</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(pt_predictions) tensor([[<span class="hljs-number">0.0021</span>, <span class="hljs-number">0.0018</span>, <span class="hljs-number">0.0115</span>, <span class="hljs-number">0.2121</span>, <span class="hljs-number">0.7725</span>], [<span class="hljs-number">0.2084</span>, <span class="hljs-number">0.1826</span>, <span 
class="hljs-number">0.1969</span>, <span class="hljs-number">0.1755</span>, <span class="hljs-number">0.2365</span>]], grad_fn=&lt;SoftmaxBackward0&gt;)`},group2:{id:"tf",code:`import tensorflow as tf tf_predictions = tf.nn.softmax(tf_outputs.logits, axis=-1) print(tf.math.round(tf_predictions * 10**4) / 10**4)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tf_predictions = tf.nn.softmax(tf_outputs.logits, axis=-<span class="hljs-number">1</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(tf.math.<span class="hljs-built_in">round</span>(tf_predictions * <span class="hljs-number">10</span>**<span class="hljs-number">4</span>) / <span class="hljs-number">10</span>**<span class="hljs-number">4</span>) tf.Tensor( [[<span class="hljs-number">0.0021</span> <span class="hljs-number">0.0018</span> <span class="hljs-number">0.0116</span> <span class="hljs-number">0.2121</span> <span class="hljs-number">0.7725</span>] [<span class="hljs-number">0.2084</span> <span class="hljs-number">0.1826</span> <span class="hljs-number">0.1969</span> <span class="hljs-number">0.1755</span> <span class="hljs-number">0.2365</span>]], shape=(<span class="hljs-number">2</span>, <span class="hljs-number">5</span>), dtype=float32)`}}}),Pe=new ba({props:{$$slots:{default:[eu]},$$scope:{ctx:N}}}),Se=new ba({props:{$$slots:{default:[tu]},$$scope:{ctx:N}}}),wt=new Ne({}),kt=new pe({props:{group1:{id:"pt",code:`pt_save_directory = "./pt_save_pretrained" tokenizer.save_pretrained(pt_save_directory) pt_model.save_pretrained(pt_save_directory)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>pt_save_directory = <span class="hljs-string">&quot;./pt_save_pretrained&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.save_pretrained(pt_save_directory) <span class="hljs-meta">&gt;&gt;&gt; </span>pt_model.save_pretrained(pt_save_directory)`},group2:{id:"tf",code:`tf_save_directory = "./tf_save_pretrained" tokenizer.save_pretrained(tf_save_directory) tf_model.save_pretrained(tf_save_directory)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>tf_save_directory = <span class="hljs-string">&quot;./tf_save_pretrained&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.save_pretrained(tf_save_directory) <span class="hljs-meta">&gt;&gt;&gt; </span>tf_model.save_pretrained(tf_save_directory)`}}}),jt=new pe({props:{group1:{id:"pt",code:'pt_model = AutoModelForSequenceClassification.from_pretrained("./pt_save_pretrained")',highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>pt_model = AutoModelForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;./pt_save_pretrained&quot;</span>)'},group2:{id:"tf",code:'tf_model = TFAutoModelForSequenceClassification.from_pretrained("./tf_save_pretrained")',highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>tf_model = TFAutoModelForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;./tf_save_pretrained&quot;</span>)'}}}),Et=new pe({props:{group1:{id:"pt",code:`from transformers import AutoModel tokenizer = AutoTokenizer.from_pretrained(tf_save_directory) pt_model = AutoModelForSequenceClassification.from_pretrained(tf_save_directory, from_tf=True)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModel <span 
class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(tf_save_directory) <span class="hljs-meta">&gt;&gt;&gt; </span>pt_model = AutoModelForSequenceClassification.from_pretrained(tf_save_directory, from_tf=<span class="hljs-literal">True</span>)`},group2:{id:"tf",code:`from transformers import TFAutoModel tokenizer = AutoTokenizer.from_pretrained(pt_save_directory) tf_model = TFAutoModelForSequenceClassification.from_pretrained(pt_save_directory, from_pt=True)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TFAutoModel <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(pt_save_directory) <span class="hljs-meta">&gt;&gt;&gt; </span>tf_model = TFAutoModelForSequenceClassification.from_pretrained(pt_save_directory, from_pt=<span class="hljs-literal">True</span>)`}}}),{c(){m=n("meta"),k=f(),c=n("h1"),w=n("a"),A=n("span"),d(b.$$.fragment),j=f(),x=n("span"),S=a("Quick tour"),E=f(),d(O.$$.fragment),R=f(),H=n("p"),fo=a("Get up and running with \u{1F917} Transformers! Start using the "),Tt=n("a"),uo=a("pipeline()"),ho=a(" for rapid inference, and quickly load a pretrained model and tokenizer with an "),xt=n("a"),mo=a("AutoClass"),co=a(" to solve your text, vision or audio task."),wa=f(),d(fe.$$.fragment),ka=f(),te=n("h2"),ue=n("a"),Ts=n("span"),d(Oe.$$.fragment),_o=f(),xs=n("span"),go=a("Pipeline"),ja=f(),Le=n("p"),qt=n("a"),vo=a("pipeline()"),yo=a(" is the easiest way to use a pretrained model for a given task."),Ea=f(),d(De.$$.fragment),Aa=f(),he=n("p"),$o=a("The "),zt=n("a"),bo=a("pipeline()"),wo=a(" supports many common tasks out-of-the-box:"),Ta=f(),Re=n("p"),qs=n("strong"),ko=a("Text"),jo=a(":"),xa=f(),T=n("ul"),zs=n("li"),Eo=a("Sentiment analysis: classify the polarity of a given text."),Ao=f(),Fs=n("li"),To=a("Text generation (in English): generate text from a given input."),xo=f(),Ps=n("li"),qo=a("Name entity recognition (NER): label each word with the entity it represents (person, date, location, etc.)."),zo=f(),Ss=n("li"),Fo=a("Question answering: extract the answer from the context, given some context and a question."),Po=f(),Ms=n("li"),So=a("Fill-mask: fill in the blank given a text with masked words."),Mo=f(),Cs=n("li"),Co=a("Summarization: generate a summary of a long sequence of text or document."),Io=f(),Is=n("li"),No=a("Translation: translate text into another language."),Oo=f(),Ns=n("li"),Lo=a("Feature extraction: create a tensor representation of the text."),qa=f(),He=n("p"),Os=n("strong"),Do=a("Image"),Ro=a(":"),za=f(),U=n("ul"),Ls=n("li"),Ho=a("Image classification: classify an image."),Uo=f(),Ds=n("li"),Wo=a("Image segmentation: classify every pixel in an image."),Bo=f(),Rs=n("li"),Yo=a("Object detection: detect objects within an image."),Fa=f(),Ue=n("p"),Hs=n("strong"),Go=a("Audio"),Qo=a(":"),Pa=f(),me=n("ul"),Us=n("li"),Jo=a("Audio classification: assign a label to a given segment of audio."),Ko=f(),Ws=n("li"),Vo=a("Automatic speech recognition (ASR): transcribe audio data into text."),Sa=f(),d(ce.$$.fragment),Ma=f(),se=n("h3"),de=n("a"),Bs=n("span"),d(We.$$.fragment),Zo=f(),Ys=n("span"),Xo=a("Pipeline usage"),Ca=f(),_e=n("p"),en=a("In the following example, you will use the "),Ft=n("a"),tn=a("pipeline()"),sn=a(" for sentiment analysis."),Ia=f(),Pt=n("p"),an=a("Install the following dependencies if you haven\u2019t already:"),Na=f(),d(Be.$$.fragment),Oa=f(),ge=n("p"),rn=a("Import 
"),St=n("a"),on=a("pipeline()"),nn=a(" and specify the task you want to complete:"),La=f(),d(Ye.$$.fragment),Da=f(),W=n("p"),ln=a("The pipeline downloads and caches a default "),Ge=n("a"),pn=a("pretrained model"),fn=a(" and tokenizer for sentiment analysis. Now you can use the "),Gs=n("code"),un=a("classifier"),hn=a(" on your target text:"),Ra=f(),d(Qe.$$.fragment),Ha=f(),ve=n("p"),mn=a("For more than one sentence, pass a list of sentences to the "),Mt=n("a"),cn=a("pipeline()"),dn=a(" which returns a list of dictionaries:"),Ua=f(),d(Je.$$.fragment),Wa=f(),B=n("p"),_n=a("The "),Ct=n("a"),gn=a("pipeline()"),vn=a(" can also iterate over an entire dataset. Start by installing the "),Ke=n("a"),yn=a("\u{1F917} Datasets"),$n=a(" library:"),Ba=f(),d(Ve.$$.fragment),Ya=f(),ye=n("p"),bn=a("Create a "),It=n("a"),wn=a("pipeline()"),kn=a(" with the task you want to solve for and the model you want to use."),Ga=f(),d(Ze.$$.fragment),Qa=f(),Y=n("p"),jn=a("Next, load a dataset (see the \u{1F917} Datasets "),Xe=n("a"),En=a("Quick Start"),An=a(" for more details) you\u2019d like to iterate over. For example, let\u2019s load the "),et=n("a"),Tn=a("SUPERB"),xn=a(" dataset:"),Ja=f(),d(tt.$$.fragment),Ka=f(),Nt=n("p"),qn=a("You can pass a whole dataset pipeline:"),Va=f(),d(st.$$.fragment),Za=f(),$e=n("p"),zn=a("For a larger dataset where the inputs are big (like in speech or vision), you will want to pass along a generator instead of a list that loads all the inputs in memory. See the "),Ot=n("a"),Fn=a("pipeline documentation"),Pn=a(" for more information."),Xa=f(),ae=n("h3"),be=n("a"),Qs=n("span"),d(at.$$.fragment),Sn=f(),Js=n("span"),Mn=a("Use another model and tokenizer in the pipeline"),er=f(),M=n("p"),Cn=a("The "),Lt=n("a"),In=a("pipeline()"),Nn=a(" can accommodate any model from the "),rt=n("a"),On=a("Model Hub"),Ln=a(", making it easy to adapt the "),Dt=n("a"),Dn=a("pipeline()"),Rn=a(" for other use-cases. For example, if you\u2019d like a model capable of handling French text, use the tags on the Model Hub to filter for an appropriate model. The top filtered result returns a multilingual "),ot=n("a"),Hn=a("BERT model"),Un=a(" fine-tuned for sentiment analysis. Great, let\u2019s use this model!"),tr=f(),d(nt.$$.fragment),sr=f(),G=n("p"),Wn=a("Use the "),Rt=n("a"),Bn=a("AutoModelForSequenceClassification"),Yn=a(" and [\u2018AutoTokenizer\u2019] to load the pretrained model and it\u2019s associated tokenizer (more on an "),Ks=n("code"),Gn=a("AutoClass"),Qn=a(" below):"),ar=f(),d(lt.$$.fragment),rr=f(),Q=n("p"),Jn=a("Then you can specify the model and tokenizer in the "),Ht=n("a"),Kn=a("pipeline()"),Vn=a(", and apply the "),Vs=n("code"),Zn=a("classifier"),Xn=a(" on your target text:"),or=f(),d(it.$$.fragment),nr=f(),J=n("p"),el=a("If you can\u2019t find a model for your use-case, you will need to fine-tune a pretrained model on your data. Take a look at our "),Ut=n("a"),tl=a("fine-tuning tutorial"),sl=a(" to learn how. Finally, after you\u2019ve fine-tuned your pretrained model, please consider sharing it (see tutorial "),Wt=n("a"),al=a("here"),rl=a(") with the community on the Model Hub to democratize NLP for everyone! \u{1F917}"),lr=f(),re=n("h2"),we=n("a"),Zs=n("span"),d(pt.$$.fragment),ol=f(),Xs=n("span"),nl=a("AutoClass"),ir=f(),d(ft.$$.fragment),pr=f(),q=n("p"),ll=a("Under the hood, the "),Bt=n("a"),il=a("AutoModelForSequenceClassification"),pl=a(" and "),Yt=n("a"),fl=a("AutoTokenizer"),ul=a(" classes work together to power the "),Gt=n("a"),hl=a("pipeline()"),ml=a(". 
An "),Qt=n("a"),cl=a("AutoClass"),dl=a(" is a shortcut that automatically retrieves the architecture of a pretrained model from it\u2019s name or path. You only need to select the appropriate "),ea=n("code"),_l=a("AutoClass"),gl=a(" for your task and it\u2019s associated tokenizer with "),Jt=n("a"),vl=a("AutoTokenizer"),yl=a("."),fr=f(),K=n("p"),$l=a("Let\u2019s return to our example and see how you can use the "),ta=n("code"),bl=a("AutoClass"),wl=a(" to replicate the results of the "),Kt=n("a"),kl=a("pipeline()"),jl=a("."),ur=f(),oe=n("h3"),ke=n("a"),sa=n("span"),d(ut.$$.fragment),El=f(),aa=n("span"),Al=a("AutoTokenizer"),hr=f(),V=n("p"),Tl=a("A tokenizer is responsible for preprocessing text into a format that is understandable to the model. First, the tokenizer will split the text into words called "),ra=n("em"),xl=a("tokens"),ql=a(". There are multiple rules that govern the tokenization process, including how to split a word and at what level (learn more about tokenization "),Vt=n("a"),zl=a("here"),Fl=a("). The most important thing to remember though is you need to instantiate the tokenizer with the same model name to ensure you\u2019re using the same tokenization rules a model was pretrained with."),mr=f(),je=n("p"),Pl=a("Load a tokenizer with "),Zt=n("a"),Sl=a("AutoTokenizer"),Ml=a(":"),cr=f(),d(ht.$$.fragment),dr=f(),Ee=n("p"),Cl=a("Next, the tokenizer converts the tokens into numbers in order to construct a tensor as input to the model. This is known as the model\u2019s "),oa=n("em"),Il=a("vocabulary"),Nl=a("."),_r=f(),Xt=n("p"),Ol=a("Pass your text to the tokenizer:"),gr=f(),d(mt.$$.fragment),vr=f(),es=n("p"),Ll=a("The tokenizer will return a dictionary containing:"),yr=f(),Ae=n("ul"),ts=n("li"),ss=n("a"),Dl=a("input_ids"),Rl=a(": numerical representions of your tokens."),Hl=f(),as=n("li"),rs=n("a"),Ul=a("atttention_mask"),Wl=a(": indicates which tokens should be attended to."),$r=f(),Te=n("p"),Bl=a("Just like the "),os=n("a"),Yl=a("pipeline()"),Gl=a(", the tokenizer will accept a list of inputs. In addition, the tokenizer can also pad and truncate the text to return a batch with uniform length:"),br=f(),d(ct.$$.fragment),wr=f(),xe=n("p"),Ql=a("Read the "),ns=n("a"),Jl=a("preprocessing"),Kl=a(" tutorial for more details about tokenization."),kr=f(),ne=n("h3"),qe=n("a"),na=n("span"),d(dt.$$.fragment),Vl=f(),la=n("span"),Zl=a("AutoModel"),jr=f(),F=n("p"),Xl=a("\u{1F917} Transformers provides a simple and unified way to load pretrained instances. This means you can load an "),ls=n("a"),ei=a("AutoModel"),ti=a(" like you would load an "),is=n("a"),si=a("AutoTokenizer"),ai=a(". The only difference is selecting the correct "),ps=n("a"),ri=a("AutoModel"),oi=a(" for the task. Since you are doing text - or sequence - classification, load "),fs=n("a"),ni=a("AutoModelForSequenceClassification"),li=a(". The TensorFlow equivalent is simply "),us=n("a"),ii=a("TFAutoModelForSequenceClassification"),pi=a(":"),Er=f(),d(_t.$$.fragment),Ar=f(),d(ze.$$.fragment),Tr=f(),Fe=n("p"),fi=a("Now you can pass your preprocessed batch of inputs directly to the model. If you are using a PyTorch model, unpack the dictionary by adding "),ia=n("code"),ui=a("**"),hi=a(". For TensorFlow models, pass the dictionary keys directly to the tensors:"),xr=f(),d(gt.$$.fragment),qr=f(),Z=n("p"),mi=a("The model outputs the final activations in the "),pa=n("code"),ci=a("logits"),di=a(" attribute. 
Apply the softmax function to the "),fa=n("code"),_i=a("logits"),gi=a(" to retrieve the probabilities:"),zr=f(),d(vt.$$.fragment),Fr=f(),d(Pe.$$.fragment),Pr=f(),z=n("p"),vi=a("Models are a standard "),yt=n("a"),ua=n("code"),yi=a("torch.nn.Module"),$i=a(" or a "),$t=n("a"),ha=n("code"),bi=a("tf.keras.Model"),wi=a(" so you can use them in your usual training loop. However, to make things easier, \u{1F917} Transformers provides a "),hs=n("a"),ki=a("Trainer"),ji=a(" class for PyTorch that adds functionality for distributed training, mixed precision, and more. For TensorFlow, you can use the "),ma=n("code"),Ei=a("fit"),Ai=a(" method from "),bt=n("a"),Ti=a("Keras"),xi=a(". Refer to the "),ms=n("a"),qi=a("training tutorial"),zi=a(" for more details."),Sr=f(),d(Se.$$.fragment),Mr=f(),le=n("h3"),Me=n("a"),ca=n("span"),d(wt.$$.fragment),Fi=f(),da=n("span"),Pi=a("Save a model"),Cr=f(),Ce=n("p"),Si=a("Once your model is fine-tuned, you can save it with its tokenizer using "),cs=n("a"),Mi=a("PreTrainedModel.save_pretrained()"),Ci=a(":"),Ir=f(),d(kt.$$.fragment),Nr=f(),Ie=n("p"),Ii=a("When you are ready to use the model again, reload it with "),ds=n("a"),Ni=a("PreTrainedModel.from_pretrained()"),Oi=a(":"),Or=f(),d(jt.$$.fragment),Lr=f(),X=n("p"),Li=a("One particularly cool \u{1F917} Transformers feature is the ability to save a model and reload it as either a PyTorch or TensorFlow model. The "),_a=n("code"),Di=a("from_pt"),Ri=a(" or "),ga=n("code"),Hi=a("from_tf"),Ui=a(" parameter can convert the model from one framework to the other:"),Dr=f(),d(Et.$$.fragment),this.h()},l(e){const o=Jf('[data-svelte="svelte-1phssyn"]',document.head);m=l(o,"META",{name:!0,content:!0}),o.forEach(s),k=u(e),c=l(e,"H1",{class:!0});var At=i(c);w=l(At,"A",{id:!0,class:!0,href:!0});var va=i(w);A=l(va,"SPAN",{});var ya=i(A);_(b.$$.fragment,ya),ya.forEach(s),va.forEach(s),j=u(At),x=l(At,"SPAN",{});var $a=i(x);S=r($a,"Quick tour"),$a.forEach(s),At.forEach(s),E=u(e),_(O.$$.fragment,e),R=u(e),H=l(e,"P",{});var ie=i(H);fo=r(ie,"Get up and running with \u{1F917} Transformers! 
Start using the "),Tt=l(ie,"A",{href:!0});var Ki=i(Tt);uo=r(Ki,"pipeline()"),Ki.forEach(s),ho=r(ie," for rapid inference, and quickly load a pretrained model and tokenizer with an "),xt=l(ie,"A",{href:!0});var Vi=i(xt);mo=r(Vi,"AutoClass"),Vi.forEach(s),co=r(ie," to solve your text, vision or audio task."),ie.forEach(s),wa=u(e),_(fe.$$.fragment,e),ka=u(e),te=l(e,"H2",{class:!0});var Hr=i(te);ue=l(Hr,"A",{id:!0,class:!0,href:!0});var Zi=i(ue);Ts=l(Zi,"SPAN",{});var Xi=i(Ts);_(Oe.$$.fragment,Xi),Xi.forEach(s),Zi.forEach(s),_o=u(Hr),xs=l(Hr,"SPAN",{});var ep=i(xs);go=r(ep,"Pipeline"),ep.forEach(s),Hr.forEach(s),ja=u(e),Le=l(e,"P",{});var Wi=i(Le);qt=l(Wi,"A",{href:!0});var tp=i(qt);vo=r(tp,"pipeline()"),tp.forEach(s),yo=r(Wi," is the easiest way to use a pretrained model for a given task."),Wi.forEach(s),Ea=u(e),_(De.$$.fragment,e),Aa=u(e),he=l(e,"P",{});var Ur=i(he);$o=r(Ur,"The "),zt=l(Ur,"A",{href:!0});var sp=i(zt);bo=r(sp,"pipeline()"),sp.forEach(s),wo=r(Ur," supports many common tasks out-of-the-box:"),Ur.forEach(s),Ta=u(e),Re=l(e,"P",{});var Bi=i(Re);qs=l(Bi,"STRONG",{});var ap=i(qs);ko=r(ap,"Text"),ap.forEach(s),jo=r(Bi,":"),Bi.forEach(s),xa=u(e),T=l(e,"UL",{});var P=i(T);zs=l(P,"LI",{});var rp=i(zs);Eo=r(rp,"Sentiment analysis: classify the polarity of a given text."),rp.forEach(s),Ao=u(P),Fs=l(P,"LI",{});var op=i(Fs);To=r(op,"Text generation (in English): generate text from a given input."),op.forEach(s),xo=u(P),Ps=l(P,"LI",{});var np=i(Ps);qo=r(np,"Name entity recognition (NER): label each word with the entity it represents (person, date, location, etc.)."),np.forEach(s),zo=u(P),Ss=l(P,"LI",{});var lp=i(Ss);Fo=r(lp,"Question answering: extract the answer from the context, given some context and a question."),lp.forEach(s),Po=u(P),Ms=l(P,"LI",{});var ip=i(Ms);So=r(ip,"Fill-mask: fill in the blank given a text with masked words."),ip.forEach(s),Mo=u(P),Cs=l(P,"LI",{});var pp=i(Cs);Co=r(pp,"Summarization: generate a summary of a long sequence of text or document."),pp.forEach(s),Io=u(P),Is=l(P,"LI",{});var fp=i(Is);No=r(fp,"Translation: translate text into another language."),fp.forEach(s),Oo=u(P),Ns=l(P,"LI",{});var up=i(Ns);Lo=r(up,"Feature extraction: create a tensor representation of the text."),up.forEach(s),P.forEach(s),qa=u(e),He=l(e,"P",{});var Yi=i(He);Os=l(Yi,"STRONG",{});var hp=i(Os);Do=r(hp,"Image"),hp.forEach(s),Ro=r(Yi,":"),Yi.forEach(s),za=u(e),U=l(e,"UL",{});var _s=i(U);Ls=l(_s,"LI",{});var mp=i(Ls);Ho=r(mp,"Image classification: classify an image."),mp.forEach(s),Uo=u(_s),Ds=l(_s,"LI",{});var cp=i(Ds);Wo=r(cp,"Image segmentation: classify every pixel in an image."),cp.forEach(s),Bo=u(_s),Rs=l(_s,"LI",{});var dp=i(Rs);Yo=r(dp,"Object detection: detect objects within an image."),dp.forEach(s),_s.forEach(s),Fa=u(e),Ue=l(e,"P",{});var Gi=i(Ue);Hs=l(Gi,"STRONG",{});var _p=i(Hs);Go=r(_p,"Audio"),_p.forEach(s),Qo=r(Gi,":"),Gi.forEach(s),Pa=u(e),me=l(e,"UL",{});var Wr=i(me);Us=l(Wr,"LI",{});var gp=i(Us);Jo=r(gp,"Audio classification: assign a label to a given segment of audio."),gp.forEach(s),Ko=u(Wr),Ws=l(Wr,"LI",{});var vp=i(Ws);Vo=r(vp,"Automatic speech recognition (ASR): transcribe audio data into text."),vp.forEach(s),Wr.forEach(s),Sa=u(e),_(ce.$$.fragment,e),Ma=u(e),se=l(e,"H3",{class:!0});var Br=i(se);de=l(Br,"A",{id:!0,class:!0,href:!0});var yp=i(de);Bs=l(yp,"SPAN",{});var $p=i(Bs);_(We.$$.fragment,$p),$p.forEach(s),yp.forEach(s),Zo=u(Br),Ys=l(Br,"SPAN",{});var bp=i(Ys);Xo=r(bp,"Pipeline usage"),bp.forEach(s),Br.forEach(s),Ca=u(e),_e=l(e,"P",{});var 
Yr=i(_e);en=r(Yr,"In the following example, you will use the "),Ft=l(Yr,"A",{href:!0});var wp=i(Ft);tn=r(wp,"pipeline()"),wp.forEach(s),sn=r(Yr," for sentiment analysis."),Yr.forEach(s),Ia=u(e),Pt=l(e,"P",{});var kp=i(Pt);an=r(kp,"Install the following dependencies if you haven\u2019t already:"),kp.forEach(s),Na=u(e),_(Be.$$.fragment,e),Oa=u(e),ge=l(e,"P",{});var Gr=i(ge);rn=r(Gr,"Import "),St=l(Gr,"A",{href:!0});var jp=i(St);on=r(jp,"pipeline()"),jp.forEach(s),nn=r(Gr," and specify the task you want to complete:"),Gr.forEach(s),La=u(e),_(Ye.$$.fragment,e),Da=u(e),W=l(e,"P",{});var gs=i(W);ln=r(gs,"The pipeline downloads and caches a default "),Ge=l(gs,"A",{href:!0,rel:!0});var Ep=i(Ge);pn=r(Ep,"pretrained model"),Ep.forEach(s),fn=r(gs," and tokenizer for sentiment analysis. Now you can use the "),Gs=l(gs,"CODE",{});var Ap=i(Gs);un=r(Ap,"classifier"),Ap.forEach(s),hn=r(gs," on your target text:"),gs.forEach(s),Ra=u(e),_(Qe.$$.fragment,e),Ha=u(e),ve=l(e,"P",{});var Qr=i(ve);mn=r(Qr,"For more than one sentence, pass a list of sentences to the "),Mt=l(Qr,"A",{href:!0});var Tp=i(Mt);cn=r(Tp,"pipeline()"),Tp.forEach(s),dn=r(Qr," which returns a list of dictionaries:"),Qr.forEach(s),Ua=u(e),_(Je.$$.fragment,e),Wa=u(e),B=l(e,"P",{});var vs=i(B);_n=r(vs,"The "),Ct=l(vs,"A",{href:!0});var xp=i(Ct);gn=r(xp,"pipeline()"),xp.forEach(s),vn=r(vs," can also iterate over an entire dataset. Start by installing the "),Ke=l(vs,"A",{href:!0,rel:!0});var qp=i(Ke);yn=r(qp,"\u{1F917} Datasets"),qp.forEach(s),$n=r(vs," library:"),vs.forEach(s),Ba=u(e),_(Ve.$$.fragment,e),Ya=u(e),ye=l(e,"P",{});var Jr=i(ye);bn=r(Jr,"Create a "),It=l(Jr,"A",{href:!0});var zp=i(It);wn=r(zp,"pipeline()"),zp.forEach(s),kn=r(Jr," with the task you want to solve for and the model you want to use."),Jr.forEach(s),Ga=u(e),_(Ze.$$.fragment,e),Qa=u(e),Y=l(e,"P",{});var ys=i(Y);jn=r(ys,"Next, load a dataset (see the \u{1F917} Datasets "),Xe=l(ys,"A",{href:!0,rel:!0});var Fp=i(Xe);En=r(Fp,"Quick Start"),Fp.forEach(s),An=r(ys," for more details) you\u2019d like to iterate over. For example, let\u2019s load the "),et=l(ys,"A",{href:!0,rel:!0});var Pp=i(et);Tn=r(Pp,"SUPERB"),Pp.forEach(s),xn=r(ys," dataset:"),ys.forEach(s),Ja=u(e),_(tt.$$.fragment,e),Ka=u(e),Nt=l(e,"P",{});var Sp=i(Nt);qn=r(Sp,"You can pass a whole dataset pipeline:"),Sp.forEach(s),Va=u(e),_(st.$$.fragment,e),Za=u(e),$e=l(e,"P",{});var Kr=i($e);zn=r(Kr,"For a larger dataset where the inputs are big (like in speech or vision), you will want to pass along a generator instead of a list that loads all the inputs in memory. See the "),Ot=l(Kr,"A",{href:!0});var Mp=i(Ot);Fn=r(Mp,"pipeline documentation"),Mp.forEach(s),Pn=r(Kr," for more information."),Kr.forEach(s),Xa=u(e),ae=l(e,"H3",{class:!0});var Vr=i(ae);be=l(Vr,"A",{id:!0,class:!0,href:!0});var Cp=i(be);Qs=l(Cp,"SPAN",{});var Ip=i(Qs);_(at.$$.fragment,Ip),Ip.forEach(s),Cp.forEach(s),Sn=u(Vr),Js=l(Vr,"SPAN",{});var Np=i(Js);Mn=r(Np,"Use another model and tokenizer in the pipeline"),Np.forEach(s),Vr.forEach(s),er=u(e),M=l(e,"P",{});var ee=i(M);Cn=r(ee,"The "),Lt=l(ee,"A",{href:!0});var Op=i(Lt);In=r(Op,"pipeline()"),Op.forEach(s),Nn=r(ee," can accommodate any model from the "),rt=l(ee,"A",{href:!0,rel:!0});var Lp=i(rt);On=r(Lp,"Model Hub"),Lp.forEach(s),Ln=r(ee,", making it easy to adapt the "),Dt=l(ee,"A",{href:!0});var Dp=i(Dt);Dn=r(Dp,"pipeline()"),Dp.forEach(s),Rn=r(ee," for other use-cases. 
For example, if you\u2019d like a model capable of handling French text, use the tags on the Model Hub to filter for an appropriate model. The top filtered result returns a multilingual "),ot=l(ee,"A",{href:!0,rel:!0});var Rp=i(ot);Hn=r(Rp,"BERT model"),Rp.forEach(s),Un=r(ee," fine-tuned for sentiment analysis. Great, let\u2019s use this model!"),ee.forEach(s),tr=u(e),_(nt.$$.fragment,e),sr=u(e),G=l(e,"P",{});var $s=i(G);Wn=r($s,"Use the "),Rt=l($s,"A",{href:!0});var Hp=i(Rt);Bn=r(Hp,"AutoModelForSequenceClassification"),Hp.forEach(s),Yn=r($s," and [\u2018AutoTokenizer\u2019] to load the pretrained model and it\u2019s associated tokenizer (more on an "),Ks=l($s,"CODE",{});var Up=i(Ks);Gn=r(Up,"AutoClass"),Up.forEach(s),Qn=r($s," below):"),$s.forEach(s),ar=u(e),_(lt.$$.fragment,e),rr=u(e),Q=l(e,"P",{});var bs=i(Q);Jn=r(bs,"Then you can specify the model and tokenizer in the "),Ht=l(bs,"A",{href:!0});var Wp=i(Ht);Kn=r(Wp,"pipeline()"),Wp.forEach(s),Vn=r(bs,", and apply the "),Vs=l(bs,"CODE",{});var Bp=i(Vs);Zn=r(Bp,"classifier"),Bp.forEach(s),Xn=r(bs," on your target text:"),bs.forEach(s),or=u(e),_(it.$$.fragment,e),nr=u(e),J=l(e,"P",{});var ws=i(J);el=r(ws,"If you can\u2019t find a model for your use-case, you will need to fine-tune a pretrained model on your data. Take a look at our "),Ut=l(ws,"A",{href:!0});var Yp=i(Ut);tl=r(Yp,"fine-tuning tutorial"),Yp.forEach(s),sl=r(ws," to learn how. Finally, after you\u2019ve fine-tuned your pretrained model, please consider sharing it (see tutorial "),Wt=l(ws,"A",{href:!0});var Gp=i(Wt);al=r(Gp,"here"),Gp.forEach(s),rl=r(ws,") with the community on the Model Hub to democratize NLP for everyone! \u{1F917}"),ws.forEach(s),lr=u(e),re=l(e,"H2",{class:!0});var Zr=i(re);we=l(Zr,"A",{id:!0,class:!0,href:!0});var Qp=i(we);Zs=l(Qp,"SPAN",{});var Jp=i(Zs);_(pt.$$.fragment,Jp),Jp.forEach(s),Qp.forEach(s),ol=u(Zr),Xs=l(Zr,"SPAN",{});var Kp=i(Xs);nl=r(Kp,"AutoClass"),Kp.forEach(s),Zr.forEach(s),ir=u(e),_(ft.$$.fragment,e),pr=u(e),q=l(e,"P",{});var C=i(q);ll=r(C,"Under the hood, the "),Bt=l(C,"A",{href:!0});var Vp=i(Bt);il=r(Vp,"AutoModelForSequenceClassification"),Vp.forEach(s),pl=r(C," and "),Yt=l(C,"A",{href:!0});var Zp=i(Yt);fl=r(Zp,"AutoTokenizer"),Zp.forEach(s),ul=r(C," classes work together to power the "),Gt=l(C,"A",{href:!0});var Xp=i(Gt);hl=r(Xp,"pipeline()"),Xp.forEach(s),ml=r(C,". An "),Qt=l(C,"A",{href:!0});var ef=i(Qt);cl=r(ef,"AutoClass"),ef.forEach(s),dl=r(C," is a shortcut that automatically retrieves the architecture of a pretrained model from it\u2019s name or path. 
You only need to select the appropriate "),ea=l(C,"CODE",{});var tf=i(ea);_l=r(tf,"AutoClass"),tf.forEach(s),gl=r(C," for your task and it\u2019s associated tokenizer with "),Jt=l(C,"A",{href:!0});var sf=i(Jt);vl=r(sf,"AutoTokenizer"),sf.forEach(s),yl=r(C,"."),C.forEach(s),fr=u(e),K=l(e,"P",{});var ks=i(K);$l=r(ks,"Let\u2019s return to our example and see how you can use the "),ta=l(ks,"CODE",{});var af=i(ta);bl=r(af,"AutoClass"),af.forEach(s),wl=r(ks," to replicate the results of the "),Kt=l(ks,"A",{href:!0});var rf=i(Kt);kl=r(rf,"pipeline()"),rf.forEach(s),jl=r(ks,"."),ks.forEach(s),ur=u(e),oe=l(e,"H3",{class:!0});var Xr=i(oe);ke=l(Xr,"A",{id:!0,class:!0,href:!0});var of=i(ke);sa=l(of,"SPAN",{});var nf=i(sa);_(ut.$$.fragment,nf),nf.forEach(s),of.forEach(s),El=u(Xr),aa=l(Xr,"SPAN",{});var lf=i(aa);Al=r(lf,"AutoTokenizer"),lf.forEach(s),Xr.forEach(s),hr=u(e),V=l(e,"P",{});var js=i(V);Tl=r(js,"A tokenizer is responsible for preprocessing text into a format that is understandable to the model. First, the tokenizer will split the text into words called "),ra=l(js,"EM",{});var pf=i(ra);xl=r(pf,"tokens"),pf.forEach(s),ql=r(js,". There are multiple rules that govern the tokenization process, including how to split a word and at what level (learn more about tokenization "),Vt=l(js,"A",{href:!0});var ff=i(Vt);zl=r(ff,"here"),ff.forEach(s),Fl=r(js,"). The most important thing to remember though is you need to instantiate the tokenizer with the same model name to ensure you\u2019re using the same tokenization rules a model was pretrained with."),js.forEach(s),mr=u(e),je=l(e,"P",{});var eo=i(je);Pl=r(eo,"Load a tokenizer with "),Zt=l(eo,"A",{href:!0});var uf=i(Zt);Sl=r(uf,"AutoTokenizer"),uf.forEach(s),Ml=r(eo,":"),eo.forEach(s),cr=u(e),_(ht.$$.fragment,e),dr=u(e),Ee=l(e,"P",{});var to=i(Ee);Cl=r(to,"Next, the tokenizer converts the tokens into numbers in order to construct a tensor as input to the model. This is known as the model\u2019s "),oa=l(to,"EM",{});var hf=i(oa);Il=r(hf,"vocabulary"),hf.forEach(s),Nl=r(to,"."),to.forEach(s),_r=u(e),Xt=l(e,"P",{});var mf=i(Xt);Ol=r(mf,"Pass your text to the tokenizer:"),mf.forEach(s),gr=u(e),_(mt.$$.fragment,e),vr=u(e),es=l(e,"P",{});var cf=i(es);Ll=r(cf,"The tokenizer will return a dictionary containing:"),cf.forEach(s),yr=u(e),Ae=l(e,"UL",{});var so=i(Ae);ts=l(so,"LI",{});var Qi=i(ts);ss=l(Qi,"A",{href:!0});var df=i(ss);Dl=r(df,"input_ids"),df.forEach(s),Rl=r(Qi,": numerical representions of your tokens."),Qi.forEach(s),Hl=u(so),as=l(so,"LI",{});var Ji=i(as);rs=l(Ji,"A",{href:!0});var _f=i(rs);Ul=r(_f,"atttention_mask"),_f.forEach(s),Wl=r(Ji,": indicates which tokens should be attended to."),Ji.forEach(s),so.forEach(s),$r=u(e),Te=l(e,"P",{});var ao=i(Te);Bl=r(ao,"Just like the "),os=l(ao,"A",{href:!0});var gf=i(os);Yl=r(gf,"pipeline()"),gf.forEach(s),Gl=r(ao,", the tokenizer will accept a list of inputs. 
In addition, the tokenizer can also pad and truncate the text to return a batch with uniform length:"),ao.forEach(s),br=u(e),_(ct.$$.fragment,e),wr=u(e),xe=l(e,"P",{});var ro=i(xe);Ql=r(ro,"Read the "),ns=l(ro,"A",{href:!0});var vf=i(ns);Jl=r(vf,"preprocessing"),vf.forEach(s),Kl=r(ro," tutorial for more details about tokenization."),ro.forEach(s),kr=u(e),ne=l(e,"H3",{class:!0});var oo=i(ne);qe=l(oo,"A",{id:!0,class:!0,href:!0});var yf=i(qe);na=l(yf,"SPAN",{});var $f=i(na);_(dt.$$.fragment,$f),$f.forEach(s),yf.forEach(s),Vl=u(oo),la=l(oo,"SPAN",{});var bf=i(la);Zl=r(bf,"AutoModel"),bf.forEach(s),oo.forEach(s),jr=u(e),F=l(e,"P",{});var L=i(F);Xl=r(L,"\u{1F917} Transformers provides a simple and unified way to load pretrained instances. This means you can load an "),ls=l(L,"A",{href:!0});var wf=i(ls);ei=r(wf,"AutoModel"),wf.forEach(s),ti=r(L," like you would load an "),is=l(L,"A",{href:!0});var kf=i(is);si=r(kf,"AutoTokenizer"),kf.forEach(s),ai=r(L,". The only difference is selecting the correct "),ps=l(L,"A",{href:!0});var jf=i(ps);ri=r(jf,"AutoModel"),jf.forEach(s),oi=r(L," for the task. Since you are doing text - or sequence - classification, load "),fs=l(L,"A",{href:!0});var Ef=i(fs);ni=r(Ef,"AutoModelForSequenceClassification"),Ef.forEach(s),li=r(L,". The TensorFlow equivalent is simply "),us=l(L,"A",{href:!0});var Af=i(us);ii=r(Af,"TFAutoModelForSequenceClassification"),Af.forEach(s),pi=r(L,":"),L.forEach(s),Er=u(e),_(_t.$$.fragment,e),Ar=u(e),_(ze.$$.fragment,e),Tr=u(e),Fe=l(e,"P",{});var no=i(Fe);fi=r(no,"Now you can pass your preprocessed batch of inputs directly to the model. If you are using a PyTorch model, unpack the dictionary by adding "),ia=l(no,"CODE",{});var Tf=i(ia);ui=r(Tf,"**"),Tf.forEach(s),hi=r(no,". For TensorFlow models, pass the dictionary keys directly to the tensors:"),no.forEach(s),xr=u(e),_(gt.$$.fragment,e),qr=u(e),Z=l(e,"P",{});var Es=i(Z);mi=r(Es,"The model outputs the final activations in the "),pa=l(Es,"CODE",{});var xf=i(pa);ci=r(xf,"logits"),xf.forEach(s),di=r(Es," attribute. Apply the softmax function to the "),fa=l(Es,"CODE",{});var qf=i(fa);_i=r(qf,"logits"),qf.forEach(s),gi=r(Es," to retrieve the probabilities:"),Es.forEach(s),zr=u(e),_(vt.$$.fragment,e),Fr=u(e),_(Pe.$$.fragment,e),Pr=u(e),z=l(e,"P",{});var I=i(z);vi=r(I,"Models are a standard "),yt=l(I,"A",{href:!0,rel:!0});var zf=i(yt);ua=l(zf,"CODE",{});var Ff=i(ua);yi=r(Ff,"torch.nn.Module"),Ff.forEach(s),zf.forEach(s),$i=r(I," or a "),$t=l(I,"A",{href:!0,rel:!0});var Pf=i($t);ha=l(Pf,"CODE",{});var Sf=i(ha);bi=r(Sf,"tf.keras.Model"),Sf.forEach(s),Pf.forEach(s),wi=r(I," so you can use them in your usual training loop. However, to make things easier, \u{1F917} Transformers provides a "),hs=l(I,"A",{href:!0});var Mf=i(hs);ki=r(Mf,"Trainer"),Mf.forEach(s),ji=r(I," class for PyTorch that adds functionality for distributed training, mixed precision, and more. For TensorFlow, you can use the "),ma=l(I,"CODE",{});var Cf=i(ma);Ei=r(Cf,"fit"),Cf.forEach(s),Ai=r(I," method from "),bt=l(I,"A",{href:!0,rel:!0});var If=i(bt);Ti=r(If,"Keras"),If.forEach(s),xi=r(I,". 
Refer to the "),ms=l(I,"A",{href:!0});var Nf=i(ms);qi=r(Nf,"training tutorial"),Nf.forEach(s),zi=r(I," for more details."),I.forEach(s),Sr=u(e),_(Se.$$.fragment,e),Mr=u(e),le=l(e,"H3",{class:!0});var lo=i(le);Me=l(lo,"A",{id:!0,class:!0,href:!0});var Of=i(Me);ca=l(Of,"SPAN",{});var Lf=i(ca);_(wt.$$.fragment,Lf),Lf.forEach(s),Of.forEach(s),Fi=u(lo),da=l(lo,"SPAN",{});var Df=i(da);Pi=r(Df,"Save a model"),Df.forEach(s),lo.forEach(s),Cr=u(e),Ce=l(e,"P",{});var io=i(Ce);Si=r(io,"Once your model is fine-tuned, you can save it with its tokenizer using "),cs=l(io,"A",{href:!0});var Rf=i(cs);Mi=r(Rf,"PreTrainedModel.save_pretrained()"),Rf.forEach(s),Ci=r(io,":"),io.forEach(s),Ir=u(e),_(kt.$$.fragment,e),Nr=u(e),Ie=l(e,"P",{});var po=i(Ie);Ii=r(po,"When you are ready to use the model again, reload it with "),ds=l(po,"A",{href:!0});var Hf=i(ds);Ni=r(Hf,"PreTrainedModel.from_pretrained()"),Hf.forEach(s),Oi=r(po,":"),po.forEach(s),Or=u(e),_(jt.$$.fragment,e),Lr=u(e),X=l(e,"P",{});var As=i(X);Li=r(As,"One particularly cool \u{1F917} Transformers feature is the ability to save a model and reload it as either a PyTorch or TensorFlow model. The "),_a=l(As,"CODE",{});var Uf=i(_a);Di=r(Uf,"from_pt"),Uf.forEach(s),Ri=r(As," or "),ga=l(As,"CODE",{});var Wf=i(ga);Hi=r(Wf,"from_tf"),Wf.forEach(s),Ui=r(As," parameter can convert the model from one framework to the other:"),As.forEach(s),Dr=u(e),_(Et.$$.fragment,e),this.h()},h(){h(m,"name","hf:doc:metadata"),h(m,"content",JSON.stringify(au)),h(w,"id","quick-tour"),h(w,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(w,"href","#quick-tour"),h(c,"class","relative group"),h(Tt,"href","/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline"),h(xt,"href","./model_doc/auto"),h(ue,"id","pipeline"),h(ue,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(ue,"href","#pipeline"),h(te,"class","relative group"),h(qt,"href","/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline"),h(zt,"href","/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline"),h(de,"id","pipeline-usage"),h(de,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(de,"href","#pipeline-usage"),h(se,"class","relative group"),h(Ft,"href","/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline"),h(St,"href","/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline"),h(Ge,"href","https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english"),h(Ge,"rel","nofollow"),h(Mt,"href","/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline"),h(Ct,"href","/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline"),h(Ke,"href","https://huggingface.co/docs/datasets/"),h(Ke,"rel","nofollow"),h(It,"href","/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline"),h(Xe,"href","https://huggingface.co/docs/datasets/quickstart.html"),h(Xe,"rel","nofollow"),h(et,"href","https://huggingface.co/datasets/superb"),h(et,"rel","nofollow"),h(Ot,"href","./main_classes/pipelines"),h(be,"id","use-another-model-and-tokenizer-in-the-pipeline"),h(be,"class","header-link block pr-1.5 text-lg no-hover:hidden 
with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(be,"href","#use-another-model-and-tokenizer-in-the-pipeline"),h(ae,"class","relative group"),h(Lt,"href","/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline"),h(rt,"href","https://huggingface.co/models"),h(rt,"rel","nofollow"),h(Dt,"href","/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline"),h(ot,"href","https://huggingface.co/nlptown/bert-base-multilingual-uncased-sentiment"),h(ot,"rel","nofollow"),h(Rt,"href","/docs/transformers/pr_16143/en/model_doc/auto#transformers.AutoModelForSequenceClassification"),h(Ht,"href","/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline"),h(Ut,"href","./training"),h(Wt,"href","./model_sharing"),h(we,"id","autoclass"),h(we,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(we,"href","#autoclass"),h(re,"class","relative group"),h(Bt,"href","/docs/transformers/pr_16143/en/model_doc/auto#transformers.AutoModelForSequenceClassification"),h(Yt,"href","/docs/transformers/pr_16143/en/model_doc/auto#transformers.AutoTokenizer"),h(Gt,"href","/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline"),h(Qt,"href","./model_doc/auto"),h(Jt,"href","/docs/transformers/pr_16143/en/model_doc/auto#transformers.AutoTokenizer"),h(Kt,"href","/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline"),h(ke,"id","autotokenizer"),h(ke,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(ke,"href","#autotokenizer"),h(oe,"class","relative group"),h(Vt,"href","./tokenizer_summary"),h(Zt,"href","/docs/transformers/pr_16143/en/model_doc/auto#transformers.AutoTokenizer"),h(ss,"href","./glossary#input-ids"),h(rs,"href","./glossary#attention-mask"),h(os,"href","/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline"),h(ns,"href","./preprocessing"),h(qe,"id","automodel"),h(qe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(qe,"href","#automodel"),h(ne,"class","relative group"),h(ls,"href","/docs/transformers/pr_16143/en/model_doc/auto#transformers.AutoModel"),h(is,"href","/docs/transformers/pr_16143/en/model_doc/auto#transformers.AutoTokenizer"),h(ps,"href","/docs/transformers/pr_16143/en/model_doc/auto#transformers.AutoModel"),h(fs,"href","/docs/transformers/pr_16143/en/model_doc/auto#transformers.AutoModelForSequenceClassification"),h(us,"href","/docs/transformers/pr_16143/en/model_doc/auto#transformers.TFAutoModelForSequenceClassification"),h(yt,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),h(yt,"rel","nofollow"),h($t,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),h($t,"rel","nofollow"),h(hs,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer"),h(bt,"href","https://keras.io/"),h(bt,"rel","nofollow"),h(ms,"href","./training"),h(Me,"id","save-a-model"),h(Me,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(Me,"href","#save-a-model"),h(le,"class","relative
group"),h(cs,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.save_pretrained"),h(ds,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained")},m(e,o){t(document.head,m),p(e,k,o),p(e,c,o),t(c,w),t(w,A),g(b,A,null),t(c,j),t(c,x),t(x,S),p(e,E,o),g(O,e,o),p(e,R,o),p(e,H,o),t(H,fo),t(H,Tt),t(Tt,uo),t(H,ho),t(H,xt),t(xt,mo),t(H,co),p(e,wa,o),g(fe,e,o),p(e,ka,o),p(e,te,o),t(te,ue),t(ue,Ts),g(Oe,Ts,null),t(te,_o),t(te,xs),t(xs,go),p(e,ja,o),p(e,Le,o),t(Le,qt),t(qt,vo),t(Le,yo),p(e,Ea,o),g(De,e,o),p(e,Aa,o),p(e,he,o),t(he,$o),t(he,zt),t(zt,bo),t(he,wo),p(e,Ta,o),p(e,Re,o),t(Re,qs),t(qs,ko),t(Re,jo),p(e,xa,o),p(e,T,o),t(T,zs),t(zs,Eo),t(T,Ao),t(T,Fs),t(Fs,To),t(T,xo),t(T,Ps),t(Ps,qo),t(T,zo),t(T,Ss),t(Ss,Fo),t(T,Po),t(T,Ms),t(Ms,So),t(T,Mo),t(T,Cs),t(Cs,Co),t(T,Io),t(T,Is),t(Is,No),t(T,Oo),t(T,Ns),t(Ns,Lo),p(e,qa,o),p(e,He,o),t(He,Os),t(Os,Do),t(He,Ro),p(e,za,o),p(e,U,o),t(U,Ls),t(Ls,Ho),t(U,Uo),t(U,Ds),t(Ds,Wo),t(U,Bo),t(U,Rs),t(Rs,Yo),p(e,Fa,o),p(e,Ue,o),t(Ue,Hs),t(Hs,Go),t(Ue,Qo),p(e,Pa,o),p(e,me,o),t(me,Us),t(Us,Jo),t(me,Ko),t(me,Ws),t(Ws,Vo),p(e,Sa,o),g(ce,e,o),p(e,Ma,o),p(e,se,o),t(se,de),t(de,Bs),g(We,Bs,null),t(se,Zo),t(se,Ys),t(Ys,Xo),p(e,Ca,o),p(e,_e,o),t(_e,en),t(_e,Ft),t(Ft,tn),t(_e,sn),p(e,Ia,o),p(e,Pt,o),t(Pt,an),p(e,Na,o),g(Be,e,o),p(e,Oa,o),p(e,ge,o),t(ge,rn),t(ge,St),t(St,on),t(ge,nn),p(e,La,o),g(Ye,e,o),p(e,Da,o),p(e,W,o),t(W,ln),t(W,Ge),t(Ge,pn),t(W,fn),t(W,Gs),t(Gs,un),t(W,hn),p(e,Ra,o),g(Qe,e,o),p(e,Ha,o),p(e,ve,o),t(ve,mn),t(ve,Mt),t(Mt,cn),t(ve,dn),p(e,Ua,o),g(Je,e,o),p(e,Wa,o),p(e,B,o),t(B,_n),t(B,Ct),t(Ct,gn),t(B,vn),t(B,Ke),t(Ke,yn),t(B,$n),p(e,Ba,o),g(Ve,e,o),p(e,Ya,o),p(e,ye,o),t(ye,bn),t(ye,It),t(It,wn),t(ye,kn),p(e,Ga,o),g(Ze,e,o),p(e,Qa,o),p(e,Y,o),t(Y,jn),t(Y,Xe),t(Xe,En),t(Y,An),t(Y,et),t(et,Tn),t(Y,xn),p(e,Ja,o),g(tt,e,o),p(e,Ka,o),p(e,Nt,o),t(Nt,qn),p(e,Va,o),g(st,e,o),p(e,Za,o),p(e,$e,o),t($e,zn),t($e,Ot),t(Ot,Fn),t($e,Pn),p(e,Xa,o),p(e,ae,o),t(ae,be),t(be,Qs),g(at,Qs,null),t(ae,Sn),t(ae,Js),t(Js,Mn),p(e,er,o),p(e,M,o),t(M,Cn),t(M,Lt),t(Lt,In),t(M,Nn),t(M,rt),t(rt,On),t(M,Ln),t(M,Dt),t(Dt,Dn),t(M,Rn),t(M,ot),t(ot,Hn),t(M,Un),p(e,tr,o),g(nt,e,o),p(e,sr,o),p(e,G,o),t(G,Wn),t(G,Rt),t(Rt,Bn),t(G,Yn),t(G,Ks),t(Ks,Gn),t(G,Qn),p(e,ar,o),g(lt,e,o),p(e,rr,o),p(e,Q,o),t(Q,Jn),t(Q,Ht),t(Ht,Kn),t(Q,Vn),t(Q,Vs),t(Vs,Zn),t(Q,Xn),p(e,or,o),g(it,e,o),p(e,nr,o),p(e,J,o),t(J,el),t(J,Ut),t(Ut,tl),t(J,sl),t(J,Wt),t(Wt,al),t(J,rl),p(e,lr,o),p(e,re,o),t(re,we),t(we,Zs),g(pt,Zs,null),t(re,ol),t(re,Xs),t(Xs,nl),p(e,ir,o),g(ft,e,o),p(e,pr,o),p(e,q,o),t(q,ll),t(q,Bt),t(Bt,il),t(q,pl),t(q,Yt),t(Yt,fl),t(q,ul),t(q,Gt),t(Gt,hl),t(q,ml),t(q,Qt),t(Qt,cl),t(q,dl),t(q,ea),t(ea,_l),t(q,gl),t(q,Jt),t(Jt,vl),t(q,yl),p(e,fr,o),p(e,K,o),t(K,$l),t(K,ta),t(ta,bl),t(K,wl),t(K,Kt),t(Kt,kl),t(K,jl),p(e,ur,o),p(e,oe,o),t(oe,ke),t(ke,sa),g(ut,sa,null),t(oe,El),t(oe,aa),t(aa,Al),p(e,hr,o),p(e,V,o),t(V,Tl),t(V,ra),t(ra,xl),t(V,ql),t(V,Vt),t(Vt,zl),t(V,Fl),p(e,mr,o),p(e,je,o),t(je,Pl),t(je,Zt),t(Zt,Sl),t(je,Ml),p(e,cr,o),g(ht,e,o),p(e,dr,o),p(e,Ee,o),t(Ee,Cl),t(Ee,oa),t(oa,Il),t(Ee,Nl),p(e,_r,o),p(e,Xt,o),t(Xt,Ol),p(e,gr,o),g(mt,e,o),p(e,vr,o),p(e,es,o),t(es,Ll),p(e,yr,o),p(e,Ae,o),t(Ae,ts),t(ts,ss),t(ss,Dl),t(ts,Rl),t(Ae,Hl),t(Ae,as),t(as,rs),t(rs,Ul),t(as,Wl),p(e,$r,o),p(e,Te,o),t(Te,Bl),t(Te,os),t(os,Yl),t(Te,Gl),p(e,br,o),g(ct,e,o),p(e,wr,o),p(e,xe,o),t(xe,Ql),t(xe,ns),t(ns,Jl),t(xe,Kl),p(e,kr,o),p(e,ne,o),t(ne,qe),t(qe,na),g(dt,na,null),t(ne,Vl),t(ne,la),t(la,Zl),p(e,jr,o),p(e,F,o),t(F,Xl),t(F,ls),t(ls,ei),t(F,ti),t(F,is),
t(is,si),t(F,ai),t(F,ps),t(ps,ri),t(F,oi),t(F,fs),t(fs,ni),t(F,li),t(F,us),t(us,ii),t(F,pi),p(e,Er,o),g(_t,e,o),p(e,Ar,o),g(ze,e,o),p(e,Tr,o),p(e,Fe,o),t(Fe,fi),t(Fe,ia),t(ia,ui),t(Fe,hi),p(e,xr,o),g(gt,e,o),p(e,qr,o),p(e,Z,o),t(Z,mi),t(Z,pa),t(pa,ci),t(Z,di),t(Z,fa),t(fa,_i),t(Z,gi),p(e,zr,o),g(vt,e,o),p(e,Fr,o),g(Pe,e,o),p(e,Pr,o),p(e,z,o),t(z,vi),t(z,yt),t(yt,ua),t(ua,yi),t(z,$i),t(z,$t),t($t,ha),t(ha,bi),t(z,wi),t(z,hs),t(hs,ki),t(z,ji),t(z,ma),t(ma,Ei),t(z,Ai),t(z,bt),t(bt,Ti),t(z,xi),t(z,ms),t(ms,qi),t(z,zi),p(e,Sr,o),g(Se,e,o),p(e,Mr,o),p(e,le,o),t(le,Me),t(Me,ca),g(wt,ca,null),t(le,Fi),t(le,da),t(da,Pi),p(e,Cr,o),p(e,Ce,o),t(Ce,Si),t(Ce,cs),t(cs,Mi),t(Ce,Ci),p(e,Ir,o),g(kt,e,o),p(e,Nr,o),p(e,Ie,o),t(Ie,Ii),t(Ie,ds),t(ds,Ni),t(Ie,Oi),p(e,Or,o),g(jt,e,o),p(e,Lr,o),p(e,X,o),t(X,Li),t(X,_a),t(_a,Di),t(X,Ri),t(X,ga),t(ga,Hi),t(X,Ui),p(e,Dr,o),g(Et,e,o),Rr=!0},p(e,[o]){const At={};o&2&&(At.$$scope={dirty:o,ctx:e}),fe.$set(At);const va={};o&2&&(va.$$scope={dirty:o,ctx:e}),ce.$set(va);const ya={};o&2&&(ya.$$scope={dirty:o,ctx:e}),ze.$set(ya);const $a={};o&2&&($a.$$scope={dirty:o,ctx:e}),Pe.$set($a);const ie={};o&2&&(ie.$$scope={dirty:o,ctx:e}),Se.$set(ie)},i(e){Rr||(v(b.$$.fragment,e),v(O.$$.fragment,e),v(fe.$$.fragment,e),v(Oe.$$.fragment,e),v(De.$$.fragment,e),v(ce.$$.fragment,e),v(We.$$.fragment,e),v(Be.$$.fragment,e),v(Ye.$$.fragment,e),v(Qe.$$.fragment,e),v(Je.$$.fragment,e),v(Ve.$$.fragment,e),v(Ze.$$.fragment,e),v(tt.$$.fragment,e),v(st.$$.fragment,e),v(at.$$.fragment,e),v(nt.$$.fragment,e),v(lt.$$.fragment,e),v(it.$$.fragment,e),v(pt.$$.fragment,e),v(ft.$$.fragment,e),v(ut.$$.fragment,e),v(ht.$$.fragment,e),v(mt.$$.fragment,e),v(ct.$$.fragment,e),v(dt.$$.fragment,e),v(_t.$$.fragment,e),v(ze.$$.fragment,e),v(gt.$$.fragment,e),v(vt.$$.fragment,e),v(Pe.$$.fragment,e),v(Se.$$.fragment,e),v(wt.$$.fragment,e),v(kt.$$.fragment,e),v(jt.$$.fragment,e),v(Et.$$.fragment,e),Rr=!0)},o(e){y(b.$$.fragment,e),y(O.$$.fragment,e),y(fe.$$.fragment,e),y(Oe.$$.fragment,e),y(De.$$.fragment,e),y(ce.$$.fragment,e),y(We.$$.fragment,e),y(Be.$$.fragment,e),y(Ye.$$.fragment,e),y(Qe.$$.fragment,e),y(Je.$$.fragment,e),y(Ve.$$.fragment,e),y(Ze.$$.fragment,e),y(tt.$$.fragment,e),y(st.$$.fragment,e),y(at.$$.fragment,e),y(nt.$$.fragment,e),y(lt.$$.fragment,e),y(it.$$.fragment,e),y(pt.$$.fragment,e),y(ft.$$.fragment,e),y(ut.$$.fragment,e),y(ht.$$.fragment,e),y(mt.$$.fragment,e),y(ct.$$.fragment,e),y(dt.$$.fragment,e),y(_t.$$.fragment,e),y(ze.$$.fragment,e),y(gt.$$.fragment,e),y(vt.$$.fragment,e),y(Pe.$$.fragment,e),y(Se.$$.fragment,e),y(wt.$$.fragment,e),y(kt.$$.fragment,e),y(jt.$$.fragment,e),y(Et.$$.fragment,e),Rr=!1},d(e){s(m),e&&s(k),e&&s(c),$(b),e&&s(E),$(O,e),e&&s(R),e&&s(H),e&&s(wa),$(fe,e),e&&s(ka),e&&s(te),$(Oe),e&&s(ja),e&&s(Le),e&&s(Ea),$(De,e),e&&s(Aa),e&&s(he),e&&s(Ta),e&&s(Re),e&&s(xa),e&&s(T),e&&s(qa),e&&s(He),e&&s(za),e&&s(U),e&&s(Fa),e&&s(Ue),e&&s(Pa),e&&s(me),e&&s(Sa),$(ce,e),e&&s(Ma),e&&s(se),$(We),e&&s(Ca),e&&s(_e),e&&s(Ia),e&&s(Pt),e&&s(Na),$(Be,e),e&&s(Oa),e&&s(ge),e&&s(La),$(Ye,e),e&&s(Da),e&&s(W),e&&s(Ra),$(Qe,e),e&&s(Ha),e&&s(ve),e&&s(Ua),$(Je,e),e&&s(Wa),e&&s(B),e&&s(Ba),$(Ve,e),e&&s(Ya),e&&s(ye),e&&s(Ga),$(Ze,e),e&&s(Qa),e&&s(Y),e&&s(Ja),$(tt,e),e&&s(Ka),e&&s(Nt),e&&s(Va),$(st,e),e&&s(Za),e&&s($e),e&&s(Xa),e&&s(ae),$(at),e&&s(er),e&&s(M),e&&s(tr),$(nt,e),e&&s(sr),e&&s(G),e&&s(ar),$(lt,e),e&&s(rr),e&&s(Q),e&&s(or),$(it,e),e&&s(nr),e&&s(J),e&&s(lr),e&&s(re),$(pt),e&&s(ir),$(ft,e),e&&s(pr),e&&s(q),e&&s(fr),e&&s(K),e&&s(ur),e&&s(oe),$(ut),e&&s(hr),e&&s(V),e&&s(mr),e&&s(je),e&&s(cr),$(ht,e),e&&s
(dr),e&&s(Ee),e&&s(_r),e&&s(Xt),e&&s(gr),$(mt,e),e&&s(vr),e&&s(es),e&&s(yr),e&&s(Ae),e&&s($r),e&&s(Te),e&&s(br),$(ct,e),e&&s(wr),e&&s(xe),e&&s(kr),e&&s(ne),$(dt),e&&s(jr),e&&s(F),e&&s(Er),$(_t,e),e&&s(Ar),$(ze,e),e&&s(Tr),e&&s(Fe),e&&s(xr),$(gt,e),e&&s(qr),e&&s(Z),e&&s(zr),$(vt,e),e&&s(Fr),$(Pe,e),e&&s(Pr),e&&s(z),e&&s(Sr),$(Se,e),e&&s(Mr),e&&s(le),$(wt),e&&s(Cr),e&&s(Ce),e&&s(Ir),$(kt,e),e&&s(Nr),e&&s(Ie),e&&s(Or),$(jt,e),e&&s(Lr),e&&s(X),e&&s(Dr),$(Et,e)}}}const au={local:"quick-tour",sections:[{local:"pipeline",sections:[{local:"pipeline-usage",title:"Pipeline usage"},{local:"use-another-model-and-tokenizer-in-the-pipeline",title:"Use another model and tokenizer in the pipeline"}],title:"Pipeline"},{local:"autoclass",sections:[{local:"autotokenizer",title:"AutoTokenizer"},{local:"automodel",title:"AutoModel"},{local:"save-a-model",title:"Save a model"}],title:"AutoClass"}],title:"Quick tour"};function ru(N,m,k){let{fw:c}=m;return N.$$set=w=>{"fw"in w&&k(0,c=w.fw)},[c]}class mu extends Yf{constructor(m){super();Gf(this,m,ru,su,Qf,{fw:0})}}export{mu as default,au as metadata};
275
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages/model_summary.mdx-4a71079d.js
import{S as I9,i as q9,s as G9,e as s,k as h,w as u,t as p,W as le,M as N9,c as i,d as t,m as f,a as l,x as d,h as m,X as ne,b as r,N as c,F as a,g as o,y as v,q as g,o as b,B as _}from"../chunks/vendor-4833417e.js";import{T as E9}from"../chunks/Tip-fffd6df1.js";import{Y as hE}from"../chunks/Youtube-27813aed.js";import{I as w}from"../chunks/IconCopyLink-4b81c553.js";function B9(Yt){let k,T;return{c(){k=s("p"),T=p(`This model could be very well be used in an autoencoding setting, there is no checkpoint for such a pretraining yet, though.`)},l(x){k=i(x,"P",{});var A=l(k);T=m(A,`This model could be very well be used in an autoencoding setting, there is no checkpoint for such a pretraining yet, though.`),A.forEach(t)},m(x,A){o(x,k,A),a(k,T)},d(x){x&&t(k)}}}function F9(Yt){let k,T;return{c(){k=s("p"),T=p(`This model could be very well be used in an autoregressive setting, there is no checkpoint for such a pretraining yet, though.`)},l(x){k=i(x,"P",{});var A=l(k);T=m(A,`This model could be very well be used in an autoregressive setting, there is no checkpoint for such a pretraining yet, though.`),A.forEach(t)},m(x,A){o(x,k,A),a(k,T)},d(x){x&&t(k)}}}function D9(Yt){let k,T,x,A,kh,Zt,Cv,xh,zv,Ip,L,jv,Kt,Uv,Xv,Ot,Vv,Yv,Wt,Zv,Kv,qp,Ys,Ov,Gp,$,Ah,Zs,Wv,Jv,Th,Ks,Qv,eg,$h,Os,tg,ag,Mh,Ws,sg,ig,Ph,Js,rg,Np,P,fE,Bp,Qs,lg,Fp,ei,ng,Dp,ti,og,Hp,ai,hg,Cp,si,fg,zp,ii,jp,ge,et,Lh,Jt,pg,Sh,mg,Up,ri,cg,Xp,Qt,Vp,be,tt,Rh,ea,ug,Ih,dg,Yp,q,li,ni,pE,vg,oi,hi,mE,gg,fi,pi,cE,Zp,ta,aa,bg,_g,Kp,mi,wg,Op,ci,Eg,Wp,_e,at,qh,sa,yg,Gh,kg,Jp,G,ui,di,uE,xg,vi,gi,dE,Ag,bi,_i,vE,Qp,ia,ra,Tg,$g,em,wi,Mg,tm,Ei,Pg,am,we,st,Nh,la,Lg,Bh,Sg,sm,N,yi,ki,gE,Rg,xi,Ai,bE,Ig,Ti,$i,_E,im,na,oa,qg,Gg,rm,Mi,Ng,lm,Pi,Bg,nm,Ee,it,Fh,ha,Fg,Dh,Dg,om,B,Li,Si,wE,Hg,Ri,Ii,EE,Cg,qi,Gi,yE,hm,fa,pa,zg,jg,fm,Ni,Ug,pm,Bi,Xg,mm,Fi,Vg,cm,Di,Yg,um,Hi,dm,ye,rt,Hh,ma,Zg,Ch,Kg,vm,F,Ci,zi,kE,Og,ji,Ui,xE,Wg,Xi,Vi,AE,gm,ca,ua,Jg,Qg,bm,Yi,e1,_m,S,da,t1,Zi,a1,s1,i1,va,r1,Ki,l1,n1,o1,zh,h1,f1,jh,p1,wm,Oi,m1,Em,lt,ym,Wi,c1,km,ke,nt,Uh,ga,u1,Xh,d1,xm,D,Ji,Qi,TE,v1,er,tr,$E,g1,ar,sr,ME,Am,ba,_a,b1,_1,Tm,ir,w1,$m,rr,E1,Mm,lr,y1,Pm,nr,Lm,xe,ot,Vh,wa,k1,Yh,x1,Sm,or,A1,Rm,Ea,Im,Ae,ht,Zh,ya,T1,Kh,$1,qm,H,hr,fr,PE,M1,pr,mr,LE,P1,cr,ur,SE,Gm,ka,xa,L1,S1,Nm,dr,R1,Bm,oe,Oh,I1,q1,Wh,G1,N1,Jh,B1,Fm,vr,F1,Dm,gr,D1,Hm,Te,ft,Qh,Aa,H1,ef,C1,Cm,C,br,_r,RE,z1,wr,Er,IE,j1,yr,kr,qE,zm,Ta,$a,U1,X1,jm,xr,V1,Um,he,tf,Y1,Z1,af,K1,O1,sf,W1,Xm,Ar,J1,Vm,$e,pt,rf,Ma,Q1,lf,eb,Ym,z,Tr,$r,GE,tb,Mr,Pr,NE,ab,Lr,Sr,BE,Zm,Pa,La,sb,ib,Km,Rr,rb,Om,R,nf,lb,nb,of,ob,hb,hf,fb,pb,ff,mb,Wm,Ir,cb,Jm,Me,mt,pf,Sa,ub,mf,db,Qm,j,qr,Gr,FE,vb,Nr,Br,DE,gb,Fr,Dr,HE,ec,Ra,Ia,bb,_b,tc,Hr,wb,ac,fe,cf,Eb,yb,uf,kb,xb,df,Ab,sc,Cr,Tb,ic,Pe,ct,vf,qa,$b,gf,Mb,rc,U,zr,jr,CE,Pb,Ur,Xr,zE,Lb,Vr,Yr,jE,lc,Ga,Na,Sb,Rb,nc,Zr,Ib,oc,Kr,qb,hc,Le,ut,bf,Ba,Gb,_f,Nb,fc,X,Or,Wr,UE,Bb,Jr,Qr,XE,Fb,el,tl,VE,pc,Fa,Da,Db,Hb,mc,al,Cb,cc,pe,wf,zb,jb,Ef,Ub,Xb,yf,Vb,uc,I,Yb,kf,Zb,Kb,xf,Ob,Wb,Af,Jb,Qb,dc,sl,e_,vc,Se,dt,Tf,Ha,t_,$f,a_,gc,V,il,rl,YE,s_,ll,nl,ZE,i_,ol,hl,KE,bc,Ca,za,r_,l_,_c,fl,n_,wc,pl,o_,Ec,Re,vt,Mf,ja,h_,Pf,f_,yc,Y,ml,cl,OE,p_,ul,dl,WE,m_,vl,gl,JE,kc,Ua,Xa,c_,u_,xc,bl,d_,Ac,_l,v_,Tc,Ie,gt,Lf,Va,g_,Sf,b_,$c,Z,wl,El,QE,__,yl,kl,ey,w_,xl,Al,ty,Mc,Ya,Za,E_,y_,Pc,Tl,k_,Lc,$l,x_,Sc,qe,bt,Rf,Ka,A_,If,T_,Rc,K,Ml,Pl,ay,$_,Ll,Sl,sy,M_,Rl,Il,iy,Ic,Oa,Wa,P_,L_,qc,ql,S_,Gc,Gl,R_,Nc,Nl,I_,Bc,Bl,q_,Fc,Fl,Dc,Ge,_t,qf,Ja,G_,Gf,N_,Hc,O,Dl,Hl,ry,B_,Cl,zl,ly,F_,jl,Ul,ny,Cc,Qa,es,D_,H_,zc,wt,C_,Xl,z_,j_,jc,Vl,U_,Uc,Et,Xc,Yl,X_,Vc,Zl,Yc,Ne,yt,Nf,ts,V_,Bf,Y_,Zc,Kl,Z_,Kc,as,Oc,Be,kt,Ff,ss,K_,Df,O_,Wc,W,Ol,Wl,oy,W_,Jl,Ql,hy,J_,en,tn,fy,Jc,is,rs,Q_,ew,Qc,an,tw,eu
,M,Hf,aw,sw,Cf,iw,rw,zf,lw,nw,jf,ow,hw,Uf,fw,tu,sn,pw,au,Fe,xt,Xf,ls,mw,Vf,cw,su,J,rn,ln,py,uw,nn,on,my,dw,hn,fn,cy,iu,ns,os,vw,gw,ru,pn,bw,lu,At,Yf,_w,ww,Zf,Ew,nu,mn,yw,ou,cn,kw,hu,De,Tt,Kf,hs,xw,Of,Aw,fu,Q,un,dn,uy,Tw,vn,gn,dy,$w,bn,_n,vy,pu,fs,ps,Mw,Pw,mu,wn,Lw,cu,En,Sw,uu,He,$t,Wf,ms,Rw,Jf,Iw,du,ee,yn,kn,gy,qw,xn,An,by,Gw,Tn,$n,_y,vu,cs,us,Nw,Bw,gu,Mn,Fw,bu,Pn,Dw,_u,Ln,Hw,wu,Sn,Cw,Eu,Rn,zw,yu,Ce,Mt,Qf,ds,jw,ep,Uw,ku,te,In,qn,wy,Xw,Gn,Nn,Ey,Vw,Bn,Fn,yy,xu,vs,gs,Yw,Zw,Au,Dn,Kw,Tu,Hn,Ow,$u,ze,Pt,tp,bs,Ww,ap,Jw,Mu,ae,Cn,zn,ky,Qw,jn,Un,xy,e2,Xn,Vn,Ay,Pu,_s,ws,t2,a2,Lu,Yn,s2,Su,Zn,i2,Ru,Lt,r2,Es,l2,n2,Iu,me,o2,ys,h2,f2,sp,p2,m2,qu,je,St,ip,ks,c2,rp,u2,Gu,se,Kn,On,Ty,d2,Wn,Jn,$y,v2,Qn,eo,My,Nu,xs,As,g2,b2,Bu,ce,_2,lp,w2,E2,np,y2,k2,Fu,to,x2,Du,Ue,Rt,op,Ts,A2,hp,T2,Hu,ie,ao,so,Py,$2,io,ro,Ly,M2,lo,no,Sy,Cu,$s,Ms,P2,L2,zu,It,S2,Ps,R2,I2,ju,oo,q2,Uu,ho,Xu,Xe,qt,fp,Ls,G2,pp,N2,Vu,fo,B2,Yu,Ve,Gt,mp,Ss,F2,cp,D2,Zu,Rs,Is,H2,C2,Ku,po,z2,Ou,mo,j2,Wu,co,U2,Ju,uo,Qu,Ye,Nt,up,qs,X2,dp,V2,ed,vo,Y2,td,Ze,Bt,vp,Gs,Z2,gp,K2,ad,re,go,bo,Ry,O2,_o,wo,Iy,W2,Eo,yo,qy,sd,Ns,Bs,J2,Q2,id,ko,e0,rd,xo,t0,ld,ue,bp,a0,s0,_p,i0,r0,wp,l0,nd,Ao,n0,od,Ke,Ft,Ep,Fs,o0,yp,h0,hd,Oe,To,$o,Gy,f0,Mo,Po,Ny,fd,Ds,Hs,p0,m0,pd,Lo,c0,md,So,u0,cd,We,Dt,kp,Cs,d0,xp,v0,ud,Je,Ht,Ap,zs,g0,Tp,b0,dd,Ro,_0,vd,Io,gd,qo,$p,w0,bd,js,Go,E0,y0,_d,No,wd,Bo,Mp,k0,Ed,Us,Fo,x0,A0,yd,Do,T0,kd,Ct,By,xd,Ho,$0,Ad,Qe,zt,Pp,Xs,M0,Lp,P0,Td,Co,$d,zo,Sp,L0,Md,E,jo,S0,R0,Pd,y9='<span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mi>l</mi></mrow><annotation encoding="application/x-tex">l</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:0.6944em;"></span><span class="mord mathnormal" style="margin-right:0.01968em;">l</span></span></span></span>',Ld,Sd,k9='<span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mi>d</mi></mrow><annotation encoding="application/x-tex">d</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:0.6944em;"></span><span class="mord mathnormal">d</span></span></span></span>',Rd,Id,x9='<span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mi>l</mi></mrow><annotation encoding="application/x-tex">l</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:0.6944em;"></span><span class="mord mathnormal" style="margin-right:0.01968em;">l</span></span></span></span>',qd,Gd,A9='<span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mi>d</mi></mrow><annotation encoding="application/x-tex">d</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:0.6944em;"></span><span class="mord mathnormal">d</span></span></span></span>',Nd,Bd,T9='<span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><msub><mi>l</mi><mn>1</mn></msub><mo>\xD7</mo><msub><mi>d</mi><mn>1</mn></msub></mrow><annotation encoding="application/x-tex">l_{1} \\times d_{1}</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:0.8444em;vertical-align:-0.15em;"></span><span class="mord"><span class="mord 
mathnormal" style="margin-right:0.01968em;">l</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.3011em;"><span style="top:-2.55em;margin-left:-0.0197em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight"><span class="mord mtight">1</span></span></span></span></span><span class="vlist-s">\u200B</span></span><span class="vlist-r"><span class="vlist" style="height:0.15em;"><span></span></span></span></span></span></span><span class="mspace" style="margin-right:0.2222em;"></span><span class="mbin">\xD7</span><span class="mspace" style="margin-right:0.2222em;"></span></span><span class="base"><span class="strut" style="height:0.8444em;vertical-align:-0.15em;"></span><span class="mord"><span class="mord mathnormal">d</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.3011em;"><span style="top:-2.55em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight"><span class="mord mtight">1</span></span></span></span></span><span class="vlist-s">\u200B</span></span><span class="vlist-r"><span class="vlist" style="height:0.15em;"><span></span></span></span></span></span></span></span></span></span>',Fd,Dd,$9='<span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><msub><mi>l</mi><mn>2</mn></msub><mo>\xD7</mo><msub><mi>d</mi><mn>2</mn></msub></mrow><annotation encoding="application/x-tex">l_{2} \\times d_{2}</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:0.8444em;vertical-align:-0.15em;"></span><span class="mord"><span class="mord mathnormal" style="margin-right:0.01968em;">l</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.3011em;"><span style="top:-2.55em;margin-left:-0.0197em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight"><span class="mord mtight">2</span></span></span></span></span><span class="vlist-s">\u200B</span></span><span class="vlist-r"><span class="vlist" style="height:0.15em;"><span></span></span></span></span></span></span><span class="mspace" style="margin-right:0.2222em;"></span><span class="mbin">\xD7</span><span class="mspace" style="margin-right:0.2222em;"></span></span><span class="base"><span class="strut" style="height:0.8444em;vertical-align:-0.15em;"></span><span class="mord"><span class="mord mathnormal">d</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.3011em;"><span style="top:-2.55em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight"><span class="mord mtight">2</span></span></span></span></span><span class="vlist-s">\u200B</span></span><span class="vlist-r"><span class="vlist" style="height:0.15em;"><span></span></span></span></span></span></span></span></span></span>',Hd,Cd,M9='<span class="katex"><span class="katex-mathml"><math 
xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><msub><mi>l</mi><mn>1</mn></msub><mo>\xD7</mo><msub><mi>l</mi><mn>2</mn></msub><mo>=</mo><mi>l</mi></mrow><annotation encoding="application/x-tex">l_{1} \\times l_{2} = l</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:0.8444em;vertical-align:-0.15em;"></span><span class="mord"><span class="mord mathnormal" style="margin-right:0.01968em;">l</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.3011em;"><span style="top:-2.55em;margin-left:-0.0197em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight"><span class="mord mtight">1</span></span></span></span></span><span class="vlist-s">\u200B</span></span><span class="vlist-r"><span class="vlist" style="height:0.15em;"><span></span></span></span></span></span></span><span class="mspace" style="margin-right:0.2222em;"></span><span class="mbin">\xD7</span><span class="mspace" style="margin-right:0.2222em;"></span></span><span class="base"><span class="strut" style="height:0.8444em;vertical-align:-0.15em;"></span><span class="mord"><span class="mord mathnormal" style="margin-right:0.01968em;">l</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.3011em;"><span style="top:-2.55em;margin-left:-0.0197em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight"><span class="mord mtight">2</span></span></span></span></span><span class="vlist-s">\u200B</span></span><span class="vlist-r"><span class="vlist" style="height:0.15em;"><span></span></span></span></span></span></span><span class="mspace" style="margin-right:0.2778em;"></span><span class="mrel">=</span><span class="mspace" style="margin-right:0.2778em;"></span></span><span class="base"><span class="strut" style="height:0.6944em;"></span><span class="mord mathnormal" style="margin-right:0.01968em;">l</span></span></span></span>',zd,jd,P9='<span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><msub><mi>d</mi><mn>1</mn></msub><mo>+</mo><msub><mi>d</mi><mn>2</mn></msub><mo>=</mo><mi>d</mi></mrow><annotation encoding="application/x-tex">d_{1} + d_{2} = d</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:0.8444em;vertical-align:-0.15em;"></span><span class="mord"><span class="mord mathnormal">d</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.3011em;"><span style="top:-2.55em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight"><span class="mord mtight">1</span></span></span></span></span><span class="vlist-s">\u200B</span></span><span class="vlist-r"><span class="vlist" style="height:0.15em;"><span></span></span></span></span></span></span><span class="mspace" style="margin-right:0.2222em;"></span><span class="mbin">+</span><span class="mspace" style="margin-right:0.2222em;"></span></span><span class="base"><span class="strut" style="height:0.8444em;vertical-align:-0.15em;"></span><span class="mord"><span class="mord mathnormal">d</span><span 
class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.3011em;"><span style="top:-2.55em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight"><span class="mord mtight">2</span></span></span></span></span><span class="vlist-s">\u200B</span></span><span class="vlist-r"><span class="vlist" style="height:0.15em;"><span></span></span></span></span></span></span><span class="mspace" style="margin-right:0.2778em;"></span><span class="mrel">=</span><span class="mspace" style="margin-right:0.2778em;"></span></span><span class="base"><span class="strut" style="height:0.6944em;"></span><span class="mord mathnormal">d</span></span></span></span>',Ud,Xd,L9='<span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mi>j</mi></mrow><annotation encoding="application/x-tex">j</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:0.854em;vertical-align:-0.1944em;"></span><span class="mord mathnormal" style="margin-right:0.05724em;">j</span></span></span></span>',Vd,Yd,S9='<span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mi>j</mi><mi mathvariant="normal">%</mi><mi>l</mi><mn>1</mn></mrow><annotation encoding="application/x-tex">j \\% l1</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:0.9444em;vertical-align:-0.1944em;"></span><span class="mord mathnormal" style="margin-right:0.05724em;">j</span><span class="mord">%</span><span class="mord mathnormal" style="margin-right:0.01968em;">l</span><span class="mord">1</span></span></span></span>',Zd,Kd,R9='<span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mi>j</mi><mi mathvariant="normal">/</mi><mi mathvariant="normal">/</mi><mi>l</mi><mn>1</mn></mrow><annotation encoding="application/x-tex">j // l1</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:1em;vertical-align:-0.25em;"></span><span class="mord mathnormal" style="margin-right:0.05724em;">j</span><span class="mord">//</span><span class="mord mathnormal" style="margin-right:0.01968em;">l</span><span class="mord">1</span></span></span></span>',Od,Wd;return Zt=new w({}),Jt=new w({}),Qt=new hE({props:{id:"d_ixlCubqQw"}}),ea=new w({}),sa=new w({}),la=new w({}),ha=new w({}),ma=new w({}),lt=new E9({props:{$$slots:{default:[B9]},$$scope:{ctx:Yt}}}),ga=new w({}),wa=new w({}),Ea=new hE({props:{id:"MUqNwgPjJvQ"}}),ya=new w({}),Aa=new w({}),Ma=new w({}),Sa=new w({}),qa=new w({}),Ba=new w({}),Ha=new w({}),ja=new w({}),Va=new w({}),Ka=new w({}),Ja=new w({}),Et=new E9({props:{$$slots:{default:[F9]},$$scope:{ctx:Yt}}}),ts=new w({}),as=new hE({props:{id:"0_4KEb08xrE"}}),ss=new w({}),ls=new w({}),hs=new w({}),ms=new w({}),ds=new w({}),bs=new w({}),ks=new w({}),Ts=new w({}),Ls=new w({}),Ss=new w({}),qs=new w({}),Gs=new w({}),Fs=new w({}),Cs=new w({}),zs=new w({}),Xs=new w({}),{c(){k=s("meta"),T=h(),x=s("h1"),A=s("a"),kh=s("span"),u(Zt.$$.fragment),Cv=h(),xh=s("span"),zv=p("Summary of the models"),Ip=h(),L=s("p"),jv=p("This is a summary of the models available in \u{1F917} Transformers. 
It assumes you\u2019re familiar with the original "),Kt=s("a"),Uv=p(`transformer model`),Xv=p(". For a gentle introduction check the "),Ot=s("a"),Vv=p("annotated transformer"),Yv=p(`. Here we focus on the high-level differences between the models. You can check them more in detail in their respective documentation. Also check out `),Wt=s("a"),Zv=p("the Model Hub"),Kv=p(" where you can filter the checkpoints by model architecture."),qp=h(),Ys=s("p"),Ov=p("Each one of the models in the library falls into one of the following categories:"),Gp=h(),$=s("ul"),Ah=s("li"),Zs=s("a"),Wv=p("autoregressive-models"),Jv=h(),Th=s("li"),Ks=s("a"),Qv=p("autoencoding-models"),eg=h(),$h=s("li"),Os=s("a"),tg=p("seq-to-seq-models"),ag=h(),Mh=s("li"),Ws=s("a"),sg=p("multimodal-models"),ig=h(),Ph=s("li"),Js=s("a"),rg=p("retrieval-based-models"),Np=h(),P=s("iframe"),Bp=h(),Qs=s("p"),lg=p(`Autoregressive models are pretrained on the classic language modeling task: guess the next token having read all the previous ones. They correspond to the decoder of the original transformer model, and a mask is used on top of the full sentence so that the attention heads can only see what was before in the text, and not what\u2019s after. Although those models can be fine-tuned and achieve great results on many tasks, the most natural application is text generation. A typical example of such models is GPT.`),Fp=h(),ei=s("p"),ng=p(`Autoencoding models are pretrained by corrupting the input tokens in some way and trying to reconstruct the original sentence. They correspond to the encoder of the original transformer model in the sense that they get access to the full inputs without any mask. Those models usually build a bidirectional representation of the whole sentence. They can be fine-tuned and achieve great results on many tasks such as text generation, but their most natural application is sentence classification or token classification. A typical example of such models is BERT.`),Dp=h(),ti=s("p"),og=p(`Note that the only difference between autoregressive models and autoencoding models is in the way the model is pretrained. Therefore, the same architecture can be used for both autoregressive and autoencoding models. When a given model has been used for both types of pretraining, we have put it in the category corresponding to the article where it was first introduced.`),Hp=h(),ai=s("p"),hg=p(`Sequence-to-sequence models use both the encoder and the decoder of the original transformer, either for translation tasks or by transforming other tasks to sequence-to-sequence problems. They can be fine-tuned to many tasks but their most natural applications are translation, summarization and question answering. The original transformer model is an example of such a model (only for translation), T5 is an example that can be fine-tuned on other tasks.`),Cp=h(),si=s("p"),fg=p("Multimodal models mix text inputs with other kinds (e.g. 
images) and are more specific to a given task."),zp=h(),ii=s("a"),jp=h(),ge=s("h2"),et=s("a"),Lh=s("span"),u(Jt.$$.fragment),pg=h(),Sh=s("span"),mg=p("Decoders or autoregressive models"),Up=h(),ri=s("p"),cg=p(`As mentioned before, these models rely on the decoder part of the original transformer and use an attention mask so that at each position, the model can only look at the tokens before the attention heads.`),Xp=h(),u(Qt.$$.fragment),Vp=h(),be=s("h3"),tt=s("a"),Rh=s("span"),u(ea.$$.fragment),ug=h(),Ih=s("span"),dg=p("Original GPT"),Yp=h(),q=s("div"),li=s("a"),ni=s("img"),vg=h(),oi=s("a"),hi=s("img"),gg=h(),fi=s("a"),pi=s("img"),Zp=h(),ta=s("p"),aa=s("a"),bg=p("Improving Language Understanding by Generative Pre-Training"),_g=p(", Alec Radford et al."),Kp=h(),mi=s("p"),wg=p("The first autoregressive model based on the transformer architecture, pretrained on the Book Corpus dataset."),Op=h(),ci=s("p"),Eg=p(`The library provides versions of the model for language modeling and multitask language modeling/multiple choice classification.`),Wp=h(),_e=s("h3"),at=s("a"),qh=s("span"),u(sa.$$.fragment),yg=h(),Gh=s("span"),kg=p("GPT-2"),Jp=h(),G=s("div"),ui=s("a"),di=s("img"),xg=h(),vi=s("a"),gi=s("img"),Ag=h(),bi=s("a"),_i=s("img"),Qp=h(),ia=s("p"),ra=s("a"),Tg=p("Language Models are Unsupervised Multitask Learners"),$g=p(`, Alec Radford et al.`),em=h(),wi=s("p"),Mg=p(`A bigger and better version of GPT, pretrained on WebText (web pages from outgoing links in Reddit with 3 karmas or more).`),tm=h(),Ei=s("p"),Pg=p(`The library provides versions of the model for language modeling and multitask language modeling/multiple choice classification.`),am=h(),we=s("h3"),st=s("a"),Nh=s("span"),u(la.$$.fragment),Lg=h(),Bh=s("span"),Sg=p("CTRL"),sm=h(),N=s("div"),yi=s("a"),ki=s("img"),Rg=h(),xi=s("a"),Ai=s("img"),Ig=h(),Ti=s("a"),$i=s("img"),im=h(),na=s("p"),oa=s("a"),qg=p("CTRL: A Conditional Transformer Language Model for Controllable Generation"),Gg=p(`, Nitish Shirish Keskar et al.`),rm=h(),Mi=s("p"),Ng=p(`Same as the GPT model but adds the idea of control codes. Text is generated from a prompt (can be empty) and one (or several) of those control codes which are then used to influence the text generation: generate with the style of wikipedia article, a book or a movie review.`),lm=h(),Pi=s("p"),Bg=p("The library provides a version of the model for language modeling only."),nm=h(),Ee=s("h3"),it=s("a"),Fh=s("span"),u(ha.$$.fragment),Fg=h(),Dh=s("span"),Dg=p("Transformer-XL"),om=h(),B=s("div"),Li=s("a"),Si=s("img"),Hg=h(),Ri=s("a"),Ii=s("img"),Cg=h(),qi=s("a"),Gi=s("img"),hm=h(),fa=s("p"),pa=s("a"),zg=p("Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context"),jg=p(`, Zihang Dai et al.`),fm=h(),Ni=s("p"),Ug=p(`Same as a regular GPT model, but introduces a recurrence mechanism for two consecutive segments (similar to a regular RNNs with two consecutive inputs). In this context, a segment is a number of consecutive tokens (for instance 512) that may span across multiple documents, and segments are fed in order to the model.`),pm=h(),Bi=s("p"),Xg=p(`Basically, the hidden states of the previous segment are concatenated to the current input to compute the attention scores. This allows the model to pay attention to information that was in the previous segment as well as the current one. 
By stacking multiple attention layers, the receptive field can be increased to multiple previous segments.`),mm=h(),Fi=s("p"),Vg=p(`This changes the positional embeddings to positional relative embeddings (as the regular positional embeddings would give the same results in the current input and the current hidden state at a given position) and needs to make some adjustments in the way attention scores are computed.`),cm=h(),Di=s("p"),Yg=p("The library provides a version of the model for language modeling only."),um=h(),Hi=s("a"),dm=h(),ye=s("h3"),rt=s("a"),Hh=s("span"),u(ma.$$.fragment),Zg=h(),Ch=s("span"),Kg=p("Reformer"),vm=h(),F=s("div"),Ci=s("a"),zi=s("img"),Og=h(),ji=s("a"),Ui=s("img"),Wg=h(),Xi=s("a"),Vi=s("img"),gm=h(),ca=s("p"),ua=s("a"),Jg=p("Reformer: The Efficient Transformer"),Qg=p(", Nikita Kitaev et al ."),bm=h(),Yi=s("p"),e1=p(`An autoregressive transformer model with lots of tricks to reduce memory footprint and compute time. Those tricks include:`),_m=h(),S=s("ul"),da=s("li"),t1=p("Use "),Zi=s("a"),a1=p("Axial position encoding"),s1=p(` (see below for more details). It\u2019s a mechanism to avoid having a huge positional encoding matrix (when the sequence length is very big) by factorizing it into smaller matrices.`),i1=h(),va=s("li"),r1=p("Replace traditional attention by "),Ki=s("a"),l1=p("LSH (local-sensitive hashing) attention"),n1=p(` (see below for more details). It\u2019s a technique to avoid computing the full product query-key in the attention layers.`),o1=h(),zh=s("li"),h1=p(`Avoid storing the intermediate results of each layer by using reversible transformer layers to obtain them during the backward pass (subtracting the residuals from the input of the next layer gives them back) or recomputing them for results inside a given layer (less efficient than storing them but saves memory).`),f1=h(),jh=s("li"),p1=p("Compute the feedforward operations by chunks and not on the whole batch."),wm=h(),Oi=s("p"),m1=p("With those tricks, the model can be fed much larger sentences than traditional transformer autoregressive models."),Em=h(),u(lt.$$.fragment),ym=h(),Wi=s("p"),c1=p("The library provides a version of the model for language modeling only."),km=h(),ke=s("h3"),nt=s("a"),Uh=s("span"),u(ga.$$.fragment),u1=h(),Xh=s("span"),d1=p("XLNet"),xm=h(),D=s("div"),Ji=s("a"),Qi=s("img"),v1=h(),er=s("a"),tr=s("img"),g1=h(),ar=s("a"),sr=s("img"),Am=h(),ba=s("p"),_a=s("a"),b1=p("XLNet: Generalized Autoregressive Pretraining for Language Understanding"),_1=p(`, Zhilin Yang et al.`),Tm=h(),ir=s("p"),w1=p(`XLNet is not a traditional autoregressive model but uses a training strategy that builds on that. It permutes the tokens in the sentence, then allows the model to use the last n tokens to predict the token n+1. 
Since this is all done with a mask, the sentence is actually fed in the model in the right order, but instead of masking the first n tokens for n+1, XLNet uses a mask that hides the previous tokens in some given permutation of 1,\u2026,sequence length.`),$m=h(),rr=s("p"),E1=p("XLNet also uses the same recurrence mechanism as Transformer-XL to build long-term dependencies."),Mm=h(),lr=s("p"),y1=p(`The library provides a version of the model for language modeling, token classification, sentence classification, multiple choice classification and question answering.`),Pm=h(),nr=s("a"),Lm=h(),xe=s("h2"),ot=s("a"),Vh=s("span"),u(wa.$$.fragment),k1=h(),Yh=s("span"),x1=p("Encoders or autoencoding models"),Sm=h(),or=s("p"),A1=p(`As mentioned before, these models rely on the encoder part of the original transformer and use no mask so the model can look at all the tokens in the attention heads. For pretraining, targets are the original sentences and inputs are their corrupted versions.`),Rm=h(),u(Ea.$$.fragment),Im=h(),Ae=s("h3"),ht=s("a"),Zh=s("span"),u(ya.$$.fragment),T1=h(),Kh=s("span"),$1=p("BERT"),qm=h(),H=s("div"),hr=s("a"),fr=s("img"),M1=h(),pr=s("a"),mr=s("img"),P1=h(),cr=s("a"),ur=s("img"),Gm=h(),ka=s("p"),xa=s("a"),L1=p("BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding"),S1=p(`, Jacob Devlin et al.`),Nm=h(),dr=s("p"),R1=p(`Corrupts the inputs by using random masking, more precisely, during pretraining, a given percentage of tokens (usually 15%) is masked by:`),Bm=h(),oe=s("ul"),Oh=s("li"),I1=p("a special mask token with probability 0.8"),q1=h(),Wh=s("li"),G1=p("a random token different from the one masked with probability 0.1"),N1=h(),Jh=s("li"),B1=p("the same token with probability 0.1"),Fm=h(),vr=s("p"),F1=p(`The model must predict the original sentence, but has a second objective: inputs are two sentences A and B (with a separation token in between). With probability 50%, the sentences are consecutive in the corpus, in the remaining 50% they are not related. The model has to predict if the sentences are consecutive or not.`),Dm=h(),gr=s("p"),D1=p(`The library provides a version of the model for language modeling (traditional or masked), next sentence prediction, token classification, sentence classification, multiple choice classification and question answering.`),Hm=h(),Te=s("h3"),ft=s("a"),Qh=s("span"),u(Aa.$$.fragment),H1=h(),ef=s("span"),C1=p("ALBERT"),Cm=h(),C=s("div"),br=s("a"),_r=s("img"),z1=h(),wr=s("a"),Er=s("img"),j1=h(),yr=s("a"),kr=s("img"),zm=h(),Ta=s("p"),$a=s("a"),U1=p("ALBERT: A Lite BERT for Self-supervised Learning of Language Representations"),X1=p(`, Zhenzhong Lan et al.`),jm=h(),xr=s("p"),V1=p("Same as BERT but with a few tweaks:"),Um=h(),he=s("ul"),tf=s("li"),Y1=p(`Embedding size E is different from hidden size H justified because the embeddings are context independent (one embedding vector represents one token), whereas hidden states are context dependent (one hidden state represents a sequence of tokens) so it\u2019s more logical to have H >> E. Also, the embedding matrix is large since it\u2019s V x E (V being the vocab size). If E < H, it has less parameters.`),Z1=h(),af=s("li"),K1=p("Layers are split in groups that share parameters (to save memory)."),O1=h(),sf=s("li"),W1=p(`Next sentence prediction is replaced by a sentence ordering prediction: in the inputs, we have two sentences A and B (that are consecutive) and we either feed A followed by B or B followed by A. 
The model must predict if they have been swapped or not.`),Xm=h(),Ar=s("p"),J1=p(`The library provides a version of the model for masked language modeling, token classification, sentence classification, multiple choice classification and question answering.`),Vm=h(),$e=s("h3"),pt=s("a"),rf=s("span"),u(Ma.$$.fragment),Q1=h(),lf=s("span"),eb=p("RoBERTa"),Ym=h(),z=s("div"),Tr=s("a"),$r=s("img"),tb=h(),Mr=s("a"),Pr=s("img"),ab=h(),Lr=s("a"),Sr=s("img"),Zm=h(),Pa=s("p"),La=s("a"),sb=p("RoBERTa: A Robustly Optimized BERT Pretraining Approach"),ib=p(", Yinhan Liu et al."),Km=h(),Rr=s("p"),rb=p("Same as BERT with better pretraining tricks:"),Om=h(),R=s("ul"),nf=s("li"),lb=p("dynamic masking: tokens are masked differently at each epoch, whereas BERT does it once and for all"),nb=h(),of=s("li"),ob=p(`no NSP (next sentence prediction) loss and instead of putting just two sentences together, put a chunk of contiguous texts together to reach 512 tokens (so the sentences are in an order than may span several documents)`),hb=h(),hf=s("li"),fb=p("train with larger batches"),pb=h(),ff=s("li"),mb=p("use BPE with bytes as a subunit and not characters (because of unicode characters)"),Wm=h(),Ir=s("p"),cb=p(`The library provides a version of the model for masked language modeling, token classification, sentence classification, multiple choice classification and question answering.`),Jm=h(),Me=s("h3"),mt=s("a"),pf=s("span"),u(Sa.$$.fragment),ub=h(),mf=s("span"),db=p("DistilBERT"),Qm=h(),j=s("div"),qr=s("a"),Gr=s("img"),vb=h(),Nr=s("a"),Br=s("img"),gb=h(),Fr=s("a"),Dr=s("img"),ec=h(),Ra=s("p"),Ia=s("a"),bb=p("DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter"),_b=p(`, Victor Sanh et al.`),tc=h(),Hr=s("p"),wb=p(`Same as BERT but smaller. Trained by distillation of the pretrained BERT model, meaning it\u2019s been trained to predict the same probabilities as the larger model. The actual objective is a combination of:`),ac=h(),fe=s("ul"),cf=s("li"),Eb=p("finding the same probabilities as the teacher model"),yb=h(),uf=s("li"),kb=p("predicting the masked tokens correctly (but no next-sentence objective)"),xb=h(),df=s("li"),Ab=p("a cosine similarity between the hidden states of the student and the teacher model"),sc=h(),Cr=s("p"),Tb=p(`The library provides a version of the model for masked language modeling, token classification, sentence classification and question answering.`),ic=h(),Pe=s("h3"),ct=s("a"),vf=s("span"),u(qa.$$.fragment),$b=h(),gf=s("span"),Mb=p("ConvBERT"),rc=h(),U=s("div"),zr=s("a"),jr=s("img"),Pb=h(),Ur=s("a"),Xr=s("img"),Lb=h(),Vr=s("a"),Yr=s("img"),lc=h(),Ga=s("p"),Na=s("a"),Sb=p("ConvBERT: Improving BERT with Span-based Dynamic Convolution"),Rb=p(`, Zihang Jiang, Weihao Yu, Daquan Zhou, Yunpeng Chen, Jiashi Feng, Shuicheng Yan.`),nc=h(),Zr=s("p"),Ib=p(`Pre-trained language models like BERT and its variants have recently achieved impressive performance in various natural language understanding tasks. However, BERT heavily relies on the global self-attention block and thus suffers large memory footprint and computation cost. Although all its attention heads query on the whole input sequence for generating the attention map from a global perspective, we observe some heads only need to learn local dependencies, which means the existence of computation redundancy. We therefore propose a novel span-based dynamic convolution to replace these self-attention heads to directly model local dependencies. 
The novel convolution heads, together with the rest self-attention heads, form a new mixed attention block that is more efficient at both global and local context learning. We equip BERT with this mixed attention design and build a ConvBERT model. Experiments have shown that ConvBERT significantly outperforms BERT and its variants in various downstream tasks, with lower training cost and fewer model parameters. Remarkably, ConvBERTbase model achieves 86.4 GLUE score, 0.7 higher than ELECTRAbase, while using less than 1/4 training cost.`),oc=h(),Kr=s("p"),qb=p(`The library provides a version of the model for masked language modeling, token classification, sentence classification and question answering.`),hc=h(),Le=s("h3"),ut=s("a"),bf=s("span"),u(Ba.$$.fragment),Gb=h(),_f=s("span"),Nb=p("XLM"),fc=h(),X=s("div"),Or=s("a"),Wr=s("img"),Bb=h(),Jr=s("a"),Qr=s("img"),Fb=h(),el=s("a"),tl=s("img"),pc=h(),Fa=s("p"),Da=s("a"),Db=p("Cross-lingual Language Model Pretraining"),Hb=p(", Guillaume Lample and Alexis Conneau"),mc=h(),al=s("p"),Cb=p(`A transformer model trained on several languages. There are three different type of training for this model and the library provides checkpoints for all of them:`),cc=h(),pe=s("ul"),wf=s("li"),zb=p(`Causal language modeling (CLM) which is the traditional autoregressive training (so this model could be in the previous section as well). One of the languages is selected for each training sample, and the model input is a sentence of 256 tokens, that may span over several documents in one of those languages.`),jb=h(),Ef=s("li"),Ub=p(`Masked language modeling (MLM) which is like RoBERTa. One of the languages is selected for each training sample, and the model input is a sentence of 256 tokens, that may span over several documents in one of those languages, with dynamic masking of the tokens.`),Xb=h(),yf=s("li"),Vb=p(`A combination of MLM and translation language modeling (TLM). This consists of concatenating a sentence in two different languages, with random masking. To predict one of the masked tokens, the model can use both, the surrounding context in language 1 and the context given by language 2.`),uc=h(),I=s("p"),Yb=p("Checkpoints refer to which method was used for pretraining by having "),kf=s("em"),Zb=p("clm"),Kb=p(", "),xf=s("em"),Ob=p("mlm"),Wb=p(" or "),Af=s("em"),Jb=p("mlm-tlm"),Qb=p(` in their names. On top of positional embeddings, the model has language embeddings. When training using MLM/CLM, this gives the model an indication of the language used, and when training using MLM+TLM, an indication of the language used for each part.`),dc=h(),sl=s("p"),e_=p(`The library provides a version of the model for language modeling, token classification, sentence classification and question answering.`),vc=h(),Se=s("h3"),dt=s("a"),Tf=s("span"),u(Ha.$$.fragment),t_=h(),$f=s("span"),a_=p("XLM-RoBERTa"),gc=h(),V=s("div"),il=s("a"),rl=s("img"),s_=h(),ll=s("a"),nl=s("img"),i_=h(),ol=s("a"),hl=s("img"),bc=h(),Ca=s("p"),za=s("a"),r_=p("Unsupervised Cross-lingual Representation Learning at Scale"),l_=p(`, Alexis Conneau et al.`),_c=h(),fl=s("p"),n_=p(`Uses RoBERTa tricks on the XLM approach, but does not use the translation language modeling objective. It only uses masked language modeling on sentences coming from one language. 
However, the model is trained on many more languages (100) and doesn\u2019t use the language embeddings, so it\u2019s capable of detecting the input language by itself.`),wc=h(),pl=s("p"),o_=p(`The library provides a version of the model for masked language modeling, token classification, sentence classification, multiple choice classification and question answering.`),Ec=h(),Re=s("h3"),vt=s("a"),Mf=s("span"),u(ja.$$.fragment),h_=h(),Pf=s("span"),f_=p("FlauBERT"),yc=h(),Y=s("div"),ml=s("a"),cl=s("img"),p_=h(),ul=s("a"),dl=s("img"),m_=h(),vl=s("a"),gl=s("img"),kc=h(),Ua=s("p"),Xa=s("a"),c_=p("FlauBERT: Unsupervised Language Model Pre-training for French"),u_=p(", Hang Le et al."),xc=h(),bl=s("p"),d_=p("Like RoBERTa, without the sentence ordering prediction (so just trained on the MLM objective)."),Ac=h(),_l=s("p"),v_=p("The library provides a version of the model for language modeling and sentence classification."),Tc=h(),Ie=s("h3"),gt=s("a"),Lf=s("span"),u(Va.$$.fragment),g_=h(),Sf=s("span"),b_=p("ELECTRA"),$c=h(),Z=s("div"),wl=s("a"),El=s("img"),__=h(),yl=s("a"),kl=s("img"),w_=h(),xl=s("a"),Al=s("img"),Mc=h(),Ya=s("p"),Za=s("a"),E_=p("ELECTRA: Pre-training Text Encoders as Discriminators Rather Than Generators"),y_=p(`, Kevin Clark et al.`),Pc=h(),Tl=s("p"),k_=p(`ELECTRA is a transformer model pretrained with the use of another (small) masked language model. The inputs are corrupted by that language model, which takes an input text that is randomly masked and outputs a text in which ELECTRA has to predict which token is an original and which one has been replaced. Like for GAN training, the small language model is trained for a few steps (but with the original texts as objective, not to fool the ELECTRA model like in a traditional GAN setting) then the ELECTRA model is trained for a few steps.`),Lc=h(),$l=s("p"),x_=p(`The library provides a version of the model for masked language modeling, token classification and sentence classification.`),Sc=h(),qe=s("h3"),bt=s("a"),Rf=s("span"),u(Ka.$$.fragment),A_=h(),If=s("span"),T_=p("Funnel Transformer"),Rc=h(),K=s("div"),Ml=s("a"),Pl=s("img"),$_=h(),Ll=s("a"),Sl=s("img"),M_=h(),Rl=s("a"),Il=s("img"),Ic=h(),Oa=s("p"),Wa=s("a"),P_=p("Funnel-Transformer: Filtering out Sequential Redundancy for Efficient Language Processing"),L_=p(", Zihang Dai et al."),qc=h(),ql=s("p"),S_=p(`Funnel Transformer is a transformer model using pooling, a bit like a ResNet model: layers are grouped in blocks, and at the beginning of each block (except the first one), the hidden states are pooled among the sequence dimension. This way, their length is divided by 2, which speeds up the computation of the next hidden states. All pretrained models have three blocks, which means the final hidden state has a sequence length that is one fourth of the original sequence length.`),Gc=h(),Gl=s("p"),R_=p(`For tasks such as classification, this is not a problem, but for tasks like masked language modeling or token classification, we need a hidden state with the same sequence length as the original input. In those cases, the final hidden states are upsampled to the input sequence length and go through two additional layers. That\u2019s why there are two versions of each checkpoint. 
The version suffixed with \u201C-base\u201D contains only the three blocks, while the version without that suffix contains the three blocks and the upsampling head with its additional layers.`),Nc=h(),Nl=s("p"),I_=p("The pretrained models available use the same pretraining objective as ELECTRA."),Bc=h(),Bl=s("p"),q_=p(`The library provides a version of the model for masked language modeling, token classification, sentence classification, multiple choice classification and question answering.`),Fc=h(),Fl=s("a"),Dc=h(),Ge=s("h3"),_t=s("a"),qf=s("span"),u(Ja.$$.fragment),G_=h(),Gf=s("span"),N_=p("Longformer"),Hc=h(),O=s("div"),Dl=s("a"),Hl=s("img"),B_=h(),Cl=s("a"),zl=s("img"),F_=h(),jl=s("a"),Ul=s("img"),Cc=h(),Qa=s("p"),es=s("a"),D_=p("Longformer: The Long-Document Transformer"),H_=p(", Iz Beltagy et al."),zc=h(),wt=s("p"),C_=p(`A transformer model replacing the attention matrices by sparse matrices to go faster. Often, the local context (e.g., what are the two tokens left and right?) is enough to take action for a given token. Some preselected input tokens are still given global attention, but the attention matrix has way less parameters, resulting in a speed-up. See the `),Xl=s("a"),z_=p("local attention section"),j_=p(" for more information."),jc=h(),Vl=s("p"),U_=p("It is pretrained the same way a RoBERTa otherwise."),Uc=h(),u(Et.$$.fragment),Xc=h(),Yl=s("p"),X_=p(`The library provides a version of the model for masked language modeling, token classification, sentence classification, multiple choice classification and question answering.`),Vc=h(),Zl=s("a"),Yc=h(),Ne=s("h2"),yt=s("a"),Nf=s("span"),u(ts.$$.fragment),V_=h(),Bf=s("span"),Y_=p("Sequence-to-sequence models"),Zc=h(),Kl=s("p"),Z_=p("As mentioned before, these models keep both the encoder and the decoder of the original transformer."),Kc=h(),u(as.$$.fragment),Oc=h(),Be=s("h3"),kt=s("a"),Ff=s("span"),u(ss.$$.fragment),K_=h(),Df=s("span"),O_=p("BART"),Wc=h(),W=s("div"),Ol=s("a"),Wl=s("img"),W_=h(),Jl=s("a"),Ql=s("img"),J_=h(),en=s("a"),tn=s("img"),Jc=h(),is=s("p"),rs=s("a"),Q_=p("BART: Denoising Sequence-to-Sequence Pre-training for Natural Language Generation, Translation, and Comprehension"),ew=p(", Mike Lewis et al."),Qc=h(),an=s("p"),tw=p(`Sequence-to-sequence model with an encoder and a decoder. Encoder is fed a corrupted version of the tokens, decoder is fed the original tokens (but has a mask to hide the future words like a regular transformers decoder). A composition of the following transformations are applied on the pretraining tasks for the encoder:`),eu=h(),M=s("ul"),Hf=s("li"),aw=p("mask random tokens (like in BERT)"),sw=h(),Cf=s("li"),iw=p("delete random tokens"),rw=h(),zf=s("li"),lw=p("mask a span of k tokens with a single mask token (a span of 0 tokens is an insertion of a mask token)"),nw=h(),jf=s("li"),ow=p("permute sentences"),hw=h(),Uf=s("li"),fw=p("rotate the document to make it start at a specific token"),tu=h(),sn=s("p"),pw=p("The library provides a version of this model for conditional generation and sequence classification."),au=h(),Fe=s("h3"),xt=s("a"),Xf=s("span"),u(ls.$$.fragment),mw=h(),Vf=s("span"),cw=p("Pegasus"),su=h(),J=s("div"),rn=s("a"),ln=s("img"),uw=h(),nn=s("a"),on=s("img"),dw=h(),hn=s("a"),fn=s("img"),iu=h(),ns=s("p"),os=s("a"),vw=p("PEGASUS: Pre-training with Extracted Gap-sentences forAbstractive Summarization"),gw=p(", Jingqing Zhang, Yao Zhao, Mohammad Saleh and Peter J. 
Liu on Dec 18, 2019."),ru=h(),pn=s("p"),bw=p(`Sequence-to-sequence model with the same encoder-decoder model architecture as BART. Pegasus is pre-trained jointly on two self-supervised objective functions: Masked Language Modeling (MLM) and a novel summarization specific pretraining objective, called Gap Sentence Generation (GSG).`),lu=h(),At=s("ul"),Yf=s("li"),_w=p(`MLM: encoder input tokens are randomly replaced by a mask tokens and have to be predicted by the encoder (like in BERT)`),ww=h(),Zf=s("li"),Ew=p(`GSG: whole encoder input sentences are replaced by a second mask token and fed to the decoder, but which has a causal mask to hide the future words like a regular auto-regressive transformer decoder.`),nu=h(),mn=s("p"),yw=p(`In contrast to BART, Pegasus\u2019 pretraining task is intentionally similar to summarization: important sentences are masked and are generated together as one output sequence from the remaining sentences, similar to an extractive summary.`),ou=h(),cn=s("p"),kw=p("The library provides a version of this model for conditional generation, which should be used for summarization."),hu=h(),De=s("h3"),Tt=s("a"),Kf=s("span"),u(hs.$$.fragment),xw=h(),Of=s("span"),Aw=p("MarianMT"),fu=h(),Q=s("div"),un=s("a"),dn=s("img"),Tw=h(),vn=s("a"),gn=s("img"),$w=h(),bn=s("a"),_n=s("img"),pu=h(),fs=s("p"),ps=s("a"),Mw=p("Marian: Fast Neural Machine Translation in C++"),Pw=p(", Marcin Junczys-Dowmunt et al."),mu=h(),wn=s("p"),Lw=p("A framework for translation models, using the same models as BART"),cu=h(),En=s("p"),Sw=p("The library provides a version of this model for conditional generation."),uu=h(),He=s("h3"),$t=s("a"),Wf=s("span"),u(ms.$$.fragment),Rw=h(),Jf=s("span"),Iw=p("T5"),du=h(),ee=s("div"),yn=s("a"),kn=s("img"),qw=h(),xn=s("a"),An=s("img"),Gw=h(),Tn=s("a"),$n=s("img"),vu=h(),cs=s("p"),us=s("a"),Nw=p("Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer"),Bw=p(", Colin Raffel et al."),gu=h(),Mn=s("p"),Fw=p(`Uses the traditional transformer model (with a slight change in the positional embeddings, which are learned at each layer). To be able to operate on all NLP tasks, it transforms them into text-to-text problems by using specific prefixes: \u201Csummarize: \u201D, \u201Cquestion: \u201D, \u201Ctranslate English to German: \u201D and so forth.`),bu=h(),Pn=s("p"),Dw=p(`The pretraining includes both supervised and self-supervised training. Supervised training is conducted on downstream tasks provided by the GLUE and SuperGLUE benchmarks (converting them into text-to-text tasks as explained above).`),_u=h(),Ln=s("p"),Hw=p(`Self-supervised training uses corrupted tokens, by randomly removing 15% of the tokens and replacing them with individual sentinel tokens (if several consecutive tokens are marked for removal, the whole group is replaced with a single sentinel token). 
The input of the encoder is the corrupted sentence, the input of the decoder is the original sentence and the target is then the dropped out tokens delimited by their sentinel tokens.`),wu=h(),Sn=s("p"),Cw=p(`For instance, if we have the sentence \u201CMy dog is very cute .\u201D, and we decide to remove the tokens: \u201Cdog\u201D, \u201Cis\u201D and \u201Ccute\u201D, the encoder input becomes \u201CMy <x> very <y> .\u201D and the target input becomes \u201C<x> dog is <y> cute .<z>\u201D`),Eu=h(),Rn=s("p"),zw=p("The library provides a version of this model for conditional generation."),yu=h(),Ce=s("h3"),Mt=s("a"),Qf=s("span"),u(ds.$$.fragment),jw=h(),ep=s("span"),Uw=p("MT5"),ku=h(),te=s("div"),In=s("a"),qn=s("img"),Xw=h(),Gn=s("a"),Nn=s("img"),Vw=h(),Bn=s("a"),Fn=s("img"),xu=h(),vs=s("p"),gs=s("a"),Yw=p("mT5: A massively multilingual pre-trained text-to-text transformer"),Zw=p(`, Linting Xue et al.`),Au=h(),Dn=s("p"),Kw=p(`The model architecture is same as T5. mT5\u2019s pretraining objective includes T5\u2019s self-supervised training, but not T5\u2019s supervised training. mT5 is trained on 101 languages.`),Tu=h(),Hn=s("p"),Ow=p("The library provides a version of this model for conditional generation."),$u=h(),ze=s("h3"),Pt=s("a"),tp=s("span"),u(bs.$$.fragment),Ww=h(),ap=s("span"),Jw=p("MBart"),Mu=h(),ae=s("div"),Cn=s("a"),zn=s("img"),Qw=h(),jn=s("a"),Un=s("img"),e2=h(),Xn=s("a"),Vn=s("img"),Pu=h(),_s=s("p"),ws=s("a"),t2=p("Multilingual Denoising Pre-training for Neural Machine Translation"),a2=p(` by Yinhan Liu, Jiatao Gu, Naman Goyal, Xian Li, Sergey Edunov Marjan Ghazvininejad, Mike Lewis, Luke Zettlemoyer.`),Lu=h(),Yn=s("p"),s2=p(`The model architecture and pretraining objective is same as BART, but MBart is trained on 25 languages and is intended for supervised and unsupervised machine translation. MBart is one of the first methods for pretraining a complete sequence-to-sequence model by denoising full texts in multiple languages,`),Su=h(),Zn=s("p"),i2=p("The library provides a version of this model for conditional generation."),Ru=h(),Lt=s("p"),r2=p("The "),Es=s("a"),l2=p("mbart-large-en-ro checkpoint"),n2=p(` can be used for english -> romanian translation.`),Iu=h(),me=s("p"),o2=p("The "),ys=s("a"),h2=p("mbart-large-cc25"),f2=p(` checkpoint can be finetuned for other translation and summarization tasks, using code in `),sp=s("code"),p2=p("examples/pytorch/translation/"),m2=p(` , but is not very useful without finetuning.`),qu=h(),je=s("h3"),St=s("a"),ip=s("span"),u(ks.$$.fragment),c2=h(),rp=s("span"),u2=p("ProphetNet"),Gu=h(),se=s("div"),Kn=s("a"),On=s("img"),d2=h(),Wn=s("a"),Jn=s("img"),v2=h(),Qn=s("a"),eo=s("img"),Nu=h(),xs=s("p"),As=s("a"),g2=p("ProphetNet: Predicting Future N-gram for Sequence-to-Sequence Pre-training,"),b2=p(` by Yu Yan, Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang, Ming Zhou.`),Bu=h(),ce=s("p"),_2=p("ProphetNet introduces a novel "),lp=s("em"),w2=p("sequence-to-sequence"),E2=p(" pretraining objective, called "),np=s("em"),y2=p("future n-gram prediction"),k2=p(`. In future n-gram prediction, the model predicts the next n tokens simultaneously based on previous context tokens at each time step instead instead of just the single next token. The future n-gram prediction explicitly encourages the model to plan for the future tokens and prevent overfitting on strong local correlations. 
The model architecture is based on the original Transformer, but replaces the \u201Cstandard\u201D self-attention mechanism in the decoder by a a main self-attention mechanism and a self and n-stream (predict) self-attention mechanism.`),Fu=h(),to=s("p"),x2=p(`The library provides a pre-trained version of this model for conditional generation and a fine-tuned version for summarization.`),Du=h(),Ue=s("h3"),Rt=s("a"),op=s("span"),u(Ts.$$.fragment),A2=h(),hp=s("span"),T2=p("XLM-ProphetNet"),Hu=h(),ie=s("div"),ao=s("a"),so=s("img"),$2=h(),io=s("a"),ro=s("img"),M2=h(),lo=s("a"),no=s("img"),Cu=h(),$s=s("p"),Ms=s("a"),P2=p("ProphetNet: Predicting Future N-gram for Sequence-to-Sequence Pre-training,"),L2=p(` by Yu Yan, Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang, Ming Zhou.`),zu=h(),It=s("p"),S2=p(`XLM-ProphetNet\u2019s model architecture and pretraining objective is same as ProphetNet, but XLM-ProphetNet was pre-trained on the cross-lingual dataset `),Ps=s("a"),R2=p("XGLUE"),I2=p("."),ju=h(),oo=s("p"),q2=p(`The library provides a pre-trained version of this model for multi-lingual conditional generation and fine-tuned versions for headline generation and question generation, respectively.`),Uu=h(),ho=s("a"),Xu=h(),Xe=s("h2"),qt=s("a"),fp=s("span"),u(Ls.$$.fragment),G2=h(),pp=s("span"),N2=p("Multimodal models"),Vu=h(),fo=s("p"),B2=p(`There is one multimodal model in the library which has not been pretrained in the self-supervised fashion like the others.`),Yu=h(),Ve=s("h3"),Gt=s("a"),mp=s("span"),u(Ss.$$.fragment),F2=h(),cp=s("span"),D2=p("MMBT"),Zu=h(),Rs=s("p"),Is=s("a"),H2=p("Supervised Multimodal Bitransformers for Classifying Images and Text"),C2=p(`, Douwe Kiela et al.`),Ku=h(),po=s("p"),z2=p(`A transformers model used in multimodal settings, combining a text and an image to make predictions. 
The transformer model takes as inputs the embeddings of the tokenized text and the final activations of a pretrained on images resnet (after the pooling layer) that goes through a linear layer (to go from number of features at the end of the resnet to the hidden state dimension of the transformer).`),Ou=h(),mo=s("p"),j2=p(`The different inputs are concatenated, and on top of the positional embeddings, a segment embedding is added to let the model know which part of the input vector corresponds to the text and which to the image.`),Wu=h(),co=s("p"),U2=p("The pretrained model only works for classification."),Ju=h(),uo=s("a"),Qu=h(),Ye=s("h2"),Nt=s("a"),up=s("span"),u(qs.$$.fragment),X2=h(),dp=s("span"),V2=p("Retrieval-based models"),ed=h(),vo=s("p"),Y2=p("Some models use documents retrieval during (pre)training and inference for open-domain question answering, for example."),td=h(),Ze=s("h3"),Bt=s("a"),vp=s("span"),u(Gs.$$.fragment),Z2=h(),gp=s("span"),K2=p("DPR"),ad=h(),re=s("div"),go=s("a"),bo=s("img"),O2=h(),_o=s("a"),wo=s("img"),W2=h(),Eo=s("a"),yo=s("img"),sd=h(),Ns=s("p"),Bs=s("a"),J2=p("Dense Passage Retrieval for Open-Domain Question Answering"),Q2=p(`, Vladimir Karpukhin et al.`),id=h(),ko=s("p"),e0=p(`Dense Passage Retrieval (DPR) - is a set of tools and models for state-of-the-art open-domain question-answering research.`),rd=h(),xo=s("p"),t0=p("DPR consists in three models:"),ld=h(),ue=s("ul"),bp=s("li"),a0=p("Question encoder: encode questions as vectors"),s0=h(),_p=s("li"),i0=p("Context encoder: encode contexts as vectors"),r0=h(),wp=s("li"),l0=p(`Reader: extract the answer of the questions inside retrieved contexts, along with a relevance score (high if the inferred span actually answers the question).`),nd=h(),Ao=s("p"),n0=p(`DPR\u2019s pipeline (not implemented yet) uses a retrieval step to find the top k contexts given a certain question, and then it calls the reader with the question and the retrieved documents to get the answer.`),od=h(),Ke=s("h3"),Ft=s("a"),Ep=s("span"),u(Fs.$$.fragment),o0=h(),yp=s("span"),h0=p("RAG"),hd=h(),Oe=s("div"),To=s("a"),$o=s("img"),f0=h(),Mo=s("a"),Po=s("img"),fd=h(),Ds=s("p"),Hs=s("a"),p0=p("Retrieval-Augmented Generation for Knowledge-Intensive NLP Tasks"),m0=p(`, Patrick Lewis, Ethan Perez, Aleksandara Piktus, Fabio Petroni, Vladimir Karpukhin, Naman Goyal, Heinrich K\xFCttler, Mike Lewis, Wen-tau Yih, Tim Rockt\xE4schel, Sebastian Riedel, Douwe Kiela`),pd=h(),Lo=s("p"),c0=p(`Retrieval-augmented generation (\u201CRAG\u201D) models combine the powers of pretrained dense retrieval (DPR) and Seq2Seq models. RAG models retrieve docs, pass them to a seq2seq model, then marginalize to generate outputs. The retriever and seq2seq modules are initialized from pretrained models, and fine-tuned jointly, allowing both retrieval and generation to adapt to downstream tasks.`),md=h(),So=s("p"),u0=p("The two models RAG-Token and RAG-Sequence are available for generation."),cd=h(),We=s("h2"),Dt=s("a"),kp=s("span"),u(Cs.$$.fragment),d0=h(),xp=s("span"),v0=p("More technical aspects"),ud=h(),Je=s("h3"),Ht=s("a"),Ap=s("span"),u(zs.$$.fragment),g0=h(),Tp=s("span"),b0=p("Full vs sparse attention"),dd=h(),Ro=s("p"),_0=p(`Most transformer models use full attention in the sense that the attention matrix is square. It can be a big computational bottleneck when you have long texts. 
Longformer and reformer are models that try to be more efficient and use a sparse version of the attention matrix to speed up training.`),vd=h(),Io=s("a"),gd=h(),qo=s("p"),$p=s("strong"),w0=p("LSH attention"),bd=h(),js=s("p"),Go=s("a"),E0=p("Reformer"),y0=p(` uses LSH attention. In the softmax(QK^t), only the biggest elements (in the softmax dimension) of the matrix QK^t are going to give useful contributions. So for each query q in Q, we can consider only the keys k in K that are close to q. A hash function is used to determine if q and k are close. The attention mask is modified to mask the current token (except at the first position), because it will give a query and a key equal (so very similar to each other). Since the hash can be a bit random, several hash functions are used in practice (determined by a n_rounds parameter) and then are averaged together.`),_d=h(),No=s("a"),wd=h(),Bo=s("p"),Mp=s("strong"),k0=p("Local attention"),Ed=h(),Us=s("p"),Fo=s("a"),x0=p("Longformer"),A0=p(` uses local attention: often, the local context (e.g., what are the two tokens to the left and right?) is enough to take action for a given token. Also, by stacking attention layers that have a small window, the last layer will have a receptive field of more than just the tokens in the window, allowing them to build a representation of the whole sentence.`),yd=h(),Do=s("p"),T0=p(`Some preselected input tokens are also given global attention: for those few tokens, the attention matrix can access all tokens and this process is symmetric: all other tokens have access to those specific tokens (on top of the ones in their local window). This is shown in Figure 2d of the paper, see below for a sample attention mask:`),kd=h(),Ct=s("img"),xd=h(),Ho=s("p"),$0=p(`Using those attention matrices with less parameters then allows the model to have inputs having a bigger sequence length.`),Ad=h(),Qe=s("h3"),zt=s("a"),Pp=s("span"),u(Xs.$$.fragment),M0=h(),Lp=s("span"),P0=p("Other tricks"),Td=h(),Co=s("a"),$d=h(),zo=s("p"),Sp=s("strong"),L0=p("Axial positional encodings"),Md=h(),E=s("p"),jo=s("a"),S0=p("Reformer"),R0=p(` uses axial positional encodings: in traditional transformer models, the positional encoding E is a matrix of size `),Pd=new le,Ld=p(" by "),Sd=new le,Rd=p(", "),Id=new le,qd=p(" being the sequence length and "),Gd=new le,Nd=p(` the dimension of the hidden state. If you have very long texts, this matrix can be huge and take way too much space on the GPU. To alleviate that, axial positional encodings consist of factorizing that big matrix E in two smaller matrices E1 and E2, with dimensions `),Bd=new le,Fd=p(" and "),Dd=new le,Hd=p(", such that "),Cd=new le,zd=p(` and `),jd=new le,Ud=p(` (with the product for the lengths, this ends up being way smaller). The embedding for time step `),Xd=new le,Vd=p(" in E is obtained by concatenating the embeddings for timestep "),Yd=new le,Zd=p(" in E1 and "),Kd=new le,Od=p(` in E2.`),this.h()},l(e){const n=N9('[data-svelte="svelte-1phssyn"]',document.head);k=i(n,"META",{name:!0,content:!0}),n.forEach(t),T=f(e),x=i(e,"H1",{class:!0});var Vs=l(x);A=i(Vs,"A",{id:!0,class:!0,href:!0});var Rp=l(A);kh=i(Rp,"SPAN",{});var Fy=l(kh);d(Zt.$$.fragment,Fy),Fy.forEach(t),Rp.forEach(t),Cv=f(Vs),xh=i(Vs,"SPAN",{});var Dy=l(xh);zv=m(Dy,"Summary of the models"),Dy.forEach(t),Vs.forEach(t),Ip=f(e),L=i(e,"P",{});var jt=l(L);jv=m(jt,"This is a summary of the models available in \u{1F917} Transformers. 
It assumes you\u2019re familiar with the original "),Kt=i(jt,"A",{href:!0,rel:!0});var Hy=l(Kt);Uv=m(Hy,`transformer model`),Hy.forEach(t),Xv=m(jt,". For a gentle introduction check the "),Ot=i(jt,"A",{href:!0,rel:!0});var Cy=l(Ot);Vv=m(Cy,"annotated transformer"),Cy.forEach(t),Yv=m(jt,`. Here we focus on the high-level differences between the models. You can check them more in detail in their respective documentation. Also check out `),Wt=i(jt,"A",{href:!0,rel:!0});var zy=l(Wt);Zv=m(zy,"the Model Hub"),zy.forEach(t),Kv=m(jt," where you can filter the checkpoints by model architecture."),jt.forEach(t),qp=f(e),Ys=i(e,"P",{});var jy=l(Ys);Ov=m(jy,"Each one of the models in the library falls into one of the following categories:"),jy.forEach(t),Gp=f(e),$=i(e,"UL",{});var de=l($);Ah=i(de,"LI",{});var Uy=l(Ah);Zs=i(Uy,"A",{href:!0});var Xy=l(Zs);Wv=m(Xy,"autoregressive-models"),Xy.forEach(t),Uy.forEach(t),Jv=f(de),Th=i(de,"LI",{});var Vy=l(Th);Ks=i(Vy,"A",{href:!0});var Yy=l(Ks);Qv=m(Yy,"autoencoding-models"),Yy.forEach(t),Vy.forEach(t),eg=f(de),$h=i(de,"LI",{});var Zy=l($h);Os=i(Zy,"A",{href:!0});var Ky=l(Os);tg=m(Ky,"seq-to-seq-models"),Ky.forEach(t),Zy.forEach(t),ag=f(de),Mh=i(de,"LI",{});var Oy=l(Mh);Ws=i(Oy,"A",{href:!0});var Wy=l(Ws);sg=m(Wy,"multimodal-models"),Wy.forEach(t),Oy.forEach(t),ig=f(de),Ph=i(de,"LI",{});var Jy=l(Ph);Js=i(Jy,"A",{href:!0});var Qy=l(Js);rg=m(Qy,"retrieval-based-models"),Qy.forEach(t),Jy.forEach(t),de.forEach(t),Np=f(e),P=i(e,"IFRAME",{width:!0,height:!0,src:!0,title:!0,frameborder:!0,allow:!0}),l(P).forEach(t),Bp=f(e),Qs=i(e,"P",{});var e3=l(Qs);lg=m(e3,`Autoregressive models are pretrained on the classic language modeling task: guess the next token having read all the previous ones. They correspond to the decoder of the original transformer model, and a mask is used on top of the full sentence so that the attention heads can only see what was before in the text, and not what\u2019s after. Although those models can be fine-tuned and achieve great results on many tasks, the most natural application is text generation. A typical example of such models is GPT.`),e3.forEach(t),Fp=f(e),ei=i(e,"P",{});var t3=l(ei);ng=m(t3,`Autoencoding models are pretrained by corrupting the input tokens in some way and trying to reconstruct the original sentence. They correspond to the encoder of the original transformer model in the sense that they get access to the full inputs without any mask. Those models usually build a bidirectional representation of the whole sentence. They can be fine-tuned and achieve great results on many tasks such as text generation, but their most natural application is sentence classification or token classification. A typical example of such models is BERT.`),t3.forEach(t),Dp=f(e),ti=i(e,"P",{});var a3=l(ti);og=m(a3,`Note that the only difference between autoregressive models and autoencoding models is in the way the model is pretrained. Therefore, the same architecture can be used for both autoregressive and autoencoding models. When a given model has been used for both types of pretraining, we have put it in the category corresponding to the article where it was first introduced.`),a3.forEach(t),Hp=f(e),ai=i(e,"P",{});var s3=l(ai);hg=m(s3,`Sequence-to-sequence models use both the encoder and the decoder of the original transformer, either for translation tasks or by transforming other tasks to sequence-to-sequence problems. They can be fine-tuned to many tasks but their most natural applications are translation, summarization and question answering. 
The original transformer model is an example of such a model (only for translation), T5 is an example that can be fine-tuned on other tasks.`),s3.forEach(t),Cp=f(e),si=i(e,"P",{});var i3=l(si);fg=m(i3,"Multimodal models mix text inputs with other kinds (e.g. images) and are more specific to a given task."),i3.forEach(t),zp=f(e),ii=i(e,"A",{id:!0}),l(ii).forEach(t),jp=f(e),ge=i(e,"H2",{class:!0});var Jd=l(ge);et=i(Jd,"A",{id:!0,class:!0,href:!0});var r3=l(et);Lh=i(r3,"SPAN",{});var l3=l(Lh);d(Jt.$$.fragment,l3),l3.forEach(t),r3.forEach(t),pg=f(Jd),Sh=i(Jd,"SPAN",{});var n3=l(Sh);mg=m(n3,"Decoders or autoregressive models"),n3.forEach(t),Jd.forEach(t),Up=f(e),ri=i(e,"P",{});var o3=l(ri);cg=m(o3,`As mentioned before, these models rely on the decoder part of the original transformer and use an attention mask so that at each position, the model can only look at the tokens before the attention heads.`),o3.forEach(t),Xp=f(e),d(Qt.$$.fragment,e),Vp=f(e),be=i(e,"H3",{class:!0});var Qd=l(be);tt=i(Qd,"A",{id:!0,class:!0,href:!0});var h3=l(tt);Rh=i(h3,"SPAN",{});var f3=l(Rh);d(ea.$$.fragment,f3),f3.forEach(t),h3.forEach(t),ug=f(Qd),Ih=i(Qd,"SPAN",{});var p3=l(Ih);dg=m(p3,"Original GPT"),p3.forEach(t),Qd.forEach(t),Yp=f(e),q=i(e,"DIV",{class:!0});var Uo=l(q);li=i(Uo,"A",{href:!0});var m3=l(li);ni=i(m3,"IMG",{alt:!0,src:!0}),m3.forEach(t),vg=f(Uo),oi=i(Uo,"A",{href:!0});var c3=l(oi);hi=i(c3,"IMG",{alt:!0,src:!0}),c3.forEach(t),gg=f(Uo),fi=i(Uo,"A",{href:!0});var u3=l(fi);pi=i(u3,"IMG",{alt:!0,src:!0}),u3.forEach(t),Uo.forEach(t),Zp=f(e),ta=i(e,"P",{});var I0=l(ta);aa=i(I0,"A",{href:!0,rel:!0});var d3=l(aa);bg=m(d3,"Improving Language Understanding by Generative Pre-Training"),d3.forEach(t),_g=m(I0,", Alec Radford et al."),I0.forEach(t),Kp=f(e),mi=i(e,"P",{});var v3=l(mi);wg=m(v3,"The first autoregressive model based on the transformer architecture, pretrained on the Book Corpus dataset."),v3.forEach(t),Op=f(e),ci=i(e,"P",{});var g3=l(ci);Eg=m(g3,`The library provides versions of the model for language modeling and multitask language modeling/multiple choice classification.`),g3.forEach(t),Wp=f(e),_e=i(e,"H3",{class:!0});var ev=l(_e);at=i(ev,"A",{id:!0,class:!0,href:!0});var b3=l(at);qh=i(b3,"SPAN",{});var _3=l(qh);d(sa.$$.fragment,_3),_3.forEach(t),b3.forEach(t),yg=f(ev),Gh=i(ev,"SPAN",{});var w3=l(Gh);kg=m(w3,"GPT-2"),w3.forEach(t),ev.forEach(t),Jp=f(e),G=i(e,"DIV",{class:!0});var Xo=l(G);ui=i(Xo,"A",{href:!0});var E3=l(ui);di=i(E3,"IMG",{alt:!0,src:!0}),E3.forEach(t),xg=f(Xo),vi=i(Xo,"A",{href:!0});var y3=l(vi);gi=i(y3,"IMG",{alt:!0,src:!0}),y3.forEach(t),Ag=f(Xo),bi=i(Xo,"A",{href:!0});var k3=l(bi);_i=i(k3,"IMG",{alt:!0,src:!0}),k3.forEach(t),Xo.forEach(t),Qp=f(e),ia=i(e,"P",{});var q0=l(ia);ra=i(q0,"A",{href:!0,rel:!0});var x3=l(ra);Tg=m(x3,"Language Models are Unsupervised Multitask Learners"),x3.forEach(t),$g=m(q0,`, Alec Radford et al.`),q0.forEach(t),em=f(e),wi=i(e,"P",{});var A3=l(wi);Mg=m(A3,`A bigger and better version of GPT, pretrained on WebText (web pages from outgoing links in Reddit with 3 karmas or more).`),A3.forEach(t),tm=f(e),Ei=i(e,"P",{});var T3=l(Ei);Pg=m(T3,`The library provides versions of the model for language modeling and multitask language modeling/multiple choice classification.`),T3.forEach(t),am=f(e),we=i(e,"H3",{class:!0});var tv=l(we);st=i(tv,"A",{id:!0,class:!0,href:!0});var $3=l(st);Nh=i($3,"SPAN",{});var M3=l(Nh);d(la.$$.fragment,M3),M3.forEach(t),$3.forEach(t),Lg=f(tv),Bh=i(tv,"SPAN",{});var 
P3=l(Bh);Sg=m(P3,"CTRL"),P3.forEach(t),tv.forEach(t),sm=f(e),N=i(e,"DIV",{class:!0});var Vo=l(N);yi=i(Vo,"A",{href:!0});var L3=l(yi);ki=i(L3,"IMG",{alt:!0,src:!0}),L3.forEach(t),Rg=f(Vo),xi=i(Vo,"A",{href:!0});var S3=l(xi);Ai=i(S3,"IMG",{alt:!0,src:!0}),S3.forEach(t),Ig=f(Vo),Ti=i(Vo,"A",{href:!0});var R3=l(Ti);$i=i(R3,"IMG",{alt:!0,src:!0}),R3.forEach(t),Vo.forEach(t),im=f(e),na=i(e,"P",{});var G0=l(na);oa=i(G0,"A",{href:!0,rel:!0});var I3=l(oa);qg=m(I3,"CTRL: A Conditional Transformer Language Model for Controllable Generation"),I3.forEach(t),Gg=m(G0,`, Nitish Shirish Keskar et al.`),G0.forEach(t),rm=f(e),Mi=i(e,"P",{});var q3=l(Mi);Ng=m(q3,`Same as the GPT model but adds the idea of control codes. Text is generated from a prompt (can be empty) and one (or several) of those control codes which are then used to influence the text generation: generate with the style of wikipedia article, a book or a movie review.`),q3.forEach(t),lm=f(e),Pi=i(e,"P",{});var G3=l(Pi);Bg=m(G3,"The library provides a version of the model for language modeling only."),G3.forEach(t),nm=f(e),Ee=i(e,"H3",{class:!0});var av=l(Ee);it=i(av,"A",{id:!0,class:!0,href:!0});var N3=l(it);Fh=i(N3,"SPAN",{});var B3=l(Fh);d(ha.$$.fragment,B3),B3.forEach(t),N3.forEach(t),Fg=f(av),Dh=i(av,"SPAN",{});var F3=l(Dh);Dg=m(F3,"Transformer-XL"),F3.forEach(t),av.forEach(t),om=f(e),B=i(e,"DIV",{class:!0});var Yo=l(B);Li=i(Yo,"A",{href:!0});var D3=l(Li);Si=i(D3,"IMG",{alt:!0,src:!0}),D3.forEach(t),Hg=f(Yo),Ri=i(Yo,"A",{href:!0});var H3=l(Ri);Ii=i(H3,"IMG",{alt:!0,src:!0}),H3.forEach(t),Cg=f(Yo),qi=i(Yo,"A",{href:!0});var C3=l(qi);Gi=i(C3,"IMG",{alt:!0,src:!0}),C3.forEach(t),Yo.forEach(t),hm=f(e),fa=i(e,"P",{});var N0=l(fa);pa=i(N0,"A",{href:!0,rel:!0});var z3=l(pa);zg=m(z3,"Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context"),z3.forEach(t),jg=m(N0,`, Zihang Dai et al.`),N0.forEach(t),fm=f(e),Ni=i(e,"P",{});var j3=l(Ni);Ug=m(j3,`Same as a regular GPT model, but introduces a recurrence mechanism for two consecutive segments (similar to a regular RNNs with two consecutive inputs). In this context, a segment is a number of consecutive tokens (for instance 512) that may span across multiple documents, and segments are fed in order to the model.`),j3.forEach(t),pm=f(e),Bi=i(e,"P",{});var U3=l(Bi);Xg=m(U3,`Basically, the hidden states of the previous segment are concatenated to the current input to compute the attention scores. This allows the model to pay attention to information that was in the previous segment as well as the current one. 
By stacking multiple attention layers, the receptive field can be increased to multiple previous segments.`),U3.forEach(t),mm=f(e),Fi=i(e,"P",{});var X3=l(Fi);Vg=m(X3,`This changes the positional embeddings to positional relative embeddings (as the regular positional embeddings would give the same results in the current input and the current hidden state at a given position) and needs to make some adjustments in the way attention scores are computed.`),X3.forEach(t),cm=f(e),Di=i(e,"P",{});var V3=l(Di);Yg=m(V3,"The library provides a version of the model for language modeling only."),V3.forEach(t),um=f(e),Hi=i(e,"A",{id:!0}),l(Hi).forEach(t),dm=f(e),ye=i(e,"H3",{class:!0});var sv=l(ye);rt=i(sv,"A",{id:!0,class:!0,href:!0});var Y3=l(rt);Hh=i(Y3,"SPAN",{});var Z3=l(Hh);d(ma.$$.fragment,Z3),Z3.forEach(t),Y3.forEach(t),Zg=f(sv),Ch=i(sv,"SPAN",{});var K3=l(Ch);Kg=m(K3,"Reformer"),K3.forEach(t),sv.forEach(t),vm=f(e),F=i(e,"DIV",{class:!0});var Zo=l(F);Ci=i(Zo,"A",{href:!0});var O3=l(Ci);zi=i(O3,"IMG",{alt:!0,src:!0}),O3.forEach(t),Og=f(Zo),ji=i(Zo,"A",{href:!0});var W3=l(ji);Ui=i(W3,"IMG",{alt:!0,src:!0}),W3.forEach(t),Wg=f(Zo),Xi=i(Zo,"A",{href:!0});var J3=l(Xi);Vi=i(J3,"IMG",{alt:!0,src:!0}),J3.forEach(t),Zo.forEach(t),gm=f(e),ca=i(e,"P",{});var B0=l(ca);ua=i(B0,"A",{href:!0,rel:!0});var Q3=l(ua);Jg=m(Q3,"Reformer: The Efficient Transformer"),Q3.forEach(t),Qg=m(B0,", Nikita Kitaev et al ."),B0.forEach(t),bm=f(e),Yi=i(e,"P",{});var e5=l(Yi);e1=m(e5,`An autoregressive transformer model with lots of tricks to reduce memory footprint and compute time. Those tricks include:`),e5.forEach(t),_m=f(e),S=i(e,"UL",{});var Ut=l(S);da=i(Ut,"LI",{});var iv=l(da);t1=m(iv,"Use "),Zi=i(iv,"A",{href:!0});var t5=l(Zi);a1=m(t5,"Axial position encoding"),t5.forEach(t),s1=m(iv,` (see below for more details). It\u2019s a mechanism to avoid having a huge positional encoding matrix (when the sequence length is very big) by factorizing it into smaller matrices.`),iv.forEach(t),i1=f(Ut),va=i(Ut,"LI",{});var rv=l(va);r1=m(rv,"Replace traditional attention by "),Ki=i(rv,"A",{href:!0});var a5=l(Ki);l1=m(a5,"LSH (local-sensitive hashing) attention"),a5.forEach(t),n1=m(rv,` (see below for more details). 
It\u2019s a technique to avoid computing the full product query-key in the attention layers.`),rv.forEach(t),o1=f(Ut),zh=i(Ut,"LI",{});var s5=l(zh);h1=m(s5,`Avoid storing the intermediate results of each layer by using reversible transformer layers to obtain them during the backward pass (subtracting the residuals from the input of the next layer gives them back) or recomputing them for results inside a given layer (less efficient than storing them but saves memory).`),s5.forEach(t),f1=f(Ut),jh=i(Ut,"LI",{});var i5=l(jh);p1=m(i5,"Compute the feedforward operations by chunks and not on the whole batch."),i5.forEach(t),Ut.forEach(t),wm=f(e),Oi=i(e,"P",{});var r5=l(Oi);m1=m(r5,"With those tricks, the model can be fed much larger sentences than traditional transformer autoregressive models."),r5.forEach(t),Em=f(e),d(lt.$$.fragment,e),ym=f(e),Wi=i(e,"P",{});var l5=l(Wi);c1=m(l5,"The library provides a version of the model for language modeling only."),l5.forEach(t),km=f(e),ke=i(e,"H3",{class:!0});var lv=l(ke);nt=i(lv,"A",{id:!0,class:!0,href:!0});var n5=l(nt);Uh=i(n5,"SPAN",{});var o5=l(Uh);d(ga.$$.fragment,o5),o5.forEach(t),n5.forEach(t),u1=f(lv),Xh=i(lv,"SPAN",{});var h5=l(Xh);d1=m(h5,"XLNet"),h5.forEach(t),lv.forEach(t),xm=f(e),D=i(e,"DIV",{class:!0});var Ko=l(D);Ji=i(Ko,"A",{href:!0});var f5=l(Ji);Qi=i(f5,"IMG",{alt:!0,src:!0}),f5.forEach(t),v1=f(Ko),er=i(Ko,"A",{href:!0});var p5=l(er);tr=i(p5,"IMG",{alt:!0,src:!0}),p5.forEach(t),g1=f(Ko),ar=i(Ko,"A",{href:!0});var m5=l(ar);sr=i(m5,"IMG",{alt:!0,src:!0}),m5.forEach(t),Ko.forEach(t),Am=f(e),ba=i(e,"P",{});var F0=l(ba);_a=i(F0,"A",{href:!0,rel:!0});var c5=l(_a);b1=m(c5,"XLNet: Generalized Autoregressive Pretraining for Language Understanding"),c5.forEach(t),_1=m(F0,`, Zhilin Yang et al.`),F0.forEach(t),Tm=f(e),ir=i(e,"P",{});var u5=l(ir);w1=m(u5,`XLNet is not a traditional autoregressive model but uses a training strategy that builds on that. It permutes the tokens in the sentence, then allows the model to use the last n tokens to predict the token n+1. Since this is all done with a mask, the sentence is actually fed in the model in the right order, but instead of masking the first n tokens for n+1, XLNet uses a mask that hides the previous tokens in some given permutation of 1,\u2026,sequence length.`),u5.forEach(t),$m=f(e),rr=i(e,"P",{});var d5=l(rr);E1=m(d5,"XLNet also uses the same recurrence mechanism as Transformer-XL to build long-term dependencies."),d5.forEach(t),Mm=f(e),lr=i(e,"P",{});var v5=l(lr);y1=m(v5,`The library provides a version of the model for language modeling, token classification, sentence classification, multiple choice classification and question answering.`),v5.forEach(t),Pm=f(e),nr=i(e,"A",{id:!0}),l(nr).forEach(t),Lm=f(e),xe=i(e,"H2",{class:!0});var nv=l(xe);ot=i(nv,"A",{id:!0,class:!0,href:!0});var g5=l(ot);Vh=i(g5,"SPAN",{});var b5=l(Vh);d(wa.$$.fragment,b5),b5.forEach(t),g5.forEach(t),k1=f(nv),Yh=i(nv,"SPAN",{});var _5=l(Yh);x1=m(_5,"Encoders or autoencoding models"),_5.forEach(t),nv.forEach(t),Sm=f(e),or=i(e,"P",{});var w5=l(or);A1=m(w5,`As mentioned before, these models rely on the encoder part of the original transformer and use no mask so the model can look at all the tokens in the attention heads. 
For pretraining, targets are the original sentences and inputs are their corrupted versions.`),w5.forEach(t),Rm=f(e),d(Ea.$$.fragment,e),Im=f(e),Ae=i(e,"H3",{class:!0});var ov=l(Ae);ht=i(ov,"A",{id:!0,class:!0,href:!0});var E5=l(ht);Zh=i(E5,"SPAN",{});var y5=l(Zh);d(ya.$$.fragment,y5),y5.forEach(t),E5.forEach(t),T1=f(ov),Kh=i(ov,"SPAN",{});var k5=l(Kh);$1=m(k5,"BERT"),k5.forEach(t),ov.forEach(t),qm=f(e),H=i(e,"DIV",{class:!0});var Oo=l(H);hr=i(Oo,"A",{href:!0});var x5=l(hr);fr=i(x5,"IMG",{alt:!0,src:!0}),x5.forEach(t),M1=f(Oo),pr=i(Oo,"A",{href:!0});var A5=l(pr);mr=i(A5,"IMG",{alt:!0,src:!0}),A5.forEach(t),P1=f(Oo),cr=i(Oo,"A",{href:!0});var T5=l(cr);ur=i(T5,"IMG",{alt:!0,src:!0}),T5.forEach(t),Oo.forEach(t),Gm=f(e),ka=i(e,"P",{});var D0=l(ka);xa=i(D0,"A",{href:!0,rel:!0});var $5=l(xa);L1=m($5,"BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding"),$5.forEach(t),S1=m(D0,`, Jacob Devlin et al.`),D0.forEach(t),Nm=f(e),dr=i(e,"P",{});var M5=l(dr);R1=m(M5,`Corrupts the inputs by using random masking, more precisely, during pretraining, a given percentage of tokens (usually 15%) is masked by:`),M5.forEach(t),Bm=f(e),oe=i(e,"UL",{});var Wo=l(oe);Oh=i(Wo,"LI",{});var P5=l(Oh);I1=m(P5,"a special mask token with probability 0.8"),P5.forEach(t),q1=f(Wo),Wh=i(Wo,"LI",{});var L5=l(Wh);G1=m(L5,"a random token different from the one masked with probability 0.1"),L5.forEach(t),N1=f(Wo),Jh=i(Wo,"LI",{});var S5=l(Jh);B1=m(S5,"the same token with probability 0.1"),S5.forEach(t),Wo.forEach(t),Fm=f(e),vr=i(e,"P",{});var R5=l(vr);F1=m(R5,`The model must predict the original sentence, but has a second objective: inputs are two sentences A and B (with a separation token in between). With probability 50%, the sentences are consecutive in the corpus, in the remaining 50% they are not related. The model has to predict if the sentences are consecutive or not.`),R5.forEach(t),Dm=f(e),gr=i(e,"P",{});var I5=l(gr);D1=m(I5,`The library provides a version of the model for language modeling (traditional or masked), next sentence prediction, token classification, sentence classification, multiple choice classification and question answering.`),I5.forEach(t),Hm=f(e),Te=i(e,"H3",{class:!0});var hv=l(Te);ft=i(hv,"A",{id:!0,class:!0,href:!0});var q5=l(ft);Qh=i(q5,"SPAN",{});var G5=l(Qh);d(Aa.$$.fragment,G5),G5.forEach(t),q5.forEach(t),H1=f(hv),ef=i(hv,"SPAN",{});var N5=l(ef);C1=m(N5,"ALBERT"),N5.forEach(t),hv.forEach(t),Cm=f(e),C=i(e,"DIV",{class:!0});var Jo=l(C);br=i(Jo,"A",{href:!0});var B5=l(br);_r=i(B5,"IMG",{alt:!0,src:!0}),B5.forEach(t),z1=f(Jo),wr=i(Jo,"A",{href:!0});var F5=l(wr);Er=i(F5,"IMG",{alt:!0,src:!0}),F5.forEach(t),j1=f(Jo),yr=i(Jo,"A",{href:!0});var D5=l(yr);kr=i(D5,"IMG",{alt:!0,src:!0}),D5.forEach(t),Jo.forEach(t),zm=f(e),Ta=i(e,"P",{});var H0=l(Ta);$a=i(H0,"A",{href:!0,rel:!0});var H5=l($a);U1=m(H5,"ALBERT: A Lite BERT for Self-supervised Learning of Language Representations"),H5.forEach(t),X1=m(H0,`, Zhenzhong Lan et al.`),H0.forEach(t),jm=f(e),xr=i(e,"P",{});var C5=l(xr);V1=m(C5,"Same as BERT but with a few tweaks:"),C5.forEach(t),Um=f(e),he=i(e,"UL",{});var Qo=l(he);tf=i(Qo,"LI",{});var z5=l(tf);Y1=m(z5,`Embedding size E is different from hidden size H justified because the embeddings are context independent (one embedding vector represents one token), whereas hidden states are context dependent (one hidden state represents a sequence of tokens) so it\u2019s more logical to have H >> E. Also, the embedding matrix is large since it\u2019s V x E (V being the vocab size). 
If E < H, it has fewer parameters.`),z5.forEach(t),Z1=f(Qo),af=i(Qo,"LI",{});var j5=l(af);K1=m(j5,"Layers are split in groups that share parameters (to save memory)."),j5.forEach(t),O1=f(Qo),sf=i(Qo,"LI",{});var U5=l(sf);W1=m(U5,`Next sentence prediction is replaced by a sentence ordering prediction: in the inputs, we have two sentences A and B (that are consecutive) and we either feed A followed by B or B followed by A. The model must predict if they have been swapped or not.`),U5.forEach(t),Qo.forEach(t),Xm=f(e),Ar=i(e,"P",{});var X5=l(Ar);J1=m(X5,`The library provides a version of the model for masked language modeling, token classification, sentence classification, multiple choice classification and question answering.`),X5.forEach(t),Vm=f(e),$e=i(e,"H3",{class:!0});var fv=l($e);pt=i(fv,"A",{id:!0,class:!0,href:!0});var V5=l(pt);rf=i(V5,"SPAN",{});var Y5=l(rf);d(Ma.$$.fragment,Y5),Y5.forEach(t),V5.forEach(t),Q1=f(fv),lf=i(fv,"SPAN",{});var Z5=l(lf);eb=m(Z5,"RoBERTa"),Z5.forEach(t),fv.forEach(t),Ym=f(e),z=i(e,"DIV",{class:!0});var eh=l(z);Tr=i(eh,"A",{href:!0});var K5=l(Tr);$r=i(K5,"IMG",{alt:!0,src:!0}),K5.forEach(t),tb=f(eh),Mr=i(eh,"A",{href:!0});var O5=l(Mr);Pr=i(O5,"IMG",{alt:!0,src:!0}),O5.forEach(t),ab=f(eh),Lr=i(eh,"A",{href:!0});var W5=l(Lr);Sr=i(W5,"IMG",{alt:!0,src:!0}),W5.forEach(t),eh.forEach(t),Zm=f(e),Pa=i(e,"P",{});var C0=l(Pa);La=i(C0,"A",{href:!0,rel:!0});var J5=l(La);sb=m(J5,"RoBERTa: A Robustly Optimized BERT Pretraining Approach"),J5.forEach(t),ib=m(C0,", Yinhan Liu et al."),C0.forEach(t),Km=f(e),Rr=i(e,"P",{});var Q5=l(Rr);rb=m(Q5,"Same as BERT with better pretraining tricks:"),Q5.forEach(t),Om=f(e),R=i(e,"UL",{});var Xt=l(R);nf=i(Xt,"LI",{});var ek=l(nf);lb=m(ek,"dynamic masking: tokens are masked differently at each epoch, whereas BERT does it once and for all"),ek.forEach(t),nb=f(Xt),of=i(Xt,"LI",{});var tk=l(of);ob=m(tk,`no NSP (next sentence prediction) loss and instead of putting just two sentences together, put a chunk of contiguous texts together to reach 512 tokens (so the sentences are in an order that may span several documents)`),tk.forEach(t),hb=f(Xt),hf=i(Xt,"LI",{});var ak=l(hf);fb=m(ak,"train with larger batches"),ak.forEach(t),pb=f(Xt),ff=i(Xt,"LI",{});var sk=l(ff);mb=m(sk,"use BPE with bytes as a subunit and not characters (because of unicode characters)"),sk.forEach(t),Xt.forEach(t),Wm=f(e),Ir=i(e,"P",{});var ik=l(Ir);cb=m(ik,`The library provides a version of the model for masked language modeling, token classification, sentence classification, multiple choice classification and question answering.`),ik.forEach(t),Jm=f(e),Me=i(e,"H3",{class:!0});var pv=l(Me);mt=i(pv,"A",{id:!0,class:!0,href:!0});var rk=l(mt);pf=i(rk,"SPAN",{});var lk=l(pf);d(Sa.$$.fragment,lk),lk.forEach(t),rk.forEach(t),ub=f(pv),mf=i(pv,"SPAN",{});var nk=l(mf);db=m(nk,"DistilBERT"),nk.forEach(t),pv.forEach(t),Qm=f(e),j=i(e,"DIV",{class:!0});var th=l(j);qr=i(th,"A",{href:!0});var ok=l(qr);Gr=i(ok,"IMG",{alt:!0,src:!0}),ok.forEach(t),vb=f(th),Nr=i(th,"A",{href:!0});var hk=l(Nr);Br=i(hk,"IMG",{alt:!0,src:!0}),hk.forEach(t),gb=f(th),Fr=i(th,"A",{href:!0});var fk=l(Fr);Dr=i(fk,"IMG",{alt:!0,src:!0}),fk.forEach(t),th.forEach(t),ec=f(e),Ra=i(e,"P",{});var z0=l(Ra);Ia=i(z0,"A",{href:!0,rel:!0});var pk=l(Ia);bb=m(pk,"DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter"),pk.forEach(t),_b=m(z0,`, Victor Sanh et al.`),z0.forEach(t),tc=f(e),Hr=i(e,"P",{});var mk=l(Hr);wb=m(mk,`Same as BERT but smaller. 
Trained by distillation of the pretrained BERT model, meaning it\u2019s been trained to predict the same probabilities as the larger model. The actual objective is a combination of:`),mk.forEach(t),ac=f(e),fe=i(e,"UL",{});var ah=l(fe);cf=i(ah,"LI",{});var ck=l(cf);Eb=m(ck,"finding the same probabilities as the teacher model"),ck.forEach(t),yb=f(ah),uf=i(ah,"LI",{});var uk=l(uf);kb=m(uk,"predicting the masked tokens correctly (but no next-sentence objective)"),uk.forEach(t),xb=f(ah),df=i(ah,"LI",{});var dk=l(df);Ab=m(dk,"a cosine similarity between the hidden states of the student and the teacher model"),dk.forEach(t),ah.forEach(t),sc=f(e),Cr=i(e,"P",{});var vk=l(Cr);Tb=m(vk,`The library provides a version of the model for masked language modeling, token classification, sentence classification and question answering.`),vk.forEach(t),ic=f(e),Pe=i(e,"H3",{class:!0});var mv=l(Pe);ct=i(mv,"A",{id:!0,class:!0,href:!0});var gk=l(ct);vf=i(gk,"SPAN",{});var bk=l(vf);d(qa.$$.fragment,bk),bk.forEach(t),gk.forEach(t),$b=f(mv),gf=i(mv,"SPAN",{});var _k=l(gf);Mb=m(_k,"ConvBERT"),_k.forEach(t),mv.forEach(t),rc=f(e),U=i(e,"DIV",{class:!0});var sh=l(U);zr=i(sh,"A",{href:!0});var wk=l(zr);jr=i(wk,"IMG",{alt:!0,src:!0}),wk.forEach(t),Pb=f(sh),Ur=i(sh,"A",{href:!0});var Ek=l(Ur);Xr=i(Ek,"IMG",{alt:!0,src:!0}),Ek.forEach(t),Lb=f(sh),Vr=i(sh,"A",{href:!0});var yk=l(Vr);Yr=i(yk,"IMG",{alt:!0,src:!0}),yk.forEach(t),sh.forEach(t),lc=f(e),Ga=i(e,"P",{});var j0=l(Ga);Na=i(j0,"A",{href:!0,rel:!0});var kk=l(Na);Sb=m(kk,"ConvBERT: Improving BERT with Span-based Dynamic Convolution"),kk.forEach(t),Rb=m(j0,`, Zihang Jiang, Weihao Yu, Daquan Zhou, Yunpeng Chen, Jiashi Feng, Shuicheng Yan.`),j0.forEach(t),nc=f(e),Zr=i(e,"P",{});var xk=l(Zr);Ib=m(xk,`Pre-trained language models like BERT and its variants have recently achieved impressive performance in various natural language understanding tasks. However, BERT heavily relies on the global self-attention block and thus suffers large memory footprint and computation cost. Although all its attention heads query on the whole input sequence for generating the attention map from a global perspective, we observe some heads only need to learn local dependencies, which means the existence of computation redundancy. We therefore propose a novel span-based dynamic convolution to replace these self-attention heads to directly model local dependencies. The novel convolution heads, together with the rest self-attention heads, form a new mixed attention block that is more efficient at both global and local context learning. We equip BERT with this mixed attention design and build a ConvBERT model. Experiments have shown that ConvBERT significantly outperforms BERT and its variants in various downstream tasks, with lower training cost and fewer model parameters. 
Remarkably, ConvBERTbase model achieves 86.4 GLUE score, 0.7 higher than ELECTRAbase, while using less than 1/4 training cost.`),xk.forEach(t),oc=f(e),Kr=i(e,"P",{});var Ak=l(Kr);qb=m(Ak,`The library provides a version of the model for masked language modeling, token classification, sentence classification and question answering.`),Ak.forEach(t),hc=f(e),Le=i(e,"H3",{class:!0});var cv=l(Le);ut=i(cv,"A",{id:!0,class:!0,href:!0});var Tk=l(ut);bf=i(Tk,"SPAN",{});var $k=l(bf);d(Ba.$$.fragment,$k),$k.forEach(t),Tk.forEach(t),Gb=f(cv),_f=i(cv,"SPAN",{});var Mk=l(_f);Nb=m(Mk,"XLM"),Mk.forEach(t),cv.forEach(t),fc=f(e),X=i(e,"DIV",{class:!0});var ih=l(X);Or=i(ih,"A",{href:!0});var Pk=l(Or);Wr=i(Pk,"IMG",{alt:!0,src:!0}),Pk.forEach(t),Bb=f(ih),Jr=i(ih,"A",{href:!0});var Lk=l(Jr);Qr=i(Lk,"IMG",{alt:!0,src:!0}),Lk.forEach(t),Fb=f(ih),el=i(ih,"A",{href:!0});var Sk=l(el);tl=i(Sk,"IMG",{alt:!0,src:!0}),Sk.forEach(t),ih.forEach(t),pc=f(e),Fa=i(e,"P",{});var U0=l(Fa);Da=i(U0,"A",{href:!0,rel:!0});var Rk=l(Da);Db=m(Rk,"Cross-lingual Language Model Pretraining"),Rk.forEach(t),Hb=m(U0,", Guillaume Lample and Alexis Conneau"),U0.forEach(t),mc=f(e),al=i(e,"P",{});var Ik=l(al);Cb=m(Ik,`A transformer model trained on several languages. There are three different type of training for this model and the library provides checkpoints for all of them:`),Ik.forEach(t),cc=f(e),pe=i(e,"UL",{});var rh=l(pe);wf=i(rh,"LI",{});var qk=l(wf);zb=m(qk,`Causal language modeling (CLM) which is the traditional autoregressive training (so this model could be in the previous section as well). One of the languages is selected for each training sample, and the model input is a sentence of 256 tokens, that may span over several documents in one of those languages.`),qk.forEach(t),jb=f(rh),Ef=i(rh,"LI",{});var Gk=l(Ef);Ub=m(Gk,`Masked language modeling (MLM) which is like RoBERTa. One of the languages is selected for each training sample, and the model input is a sentence of 256 tokens, that may span over several documents in one of those languages, with dynamic masking of the tokens.`),Gk.forEach(t),Xb=f(rh),yf=i(rh,"LI",{});var Nk=l(yf);Vb=m(Nk,`A combination of MLM and translation language modeling (TLM). This consists of concatenating a sentence in two different languages, with random masking. To predict one of the masked tokens, the model can use both, the surrounding context in language 1 and the context given by language 2.`),Nk.forEach(t),rh.forEach(t),uc=f(e),I=i(e,"P",{});var Vt=l(I);Yb=m(Vt,"Checkpoints refer to which method was used for pretraining by having "),kf=i(Vt,"EM",{});var Bk=l(kf);Zb=m(Bk,"clm"),Bk.forEach(t),Kb=m(Vt,", "),xf=i(Vt,"EM",{});var Fk=l(xf);Ob=m(Fk,"mlm"),Fk.forEach(t),Wb=m(Vt," or "),Af=i(Vt,"EM",{});var Dk=l(Af);Jb=m(Dk,"mlm-tlm"),Dk.forEach(t),Qb=m(Vt,` in their names. On top of positional embeddings, the model has language embeddings. 
When training using MLM/CLM, this gives the model an indication of the language used, and when training using MLM+TLM, an indication of the language used for each part.`),Vt.forEach(t),dc=f(e),sl=i(e,"P",{});var Hk=l(sl);e_=m(Hk,`The library provides a version of the model for language modeling, token classification, sentence classification and question answering.`),Hk.forEach(t),vc=f(e),Se=i(e,"H3",{class:!0});var uv=l(Se);dt=i(uv,"A",{id:!0,class:!0,href:!0});var Ck=l(dt);Tf=i(Ck,"SPAN",{});var zk=l(Tf);d(Ha.$$.fragment,zk),zk.forEach(t),Ck.forEach(t),t_=f(uv),$f=i(uv,"SPAN",{});var jk=l($f);a_=m(jk,"XLM-RoBERTa"),jk.forEach(t),uv.forEach(t),gc=f(e),V=i(e,"DIV",{class:!0});var lh=l(V);il=i(lh,"A",{href:!0});var Uk=l(il);rl=i(Uk,"IMG",{alt:!0,src:!0}),Uk.forEach(t),s_=f(lh),ll=i(lh,"A",{href:!0});var Xk=l(ll);nl=i(Xk,"IMG",{alt:!0,src:!0}),Xk.forEach(t),i_=f(lh),ol=i(lh,"A",{href:!0});var Vk=l(ol);hl=i(Vk,"IMG",{alt:!0,src:!0}),Vk.forEach(t),lh.forEach(t),bc=f(e),Ca=i(e,"P",{});var X0=l(Ca);za=i(X0,"A",{href:!0,rel:!0});var Yk=l(za);r_=m(Yk,"Unsupervised Cross-lingual Representation Learning at Scale"),Yk.forEach(t),l_=m(X0,`, Alexis Conneau et al.`),X0.forEach(t),_c=f(e),fl=i(e,"P",{});var Zk=l(fl);n_=m(Zk,`Uses RoBERTa tricks on the XLM approach, but does not use the translation language modeling objective. It only uses masked language modeling on sentences coming from one language. However, the model is trained on many more languages (100) and doesn\u2019t use the language embeddings, so it\u2019s capable of detecting the input language by itself.`),Zk.forEach(t),wc=f(e),pl=i(e,"P",{});var Kk=l(pl);o_=m(Kk,`The library provides a version of the model for masked language modeling, token classification, sentence classification, multiple choice classification and question answering.`),Kk.forEach(t),Ec=f(e),Re=i(e,"H3",{class:!0});var dv=l(Re);vt=i(dv,"A",{id:!0,class:!0,href:!0});var Ok=l(vt);Mf=i(Ok,"SPAN",{});var Wk=l(Mf);d(ja.$$.fragment,Wk),Wk.forEach(t),Ok.forEach(t),h_=f(dv),Pf=i(dv,"SPAN",{});var Jk=l(Pf);f_=m(Jk,"FlauBERT"),Jk.forEach(t),dv.forEach(t),yc=f(e),Y=i(e,"DIV",{class:!0});var nh=l(Y);ml=i(nh,"A",{href:!0});var Qk=l(ml);cl=i(Qk,"IMG",{alt:!0,src:!0}),Qk.forEach(t),p_=f(nh),ul=i(nh,"A",{href:!0});var e4=l(ul);dl=i(e4,"IMG",{alt:!0,src:!0}),e4.forEach(t),m_=f(nh),vl=i(nh,"A",{href:!0});var t4=l(vl);gl=i(t4,"IMG",{alt:!0,src:!0}),t4.forEach(t),nh.forEach(t),kc=f(e),Ua=i(e,"P",{});var V0=l(Ua);Xa=i(V0,"A",{href:!0,rel:!0});var a4=l(Xa);c_=m(a4,"FlauBERT: Unsupervised Language Model Pre-training for French"),a4.forEach(t),u_=m(V0,", Hang Le et al."),V0.forEach(t),xc=f(e),bl=i(e,"P",{});var s4=l(bl);d_=m(s4,"Like RoBERTa, without the sentence ordering prediction (so just trained on the MLM objective)."),s4.forEach(t),Ac=f(e),_l=i(e,"P",{});var i4=l(_l);v_=m(i4,"The library provides a version of the model for language modeling and sentence classification."),i4.forEach(t),Tc=f(e),Ie=i(e,"H3",{class:!0});var vv=l(Ie);gt=i(vv,"A",{id:!0,class:!0,href:!0});var r4=l(gt);Lf=i(r4,"SPAN",{});var l4=l(Lf);d(Va.$$.fragment,l4),l4.forEach(t),r4.forEach(t),g_=f(vv),Sf=i(vv,"SPAN",{});var n4=l(Sf);b_=m(n4,"ELECTRA"),n4.forEach(t),vv.forEach(t),$c=f(e),Z=i(e,"DIV",{class:!0});var oh=l(Z);wl=i(oh,"A",{href:!0});var o4=l(wl);El=i(o4,"IMG",{alt:!0,src:!0}),o4.forEach(t),__=f(oh),yl=i(oh,"A",{href:!0});var h4=l(yl);kl=i(h4,"IMG",{alt:!0,src:!0}),h4.forEach(t),w_=f(oh),xl=i(oh,"A",{href:!0});var f4=l(xl);Al=i(f4,"IMG",{alt:!0,src:!0}),f4.forEach(t),oh.forEach(t),Mc=f(e),Ya=i(e,"P",{});var 
Y0=l(Ya);Za=i(Y0,"A",{href:!0,rel:!0});var p4=l(Za);E_=m(p4,"ELECTRA: Pre-training Text Encoders as Discriminators Rather Than Generators"),p4.forEach(t),y_=m(Y0,`, Kevin Clark et al.`),Y0.forEach(t),Pc=f(e),Tl=i(e,"P",{});var m4=l(Tl);k_=m(m4,`ELECTRA is a transformer model pretrained with the use of another (small) masked language model. The inputs are corrupted by that language model, which takes an input text that is randomly masked and outputs a text in which ELECTRA has to predict which token is an original and which one has been replaced. Like for GAN training, the small language model is trained for a few steps (but with the original texts as objective, not to fool the ELECTRA model like in a traditional GAN setting) then the ELECTRA model is trained for a few steps.`),m4.forEach(t),Lc=f(e),$l=i(e,"P",{});var c4=l($l);x_=m(c4,`The library provides a version of the model for masked language modeling, token classification and sentence classification.`),c4.forEach(t),Sc=f(e),qe=i(e,"H3",{class:!0});var gv=l(qe);bt=i(gv,"A",{id:!0,class:!0,href:!0});var u4=l(bt);Rf=i(u4,"SPAN",{});var d4=l(Rf);d(Ka.$$.fragment,d4),d4.forEach(t),u4.forEach(t),A_=f(gv),If=i(gv,"SPAN",{});var v4=l(If);T_=m(v4,"Funnel Transformer"),v4.forEach(t),gv.forEach(t),Rc=f(e),K=i(e,"DIV",{class:!0});var hh=l(K);Ml=i(hh,"A",{href:!0});var g4=l(Ml);Pl=i(g4,"IMG",{alt:!0,src:!0}),g4.forEach(t),$_=f(hh),Ll=i(hh,"A",{href:!0});var b4=l(Ll);Sl=i(b4,"IMG",{alt:!0,src:!0}),b4.forEach(t),M_=f(hh),Rl=i(hh,"A",{href:!0});var _4=l(Rl);Il=i(_4,"IMG",{alt:!0,src:!0}),_4.forEach(t),hh.forEach(t),Ic=f(e),Oa=i(e,"P",{});var Z0=l(Oa);Wa=i(Z0,"A",{href:!0,rel:!0});var w4=l(Wa);P_=m(w4,"Funnel-Transformer: Filtering out Sequential Redundancy for Efficient Language Processing"),w4.forEach(t),L_=m(Z0,", Zihang Dai et al."),Z0.forEach(t),qc=f(e),ql=i(e,"P",{});var E4=l(ql);S_=m(E4,`Funnel Transformer is a transformer model using pooling, a bit like a ResNet model: layers are grouped in blocks, and at the beginning of each block (except the first one), the hidden states are pooled among the sequence dimension. This way, their length is divided by 2, which speeds up the computation of the next hidden states. All pretrained models have three blocks, which means the final hidden state has a sequence length that is one fourth of the original sequence length.`),E4.forEach(t),Gc=f(e),Gl=i(e,"P",{});var y4=l(Gl);R_=m(y4,`For tasks such as classification, this is not a problem, but for tasks like masked language modeling or token classification, we need a hidden state with the same sequence length as the original input. In those cases, the final hidden states are upsampled to the input sequence length and go through two additional layers. That\u2019s why there are two versions of each checkpoint. 
The version suffixed with \u201C-base\u201D contains only the three blocks, while the version without that suffix contains the three blocks and the upsampling head with its additional layers.`),y4.forEach(t),Nc=f(e),Nl=i(e,"P",{});var k4=l(Nl);I_=m(k4,"The pretrained models available use the same pretraining objective as ELECTRA."),k4.forEach(t),Bc=f(e),Bl=i(e,"P",{});var x4=l(Bl);q_=m(x4,`The library provides a version of the model for masked language modeling, token classification, sentence classification, multiple choice classification and question answering.`),x4.forEach(t),Fc=f(e),Fl=i(e,"A",{id:!0}),l(Fl).forEach(t),Dc=f(e),Ge=i(e,"H3",{class:!0});var bv=l(Ge);_t=i(bv,"A",{id:!0,class:!0,href:!0});var A4=l(_t);qf=i(A4,"SPAN",{});var T4=l(qf);d(Ja.$$.fragment,T4),T4.forEach(t),A4.forEach(t),G_=f(bv),Gf=i(bv,"SPAN",{});var $4=l(Gf);N_=m($4,"Longformer"),$4.forEach(t),bv.forEach(t),Hc=f(e),O=i(e,"DIV",{class:!0});var fh=l(O);Dl=i(fh,"A",{href:!0});var M4=l(Dl);Hl=i(M4,"IMG",{alt:!0,src:!0}),M4.forEach(t),B_=f(fh),Cl=i(fh,"A",{href:!0});var P4=l(Cl);zl=i(P4,"IMG",{alt:!0,src:!0}),P4.forEach(t),F_=f(fh),jl=i(fh,"A",{href:!0});var L4=l(jl);Ul=i(L4,"IMG",{alt:!0,src:!0}),L4.forEach(t),fh.forEach(t),Cc=f(e),Qa=i(e,"P",{});var K0=l(Qa);es=i(K0,"A",{href:!0,rel:!0});var S4=l(es);D_=m(S4,"Longformer: The Long-Document Transformer"),S4.forEach(t),H_=m(K0,", Iz Beltagy et al."),K0.forEach(t),zc=f(e),wt=i(e,"P",{});var _v=l(wt);C_=m(_v,`A transformer model replacing the attention matrices by sparse matrices to go faster. Often, the local context (e.g., what are the two tokens left and right?) is enough to take action for a given token. Some preselected input tokens are still given global attention, but the attention matrix has way less parameters, resulting in a speed-up. 
See the `),Xl=i(_v,"A",{href:!0});var R4=l(Xl);z_=m(R4,"local attention section"),R4.forEach(t),j_=m(_v," for more information."),_v.forEach(t),jc=f(e),Vl=i(e,"P",{});var I4=l(Vl);U_=m(I4,"It is pretrained the same way as RoBERTa otherwise."),I4.forEach(t),Uc=f(e),d(Et.$$.fragment,e),Xc=f(e),Yl=i(e,"P",{});var q4=l(Yl);X_=m(q4,`The library provides a version of the model for masked language modeling, token classification, sentence classification, multiple choice classification and question answering.`),q4.forEach(t),Vc=f(e),Zl=i(e,"A",{id:!0}),l(Zl).forEach(t),Yc=f(e),Ne=i(e,"H2",{class:!0});var wv=l(Ne);yt=i(wv,"A",{id:!0,class:!0,href:!0});var G4=l(yt);Nf=i(G4,"SPAN",{});var N4=l(Nf);d(ts.$$.fragment,N4),N4.forEach(t),G4.forEach(t),V_=f(wv),Bf=i(wv,"SPAN",{});var B4=l(Bf);Y_=m(B4,"Sequence-to-sequence models"),B4.forEach(t),wv.forEach(t),Zc=f(e),Kl=i(e,"P",{});var F4=l(Kl);Z_=m(F4,"As mentioned before, these models keep both the encoder and the decoder of the original transformer."),F4.forEach(t),Kc=f(e),d(as.$$.fragment,e),Oc=f(e),Be=i(e,"H3",{class:!0});var Ev=l(Be);kt=i(Ev,"A",{id:!0,class:!0,href:!0});var D4=l(kt);Ff=i(D4,"SPAN",{});var H4=l(Ff);d(ss.$$.fragment,H4),H4.forEach(t),D4.forEach(t),K_=f(Ev),Df=i(Ev,"SPAN",{});var C4=l(Df);O_=m(C4,"BART"),C4.forEach(t),Ev.forEach(t),Wc=f(e),W=i(e,"DIV",{class:!0});var ph=l(W);Ol=i(ph,"A",{href:!0});var z4=l(Ol);Wl=i(z4,"IMG",{alt:!0,src:!0}),z4.forEach(t),W_=f(ph),Jl=i(ph,"A",{href:!0});var j4=l(Jl);Ql=i(j4,"IMG",{alt:!0,src:!0}),j4.forEach(t),J_=f(ph),en=i(ph,"A",{href:!0});var U4=l(en);tn=i(U4,"IMG",{alt:!0,src:!0}),U4.forEach(t),ph.forEach(t),Jc=f(e),is=i(e,"P",{});var O0=l(is);rs=i(O0,"A",{href:!0,rel:!0});var X4=l(rs);Q_=m(X4,"BART: Denoising Sequence-to-Sequence Pre-training for Natural Language Generation, Translation, and Comprehension"),X4.forEach(t),ew=m(O0,", Mike Lewis et al."),O0.forEach(t),Qc=f(e),an=i(e,"P",{});var V4=l(an);tw=m(V4,`Sequence-to-sequence model with an encoder and a decoder. Encoder is fed a corrupted version of the tokens, decoder is fed the original tokens (but has a mask to hide the future words like a regular transformer decoder). 
A composition of the following transformations are applied on the pretraining tasks for the encoder:`),V4.forEach(t),eu=f(e),M=i(e,"UL",{});var ve=l(M);Hf=i(ve,"LI",{});var Y4=l(Hf);aw=m(Y4,"mask random tokens (like in BERT)"),Y4.forEach(t),sw=f(ve),Cf=i(ve,"LI",{});var Z4=l(Cf);iw=m(Z4,"delete random tokens"),Z4.forEach(t),rw=f(ve),zf=i(ve,"LI",{});var K4=l(zf);lw=m(K4,"mask a span of k tokens with a single mask token (a span of 0 tokens is an insertion of a mask token)"),K4.forEach(t),nw=f(ve),jf=i(ve,"LI",{});var O4=l(jf);ow=m(O4,"permute sentences"),O4.forEach(t),hw=f(ve),Uf=i(ve,"LI",{});var W4=l(Uf);fw=m(W4,"rotate the document to make it start at a specific token"),W4.forEach(t),ve.forEach(t),tu=f(e),sn=i(e,"P",{});var J4=l(sn);pw=m(J4,"The library provides a version of this model for conditional generation and sequence classification."),J4.forEach(t),au=f(e),Fe=i(e,"H3",{class:!0});var yv=l(Fe);xt=i(yv,"A",{id:!0,class:!0,href:!0});var Q4=l(xt);Xf=i(Q4,"SPAN",{});var ex=l(Xf);d(ls.$$.fragment,ex),ex.forEach(t),Q4.forEach(t),mw=f(yv),Vf=i(yv,"SPAN",{});var tx=l(Vf);cw=m(tx,"Pegasus"),tx.forEach(t),yv.forEach(t),su=f(e),J=i(e,"DIV",{class:!0});var mh=l(J);rn=i(mh,"A",{href:!0});var ax=l(rn);ln=i(ax,"IMG",{alt:!0,src:!0}),ax.forEach(t),uw=f(mh),nn=i(mh,"A",{href:!0});var sx=l(nn);on=i(sx,"IMG",{alt:!0,src:!0}),sx.forEach(t),dw=f(mh),hn=i(mh,"A",{href:!0});var ix=l(hn);fn=i(ix,"IMG",{alt:!0,src:!0}),ix.forEach(t),mh.forEach(t),iu=f(e),ns=i(e,"P",{});var W0=l(ns);os=i(W0,"A",{href:!0,rel:!0});var rx=l(os);vw=m(rx,"PEGASUS: Pre-training with Extracted Gap-sentences forAbstractive Summarization"),rx.forEach(t),gw=m(W0,", Jingqing Zhang, Yao Zhao, Mohammad Saleh and Peter J. Liu on Dec 18, 2019."),W0.forEach(t),ru=f(e),pn=i(e,"P",{});var lx=l(pn);bw=m(lx,`Sequence-to-sequence model with the same encoder-decoder model architecture as BART. 
Pegasus is pre-trained jointly on two self-supervised objective functions: Masked Language Modeling (MLM) and a novel summarization specific pretraining objective, called Gap Sentence Generation (GSG).`),lx.forEach(t),lu=f(e),At=i(e,"UL",{});var kv=l(At);Yf=i(kv,"LI",{});var nx=l(Yf);_w=m(nx,`MLM: encoder input tokens are randomly replaced by a mask tokens and have to be predicted by the encoder (like in BERT)`),nx.forEach(t),ww=f(kv),Zf=i(kv,"LI",{});var ox=l(Zf);Ew=m(ox,`GSG: whole encoder input sentences are replaced by a second mask token and fed to the decoder, but which has a causal mask to hide the future words like a regular auto-regressive transformer decoder.`),ox.forEach(t),kv.forEach(t),nu=f(e),mn=i(e,"P",{});var hx=l(mn);yw=m(hx,`In contrast to BART, Pegasus\u2019 pretraining task is intentionally similar to summarization: important sentences are masked and are generated together as one output sequence from the remaining sentences, similar to an extractive summary.`),hx.forEach(t),ou=f(e),cn=i(e,"P",{});var fx=l(cn);kw=m(fx,"The library provides a version of this model for conditional generation, which should be used for summarization."),fx.forEach(t),hu=f(e),De=i(e,"H3",{class:!0});var xv=l(De);Tt=i(xv,"A",{id:!0,class:!0,href:!0});var px=l(Tt);Kf=i(px,"SPAN",{});var mx=l(Kf);d(hs.$$.fragment,mx),mx.forEach(t),px.forEach(t),xw=f(xv),Of=i(xv,"SPAN",{});var cx=l(Of);Aw=m(cx,"MarianMT"),cx.forEach(t),xv.forEach(t),fu=f(e),Q=i(e,"DIV",{class:!0});var ch=l(Q);un=i(ch,"A",{href:!0});var ux=l(un);dn=i(ux,"IMG",{alt:!0,src:!0}),ux.forEach(t),Tw=f(ch),vn=i(ch,"A",{href:!0});var dx=l(vn);gn=i(dx,"IMG",{alt:!0,src:!0}),dx.forEach(t),$w=f(ch),bn=i(ch,"A",{href:!0});var vx=l(bn);_n=i(vx,"IMG",{alt:!0,src:!0}),vx.forEach(t),ch.forEach(t),pu=f(e),fs=i(e,"P",{});var J0=l(fs);ps=i(J0,"A",{href:!0,rel:!0});var gx=l(ps);Mw=m(gx,"Marian: Fast Neural Machine Translation in C++"),gx.forEach(t),Pw=m(J0,", Marcin Junczys-Dowmunt et al."),J0.forEach(t),mu=f(e),wn=i(e,"P",{});var bx=l(wn);Lw=m(bx,"A framework for translation models, using the same models as BART"),bx.forEach(t),cu=f(e),En=i(e,"P",{});var _x=l(En);Sw=m(_x,"The library provides a version of this model for conditional generation."),_x.forEach(t),uu=f(e),He=i(e,"H3",{class:!0});var Av=l(He);$t=i(Av,"A",{id:!0,class:!0,href:!0});var wx=l($t);Wf=i(wx,"SPAN",{});var Ex=l(Wf);d(ms.$$.fragment,Ex),Ex.forEach(t),wx.forEach(t),Rw=f(Av),Jf=i(Av,"SPAN",{});var yx=l(Jf);Iw=m(yx,"T5"),yx.forEach(t),Av.forEach(t),du=f(e),ee=i(e,"DIV",{class:!0});var uh=l(ee);yn=i(uh,"A",{href:!0});var kx=l(yn);kn=i(kx,"IMG",{alt:!0,src:!0}),kx.forEach(t),qw=f(uh),xn=i(uh,"A",{href:!0});var xx=l(xn);An=i(xx,"IMG",{alt:!0,src:!0}),xx.forEach(t),Gw=f(uh),Tn=i(uh,"A",{href:!0});var Ax=l(Tn);$n=i(Ax,"IMG",{alt:!0,src:!0}),Ax.forEach(t),uh.forEach(t),vu=f(e),cs=i(e,"P",{});var Q0=l(cs);us=i(Q0,"A",{href:!0,rel:!0});var Tx=l(us);Nw=m(Tx,"Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer"),Tx.forEach(t),Bw=m(Q0,", Colin Raffel et al."),Q0.forEach(t),gu=f(e),Mn=i(e,"P",{});var $x=l(Mn);Fw=m($x,`Uses the traditional transformer model (with a slight change in the positional embeddings, which are learned at each layer). 
To be able to operate on all NLP tasks, it transforms them into text-to-text problems by using specific prefixes: \u201Csummarize: \u201D, \u201Cquestion: \u201D, \u201Ctranslate English to German: \u201D and so forth.`),$x.forEach(t),bu=f(e),Pn=i(e,"P",{});var Mx=l(Pn);Dw=m(Mx,`The pretraining includes both supervised and self-supervised training. Supervised training is conducted on downstream tasks provided by the GLUE and SuperGLUE benchmarks (converting them into text-to-text tasks as explained above).`),Mx.forEach(t),_u=f(e),Ln=i(e,"P",{});var Px=l(Ln);Hw=m(Px,`Self-supervised training uses corrupted tokens, by randomly removing 15% of the tokens and replacing them with individual sentinel tokens (if several consecutive tokens are marked for removal, the whole group is replaced with a single sentinel token). The input of the encoder is the corrupted sentence, the input of the decoder is the original sentence and the target is then the dropped out tokens delimited by their sentinel tokens.`),Px.forEach(t),wu=f(e),Sn=i(e,"P",{});var Lx=l(Sn);Cw=m(Lx,`For instance, if we have the sentence \u201CMy dog is very cute .\u201D, and we decide to remove the tokens: \u201Cdog\u201D, \u201Cis\u201D and \u201Ccute\u201D, the encoder input becomes \u201CMy <x> very <y> .\u201D and the target input becomes \u201C<x> dog is <y> cute .<z>\u201D`),Lx.forEach(t),Eu=f(e),Rn=i(e,"P",{});var Sx=l(Rn);zw=m(Sx,"The library provides a version of this model for conditional generation."),Sx.forEach(t),yu=f(e),Ce=i(e,"H3",{class:!0});var Tv=l(Ce);Mt=i(Tv,"A",{id:!0,class:!0,href:!0});var Rx=l(Mt);Qf=i(Rx,"SPAN",{});var Ix=l(Qf);d(ds.$$.fragment,Ix),Ix.forEach(t),Rx.forEach(t),jw=f(Tv),ep=i(Tv,"SPAN",{});var qx=l(ep);Uw=m(qx,"MT5"),qx.forEach(t),Tv.forEach(t),ku=f(e),te=i(e,"DIV",{class:!0});var dh=l(te);In=i(dh,"A",{href:!0});var Gx=l(In);qn=i(Gx,"IMG",{alt:!0,src:!0}),Gx.forEach(t),Xw=f(dh),Gn=i(dh,"A",{href:!0});var Nx=l(Gn);Nn=i(Nx,"IMG",{alt:!0,src:!0}),Nx.forEach(t),Vw=f(dh),Bn=i(dh,"A",{href:!0});var Bx=l(Bn);Fn=i(Bx,"IMG",{alt:!0,src:!0}),Bx.forEach(t),dh.forEach(t),xu=f(e),vs=i(e,"P",{});var eE=l(vs);gs=i(eE,"A",{href:!0,rel:!0});var Fx=l(gs);Yw=m(Fx,"mT5: A massively multilingual pre-trained text-to-text transformer"),Fx.forEach(t),Zw=m(eE,`, Linting Xue et al.`),eE.forEach(t),Au=f(e),Dn=i(e,"P",{});var Dx=l(Dn);Kw=m(Dx,`The model architecture is same as T5. mT5\u2019s pretraining objective includes T5\u2019s self-supervised training, but not T5\u2019s supervised training. 
mT5 is trained on 101 languages.`),Dx.forEach(t),Tu=f(e),Hn=i(e,"P",{});var Hx=l(Hn);Ow=m(Hx,"The library provides a version of this model for conditional generation."),Hx.forEach(t),$u=f(e),ze=i(e,"H3",{class:!0});var $v=l(ze);Pt=i($v,"A",{id:!0,class:!0,href:!0});var Cx=l(Pt);tp=i(Cx,"SPAN",{});var zx=l(tp);d(bs.$$.fragment,zx),zx.forEach(t),Cx.forEach(t),Ww=f($v),ap=i($v,"SPAN",{});var jx=l(ap);Jw=m(jx,"MBart"),jx.forEach(t),$v.forEach(t),Mu=f(e),ae=i(e,"DIV",{class:!0});var vh=l(ae);Cn=i(vh,"A",{href:!0});var Ux=l(Cn);zn=i(Ux,"IMG",{alt:!0,src:!0}),Ux.forEach(t),Qw=f(vh),jn=i(vh,"A",{href:!0});var Xx=l(jn);Un=i(Xx,"IMG",{alt:!0,src:!0}),Xx.forEach(t),e2=f(vh),Xn=i(vh,"A",{href:!0});var Vx=l(Xn);Vn=i(Vx,"IMG",{alt:!0,src:!0}),Vx.forEach(t),vh.forEach(t),Pu=f(e),_s=i(e,"P",{});var tE=l(_s);ws=i(tE,"A",{href:!0,rel:!0});var Yx=l(ws);t2=m(Yx,"Multilingual Denoising Pre-training for Neural Machine Translation"),Yx.forEach(t),a2=m(tE,` by Yinhan Liu, Jiatao Gu, Naman Goyal, Xian Li, Sergey Edunov Marjan Ghazvininejad, Mike Lewis, Luke Zettlemoyer.`),tE.forEach(t),Lu=f(e),Yn=i(e,"P",{});var Zx=l(Yn);s2=m(Zx,`The model architecture and pretraining objective is same as BART, but MBart is trained on 25 languages and is intended for supervised and unsupervised machine translation. MBart is one of the first methods for pretraining a complete sequence-to-sequence model by denoising full texts in multiple languages,`),Zx.forEach(t),Su=f(e),Zn=i(e,"P",{});var Kx=l(Zn);i2=m(Kx,"The library provides a version of this model for conditional generation."),Kx.forEach(t),Ru=f(e),Lt=i(e,"P",{});var Mv=l(Lt);r2=m(Mv,"The "),Es=i(Mv,"A",{href:!0,rel:!0});var Ox=l(Es);l2=m(Ox,"mbart-large-en-ro checkpoint"),Ox.forEach(t),n2=m(Mv,` can be used for english -> romanian translation.`),Mv.forEach(t),Iu=f(e),me=i(e,"P",{});var gh=l(me);o2=m(gh,"The "),ys=i(gh,"A",{href:!0,rel:!0});var Wx=l(ys);h2=m(Wx,"mbart-large-cc25"),Wx.forEach(t),f2=m(gh,` checkpoint can be finetuned for other translation and summarization tasks, using code in `),sp=i(gh,"CODE",{});var Jx=l(sp);p2=m(Jx,"examples/pytorch/translation/"),Jx.forEach(t),m2=m(gh,` , but is not very useful without finetuning.`),gh.forEach(t),qu=f(e),je=i(e,"H3",{class:!0});var Pv=l(je);St=i(Pv,"A",{id:!0,class:!0,href:!0});var Qx=l(St);ip=i(Qx,"SPAN",{});var eA=l(ip);d(ks.$$.fragment,eA),eA.forEach(t),Qx.forEach(t),c2=f(Pv),rp=i(Pv,"SPAN",{});var tA=l(rp);u2=m(tA,"ProphetNet"),tA.forEach(t),Pv.forEach(t),Gu=f(e),se=i(e,"DIV",{class:!0});var bh=l(se);Kn=i(bh,"A",{href:!0});var aA=l(Kn);On=i(aA,"IMG",{alt:!0,src:!0}),aA.forEach(t),d2=f(bh),Wn=i(bh,"A",{href:!0});var sA=l(Wn);Jn=i(sA,"IMG",{alt:!0,src:!0}),sA.forEach(t),v2=f(bh),Qn=i(bh,"A",{href:!0});var iA=l(Qn);eo=i(iA,"IMG",{alt:!0,src:!0}),iA.forEach(t),bh.forEach(t),Nu=f(e),xs=i(e,"P",{});var aE=l(xs);As=i(aE,"A",{href:!0,rel:!0});var rA=l(As);g2=m(rA,"ProphetNet: Predicting Future N-gram for Sequence-to-Sequence Pre-training,"),rA.forEach(t),b2=m(aE,` by Yu Yan, Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang, Ming Zhou.`),aE.forEach(t),Bu=f(e),ce=i(e,"P",{});var _h=l(ce);_2=m(_h,"ProphetNet introduces a novel "),lp=i(_h,"EM",{});var lA=l(lp);w2=m(lA,"sequence-to-sequence"),lA.forEach(t),E2=m(_h," pretraining objective, called "),np=i(_h,"EM",{});var nA=l(np);y2=m(nA,"future n-gram prediction"),nA.forEach(t),k2=m(_h,`. 
In future n-gram prediction, the model predicts the next n tokens simultaneously based on previous context tokens at each time step instead of just the single next token. The future n-gram prediction explicitly encourages the model to plan for the future tokens and prevent overfitting on strong local correlations. The model architecture is based on the original Transformer, but replaces the \u201Cstandard\u201D self-attention mechanism in the decoder by a main self-attention mechanism and a self and n-stream (predict) self-attention mechanism.`),_h.forEach(t),Fu=f(e),to=i(e,"P",{});var oA=l(to);x2=m(oA,`The library provides a pre-trained version of this model for conditional generation and a fine-tuned version for summarization.`),oA.forEach(t),Du=f(e),Ue=i(e,"H3",{class:!0});var Lv=l(Ue);Rt=i(Lv,"A",{id:!0,class:!0,href:!0});var hA=l(Rt);op=i(hA,"SPAN",{});var fA=l(op);d(Ts.$$.fragment,fA),fA.forEach(t),hA.forEach(t),A2=f(Lv),hp=i(Lv,"SPAN",{});var pA=l(hp);T2=m(pA,"XLM-ProphetNet"),pA.forEach(t),Lv.forEach(t),Hu=f(e),ie=i(e,"DIV",{class:!0});var wh=l(ie);ao=i(wh,"A",{href:!0});var mA=l(ao);so=i(mA,"IMG",{alt:!0,src:!0}),mA.forEach(t),$2=f(wh),io=i(wh,"A",{href:!0});var cA=l(io);ro=i(cA,"IMG",{alt:!0,src:!0}),cA.forEach(t),M2=f(wh),lo=i(wh,"A",{href:!0});var uA=l(lo);no=i(uA,"IMG",{alt:!0,src:!0}),uA.forEach(t),wh.forEach(t),Cu=f(e),$s=i(e,"P",{});var sE=l($s);Ms=i(sE,"A",{href:!0,rel:!0});var dA=l(Ms);P2=m(dA,"ProphetNet: Predicting Future N-gram for Sequence-to-Sequence Pre-training,"),dA.forEach(t),L2=m(sE,` by Yu Yan, Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang, Ming Zhou.`),sE.forEach(t),zu=f(e),It=i(e,"P",{});var Sv=l(It);S2=m(Sv,`XLM-ProphetNet\u2019s model architecture and pretraining objective is same as ProphetNet, but XLM-ProphetNet was pre-trained on the cross-lingual dataset `),Ps=i(Sv,"A",{href:!0,rel:!0});var vA=l(Ps);R2=m(vA,"XGLUE"),vA.forEach(t),I2=m(Sv,"."),Sv.forEach(t),ju=f(e),oo=i(e,"P",{});var gA=l(oo);q2=m(gA,`The library provides a pre-trained version of this model for multi-lingual conditional generation and fine-tuned versions for headline generation and question generation, respectively.`),gA.forEach(t),Uu=f(e),ho=i(e,"A",{id:!0}),l(ho).forEach(t),Xu=f(e),Xe=i(e,"H2",{class:!0});var Rv=l(Xe);qt=i(Rv,"A",{id:!0,class:!0,href:!0});var bA=l(qt);fp=i(bA,"SPAN",{});var _A=l(fp);d(Ls.$$.fragment,_A),_A.forEach(t),bA.forEach(t),G2=f(Rv),pp=i(Rv,"SPAN",{});var wA=l(pp);N2=m(wA,"Multimodal models"),wA.forEach(t),Rv.forEach(t),Vu=f(e),fo=i(e,"P",{});var EA=l(fo);B2=m(EA,`There is one multimodal model in the library which has not been pretrained in the self-supervised fashion like the others.`),EA.forEach(t),Yu=f(e),Ve=i(e,"H3",{class:!0});var Iv=l(Ve);Gt=i(Iv,"A",{id:!0,class:!0,href:!0});var yA=l(Gt);mp=i(yA,"SPAN",{});var kA=l(mp);d(Ss.$$.fragment,kA),kA.forEach(t),yA.forEach(t),F2=f(Iv),cp=i(Iv,"SPAN",{});var xA=l(cp);D2=m(xA,"MMBT"),xA.forEach(t),Iv.forEach(t),Zu=f(e),Rs=i(e,"P",{});var iE=l(Rs);Is=i(iE,"A",{href:!0,rel:!0});var AA=l(Is);H2=m(AA,"Supervised Multimodal Bitransformers for Classifying Images and Text"),AA.forEach(t),C2=m(iE,`, Douwe Kiela et al.`),iE.forEach(t),Ku=f(e),po=i(e,"P",{});var TA=l(po);z2=m(TA,`A transformers model used in multimodal settings, combining a text and an image to make predictions. 
The transformer model takes as inputs the embeddings of the tokenized text and the final activations of a pretrained on images resnet (after the pooling layer) that goes through a linear layer (to go from number of features at the end of the resnet to the hidden state dimension of the transformer).`),TA.forEach(t),Ou=f(e),mo=i(e,"P",{});var $A=l(mo);j2=m($A,`The different inputs are concatenated, and on top of the positional embeddings, a segment embedding is added to let the model know which part of the input vector corresponds to the text and which to the image.`),$A.forEach(t),Wu=f(e),co=i(e,"P",{});var MA=l(co);U2=m(MA,"The pretrained model only works for classification."),MA.forEach(t),Ju=f(e),uo=i(e,"A",{id:!0}),l(uo).forEach(t),Qu=f(e),Ye=i(e,"H2",{class:!0});var qv=l(Ye);Nt=i(qv,"A",{id:!0,class:!0,href:!0});var PA=l(Nt);up=i(PA,"SPAN",{});var LA=l(up);d(qs.$$.fragment,LA),LA.forEach(t),PA.forEach(t),X2=f(qv),dp=i(qv,"SPAN",{});var SA=l(dp);V2=m(SA,"Retrieval-based models"),SA.forEach(t),qv.forEach(t),ed=f(e),vo=i(e,"P",{});var RA=l(vo);Y2=m(RA,"Some models use documents retrieval during (pre)training and inference for open-domain question answering, for example."),RA.forEach(t),td=f(e),Ze=i(e,"H3",{class:!0});var Gv=l(Ze);Bt=i(Gv,"A",{id:!0,class:!0,href:!0});var IA=l(Bt);vp=i(IA,"SPAN",{});var qA=l(vp);d(Gs.$$.fragment,qA),qA.forEach(t),IA.forEach(t),Z2=f(Gv),gp=i(Gv,"SPAN",{});var GA=l(gp);K2=m(GA,"DPR"),GA.forEach(t),Gv.forEach(t),ad=f(e),re=i(e,"DIV",{class:!0});var Eh=l(re);go=i(Eh,"A",{href:!0});var NA=l(go);bo=i(NA,"IMG",{alt:!0,src:!0}),NA.forEach(t),O2=f(Eh),_o=i(Eh,"A",{href:!0});var BA=l(_o);wo=i(BA,"IMG",{alt:!0,src:!0}),BA.forEach(t),W2=f(Eh),Eo=i(Eh,"A",{href:!0});var FA=l(Eo);yo=i(FA,"IMG",{alt:!0,src:!0}),FA.forEach(t),Eh.forEach(t),sd=f(e),Ns=i(e,"P",{});var rE=l(Ns);Bs=i(rE,"A",{href:!0,rel:!0});var DA=l(Bs);J2=m(DA,"Dense Passage Retrieval for Open-Domain Question Answering"),DA.forEach(t),Q2=m(rE,`, Vladimir Karpukhin et al.`),rE.forEach(t),id=f(e),ko=i(e,"P",{});var HA=l(ko);e0=m(HA,`Dense Passage Retrieval (DPR) - is a set of tools and models for state-of-the-art open-domain question-answering research.`),HA.forEach(t),rd=f(e),xo=i(e,"P",{});var CA=l(xo);t0=m(CA,"DPR consists in three models:"),CA.forEach(t),ld=f(e),ue=i(e,"UL",{});var yh=l(ue);bp=i(yh,"LI",{});var zA=l(bp);a0=m(zA,"Question encoder: encode questions as vectors"),zA.forEach(t),s0=f(yh),_p=i(yh,"LI",{});var jA=l(_p);i0=m(jA,"Context encoder: encode contexts as vectors"),jA.forEach(t),r0=f(yh),wp=i(yh,"LI",{});var UA=l(wp);l0=m(UA,`Reader: extract the answer of the questions inside retrieved contexts, along with a relevance score (high if the inferred span actually answers the question).`),UA.forEach(t),yh.forEach(t),nd=f(e),Ao=i(e,"P",{});var XA=l(Ao);n0=m(XA,`DPR\u2019s pipeline (not implemented yet) uses a retrieval step to find the top k contexts given a certain question, and then it calls the reader with the question and the retrieved documents to get the answer.`),XA.forEach(t),od=f(e),Ke=i(e,"H3",{class:!0});var Nv=l(Ke);Ft=i(Nv,"A",{id:!0,class:!0,href:!0});var VA=l(Ft);Ep=i(VA,"SPAN",{});var YA=l(Ep);d(Fs.$$.fragment,YA),YA.forEach(t),VA.forEach(t),o0=f(Nv),yp=i(Nv,"SPAN",{});var ZA=l(yp);h0=m(ZA,"RAG"),ZA.forEach(t),Nv.forEach(t),hd=f(e),Oe=i(e,"DIV",{class:!0});var Bv=l(Oe);To=i(Bv,"A",{href:!0});var KA=l(To);$o=i(KA,"IMG",{alt:!0,src:!0}),KA.forEach(t),f0=f(Bv),Mo=i(Bv,"A",{href:!0});var OA=l(Mo);Po=i(OA,"IMG",{alt:!0,src:!0}),OA.forEach(t),Bv.forEach(t),fd=f(e),Ds=i(e,"P",{});var 
lE=l(Ds);Hs=i(lE,"A",{href:!0,rel:!0});var WA=l(Hs);p0=m(WA,"Retrieval-Augmented Generation for Knowledge-Intensive NLP Tasks"),WA.forEach(t),m0=m(lE,`, Patrick Lewis, Ethan Perez, Aleksandara Piktus, Fabio Petroni, Vladimir Karpukhin, Naman Goyal, Heinrich K\xFCttler, Mike Lewis, Wen-tau Yih, Tim Rockt\xE4schel, Sebastian Riedel, Douwe Kiela`),lE.forEach(t),pd=f(e),Lo=i(e,"P",{});var JA=l(Lo);c0=m(JA,`Retrieval-augmented generation (\u201CRAG\u201D) models combine the powers of pretrained dense retrieval (DPR) and Seq2Seq models. RAG models retrieve docs, pass them to a seq2seq model, then marginalize to generate outputs. The retriever and seq2seq modules are initialized from pretrained models, and fine-tuned jointly, allowing both retrieval and generation to adapt to downstream tasks.`),JA.forEach(t),md=f(e),So=i(e,"P",{});var QA=l(So);u0=m(QA,"The two models RAG-Token and RAG-Sequence are available for generation."),QA.forEach(t),cd=f(e),We=i(e,"H2",{class:!0});var Fv=l(We);Dt=i(Fv,"A",{id:!0,class:!0,href:!0});var e9=l(Dt);kp=i(e9,"SPAN",{});var t9=l(kp);d(Cs.$$.fragment,t9),t9.forEach(t),e9.forEach(t),d0=f(Fv),xp=i(Fv,"SPAN",{});var a9=l(xp);v0=m(a9,"More technical aspects"),a9.forEach(t),Fv.forEach(t),ud=f(e),Je=i(e,"H3",{class:!0});var Dv=l(Je);Ht=i(Dv,"A",{id:!0,class:!0,href:!0});var s9=l(Ht);Ap=i(s9,"SPAN",{});var i9=l(Ap);d(zs.$$.fragment,i9),i9.forEach(t),s9.forEach(t),g0=f(Dv),Tp=i(Dv,"SPAN",{});var r9=l(Tp);b0=m(r9,"Full vs sparse attention"),r9.forEach(t),Dv.forEach(t),dd=f(e),Ro=i(e,"P",{});var l9=l(Ro);_0=m(l9,`Most transformer models use full attention in the sense that the attention matrix is square. It can be a big computational bottleneck when you have long texts. Longformer and reformer are models that try to be more efficient and use a sparse version of the attention matrix to speed up training.`),l9.forEach(t),vd=f(e),Io=i(e,"A",{id:!0}),l(Io).forEach(t),gd=f(e),qo=i(e,"P",{});var n9=l(qo);$p=i(n9,"STRONG",{});var o9=l($p);w0=m(o9,"LSH attention"),o9.forEach(t),n9.forEach(t),bd=f(e),js=i(e,"P",{});var nE=l(js);Go=i(nE,"A",{href:!0});var h9=l(Go);E0=m(h9,"Reformer"),h9.forEach(t),y0=m(nE,` uses LSH attention. In the softmax(QK^t), only the biggest elements (in the softmax dimension) of the matrix QK^t are going to give useful contributions. So for each query q in Q, we can consider only the keys k in K that are close to q. A hash function is used to determine if q and k are close. The attention mask is modified to mask the current token (except at the first position), because it will give a query and a key equal (so very similar to each other). Since the hash can be a bit random, several hash functions are used in practice (determined by a n_rounds parameter) and then are averaged together.`),nE.forEach(t),_d=f(e),No=i(e,"A",{id:!0}),l(No).forEach(t),wd=f(e),Bo=i(e,"P",{});var f9=l(Bo);Mp=i(f9,"STRONG",{});var p9=l(Mp);k0=m(p9,"Local attention"),p9.forEach(t),f9.forEach(t),Ed=f(e),Us=i(e,"P",{});var oE=l(Us);Fo=i(oE,"A",{href:!0});var m9=l(Fo);x0=m(m9,"Longformer"),m9.forEach(t),A0=m(oE,` uses local attention: often, the local context (e.g., what are the two tokens to the left and right?) is enough to take action for a given token. 
Also, by stacking attention layers that have a small window, the last layer will have a receptive field of more than just the tokens in the window, allowing them to build a representation of the whole sentence.`),oE.forEach(t),yd=f(e),Do=i(e,"P",{});var c9=l(Do);T0=m(c9,`Some preselected input tokens are also given global attention: for those few tokens, the attention matrix can access all tokens and this process is symmetric: all other tokens have access to those specific tokens (on top of the ones in their local window). This is shown in Figure 2d of the paper, see below for a sample attention mask:`),c9.forEach(t),kd=f(e),Ct=i(e,"IMG",{scale:!0,align:!0,src:!0}),xd=f(e),Ho=i(e,"P",{});var u9=l(Ho);$0=m(u9,`Using those attention matrices with less parameters then allows the model to have inputs having a bigger sequence length.`),u9.forEach(t),Ad=f(e),Qe=i(e,"H3",{class:!0});var Hv=l(Qe);zt=i(Hv,"A",{id:!0,class:!0,href:!0});var d9=l(zt);Pp=i(d9,"SPAN",{});var v9=l(Pp);d(Xs.$$.fragment,v9),v9.forEach(t),d9.forEach(t),M0=f(Hv),Lp=i(Hv,"SPAN",{});var g9=l(Lp);P0=m(g9,"Other tricks"),g9.forEach(t),Hv.forEach(t),Td=f(e),Co=i(e,"A",{id:!0}),l(Co).forEach(t),$d=f(e),zo=i(e,"P",{});var b9=l(zo);Sp=i(b9,"STRONG",{});var _9=l(Sp);L0=m(_9,"Axial positional encodings"),_9.forEach(t),b9.forEach(t),Md=f(e),E=i(e,"P",{});var y=l(E);jo=i(y,"A",{href:!0});var w9=l(jo);S0=m(w9,"Reformer"),w9.forEach(t),R0=m(y,` uses axial positional encodings: in traditional transformer models, the positional encoding E is a matrix of size `),Pd=ne(y),Ld=m(y," by "),Sd=ne(y),Rd=m(y,", "),Id=ne(y),qd=m(y," being the sequence length and "),Gd=ne(y),Nd=m(y,` the dimension of the hidden state. If you have very long texts, this matrix can be huge and take way too much space on the GPU. To alleviate that, axial positional encodings consist of factorizing that big matrix E in two smaller matrices E1 and E2, with dimensions `),Bd=ne(y),Fd=m(y," and "),Dd=ne(y),Hd=m(y,", such that "),Cd=ne(y),zd=m(y,` and `),jd=ne(y),Ud=m(y,` (with the product for the lengths, this ends up being way smaller). 
The embedding for time step `),Xd=ne(y),Vd=m(y," in E is obtained by concatenating the embeddings for timestep "),Yd=ne(y),Zd=m(y," in E1 and "),Kd=ne(y),Od=m(y,` in E2.`),y.forEach(t),this.h()},h(){r(k,"name","hf:doc:metadata"),r(k,"content",JSON.stringify(H9)),r(A,"id","summary-of-the-models"),r(A,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r(A,"href","#summary-of-the-models"),r(x,"class","relative group"),r(Kt,"href","https://arxiv.org/abs/1706.03762"),r(Kt,"rel","nofollow"),r(Ot,"href","http://nlp.seas.harvard.edu/2018/04/03/attention.html"),r(Ot,"rel","nofollow"),r(Wt,"href","https://huggingface.co/models"),r(Wt,"rel","nofollow"),r(Zs,"href","#autoregressive-models"),r(Ks,"href","#autoencoding-models"),r(Os,"href","#seq-to-seq-models"),r(Ws,"href","#multimodal-models"),r(Js,"href","#retrieval-based-models"),r(P,"width","560"),r(P,"height","315"),c(P.src,fE="https://www.youtube.com/embed/H39Z_720T5s")||r(P,"src",fE),r(P,"title","YouTube video player"),r(P,"frameborder","0"),r(P,"allow",`accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture`),P.allowFullscreen=!0,r(ii,"id","autoregressive-models"),r(et,"id","decoders-or-autoregressive-models"),r(et,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r(et,"href","#decoders-or-autoregressive-models"),r(ge,"class","relative group"),r(tt,"id","original-gpt"),r(tt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r(tt,"href","#original-gpt"),r(be,"class","relative group"),r(ni,"alt","Models"),c(ni.src,pE="https://img.shields.io/badge/All_model_pages-openai--gpt-blueviolet")||r(ni,"src",pE),r(li,"href","https://huggingface.co/models?filter=openai-gpt"),r(hi,"alt","Doc"),c(hi.src,mE="https://img.shields.io/badge/Model_documentation-openai--gpt-blueviolet")||r(hi,"src",mE),r(oi,"href","model_doc/openai-gpt"),r(pi,"alt","Spaces"),c(pi.src,cE="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue")||r(pi,"src",cE),r(fi,"href","https://huggingface.co/spaces/docs-demos/openai-gpt"),r(q,"class","flex flex-wrap space-x-1"),r(aa,"href","https://cdn.openai.com/research-covers/language-unsupervised/language_understanding_paper.pdf"),r(aa,"rel","nofollow"),r(at,"id","gpt2"),r(at,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r(at,"href","#gpt2"),r(_e,"class","relative group"),r(di,"alt","Models"),c(di.src,uE="https://img.shields.io/badge/All_model_pages-gpt2-blueviolet")||r(di,"src",uE),r(ui,"href","https://huggingface.co/models?filter=gpt2"),r(gi,"alt","Doc"),c(gi.src,dE="https://img.shields.io/badge/Model_documentation-gpt2-blueviolet")||r(gi,"src",dE),r(vi,"href","model_doc/gpt2"),r(_i,"alt","Spaces"),c(_i.src,vE="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue")||r(_i,"src",vE),r(bi,"href","https://huggingface.co/spaces/docs-demos/gpt2"),r(G,"class","flex flex-wrap space-x-1"),r(ra,"href","https://d4mucfpksywv.cloudfront.net/better-language-models/language_models_are_unsupervised_multitask_learners.pdf"),r(ra,"rel","nofollow"),r(st,"id","ctrl"),r(st,"class","header-link block 
pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r(st,"href","#ctrl"),r(we,"class","relative group"),r(ki,"alt","Models"),c(ki.src,gE="https://img.shields.io/badge/All_model_pages-ctrl-blueviolet")||r(ki,"src",gE),r(yi,"href","https://huggingface.co/models?filter=ctrl"),r(Ai,"alt","Doc"),c(Ai.src,bE="https://img.shields.io/badge/Model_documentation-ctrl-blueviolet")||r(Ai,"src",bE),r(xi,"href","model_doc/ctrl"),r($i,"alt","Spaces"),c($i.src,_E="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue")||r($i,"src",_E),r(Ti,"href","https://huggingface.co/spaces/docs-demos/tiny-ctrl"),r(N,"class","flex flex-wrap space-x-1"),r(oa,"href","https://arxiv.org/abs/1909.05858"),r(oa,"rel","nofollow"),r(it,"id","transformerxl"),r(it,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r(it,"href","#transformerxl"),r(Ee,"class","relative group"),r(Si,"alt","Models"),c(Si.src,wE="https://img.shields.io/badge/All_model_pages-transfo--xl-blueviolet")||r(Si,"src",wE),r(Li,"href","https://huggingface.co/models?filter=transfo-xl"),r(Ii,"alt","Doc"),c(Ii.src,EE="https://img.shields.io/badge/Model_documentation-transfo--xl-blueviolet")||r(Ii,"src",EE),r(Ri,"href","model_doc/transfo-xl"),r(Gi,"alt","Spaces"),c(Gi.src,yE="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue")||r(Gi,"src",yE),r(qi,"href","https://huggingface.co/spaces/docs-demos/transfo-xl-wt103"),r(B,"class","flex flex-wrap space-x-1"),r(pa,"href","https://arxiv.org/abs/1901.02860"),r(pa,"rel","nofollow"),r(Hi,"id","reformer"),r(rt,"id","reformer"),r(rt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r(rt,"href","#reformer"),r(ye,"class","relative group"),r(zi,"alt","Models"),c(zi.src,kE="https://img.shields.io/badge/All_model_pages-reformer-blueviolet")||r(zi,"src",kE),r(Ci,"href","https://huggingface.co/models?filter=reformer"),r(Ui,"alt","Doc"),c(Ui.src,xE="https://img.shields.io/badge/Model_documentation-reformer-blueviolet")||r(Ui,"src",xE),r(ji,"href","model_doc/reformer"),r(Vi,"alt","Spaces"),c(Vi.src,AE="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue")||r(Vi,"src",AE),r(Xi,"href","https://huggingface.co/spaces/docs-demos/reformer-crime-and-punishment"),r(F,"class","flex flex-wrap space-x-1"),r(ua,"href","https://arxiv.org/abs/2001.04451"),r(ua,"rel","nofollow"),r(Zi,"href","#axial-pos-encoding"),r(Ki,"href","#lsh-attention"),r(nt,"id","xlnet"),r(nt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r(nt,"href","#xlnet"),r(ke,"class","relative group"),r(Qi,"alt","Models"),c(Qi.src,TE="https://img.shields.io/badge/All_model_pages-xlnet-blueviolet")||r(Qi,"src",TE),r(Ji,"href","https://huggingface.co/models?filter=xlnet"),r(tr,"alt","Doc"),c(tr.src,$E="https://img.shields.io/badge/Model_documentation-xlnet-blueviolet")||r(tr,"src",$E),r(er,"href","model_doc/xlnet"),r(sr,"alt","Spaces"),c(sr.src,ME="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue")||r(sr,"src",ME),r(ar,"href","https://huggingface.co/spaces/docs-demos/xlnet-base-cased"),r(D,"class","flex flex-wrap 
space-x-1"),r(_a,"href","https://arxiv.org/abs/1906.08237"),r(_a,"rel","nofollow"),r(nr,"id","autoencoding-models"),r(ot,"id","encoders-or-autoencoding-models"),r(ot,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r(ot,"href","#encoders-or-autoencoding-models"),r(xe,"class","relative group"),r(ht,"id","bert"),r(ht,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r(ht,"href","#bert"),r(Ae,"class","relative group"),r(fr,"alt","Models"),c(fr.src,PE="https://img.shields.io/badge/All_model_pages-bert-blueviolet")||r(fr,"src",PE),r(hr,"href","https://huggingface.co/models?filter=bert"),r(mr,"alt","Doc"),c(mr.src,LE="https://img.shields.io/badge/Model_documentation-bert-blueviolet")||r(mr,"src",LE),r(pr,"href","model_doc/bert"),r(ur,"alt","Spaces"),c(ur.src,SE="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue")||r(ur,"src",SE),r(cr,"href","https://huggingface.co/spaces/docs-demos/bert-base-uncased"),r(H,"class","flex flex-wrap space-x-1"),r(xa,"href","https://arxiv.org/abs/1810.04805"),r(xa,"rel","nofollow"),r(ft,"id","albert"),r(ft,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r(ft,"href","#albert"),r(Te,"class","relative group"),r(_r,"alt","Models"),c(_r.src,RE="https://img.shields.io/badge/All_model_pages-albert-blueviolet")||r(_r,"src",RE),r(br,"href","https://huggingface.co/models?filter=albert"),r(Er,"alt","Doc"),c(Er.src,IE="https://img.shields.io/badge/Model_documentation-albert-blueviolet")||r(Er,"src",IE),r(wr,"href","model_doc/albert"),r(kr,"alt","Spaces"),c(kr.src,qE="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue")||r(kr,"src",qE),r(yr,"href","https://huggingface.co/spaces/docs-demos/albert-base-v2"),r(C,"class","flex flex-wrap space-x-1"),r($a,"href","https://arxiv.org/abs/1909.11942"),r($a,"rel","nofollow"),r(pt,"id","roberta"),r(pt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r(pt,"href","#roberta"),r($e,"class","relative group"),r($r,"alt","Models"),c($r.src,GE="https://img.shields.io/badge/All_model_pages-roberta-blueviolet")||r($r,"src",GE),r(Tr,"href","https://huggingface.co/models?filter=roberta"),r(Pr,"alt","Doc"),c(Pr.src,NE="https://img.shields.io/badge/Model_documentation-roberta-blueviolet")||r(Pr,"src",NE),r(Mr,"href","model_doc/roberta"),r(Sr,"alt","Spaces"),c(Sr.src,BE="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue")||r(Sr,"src",BE),r(Lr,"href","https://huggingface.co/spaces/docs-demos/roberta-base"),r(z,"class","flex flex-wrap space-x-1"),r(La,"href","https://arxiv.org/abs/1907.11692"),r(La,"rel","nofollow"),r(mt,"id","distilbert"),r(mt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r(mt,"href","#distilbert"),r(Me,"class","relative 
group"),r(Gr,"alt","Models"),c(Gr.src,FE="https://img.shields.io/badge/All_model_pages-distilbert-blueviolet")||r(Gr,"src",FE),r(qr,"href","https://huggingface.co/models?filter=distilbert"),r(Br,"alt","Doc"),c(Br.src,DE="https://img.shields.io/badge/Model_documentation-distilbert-blueviolet")||r(Br,"src",DE),r(Nr,"href","model_doc/distilbert"),r(Dr,"alt","Spaces"),c(Dr.src,HE="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue")||r(Dr,"src",HE),r(Fr,"href","https://huggingface.co/spaces/docs-demos/distilbert-base-uncased"),r(j,"class","flex flex-wrap space-x-1"),r(Ia,"href","https://arxiv.org/abs/1910.01108"),r(Ia,"rel","nofollow"),r(ct,"id","convbert"),r(ct,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r(ct,"href","#convbert"),r(Pe,"class","relative group"),r(jr,"alt","Models"),c(jr.src,CE="https://img.shields.io/badge/All_model_pages-convbert-blueviolet")||r(jr,"src",CE),r(zr,"href","https://huggingface.co/models?filter=convbert"),r(Xr,"alt","Doc"),c(Xr.src,zE="https://img.shields.io/badge/Model_documentation-convbert-blueviolet")||r(Xr,"src",zE),r(Ur,"href","model_doc/convbert"),r(Yr,"alt","Spaces"),c(Yr.src,jE="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue")||r(Yr,"src",jE),r(Vr,"href","https://huggingface.co/spaces/docs-demos/conv-bert-base"),r(U,"class","flex flex-wrap space-x-1"),r(Na,"href","https://arxiv.org/abs/2008.02496"),r(Na,"rel","nofollow"),r(ut,"id","xlm"),r(ut,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r(ut,"href","#xlm"),r(Le,"class","relative group"),r(Wr,"alt","Models"),c(Wr.src,UE="https://img.shields.io/badge/All_model_pages-xlm-blueviolet")||r(Wr,"src",UE),r(Or,"href","https://huggingface.co/models?filter=xlm"),r(Qr,"alt","Doc"),c(Qr.src,XE="https://img.shields.io/badge/Model_documentation-xlm-blueviolet")||r(Qr,"src",XE),r(Jr,"href","model_doc/xlm"),r(tl,"alt","Spaces"),c(tl.src,VE="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue")||r(tl,"src",VE),r(el,"href","https://huggingface.co/spaces/docs-demos/xlm-mlm-en-2048"),r(X,"class","flex flex-wrap space-x-1"),r(Da,"href","https://arxiv.org/abs/1901.07291"),r(Da,"rel","nofollow"),r(dt,"id","xlmroberta"),r(dt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r(dt,"href","#xlmroberta"),r(Se,"class","relative group"),r(rl,"alt","Models"),c(rl.src,YE="https://img.shields.io/badge/All_model_pages-xlm--roberta-blueviolet")||r(rl,"src",YE),r(il,"href","https://huggingface.co/models?filter=xlm-roberta"),r(nl,"alt","Doc"),c(nl.src,ZE="https://img.shields.io/badge/Model_documentation-xlm--roberta-blueviolet")||r(nl,"src",ZE),r(ll,"href","model_doc/xlm-roberta"),r(hl,"alt","Spaces"),c(hl.src,KE="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue")||r(hl,"src",KE),r(ol,"href","https://huggingface.co/spaces/docs-demos/xlm-roberta-base"),r(V,"class","flex flex-wrap space-x-1"),r(za,"href","https://arxiv.org/abs/1911.02116"),r(za,"rel","nofollow"),r(vt,"id","flaubert"),r(vt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full"),r(vt,"href","#flaubert"),r(Re,"class","relative group"),r(cl,"alt","Models"),c(cl.src,OE="https://img.shields.io/badge/All_model_pages-flaubert-blueviolet")||r(cl,"src",OE),r(ml,"href","https://huggingface.co/models?filter=flaubert"),r(dl,"alt","Doc"),c(dl.src,WE="https://img.shields.io/badge/Model_documentation-flaubert-blueviolet")||r(dl,"src",WE),r(ul,"href","model_doc/flaubert"),r(gl,"alt","Spaces"),c(gl.src,JE="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue")||r(gl,"src",JE),r(vl,"href","https://huggingface.co/spaces/docs-demos/flaubert_small_cased"),r(Y,"class","flex flex-wrap space-x-1"),r(Xa,"href","https://arxiv.org/abs/1912.05372"),r(Xa,"rel","nofollow"),r(gt,"id","electra"),r(gt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r(gt,"href","#electra"),r(Ie,"class","relative group"),r(El,"alt","Models"),c(El.src,QE="https://img.shields.io/badge/All_model_pages-electra-blueviolet")||r(El,"src",QE),r(wl,"href","https://huggingface.co/models?filter=electra"),r(kl,"alt","Doc"),c(kl.src,ey="https://img.shields.io/badge/Model_documentation-electra-blueviolet")||r(kl,"src",ey),r(yl,"href","model_doc/electra"),r(Al,"alt","Spaces"),c(Al.src,ty="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue")||r(Al,"src",ty),r(xl,"href","https://huggingface.co/spaces/docs-demos/electra_large_discriminator_squad2_512"),r(Z,"class","flex flex-wrap space-x-1"),r(Za,"href","https://arxiv.org/abs/2003.10555"),r(Za,"rel","nofollow"),r(bt,"id","funnel-transformer"),r(bt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r(bt,"href","#funnel-transformer"),r(qe,"class","relative group"),r(Pl,"alt","Models"),c(Pl.src,ay="https://img.shields.io/badge/All_model_pages-funnel-blueviolet")||r(Pl,"src",ay),r(Ml,"href","https://huggingface.co/models?filter=funnel"),r(Sl,"alt","Doc"),c(Sl.src,sy="https://img.shields.io/badge/Model_documentation-funnel-blueviolet")||r(Sl,"src",sy),r(Ll,"href","model_doc/funnel"),r(Il,"alt","Spaces"),c(Il.src,iy="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue")||r(Il,"src",iy),r(Rl,"href","https://huggingface.co/spaces/docs-demos/funnel-transformer-small"),r(K,"class","flex flex-wrap space-x-1"),r(Wa,"href","https://arxiv.org/abs/2006.03236"),r(Wa,"rel","nofollow"),r(Fl,"id","longformer"),r(_t,"id","longformer"),r(_t,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r(_t,"href","#longformer"),r(Ge,"class","relative group"),r(Hl,"alt","Models"),c(Hl.src,ry="https://img.shields.io/badge/All_model_pages-longformer-blueviolet")||r(Hl,"src",ry),r(Dl,"href","https://huggingface.co/models?filter=longformer"),r(zl,"alt","Doc"),c(zl.src,ly="https://img.shields.io/badge/Model_documentation-longformer-blueviolet")||r(zl,"src",ly),r(Cl,"href","model_doc/longformer"),r(Ul,"alt","Spaces"),c(Ul.src,ny="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue")||r(Ul,"src",ny),r(jl,"href","https://huggingface.co/spaces/docs-demos/longformer-base-4096-finetuned-squadv1"),r(O,"class","flex flex-wrap 
space-x-1"),r(es,"href","https://arxiv.org/abs/2004.05150"),r(es,"rel","nofollow"),r(Xl,"href","#local-attention"),r(Zl,"id","seq-to-seq-models"),r(yt,"id","sequencetosequence-models"),r(yt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r(yt,"href","#sequencetosequence-models"),r(Ne,"class","relative group"),r(kt,"id","bart"),r(kt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r(kt,"href","#bart"),r(Be,"class","relative group"),r(Wl,"alt","Models"),c(Wl.src,oy="https://img.shields.io/badge/All_model_pages-bart-blueviolet")||r(Wl,"src",oy),r(Ol,"href","https://huggingface.co/models?filter=bart"),r(Ql,"alt","Doc"),c(Ql.src,hy="https://img.shields.io/badge/Model_documentation-bart-blueviolet")||r(Ql,"src",hy),r(Jl,"href","model_doc/bart"),r(tn,"alt","Spaces"),c(tn.src,fy="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue")||r(tn,"src",fy),r(en,"href","https://huggingface.co/spaces/docs-demos/bart-large-mnli"),r(W,"class","flex flex-wrap space-x-1"),r(rs,"href","https://arxiv.org/abs/1910.13461"),r(rs,"rel","nofollow"),r(xt,"id","pegasus"),r(xt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r(xt,"href","#pegasus"),r(Fe,"class","relative group"),r(ln,"alt","Models"),c(ln.src,py="https://img.shields.io/badge/All_model_pages-pegasus-blueviolet")||r(ln,"src",py),r(rn,"href","https://huggingface.co/models?filter=pegasus"),r(on,"alt","Doc"),c(on.src,my="https://img.shields.io/badge/Model_documentation-pegasus-blueviolet")||r(on,"src",my),r(nn,"href","model_doc/pegasus"),r(fn,"alt","Spaces"),c(fn.src,cy="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue")||r(fn,"src",cy),r(hn,"href","https://huggingface.co/spaces/docs-demos/pegasus_paraphrase"),r(J,"class","flex flex-wrap space-x-1"),r(os,"href","https://arxiv.org/pdf/1912.08777.pdf"),r(os,"rel","nofollow"),r(Tt,"id","marianmt"),r(Tt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r(Tt,"href","#marianmt"),r(De,"class","relative group"),r(dn,"alt","Models"),c(dn.src,uy="https://img.shields.io/badge/All_model_pages-marian-blueviolet")||r(dn,"src",uy),r(un,"href","https://huggingface.co/models?filter=marian"),r(gn,"alt","Doc"),c(gn.src,dy="https://img.shields.io/badge/Model_documentation-marian-blueviolet")||r(gn,"src",dy),r(vn,"href","model_doc/marian"),r(_n,"alt","Spaces"),c(_n.src,vy="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue")||r(_n,"src",vy),r(bn,"href","https://huggingface.co/spaces/docs-demos/opus-mt-zh-en"),r(Q,"class","flex flex-wrap space-x-1"),r(ps,"href","https://arxiv.org/abs/1804.00344"),r(ps,"rel","nofollow"),r($t,"id","t5"),r($t,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r($t,"href","#t5"),r(He,"class","relative 
group"),r(kn,"alt","Models"),c(kn.src,gy="https://img.shields.io/badge/All_model_pages-t5-blueviolet")||r(kn,"src",gy),r(yn,"href","https://huggingface.co/models?filter=t5"),r(An,"alt","Doc"),c(An.src,by="https://img.shields.io/badge/Model_documentation-t5-blueviolet")||r(An,"src",by),r(xn,"href","model_doc/t5"),r($n,"alt","Spaces"),c($n.src,_y="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue")||r($n,"src",_y),r(Tn,"href","https://huggingface.co/spaces/docs-demos/t5-base"),r(ee,"class","flex flex-wrap space-x-1"),r(us,"href","https://arxiv.org/abs/1910.10683"),r(us,"rel","nofollow"),r(Mt,"id","mt5"),r(Mt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r(Mt,"href","#mt5"),r(Ce,"class","relative group"),r(qn,"alt","Models"),c(qn.src,wy="https://img.shields.io/badge/All_model_pages-mt5-blueviolet")||r(qn,"src",wy),r(In,"href","https://huggingface.co/models?filter=mt5"),r(Nn,"alt","Doc"),c(Nn.src,Ey="https://img.shields.io/badge/Model_documentation-mt5-blueviolet")||r(Nn,"src",Ey),r(Gn,"href","model_doc/mt5"),r(Fn,"alt","Spaces"),c(Fn.src,yy="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue")||r(Fn,"src",yy),r(Bn,"href","https://huggingface.co/spaces/docs-demos/mt5-small-finetuned-arxiv-cs-finetuned-arxiv-cs-full"),r(te,"class","flex flex-wrap space-x-1"),r(gs,"href","https://arxiv.org/abs/2010.11934"),r(gs,"rel","nofollow"),r(Pt,"id","mbart"),r(Pt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r(Pt,"href","#mbart"),r(ze,"class","relative group"),r(zn,"alt","Models"),c(zn.src,ky="https://img.shields.io/badge/All_model_pages-mbart-blueviolet")||r(zn,"src",ky),r(Cn,"href","https://huggingface.co/models?filter=mbart"),r(Un,"alt","Doc"),c(Un.src,xy="https://img.shields.io/badge/Model_documentation-mbart-blueviolet")||r(Un,"src",xy),r(jn,"href","model_doc/mbart"),r(Vn,"alt","Spaces"),c(Vn.src,Ay="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue")||r(Vn,"src",Ay),r(Xn,"href","https://huggingface.co/spaces/docs-demos/mbart-large-50-one-to-many-mmt"),r(ae,"class","flex flex-wrap space-x-1"),r(ws,"href","https://arxiv.org/abs/2001.08210"),r(ws,"rel","nofollow"),r(Es,"href","https://huggingface.co/facebook/mbart-large-en-ro"),r(Es,"rel","nofollow"),r(ys,"href","https://huggingface.co/facebook/mbart-large-cc25"),r(ys,"rel","nofollow"),r(St,"id","prophetnet"),r(St,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r(St,"href","#prophetnet"),r(je,"class","relative group"),r(On,"alt","Models"),c(On.src,Ty="https://img.shields.io/badge/All_model_pages-prophetnet-blueviolet")||r(On,"src",Ty),r(Kn,"href","https://huggingface.co/models?filter=prophetnet"),r(Jn,"alt","Doc"),c(Jn.src,$y="https://img.shields.io/badge/Model_documentation-prophetnet-blueviolet")||r(Jn,"src",$y),r(Wn,"href","model_doc/prophetnet"),r(eo,"alt","Spaces"),c(eo.src,My="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue")||r(eo,"src",My),r(Qn,"href","https://huggingface.co/spaces/docs-demos/prophetnet-large-uncased"),r(se,"class","flex flex-wrap space-x-1"),r(As,"href","https://arxiv.org/abs/2001.04063"),r(As,"rel","nofollow"),r(Rt,"id","xlmprophetnet"),r(Rt,"class","header-link 
block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r(Rt,"href","#xlmprophetnet"),r(Ue,"class","relative group"),r(so,"alt","Models"),c(so.src,Py="https://img.shields.io/badge/All_model_pages-xprophetnet-blueviolet")||r(so,"src",Py),r(ao,"href","https://huggingface.co/models?filter=xprophetnet"),r(ro,"alt","Doc"),c(ro.src,Ly="https://img.shields.io/badge/Model_documentation-xprophetnet-blueviolet")||r(ro,"src",Ly),r(io,"href","model_doc/xlm-prophetnet"),r(no,"alt","Spaces"),c(no.src,Sy="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue")||r(no,"src",Sy),r(lo,"href","https://huggingface.co/spaces/docs-demos/xprophetnet-large-wiki100-cased-xglue-ntg"),r(ie,"class","flex flex-wrap space-x-1"),r(Ms,"href","https://arxiv.org/abs/2001.04063"),r(Ms,"rel","nofollow"),r(Ps,"href","https://arxiv.org/abs/2004.01401"),r(Ps,"rel","nofollow"),r(ho,"id","multimodal-models"),r(qt,"id","multimodal-models"),r(qt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r(qt,"href","#multimodal-models"),r(Xe,"class","relative group"),r(Gt,"id","mmbt"),r(Gt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r(Gt,"href","#mmbt"),r(Ve,"class","relative group"),r(Is,"href","https://arxiv.org/abs/1909.02950"),r(Is,"rel","nofollow"),r(uo,"id","retrieval-based-models"),r(Nt,"id","retrievalbased-models"),r(Nt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r(Nt,"href","#retrievalbased-models"),r(Ye,"class","relative group"),r(Bt,"id","dpr"),r(Bt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r(Bt,"href","#dpr"),r(Ze,"class","relative group"),r(bo,"alt","Models"),c(bo.src,Ry="https://img.shields.io/badge/All_model_pages-dpr-blueviolet")||r(bo,"src",Ry),r(go,"href","https://huggingface.co/models?filter=dpr"),r(wo,"alt","Doc"),c(wo.src,Iy="https://img.shields.io/badge/Model_documentation-dpr-blueviolet")||r(wo,"src",Iy),r(_o,"href","model_doc/dpr"),r(yo,"alt","Spaces"),c(yo.src,qy="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue")||r(yo,"src",qy),r(Eo,"href","https://huggingface.co/spaces/docs-demos/dpr-question_encoder-bert-base-multilingual"),r(re,"class","flex flex-wrap space-x-1"),r(Bs,"href","https://arxiv.org/abs/2004.04906"),r(Bs,"rel","nofollow"),r(Ft,"id","rag"),r(Ft,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r(Ft,"href","#rag"),r(Ke,"class","relative group"),r($o,"alt","Models"),c($o.src,Gy="https://img.shields.io/badge/All_model_pages-rag-blueviolet")||r($o,"src",Gy),r(To,"href","https://huggingface.co/models?filter=rag"),r(Po,"alt","Doc"),c(Po.src,Ny="https://img.shields.io/badge/Model_documentation-rag-blueviolet")||r(Po,"src",Ny),r(Mo,"href","model_doc/rag"),r(Oe,"class","flex flex-wrap space-x-1"),r(Hs,"href","https://arxiv.org/abs/2005.11401"),r(Hs,"rel","nofollow"),r(Dt,"id","more-technical-aspects"),r(Dt,"class","header-link block pr-1.5 
text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r(Dt,"href","#more-technical-aspects"),r(We,"class","relative group"),r(Ht,"id","full-vs-sparse-attention"),r(Ht,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r(Ht,"href","#full-vs-sparse-attention"),r(Je,"class","relative group"),r(Io,"id","lsh-attention"),r(Go,"href","#reformer"),r(No,"id","local-attention"),r(Fo,"href","#longformer"),r(Ct,"scale","50 %"),r(Ct,"align","center"),c(Ct.src,By="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/local_attention_mask.png")||r(Ct,"src",By),r(zt,"id","other-tricks"),r(zt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r(zt,"href","#other-tricks"),r(Qe,"class","relative group"),r(Co,"id","axial-pos-encoding"),r(jo,"href","#reformer"),Pd.a=Ld,Sd.a=Rd,Id.a=qd,Gd.a=Nd,Bd.a=Fd,Dd.a=Hd,Cd.a=zd,jd.a=Ud,Xd.a=Vd,Yd.a=Zd,Kd.a=Od},m(e,n){a(document.head,k),o(e,T,n),o(e,x,n),a(x,A),a(A,kh),v(Zt,kh,null),a(x,Cv),a(x,xh),a(xh,zv),o(e,Ip,n),o(e,L,n),a(L,jv),a(L,Kt),a(Kt,Uv),a(L,Xv),a(L,Ot),a(Ot,Vv),a(L,Yv),a(L,Wt),a(Wt,Zv),a(L,Kv),o(e,qp,n),o(e,Ys,n),a(Ys,Ov),o(e,Gp,n),o(e,$,n),a($,Ah),a(Ah,Zs),a(Zs,Wv),a($,Jv),a($,Th),a(Th,Ks),a(Ks,Qv),a($,eg),a($,$h),a($h,Os),a(Os,tg),a($,ag),a($,Mh),a(Mh,Ws),a(Ws,sg),a($,ig),a($,Ph),a(Ph,Js),a(Js,rg),o(e,Np,n),o(e,P,n),o(e,Bp,n),o(e,Qs,n),a(Qs,lg),o(e,Fp,n),o(e,ei,n),a(ei,ng),o(e,Dp,n),o(e,ti,n),a(ti,og),o(e,Hp,n),o(e,ai,n),a(ai,hg),o(e,Cp,n),o(e,si,n),a(si,fg),o(e,zp,n),o(e,ii,n),o(e,jp,n),o(e,ge,n),a(ge,et),a(et,Lh),v(Jt,Lh,null),a(ge,pg),a(ge,Sh),a(Sh,mg),o(e,Up,n),o(e,ri,n),a(ri,cg),o(e,Xp,n),v(Qt,e,n),o(e,Vp,n),o(e,be,n),a(be,tt),a(tt,Rh),v(ea,Rh,null),a(be,ug),a(be,Ih),a(Ih,dg),o(e,Yp,n),o(e,q,n),a(q,li),a(li,ni),a(q,vg),a(q,oi),a(oi,hi),a(q,gg),a(q,fi),a(fi,pi),o(e,Zp,n),o(e,ta,n),a(ta,aa),a(aa,bg),a(ta,_g),o(e,Kp,n),o(e,mi,n),a(mi,wg),o(e,Op,n),o(e,ci,n),a(ci,Eg),o(e,Wp,n),o(e,_e,n),a(_e,at),a(at,qh),v(sa,qh,null),a(_e,yg),a(_e,Gh),a(Gh,kg),o(e,Jp,n),o(e,G,n),a(G,ui),a(ui,di),a(G,xg),a(G,vi),a(vi,gi),a(G,Ag),a(G,bi),a(bi,_i),o(e,Qp,n),o(e,ia,n),a(ia,ra),a(ra,Tg),a(ia,$g),o(e,em,n),o(e,wi,n),a(wi,Mg),o(e,tm,n),o(e,Ei,n),a(Ei,Pg),o(e,am,n),o(e,we,n),a(we,st),a(st,Nh),v(la,Nh,null),a(we,Lg),a(we,Bh),a(Bh,Sg),o(e,sm,n),o(e,N,n),a(N,yi),a(yi,ki),a(N,Rg),a(N,xi),a(xi,Ai),a(N,Ig),a(N,Ti),a(Ti,$i),o(e,im,n),o(e,na,n),a(na,oa),a(oa,qg),a(na,Gg),o(e,rm,n),o(e,Mi,n),a(Mi,Ng),o(e,lm,n),o(e,Pi,n),a(Pi,Bg),o(e,nm,n),o(e,Ee,n),a(Ee,it),a(it,Fh),v(ha,Fh,null),a(Ee,Fg),a(Ee,Dh),a(Dh,Dg),o(e,om,n),o(e,B,n),a(B,Li),a(Li,Si),a(B,Hg),a(B,Ri),a(Ri,Ii),a(B,Cg),a(B,qi),a(qi,Gi),o(e,hm,n),o(e,fa,n),a(fa,pa),a(pa,zg),a(fa,jg),o(e,fm,n),o(e,Ni,n),a(Ni,Ug),o(e,pm,n),o(e,Bi,n),a(Bi,Xg),o(e,mm,n),o(e,Fi,n),a(Fi,Vg),o(e,cm,n),o(e,Di,n),a(Di,Yg),o(e,um,n),o(e,Hi,n),o(e,dm,n),o(e,ye,n),a(ye,rt),a(rt,Hh),v(ma,Hh,null),a(ye,Zg),a(ye,Ch),a(Ch,Kg),o(e,vm,n),o(e,F,n),a(F,Ci),a(Ci,zi),a(F,Og),a(F,ji),a(ji,Ui),a(F,Wg),a(F,Xi),a(Xi,Vi),o(e,gm,n),o(e,ca,n),a(ca,ua),a(ua,Jg),a(ca,Qg),o(e,bm,n),o(e,Yi,n),a(Yi,e1),o(e,_m,n),o(e,S,n),a(S,da),a(da,t1),a(da,Zi),a(Zi,a1),a(da,s1),a(S,i1),a(S,va),a(va,r1),a(va,Ki),a(Ki,l1),a(va,n1),a(S,o1),a(S,zh),a(zh,h1),a(S,f1),a(S,jh),a(jh,p1),o(e,wm,n),o(e,Oi,n),a(Oi,m1),o(e,Em,n),v(lt,e,n),o(e,ym,n),o(e,Wi,n),a
(Wi,c1),o(e,km,n),o(e,ke,n),a(ke,nt),a(nt,Uh),v(ga,Uh,null),a(ke,u1),a(ke,Xh),a(Xh,d1),o(e,xm,n),o(e,D,n),a(D,Ji),a(Ji,Qi),a(D,v1),a(D,er),a(er,tr),a(D,g1),a(D,ar),a(ar,sr),o(e,Am,n),o(e,ba,n),a(ba,_a),a(_a,b1),a(ba,_1),o(e,Tm,n),o(e,ir,n),a(ir,w1),o(e,$m,n),o(e,rr,n),a(rr,E1),o(e,Mm,n),o(e,lr,n),a(lr,y1),o(e,Pm,n),o(e,nr,n),o(e,Lm,n),o(e,xe,n),a(xe,ot),a(ot,Vh),v(wa,Vh,null),a(xe,k1),a(xe,Yh),a(Yh,x1),o(e,Sm,n),o(e,or,n),a(or,A1),o(e,Rm,n),v(Ea,e,n),o(e,Im,n),o(e,Ae,n),a(Ae,ht),a(ht,Zh),v(ya,Zh,null),a(Ae,T1),a(Ae,Kh),a(Kh,$1),o(e,qm,n),o(e,H,n),a(H,hr),a(hr,fr),a(H,M1),a(H,pr),a(pr,mr),a(H,P1),a(H,cr),a(cr,ur),o(e,Gm,n),o(e,ka,n),a(ka,xa),a(xa,L1),a(ka,S1),o(e,Nm,n),o(e,dr,n),a(dr,R1),o(e,Bm,n),o(e,oe,n),a(oe,Oh),a(Oh,I1),a(oe,q1),a(oe,Wh),a(Wh,G1),a(oe,N1),a(oe,Jh),a(Jh,B1),o(e,Fm,n),o(e,vr,n),a(vr,F1),o(e,Dm,n),o(e,gr,n),a(gr,D1),o(e,Hm,n),o(e,Te,n),a(Te,ft),a(ft,Qh),v(Aa,Qh,null),a(Te,H1),a(Te,ef),a(ef,C1),o(e,Cm,n),o(e,C,n),a(C,br),a(br,_r),a(C,z1),a(C,wr),a(wr,Er),a(C,j1),a(C,yr),a(yr,kr),o(e,zm,n),o(e,Ta,n),a(Ta,$a),a($a,U1),a(Ta,X1),o(e,jm,n),o(e,xr,n),a(xr,V1),o(e,Um,n),o(e,he,n),a(he,tf),a(tf,Y1),a(he,Z1),a(he,af),a(af,K1),a(he,O1),a(he,sf),a(sf,W1),o(e,Xm,n),o(e,Ar,n),a(Ar,J1),o(e,Vm,n),o(e,$e,n),a($e,pt),a(pt,rf),v(Ma,rf,null),a($e,Q1),a($e,lf),a(lf,eb),o(e,Ym,n),o(e,z,n),a(z,Tr),a(Tr,$r),a(z,tb),a(z,Mr),a(Mr,Pr),a(z,ab),a(z,Lr),a(Lr,Sr),o(e,Zm,n),o(e,Pa,n),a(Pa,La),a(La,sb),a(Pa,ib),o(e,Km,n),o(e,Rr,n),a(Rr,rb),o(e,Om,n),o(e,R,n),a(R,nf),a(nf,lb),a(R,nb),a(R,of),a(of,ob),a(R,hb),a(R,hf),a(hf,fb),a(R,pb),a(R,ff),a(ff,mb),o(e,Wm,n),o(e,Ir,n),a(Ir,cb),o(e,Jm,n),o(e,Me,n),a(Me,mt),a(mt,pf),v(Sa,pf,null),a(Me,ub),a(Me,mf),a(mf,db),o(e,Qm,n),o(e,j,n),a(j,qr),a(qr,Gr),a(j,vb),a(j,Nr),a(Nr,Br),a(j,gb),a(j,Fr),a(Fr,Dr),o(e,ec,n),o(e,Ra,n),a(Ra,Ia),a(Ia,bb),a(Ra,_b),o(e,tc,n),o(e,Hr,n),a(Hr,wb),o(e,ac,n),o(e,fe,n),a(fe,cf),a(cf,Eb),a(fe,yb),a(fe,uf),a(uf,kb),a(fe,xb),a(fe,df),a(df,Ab),o(e,sc,n),o(e,Cr,n),a(Cr,Tb),o(e,ic,n),o(e,Pe,n),a(Pe,ct),a(ct,vf),v(qa,vf,null),a(Pe,$b),a(Pe,gf),a(gf,Mb),o(e,rc,n),o(e,U,n),a(U,zr),a(zr,jr),a(U,Pb),a(U,Ur),a(Ur,Xr),a(U,Lb),a(U,Vr),a(Vr,Yr),o(e,lc,n),o(e,Ga,n),a(Ga,Na),a(Na,Sb),a(Ga,Rb),o(e,nc,n),o(e,Zr,n),a(Zr,Ib),o(e,oc,n),o(e,Kr,n),a(Kr,qb),o(e,hc,n),o(e,Le,n),a(Le,ut),a(ut,bf),v(Ba,bf,null),a(Le,Gb),a(Le,_f),a(_f,Nb),o(e,fc,n),o(e,X,n),a(X,Or),a(Or,Wr),a(X,Bb),a(X,Jr),a(Jr,Qr),a(X,Fb),a(X,el),a(el,tl),o(e,pc,n),o(e,Fa,n),a(Fa,Da),a(Da,Db),a(Fa,Hb),o(e,mc,n),o(e,al,n),a(al,Cb),o(e,cc,n),o(e,pe,n),a(pe,wf),a(wf,zb),a(pe,jb),a(pe,Ef),a(Ef,Ub),a(pe,Xb),a(pe,yf),a(yf,Vb),o(e,uc,n),o(e,I,n),a(I,Yb),a(I,kf),a(kf,Zb),a(I,Kb),a(I,xf),a(xf,Ob),a(I,Wb),a(I,Af),a(Af,Jb),a(I,Qb),o(e,dc,n),o(e,sl,n),a(sl,e_),o(e,vc,n),o(e,Se,n),a(Se,dt),a(dt,Tf),v(Ha,Tf,null),a(Se,t_),a(Se,$f),a($f,a_),o(e,gc,n),o(e,V,n),a(V,il),a(il,rl),a(V,s_),a(V,ll),a(ll,nl),a(V,i_),a(V,ol),a(ol,hl),o(e,bc,n),o(e,Ca,n),a(Ca,za),a(za,r_),a(Ca,l_),o(e,_c,n),o(e,fl,n),a(fl,n_),o(e,wc,n),o(e,pl,n),a(pl,o_),o(e,Ec,n),o(e,Re,n),a(Re,vt),a(vt,Mf),v(ja,Mf,null),a(Re,h_),a(Re,Pf),a(Pf,f_),o(e,yc,n),o(e,Y,n),a(Y,ml),a(ml,cl),a(Y,p_),a(Y,ul),a(ul,dl),a(Y,m_),a(Y,vl),a(vl,gl),o(e,kc,n),o(e,Ua,n),a(Ua,Xa),a(Xa,c_),a(Ua,u_),o(e,xc,n),o(e,bl,n),a(bl,d_),o(e,Ac,n),o(e,_l,n),a(_l,v_),o(e,Tc,n),o(e,Ie,n),a(Ie,gt),a(gt,Lf),v(Va,Lf,null),a(Ie,g_),a(Ie,Sf),a(Sf,b_),o(e,$c,n),o(e,Z,n),a(Z,wl),a(wl,El),a(Z,__),a(Z,yl),a(yl,kl),a(Z,w_),a(Z,xl),a(xl,Al),o(e,Mc,n),o(e,Ya,n),a(Ya,Za),a(Za,E_),a(Ya,y_),o(e,Pc,n),o(e,Tl,n),a(Tl,k_),o(e,Lc,n),o(e,$l,n),a($l,x_),o(e,Sc,n),o(e,qe,n),a(qe,bt),a(bt,Rf),v(Ka,Rf,null),a(qe,A_)
,a(qe,If),a(If,T_),o(e,Rc,n),o(e,K,n),a(K,Ml),a(Ml,Pl),a(K,$_),a(K,Ll),a(Ll,Sl),a(K,M_),a(K,Rl),a(Rl,Il),o(e,Ic,n),o(e,Oa,n),a(Oa,Wa),a(Wa,P_),a(Oa,L_),o(e,qc,n),o(e,ql,n),a(ql,S_),o(e,Gc,n),o(e,Gl,n),a(Gl,R_),o(e,Nc,n),o(e,Nl,n),a(Nl,I_),o(e,Bc,n),o(e,Bl,n),a(Bl,q_),o(e,Fc,n),o(e,Fl,n),o(e,Dc,n),o(e,Ge,n),a(Ge,_t),a(_t,qf),v(Ja,qf,null),a(Ge,G_),a(Ge,Gf),a(Gf,N_),o(e,Hc,n),o(e,O,n),a(O,Dl),a(Dl,Hl),a(O,B_),a(O,Cl),a(Cl,zl),a(O,F_),a(O,jl),a(jl,Ul),o(e,Cc,n),o(e,Qa,n),a(Qa,es),a(es,D_),a(Qa,H_),o(e,zc,n),o(e,wt,n),a(wt,C_),a(wt,Xl),a(Xl,z_),a(wt,j_),o(e,jc,n),o(e,Vl,n),a(Vl,U_),o(e,Uc,n),v(Et,e,n),o(e,Xc,n),o(e,Yl,n),a(Yl,X_),o(e,Vc,n),o(e,Zl,n),o(e,Yc,n),o(e,Ne,n),a(Ne,yt),a(yt,Nf),v(ts,Nf,null),a(Ne,V_),a(Ne,Bf),a(Bf,Y_),o(e,Zc,n),o(e,Kl,n),a(Kl,Z_),o(e,Kc,n),v(as,e,n),o(e,Oc,n),o(e,Be,n),a(Be,kt),a(kt,Ff),v(ss,Ff,null),a(Be,K_),a(Be,Df),a(Df,O_),o(e,Wc,n),o(e,W,n),a(W,Ol),a(Ol,Wl),a(W,W_),a(W,Jl),a(Jl,Ql),a(W,J_),a(W,en),a(en,tn),o(e,Jc,n),o(e,is,n),a(is,rs),a(rs,Q_),a(is,ew),o(e,Qc,n),o(e,an,n),a(an,tw),o(e,eu,n),o(e,M,n),a(M,Hf),a(Hf,aw),a(M,sw),a(M,Cf),a(Cf,iw),a(M,rw),a(M,zf),a(zf,lw),a(M,nw),a(M,jf),a(jf,ow),a(M,hw),a(M,Uf),a(Uf,fw),o(e,tu,n),o(e,sn,n),a(sn,pw),o(e,au,n),o(e,Fe,n),a(Fe,xt),a(xt,Xf),v(ls,Xf,null),a(Fe,mw),a(Fe,Vf),a(Vf,cw),o(e,su,n),o(e,J,n),a(J,rn),a(rn,ln),a(J,uw),a(J,nn),a(nn,on),a(J,dw),a(J,hn),a(hn,fn),o(e,iu,n),o(e,ns,n),a(ns,os),a(os,vw),a(ns,gw),o(e,ru,n),o(e,pn,n),a(pn,bw),o(e,lu,n),o(e,At,n),a(At,Yf),a(Yf,_w),a(At,ww),a(At,Zf),a(Zf,Ew),o(e,nu,n),o(e,mn,n),a(mn,yw),o(e,ou,n),o(e,cn,n),a(cn,kw),o(e,hu,n),o(e,De,n),a(De,Tt),a(Tt,Kf),v(hs,Kf,null),a(De,xw),a(De,Of),a(Of,Aw),o(e,fu,n),o(e,Q,n),a(Q,un),a(un,dn),a(Q,Tw),a(Q,vn),a(vn,gn),a(Q,$w),a(Q,bn),a(bn,_n),o(e,pu,n),o(e,fs,n),a(fs,ps),a(ps,Mw),a(fs,Pw),o(e,mu,n),o(e,wn,n),a(wn,Lw),o(e,cu,n),o(e,En,n),a(En,Sw),o(e,uu,n),o(e,He,n),a(He,$t),a($t,Wf),v(ms,Wf,null),a(He,Rw),a(He,Jf),a(Jf,Iw),o(e,du,n),o(e,ee,n),a(ee,yn),a(yn,kn),a(ee,qw),a(ee,xn),a(xn,An),a(ee,Gw),a(ee,Tn),a(Tn,$n),o(e,vu,n),o(e,cs,n),a(cs,us),a(us,Nw),a(cs,Bw),o(e,gu,n),o(e,Mn,n),a(Mn,Fw),o(e,bu,n),o(e,Pn,n),a(Pn,Dw),o(e,_u,n),o(e,Ln,n),a(Ln,Hw),o(e,wu,n),o(e,Sn,n),a(Sn,Cw),o(e,Eu,n),o(e,Rn,n),a(Rn,zw),o(e,yu,n),o(e,Ce,n),a(Ce,Mt),a(Mt,Qf),v(ds,Qf,null),a(Ce,jw),a(Ce,ep),a(ep,Uw),o(e,ku,n),o(e,te,n),a(te,In),a(In,qn),a(te,Xw),a(te,Gn),a(Gn,Nn),a(te,Vw),a(te,Bn),a(Bn,Fn),o(e,xu,n),o(e,vs,n),a(vs,gs),a(gs,Yw),a(vs,Zw),o(e,Au,n),o(e,Dn,n),a(Dn,Kw),o(e,Tu,n),o(e,Hn,n),a(Hn,Ow),o(e,$u,n),o(e,ze,n),a(ze,Pt),a(Pt,tp),v(bs,tp,null),a(ze,Ww),a(ze,ap),a(ap,Jw),o(e,Mu,n),o(e,ae,n),a(ae,Cn),a(Cn,zn),a(ae,Qw),a(ae,jn),a(jn,Un),a(ae,e2),a(ae,Xn),a(Xn,Vn),o(e,Pu,n),o(e,_s,n),a(_s,ws),a(ws,t2),a(_s,a2),o(e,Lu,n),o(e,Yn,n),a(Yn,s2),o(e,Su,n),o(e,Zn,n),a(Zn,i2),o(e,Ru,n),o(e,Lt,n),a(Lt,r2),a(Lt,Es),a(Es,l2),a(Lt,n2),o(e,Iu,n),o(e,me,n),a(me,o2),a(me,ys),a(ys,h2),a(me,f2),a(me,sp),a(sp,p2),a(me,m2),o(e,qu,n),o(e,je,n),a(je,St),a(St,ip),v(ks,ip,null),a(je,c2),a(je,rp),a(rp,u2),o(e,Gu,n),o(e,se,n),a(se,Kn),a(Kn,On),a(se,d2),a(se,Wn),a(Wn,Jn),a(se,v2),a(se,Qn),a(Qn,eo),o(e,Nu,n),o(e,xs,n),a(xs,As),a(As,g2),a(xs,b2),o(e,Bu,n),o(e,ce,n),a(ce,_2),a(ce,lp),a(lp,w2),a(ce,E2),a(ce,np),a(np,y2),a(ce,k2),o(e,Fu,n),o(e,to,n),a(to,x2),o(e,Du,n),o(e,Ue,n),a(Ue,Rt),a(Rt,op),v(Ts,op,null),a(Ue,A2),a(Ue,hp),a(hp,T2),o(e,Hu,n),o(e,ie,n),a(ie,ao),a(ao,so),a(ie,$2),a(ie,io),a(io,ro),a(ie,M2),a(ie,lo),a(lo,no),o(e,Cu,n),o(e,$s,n),a($s,Ms),a(Ms,P2),a($s,L2),o(e,zu,n),o(e,It,n),a(It,S2),a(It,Ps),a(Ps,R2),a(It,I2),o(e,ju,n),o(e,oo,n),a(oo,q2),o(e,Uu,n),o(e,ho,n),o(e,Xu,n),o(e,Xe,n),a(Xe,q
t),a(qt,fp),v(Ls,fp,null),a(Xe,G2),a(Xe,pp),a(pp,N2),o(e,Vu,n),o(e,fo,n),a(fo,B2),o(e,Yu,n),o(e,Ve,n),a(Ve,Gt),a(Gt,mp),v(Ss,mp,null),a(Ve,F2),a(Ve,cp),a(cp,D2),o(e,Zu,n),o(e,Rs,n),a(Rs,Is),a(Is,H2),a(Rs,C2),o(e,Ku,n),o(e,po,n),a(po,z2),o(e,Ou,n),o(e,mo,n),a(mo,j2),o(e,Wu,n),o(e,co,n),a(co,U2),o(e,Ju,n),o(e,uo,n),o(e,Qu,n),o(e,Ye,n),a(Ye,Nt),a(Nt,up),v(qs,up,null),a(Ye,X2),a(Ye,dp),a(dp,V2),o(e,ed,n),o(e,vo,n),a(vo,Y2),o(e,td,n),o(e,Ze,n),a(Ze,Bt),a(Bt,vp),v(Gs,vp,null),a(Ze,Z2),a(Ze,gp),a(gp,K2),o(e,ad,n),o(e,re,n),a(re,go),a(go,bo),a(re,O2),a(re,_o),a(_o,wo),a(re,W2),a(re,Eo),a(Eo,yo),o(e,sd,n),o(e,Ns,n),a(Ns,Bs),a(Bs,J2),a(Ns,Q2),o(e,id,n),o(e,ko,n),a(ko,e0),o(e,rd,n),o(e,xo,n),a(xo,t0),o(e,ld,n),o(e,ue,n),a(ue,bp),a(bp,a0),a(ue,s0),a(ue,_p),a(_p,i0),a(ue,r0),a(ue,wp),a(wp,l0),o(e,nd,n),o(e,Ao,n),a(Ao,n0),o(e,od,n),o(e,Ke,n),a(Ke,Ft),a(Ft,Ep),v(Fs,Ep,null),a(Ke,o0),a(Ke,yp),a(yp,h0),o(e,hd,n),o(e,Oe,n),a(Oe,To),a(To,$o),a(Oe,f0),a(Oe,Mo),a(Mo,Po),o(e,fd,n),o(e,Ds,n),a(Ds,Hs),a(Hs,p0),a(Ds,m0),o(e,pd,n),o(e,Lo,n),a(Lo,c0),o(e,md,n),o(e,So,n),a(So,u0),o(e,cd,n),o(e,We,n),a(We,Dt),a(Dt,kp),v(Cs,kp,null),a(We,d0),a(We,xp),a(xp,v0),o(e,ud,n),o(e,Je,n),a(Je,Ht),a(Ht,Ap),v(zs,Ap,null),a(Je,g0),a(Je,Tp),a(Tp,b0),o(e,dd,n),o(e,Ro,n),a(Ro,_0),o(e,vd,n),o(e,Io,n),o(e,gd,n),o(e,qo,n),a(qo,$p),a($p,w0),o(e,bd,n),o(e,js,n),a(js,Go),a(Go,E0),a(js,y0),o(e,_d,n),o(e,No,n),o(e,wd,n),o(e,Bo,n),a(Bo,Mp),a(Mp,k0),o(e,Ed,n),o(e,Us,n),a(Us,Fo),a(Fo,x0),a(Us,A0),o(e,yd,n),o(e,Do,n),a(Do,T0),o(e,kd,n),o(e,Ct,n),o(e,xd,n),o(e,Ho,n),a(Ho,$0),o(e,Ad,n),o(e,Qe,n),a(Qe,zt),a(zt,Pp),v(Xs,Pp,null),a(Qe,M0),a(Qe,Lp),a(Lp,P0),o(e,Td,n),o(e,Co,n),o(e,$d,n),o(e,zo,n),a(zo,Sp),a(Sp,L0),o(e,Md,n),o(e,E,n),a(E,jo),a(jo,S0),a(E,R0),Pd.m(y9,E),a(E,Ld),Sd.m(k9,E),a(E,Rd),Id.m(x9,E),a(E,qd),Gd.m(A9,E),a(E,Nd),Bd.m(T9,E),a(E,Fd),Dd.m($9,E),a(E,Hd),Cd.m(M9,E),a(E,zd),jd.m(P9,E),a(E,Ud),Xd.m(L9,E),a(E,Vd),Yd.m(S9,E),a(E,Zd),Kd.m(R9,E),a(E,Od),Wd=!0},p(e,[n]){const Vs={};n&2&&(Vs.$$scope={dirty:n,ctx:e}),lt.$set(Vs);const 
Rp={};n&2&&(Rp.$$scope={dirty:n,ctx:e}),Et.$set(Rp)},i(e){Wd||(g(Zt.$$.fragment,e),g(Jt.$$.fragment,e),g(Qt.$$.fragment,e),g(ea.$$.fragment,e),g(sa.$$.fragment,e),g(la.$$.fragment,e),g(ha.$$.fragment,e),g(ma.$$.fragment,e),g(lt.$$.fragment,e),g(ga.$$.fragment,e),g(wa.$$.fragment,e),g(Ea.$$.fragment,e),g(ya.$$.fragment,e),g(Aa.$$.fragment,e),g(Ma.$$.fragment,e),g(Sa.$$.fragment,e),g(qa.$$.fragment,e),g(Ba.$$.fragment,e),g(Ha.$$.fragment,e),g(ja.$$.fragment,e),g(Va.$$.fragment,e),g(Ka.$$.fragment,e),g(Ja.$$.fragment,e),g(Et.$$.fragment,e),g(ts.$$.fragment,e),g(as.$$.fragment,e),g(ss.$$.fragment,e),g(ls.$$.fragment,e),g(hs.$$.fragment,e),g(ms.$$.fragment,e),g(ds.$$.fragment,e),g(bs.$$.fragment,e),g(ks.$$.fragment,e),g(Ts.$$.fragment,e),g(Ls.$$.fragment,e),g(Ss.$$.fragment,e),g(qs.$$.fragment,e),g(Gs.$$.fragment,e),g(Fs.$$.fragment,e),g(Cs.$$.fragment,e),g(zs.$$.fragment,e),g(Xs.$$.fragment,e),Wd=!0)},o(e){b(Zt.$$.fragment,e),b(Jt.$$.fragment,e),b(Qt.$$.fragment,e),b(ea.$$.fragment,e),b(sa.$$.fragment,e),b(la.$$.fragment,e),b(ha.$$.fragment,e),b(ma.$$.fragment,e),b(lt.$$.fragment,e),b(ga.$$.fragment,e),b(wa.$$.fragment,e),b(Ea.$$.fragment,e),b(ya.$$.fragment,e),b(Aa.$$.fragment,e),b(Ma.$$.fragment,e),b(Sa.$$.fragment,e),b(qa.$$.fragment,e),b(Ba.$$.fragment,e),b(Ha.$$.fragment,e),b(ja.$$.fragment,e),b(Va.$$.fragment,e),b(Ka.$$.fragment,e),b(Ja.$$.fragment,e),b(Et.$$.fragment,e),b(ts.$$.fragment,e),b(as.$$.fragment,e),b(ss.$$.fragment,e),b(ls.$$.fragment,e),b(hs.$$.fragment,e),b(ms.$$.fragment,e),b(ds.$$.fragment,e),b(bs.$$.fragment,e),b(ks.$$.fragment,e),b(Ts.$$.fragment,e),b(Ls.$$.fragment,e),b(Ss.$$.fragment,e),b(qs.$$.fragment,e),b(Gs.$$.fragment,e),b(Fs.$$.fragment,e),b(Cs.$$.fragment,e),b(zs.$$.fragment,e),b(Xs.$$.fragment,e),Wd=!1},d(e){t(k),e&&t(T),e&&t(x),_(Zt),e&&t(Ip),e&&t(L),e&&t(qp),e&&t(Ys),e&&t(Gp),e&&t($),e&&t(Np),e&&t(P),e&&t(Bp),e&&t(Qs),e&&t(Fp),e&&t(ei),e&&t(Dp),e&&t(ti),e&&t(Hp),e&&t(ai),e&&t(Cp),e&&t(si),e&&t(zp),e&&t(ii),e&&t(jp),e&&t(ge),_(Jt),e&&t(Up),e&&t(ri),e&&t(Xp),_(Qt,e),e&&t(Vp),e&&t(be),_(ea),e&&t(Yp),e&&t(q),e&&t(Zp),e&&t(ta),e&&t(Kp),e&&t(mi),e&&t(Op),e&&t(ci),e&&t(Wp),e&&t(_e),_(sa),e&&t(Jp),e&&t(G),e&&t(Qp),e&&t(ia),e&&t(em),e&&t(wi),e&&t(tm),e&&t(Ei),e&&t(am),e&&t(we),_(la),e&&t(sm),e&&t(N),e&&t(im),e&&t(na),e&&t(rm),e&&t(Mi),e&&t(lm),e&&t(Pi),e&&t(nm),e&&t(Ee),_(ha),e&&t(om),e&&t(B),e&&t(hm),e&&t(fa),e&&t(fm),e&&t(Ni),e&&t(pm),e&&t(Bi),e&&t(mm),e&&t(Fi),e&&t(cm),e&&t(Di),e&&t(um),e&&t(Hi),e&&t(dm),e&&t(ye),_(ma),e&&t(vm),e&&t(F),e&&t(gm),e&&t(ca),e&&t(bm),e&&t(Yi),e&&t(_m),e&&t(S),e&&t(wm),e&&t(Oi),e&&t(Em),_(lt,e),e&&t(ym),e&&t(Wi),e&&t(km),e&&t(ke),_(ga),e&&t(xm),e&&t(D),e&&t(Am),e&&t(ba),e&&t(Tm),e&&t(ir),e&&t($m),e&&t(rr),e&&t(Mm),e&&t(lr),e&&t(Pm),e&&t(nr),e&&t(Lm),e&&t(xe),_(wa),e&&t(Sm),e&&t(or),e&&t(Rm),_(Ea,e),e&&t(Im),e&&t(Ae),_(ya),e&&t(qm),e&&t(H),e&&t(Gm),e&&t(ka),e&&t(Nm),e&&t(dr),e&&t(Bm),e&&t(oe),e&&t(Fm),e&&t(vr),e&&t(Dm),e&&t(gr),e&&t(Hm),e&&t(Te),_(Aa),e&&t(Cm),e&&t(C),e&&t(zm),e&&t(Ta),e&&t(jm),e&&t(xr),e&&t(Um),e&&t(he),e&&t(Xm),e&&t(Ar),e&&t(Vm),e&&t($e),_(Ma),e&&t(Ym),e&&t(z),e&&t(Zm),e&&t(Pa),e&&t(Km),e&&t(Rr),e&&t(Om),e&&t(R),e&&t(Wm),e&&t(Ir),e&&t(Jm),e&&t(Me),_(Sa),e&&t(Qm),e&&t(j),e&&t(ec),e&&t(Ra),e&&t(tc),e&&t(Hr),e&&t(ac),e&&t(fe),e&&t(sc),e&&t(Cr),e&&t(ic),e&&t(Pe),_(qa),e&&t(rc),e&&t(U),e&&t(lc),e&&t(Ga),e&&t(nc),e&&t(Zr),e&&t(oc),e&&t(Kr),e&&t(hc),e&&t(Le),_(Ba),e&&t(fc),e&&t(X),e&&t(pc),e&&t(Fa),e&&t(mc),e&&t(al),e&&t(cc),e&&t(pe),e&&t(uc),e&&t(I),e&&t(dc),e&&t(sl),e&&t(vc),e&&t(Se),_(Ha),e&&t(gc),e&&t(V),e&&t(bc),e&&t(Ca),
e&&t(_c),e&&t(fl),e&&t(wc),e&&t(pl),e&&t(Ec),e&&t(Re),_(ja),e&&t(yc),e&&t(Y),e&&t(kc),e&&t(Ua),e&&t(xc),e&&t(bl),e&&t(Ac),e&&t(_l),e&&t(Tc),e&&t(Ie),_(Va),e&&t($c),e&&t(Z),e&&t(Mc),e&&t(Ya),e&&t(Pc),e&&t(Tl),e&&t(Lc),e&&t($l),e&&t(Sc),e&&t(qe),_(Ka),e&&t(Rc),e&&t(K),e&&t(Ic),e&&t(Oa),e&&t(qc),e&&t(ql),e&&t(Gc),e&&t(Gl),e&&t(Nc),e&&t(Nl),e&&t(Bc),e&&t(Bl),e&&t(Fc),e&&t(Fl),e&&t(Dc),e&&t(Ge),_(Ja),e&&t(Hc),e&&t(O),e&&t(Cc),e&&t(Qa),e&&t(zc),e&&t(wt),e&&t(jc),e&&t(Vl),e&&t(Uc),_(Et,e),e&&t(Xc),e&&t(Yl),e&&t(Vc),e&&t(Zl),e&&t(Yc),e&&t(Ne),_(ts),e&&t(Zc),e&&t(Kl),e&&t(Kc),_(as,e),e&&t(Oc),e&&t(Be),_(ss),e&&t(Wc),e&&t(W),e&&t(Jc),e&&t(is),e&&t(Qc),e&&t(an),e&&t(eu),e&&t(M),e&&t(tu),e&&t(sn),e&&t(au),e&&t(Fe),_(ls),e&&t(su),e&&t(J),e&&t(iu),e&&t(ns),e&&t(ru),e&&t(pn),e&&t(lu),e&&t(At),e&&t(nu),e&&t(mn),e&&t(ou),e&&t(cn),e&&t(hu),e&&t(De),_(hs),e&&t(fu),e&&t(Q),e&&t(pu),e&&t(fs),e&&t(mu),e&&t(wn),e&&t(cu),e&&t(En),e&&t(uu),e&&t(He),_(ms),e&&t(du),e&&t(ee),e&&t(vu),e&&t(cs),e&&t(gu),e&&t(Mn),e&&t(bu),e&&t(Pn),e&&t(_u),e&&t(Ln),e&&t(wu),e&&t(Sn),e&&t(Eu),e&&t(Rn),e&&t(yu),e&&t(Ce),_(ds),e&&t(ku),e&&t(te),e&&t(xu),e&&t(vs),e&&t(Au),e&&t(Dn),e&&t(Tu),e&&t(Hn),e&&t($u),e&&t(ze),_(bs),e&&t(Mu),e&&t(ae),e&&t(Pu),e&&t(_s),e&&t(Lu),e&&t(Yn),e&&t(Su),e&&t(Zn),e&&t(Ru),e&&t(Lt),e&&t(Iu),e&&t(me),e&&t(qu),e&&t(je),_(ks),e&&t(Gu),e&&t(se),e&&t(Nu),e&&t(xs),e&&t(Bu),e&&t(ce),e&&t(Fu),e&&t(to),e&&t(Du),e&&t(Ue),_(Ts),e&&t(Hu),e&&t(ie),e&&t(Cu),e&&t($s),e&&t(zu),e&&t(It),e&&t(ju),e&&t(oo),e&&t(Uu),e&&t(ho),e&&t(Xu),e&&t(Xe),_(Ls),e&&t(Vu),e&&t(fo),e&&t(Yu),e&&t(Ve),_(Ss),e&&t(Zu),e&&t(Rs),e&&t(Ku),e&&t(po),e&&t(Ou),e&&t(mo),e&&t(Wu),e&&t(co),e&&t(Ju),e&&t(uo),e&&t(Qu),e&&t(Ye),_(qs),e&&t(ed),e&&t(vo),e&&t(td),e&&t(Ze),_(Gs),e&&t(ad),e&&t(re),e&&t(sd),e&&t(Ns),e&&t(id),e&&t(ko),e&&t(rd),e&&t(xo),e&&t(ld),e&&t(ue),e&&t(nd),e&&t(Ao),e&&t(od),e&&t(Ke),_(Fs),e&&t(hd),e&&t(Oe),e&&t(fd),e&&t(Ds),e&&t(pd),e&&t(Lo),e&&t(md),e&&t(So),e&&t(cd),e&&t(We),_(Cs),e&&t(ud),e&&t(Je),_(zs),e&&t(dd),e&&t(Ro),e&&t(vd),e&&t(Io),e&&t(gd),e&&t(qo),e&&t(bd),e&&t(js),e&&t(_d),e&&t(No),e&&t(wd),e&&t(Bo),e&&t(Ed),e&&t(Us),e&&t(yd),e&&t(Do),e&&t(kd),e&&t(Ct),e&&t(xd),e&&t(Ho),e&&t(Ad),e&&t(Qe),_(Xs),e&&t(Td),e&&t(Co),e&&t($d),e&&t(zo),e&&t(Md),e&&t(E)}}}const H9={local:"summary-of-the-models",sections:[{local:"decoders-or-autoregressive-models",sections:[{local:"original-gpt",title:"Original GPT"},{local:"gpt2",title:"GPT-2"},{local:"ctrl",title:"CTRL"},{local:"transformerxl",title:"Transformer-XL"},{local:"reformer",title:"Reformer"},{local:"xlnet",title:"XLNet"}],title:"Decoders or autoregressive models"},{local:"encoders-or-autoencoding-models",sections:[{local:"bert",title:"BERT"},{local:"albert",title:"ALBERT"},{local:"roberta",title:"RoBERTa"},{local:"distilbert",title:"DistilBERT"},{local:"convbert",title:"ConvBERT"},{local:"xlm",title:"XLM"},{local:"xlmroberta",title:"XLM-RoBERTa"},{local:"flaubert",title:"FlauBERT"},{local:"electra",title:"ELECTRA"},{local:"funnel-transformer",title:"Funnel Transformer"},{local:"longformer",title:"Longformer"}],title:"Encoders or autoencoding models"},{local:"sequencetosequence-models",sections:[{local:"bart",title:"BART"},{local:"pegasus",title:"Pegasus"},{local:"marianmt",title:"MarianMT"},{local:"t5",title:"T5"},{local:"mt5",title:"MT5"},{local:"mbart",title:"MBart"},{local:"prophetnet",title:"ProphetNet"},{local:"xlmprophetnet",title:"XLM-ProphetNet"}],title:"Sequence-to-sequence models"},{local:"multimodal-models",sections:[{local:"mmbt",title:"MMBT"}],title:"Multimodal 
models"},{local:"retrievalbased-models",sections:[{local:"dpr",title:"DPR"},{local:"rag",title:"RAG"}],title:"Retrieval-based models"},{local:"more-technical-aspects",sections:[{local:"full-vs-sparse-attention",title:"Full vs sparse attention"},{local:"other-tricks",title:"Other tricks"}],title:"More technical aspects"}],title:"Summary of the models"};function C9(Yt,k,T){let{fw:x}=k;return Yt.$$set=A=>{"fw"in A&&T(0,x=A.fw)},[x]}class V9 extends I9{constructor(k){super();q9(this,k,C9,D9,G9,{fw:0})}}export{V9 as default,H9 as metadata};
276
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages/create_a_model.mdx-44804e70.js
import{S as ul,i as cl,s as ml,e as n,k as f,w as _,t as a,M as dl,c as i,d as s,m as u,a as l,x as g,h as r,b as c,F as t,g as p,y as v,q as b,o as y,B as $}from"../chunks/vendor-4833417e.js";import{T as ir}from"../chunks/Tip-fffd6df1.js";import{I as nt}from"../chunks/IconCopyLink-4b81c553.js";import{C}from"../chunks/CodeBlock-6a3d1b46.js";import{C as xs}from"../chunks/CodeBlockFw-27a176a0.js";import"../chunks/CopyButton-dacfbfaf.js";function hl(M){let m,j,d,k,q;return{c(){m=n("p"),j=a("You can also save your configuration file as a dictionary or even just the difference between your custom configuration attributes and the default configuration attributes! See the "),d=n("a"),k=a("configuration"),q=a(" documentation for more details."),this.h()},l(h){m=i(h,"P",{});var w=l(m);j=r(w,"You can also save your configuration file as a dictionary or even just the difference between your custom configuration attributes and the default configuration attributes! See the "),d=i(w,"A",{href:!0});var E=l(d);k=r(E,"configuration"),E.forEach(s),q=r(w," documentation for more details."),w.forEach(s),this.h()},h(){c(d,"href","main_classes/configuration")},m(h,w){p(h,m,w),t(m,j),t(m,d),t(d,k),t(m,q)},d(h){h&&s(m)}}}function _l(M){let m,j,d,k,q;return{c(){m=n("p"),j=a("Not every model supports a fast tokenizer. Take a look at this "),d=n("a"),k=a("table"),q=a(" to check if a model has fast tokenizer support."),this.h()},l(h){m=i(h,"P",{});var w=l(m);j=r(w,"Not every model supports a fast tokenizer. Take a look at this "),d=i(w,"A",{href:!0});var E=l(d);k=r(E,"table"),E.forEach(s),q=r(w," to check if a model has fast tokenizer support."),w.forEach(s),this.h()},h(){c(d,"href","index#supported-frameworks")},m(h,w){p(h,m,w),t(m,j),t(m,d),t(d,k),t(m,q)},d(h){h&&s(m)}}}function gl(M){let m,j,d,k,q,h,w,E,L,J,z;return{c(){m=n("p"),j=a("By default, "),d=n("a"),k=a("AutoTokenizer"),q=a(" will try to load a fast tokenizer. You can disable this behavior by setting "),h=n("code"),w=a("use_fast=False"),E=a(" in "),L=n("code"),J=a("from_pretrained"),z=a("."),this.h()},l(O){m=i(O,"P",{});var x=l(m);j=r(x,"By default, "),d=i(x,"A",{href:!0});var X=l(d);k=r(X,"AutoTokenizer"),X.forEach(s),q=r(x," will try to load a fast tokenizer. 
You can disable this behavior by setting "),h=i(x,"CODE",{});var it=l(h);w=r(it,"use_fast=False"),it.forEach(s),E=r(x," in "),L=i(x,"CODE",{});var lt=l(L);J=r(lt,"from_pretrained"),lt.forEach(s),z=r(x,"."),x.forEach(s),this.h()},h(){c(d,"href","/docs/transformers/pr_16143/en/model_doc/auto#transformers.AutoTokenizer")},m(O,x){p(O,m,x),t(m,j),t(m,d),t(d,k),t(m,q),t(m,h),t(h,w),t(m,E),t(m,L),t(L,J),t(m,z)},d(O){O&&s(m)}}}function vl(M){let m,j,d,k,q;return{c(){m=n("p"),j=a("If you aren\u2019t looking for any customization, just use the "),d=n("code"),k=a("from_pretrained"),q=a(" method to load a model\u2019s default feature extractor parameters.")},l(h){m=i(h,"P",{});var w=l(m);j=r(w,"If you aren\u2019t looking for any customization, just use the "),d=i(w,"CODE",{});var E=l(d);k=r(E,"from_pretrained"),E.forEach(s),q=r(w," method to load a model\u2019s default feature extractor parameters."),w.forEach(s)},m(h,w){p(h,m,w),t(m,j),t(m,d),t(d,k),t(m,q)},d(h){h&&s(m)}}}function bl(M){let m,j,d,k,q,h,w,E,L,J,z,O,x,X,it,lt,Qt,lr,pr,Ht,fr,ur,zs,D,Ut,cr,mr,Gt,dr,hr,Jt,_r,gr,Xt,vr,br,Kt,yr,Fs,N,K,Zt,Te,$r,es,wr,Ds,F,kr,pt,jr,qr,ts,Er,Tr,ss,xr,zr,as,Fr,Dr,rs,Br,Cr,Bs,V,Ar,ft,Pr,Mr,ut,Vr,Sr,Cs,xe,As,R,ct,Ir,Wr,mt,Lr,Or,Ps,Z,ze,Nr,os,Rr,Yr,Qr,Fe,Hr,ns,Ur,Gr,Ms,De,Vs,ee,Jr,dt,Xr,Kr,Ss,Be,Is,te,Zr,ht,eo,to,Ws,Ce,Ls,se,so,_t,ao,ro,Os,Ae,Ns,ae,Rs,Y,re,is,Pe,oo,ls,no,Ys,T,io,gt,lo,po,ps,fo,uo,vt,co,mo,Me,fs,ho,_o,Ve,us,go,vo,Se,cs,bo,yo,Qs,bt,$o,Hs,Ie,Us,yt,wo,Gs,oe,ko,$t,jo,qo,Js,We,Xs,wt,Eo,Ks,Le,Zs,Q,ne,ms,Oe,To,ds,xo,ea,ie,zo,hs,Fo,Do,ta,le,Bo,kt,Co,Ao,sa,Ne,aa,pe,Po,jt,Mo,Vo,ra,Re,oa,H,fe,_s,Ye,So,gs,Io,na,ue,Wo,qt,Lo,Oo,ia,ce,Et,Tt,No,Ro,Yo,S,xt,Qo,Ho,Qe,Uo,Go,vs,Jo,Xo,la,zt,Ko,pa,me,fa,de,Zo,bs,en,tn,ua,He,ca,he,sn,Ft,an,rn,ma,Ue,da,_e,on,Dt,nn,ln,ha,Ge,_a,ge,ga,U,ve,ys,Je,pn,$s,fn,va,A,un,Bt,cn,mn,Ct,dn,hn,At,_n,gn,ba,I,vn,Pt,bn,yn,Mt,$n,wn,ya,Xe,$a,be,wa,ye,kn,Vt,jn,qn,ka,Ke,ja,$e,En,St,Tn,xn,qa,Ze,Ea,G,we,ws,et,zn,ks,Fn,Ta,ke,Dn,It,Bn,Cn,xa,Wt,An,za,tt,Fa,Lt,Pn,Da,st,Ba,je,Mn,Ot,Vn,Sn,Ca,at,Aa,Nt,In,Pa;return h=new nt({}),Te=new nt({}),xe=new C({props:{code:`from transformers import DistilBertConfig config = DistilBertConfig() print(config)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DistilBertConfig <span class="hljs-meta">&gt;&gt;&gt; </span>config = DistilBertConfig() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(config) DistilBertConfig { <span class="hljs-string">&quot;activation&quot;</span>: <span class="hljs-string">&quot;gelu&quot;</span>, <span class="hljs-string">&quot;attention_dropout&quot;</span>: <span class="hljs-number">0.1</span>, <span class="hljs-string">&quot;dim&quot;</span>: <span class="hljs-number">768</span>, <span class="hljs-string">&quot;dropout&quot;</span>: <span class="hljs-number">0.1</span>, <span class="hljs-string">&quot;hidden_dim&quot;</span>: <span class="hljs-number">3072</span>, <span class="hljs-string">&quot;initializer_range&quot;</span>: <span class="hljs-number">0.02</span>, <span class="hljs-string">&quot;max_position_embeddings&quot;</span>: <span class="hljs-number">512</span>, <span class="hljs-string">&quot;model_type&quot;</span>: <span class="hljs-string">&quot;distilbert&quot;</span>, <span class="hljs-string">&quot;n_heads&quot;</span>: <span class="hljs-number">12</span>, <span class="hljs-string">&quot;n_layers&quot;</span>: <span class="hljs-number">6</span>, <span 
class="hljs-string">&quot;pad_token_id&quot;</span>: <span class="hljs-number">0</span>, <span class="hljs-string">&quot;qa_dropout&quot;</span>: <span class="hljs-number">0.1</span>, <span class="hljs-string">&quot;seq_classif_dropout&quot;</span>: <span class="hljs-number">0.2</span>, <span class="hljs-string">&quot;sinusoidal_pos_embds&quot;</span>: false, <span class="hljs-string">&quot;transformers_version&quot;</span>: <span class="hljs-string">&quot;4.16.2&quot;</span>, <span class="hljs-string">&quot;vocab_size&quot;</span>: <span class="hljs-number">30522</span> }`}}),De=new C({props:{code:`my_config = DistilBertConfig(activation="relu", attention_dropout=0.4) print(my_config)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>my_config = DistilBertConfig(activation=<span class="hljs-string">&quot;relu&quot;</span>, attention_dropout=<span class="hljs-number">0.4</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(my_config) DistilBertConfig { <span class="hljs-string">&quot;activation&quot;</span>: <span class="hljs-string">&quot;relu&quot;</span>, <span class="hljs-string">&quot;attention_dropout&quot;</span>: <span class="hljs-number">0.4</span>, <span class="hljs-string">&quot;dim&quot;</span>: <span class="hljs-number">768</span>, <span class="hljs-string">&quot;dropout&quot;</span>: <span class="hljs-number">0.1</span>, <span class="hljs-string">&quot;hidden_dim&quot;</span>: <span class="hljs-number">3072</span>, <span class="hljs-string">&quot;initializer_range&quot;</span>: <span class="hljs-number">0.02</span>, <span class="hljs-string">&quot;max_position_embeddings&quot;</span>: <span class="hljs-number">512</span>, <span class="hljs-string">&quot;model_type&quot;</span>: <span class="hljs-string">&quot;distilbert&quot;</span>, <span class="hljs-string">&quot;n_heads&quot;</span>: <span class="hljs-number">12</span>, <span class="hljs-string">&quot;n_layers&quot;</span>: <span class="hljs-number">6</span>, <span class="hljs-string">&quot;pad_token_id&quot;</span>: <span class="hljs-number">0</span>, <span class="hljs-string">&quot;qa_dropout&quot;</span>: <span class="hljs-number">0.1</span>, <span class="hljs-string">&quot;seq_classif_dropout&quot;</span>: <span class="hljs-number">0.2</span>, <span class="hljs-string">&quot;sinusoidal_pos_embds&quot;</span>: false, <span class="hljs-string">&quot;transformers_version&quot;</span>: <span class="hljs-string">&quot;4.16.2&quot;</span>, <span class="hljs-string">&quot;vocab_size&quot;</span>: <span class="hljs-number">30522</span> }`}}),Be=new C({props:{code:'my_config = DistilBertConfig.from_pretrained("distilbert-base-uncased", activation="relu", attention_dropout=0.4)',highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>my_config = DistilBertConfig.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>, activation=<span class="hljs-string">&quot;relu&quot;</span>, attention_dropout=<span class="hljs-number">0.4</span>)'}}),Ce=new C({props:{code:'my_config.save_pretrained(save_directory="./your_model_save_path")',highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>my_config.save_pretrained(save_directory=<span class="hljs-string">&quot;./your_model_save_path&quot;</span>)'}}),Ae=new C({props:{code:'my_config = DistilBertConfig.from_pretrained("./your_model_save_path/my_config.json")',highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>my_config = DistilBertConfig.from_pretrained(<span 
class="hljs-string">&quot;./your_model_save_path/my_config.json&quot;</span>)'}}),ae=new ir({props:{$$slots:{default:[hl]},$$scope:{ctx:M}}}),Pe=new nt({}),Ie=new xs({props:{group1:{id:"pt",code:`from transformers import DistilBertModel my_config = DistilBertConfig.from_pretrained("./your_model_save_path/my_config.json") model = DistilBertModel(my_config)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DistilBertModel <span class="hljs-meta">&gt;&gt;&gt; </span>my_config = DistilBertConfig.from_pretrained(<span class="hljs-string">&quot;./your_model_save_path/my_config.json&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = DistilBertModel(my_config)`},group2:{id:"tf",code:`from transformers import TFDistilBertModel my_config = DistilBertConfig.from_pretrained("./your_model_save_path/my_config.json") tf_model = TFDistilBertModel(my_config)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TFDistilBertModel <span class="hljs-meta">&gt;&gt;&gt; </span>my_config = DistilBertConfig.from_pretrained(<span class="hljs-string">&quot;./your_model_save_path/my_config.json&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tf_model = TFDistilBertModel(my_config)`}}}),We=new xs({props:{group1:{id:"pt",code:'model = DistilBertModel.from_pretrained("distilbert-base-uncased")',highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>model = DistilBertModel.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>)'},group2:{id:"tf",code:'tf_model = TFDistilBertModel.from_pretrained("distilbert-base-uncased")',highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>tf_model = TFDistilBertModel.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>)'}}}),Le=new xs({props:{group1:{id:"pt",code:'model = DistilBertModel.from_pretrained("distilbert-base-uncased", config=my_config)',highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>model = DistilBertModel.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>, config=my_config)'},group2:{id:"tf",code:'tf_model = TFDistilBertModel.from_pretrained("distilbert-base-uncased", config=my_config)',highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>tf_model = TFDistilBertModel.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>, config=my_config)'}}}),Oe=new nt({}),Ne=new xs({props:{group1:{id:"pt",code:`from transformers import DistilBertForSequenceClassification model = DistilBertForSequenceClassification.from_pretrained("distilbert-base-uncased")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DistilBertForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span>model = DistilBertForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>)`},group2:{id:"tf",code:`from transformers import TFDistilBertForSequenceClassification tf_model = TFDistilBertForSequenceClassification.from_pretrained("distilbert-base-uncased")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TFDistilBertForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span>tf_model 
= TFDistilBertForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>)`}}}),Re=new xs({props:{group1:{id:"pt",code:`from transformers import DistilBertForQuestionAnswering model = DistilBertForQuestionAnswering.from_pretrained("distilbert-base-uncased")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DistilBertForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span>model = DistilBertForQuestionAnswering.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>)`},group2:{id:"tf",code:`from transformers import TFDistilBertForQuestionAnswering tf_model = TFDistilBertForQuestionAnswering.from_pretrained("distilbert-base-uncased")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TFDistilBertForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span>tf_model = TFDistilBertForQuestionAnswering.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>)`}}}),Ye=new nt({}),me=new ir({props:{warning:"&lcub;true}",$$slots:{default:[_l]},$$scope:{ctx:M}}}),He=new C({props:{code:`from transformers import DistilBertTokenizer my_tokenizer = DistilBertTokenizer(vocab_file="my_vocab_file.txt", do_lower_case=False, padding_side="left")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DistilBertTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>my_tokenizer = DistilBertTokenizer(vocab_file=<span class="hljs-string">&quot;my_vocab_file.txt&quot;</span>, do_lower_case=<span class="hljs-literal">False</span>, padding_side=<span class="hljs-string">&quot;left&quot;</span>)`}}),Ue=new C({props:{code:`from transformers import DistilBertTokenizer slow_tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DistilBertTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>slow_tokenizer = DistilBertTokenizer.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>)`}}),Ge=new C({props:{code:`from transformers import DistilBertTokenizerFast fast_tokenizer = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DistilBertTokenizerFast <span class="hljs-meta">&gt;&gt;&gt; </span>fast_tokenizer = DistilBertTokenizerFast.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>)`}}),ge=new ir({props:{$$slots:{default:[gl]},$$scope:{ctx:M}}}),Je=new nt({}),Xe=new C({props:{code:`from transformers import ViTFeatureExtractor vit_extractor = ViTFeatureExtractor() print(vit_extractor)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ViTFeatureExtractor <span class="hljs-meta">&gt;&gt;&gt; </span>vit_extractor = ViTFeatureExtractor() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(vit_extractor) ViTFeatureExtractor { <span class="hljs-string">&quot;do_normalize&quot;</span>: true, 
<span class="hljs-string">&quot;do_resize&quot;</span>: true, <span class="hljs-string">&quot;feature_extractor_type&quot;</span>: <span class="hljs-string">&quot;ViTFeatureExtractor&quot;</span>, <span class="hljs-string">&quot;image_mean&quot;</span>: [ <span class="hljs-number">0.5</span>, <span class="hljs-number">0.5</span>, <span class="hljs-number">0.5</span> ], <span class="hljs-string">&quot;image_std&quot;</span>: [ <span class="hljs-number">0.5</span>, <span class="hljs-number">0.5</span>, <span class="hljs-number">0.5</span> ], <span class="hljs-string">&quot;resample&quot;</span>: <span class="hljs-number">2</span>, <span class="hljs-string">&quot;size&quot;</span>: <span class="hljs-number">224</span> }`}}),be=new ir({props:{$$slots:{default:[vl]},$$scope:{ctx:M}}}),Ke=new C({props:{code:`from transformers import ViTFeatureExtractor my_vit_extractor = ViTFeatureExtractor(resample="PIL.Image.BOX", do_normalize=False, image_mean=[0.3, 0.3, 0.3]) print(my_vit_extractor)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ViTFeatureExtractor <span class="hljs-meta">&gt;&gt;&gt; </span>my_vit_extractor = ViTFeatureExtractor(resample=<span class="hljs-string">&quot;PIL.Image.BOX&quot;</span>, do_normalize=<span class="hljs-literal">False</span>, image_mean=[<span class="hljs-number">0.3</span>, <span class="hljs-number">0.3</span>, <span class="hljs-number">0.3</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(my_vit_extractor) ViTFeatureExtractor { <span class="hljs-string">&quot;do_normalize&quot;</span>: false, <span class="hljs-string">&quot;do_resize&quot;</span>: true, <span class="hljs-string">&quot;feature_extractor_type&quot;</span>: <span class="hljs-string">&quot;ViTFeatureExtractor&quot;</span>, <span class="hljs-string">&quot;image_mean&quot;</span>: [ <span class="hljs-number">0.3</span>, <span class="hljs-number">0.3</span>, <span class="hljs-number">0.3</span> ], <span class="hljs-string">&quot;image_std&quot;</span>: [ <span class="hljs-number">0.5</span>, <span class="hljs-number">0.5</span>, <span class="hljs-number">0.5</span> ], <span class="hljs-string">&quot;resample&quot;</span>: <span class="hljs-string">&quot;PIL.Image.BOX&quot;</span>, <span class="hljs-string">&quot;size&quot;</span>: <span class="hljs-number">224</span> }`}}),Ze=new C({props:{code:`from transformers import Wav2Vec2FeatureExtractor w2v2_extractor = Wav2Vec2FeatureExtractor() print(w2v2_extractor)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Wav2Vec2FeatureExtractor <span class="hljs-meta">&gt;&gt;&gt; </span>w2v2_extractor = Wav2Vec2FeatureExtractor() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(w2v2_extractor) Wav2Vec2FeatureExtractor { <span class="hljs-string">&quot;do_normalize&quot;</span>: true, <span class="hljs-string">&quot;feature_extractor_type&quot;</span>: <span class="hljs-string">&quot;Wav2Vec2FeatureExtractor&quot;</span>, <span class="hljs-string">&quot;feature_size&quot;</span>: <span class="hljs-number">1</span>, <span class="hljs-string">&quot;padding_side&quot;</span>: <span class="hljs-string">&quot;right&quot;</span>, <span class="hljs-string">&quot;padding_value&quot;</span>: <span class="hljs-number">0.0</span>, <span class="hljs-string">&quot;return_attention_mask&quot;</span>: 
false, <span class="hljs-string">&quot;sampling_rate&quot;</span>: <span class="hljs-number">16000</span> }`}}),et=new nt({}),tt=new C({props:{code:`from transformers import Wav2Vec2FeatureExtractor feature_extractor = Wav2Vec2FeatureExtractor(padding_value=1.0, do_normalize=True)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Wav2Vec2FeatureExtractor <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = Wav2Vec2FeatureExtractor(padding_value=<span class="hljs-number">1.0</span>, do_normalize=<span class="hljs-literal">True</span>)`}}),st=new C({props:{code:`from transformers import Wav2Vec2CTCTokenizer tokenizer = Wav2Vec2CTCTokenizer(vocab_file="my_vocab_file.txt")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Wav2Vec2CTCTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = Wav2Vec2CTCTokenizer(vocab_file=<span class="hljs-string">&quot;my_vocab_file.txt&quot;</span>)`}}),at=new C({props:{code:`from transformers import Wav2Vec2Processor processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Wav2Vec2Processor <span class="hljs-meta">&gt;&gt;&gt; </span>processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)`}}),{c(){m=n("meta"),j=f(),d=n("h1"),k=n("a"),q=n("span"),_(h.$$.fragment),w=f(),E=n("span"),L=a("Create a custom model"),J=f(),z=n("p"),O=a("An "),x=n("a"),X=n("code"),it=a("AutoClass"),lt=a(" automatically infers the model architecture and downloads pretrained configuration and weights. Generally, we recommend using an "),Qt=n("code"),lr=a("AutoClass"),pr=a(" to produce checkpoint-agnostic code. But users who want more control over specific model parameters can create a custom \u{1F917} Transformers model from just a few base classes. This could be particularly useful for anyone who is interested in studying, training or experimenting with a \u{1F917} Transformers model. In this guide, dive deeper into creating a custom model without an "),Ht=n("code"),fr=a("AutoClass"),ur=a(". Learn how to:"),zs=f(),D=n("ul"),Ut=n("li"),cr=a("Load and customize a model configuration."),mr=f(),Gt=n("li"),dr=a("Create a model architecture."),hr=f(),Jt=n("li"),_r=a("Create a slow and fast tokenizer for text."),gr=f(),Xt=n("li"),vr=a("Create a feature extractor for audio or image tasks."),br=f(),Kt=n("li"),yr=a("Create a processor for multimodal tasks."),Fs=f(),N=n("h2"),K=n("a"),Zt=n("span"),_(Te.$$.fragment),$r=f(),es=n("span"),wr=a("Configuration"),Ds=f(),F=n("p"),kr=a("A "),pt=n("a"),jr=a("configuration"),qr=a(" refers to a model\u2019s specific attributes. Each model configuration has different attributes; for instance, all NLP models have the "),ts=n("code"),Er=a("hidden_size"),Tr=a(", "),ss=n("code"),xr=a("num_attention_heads"),zr=a(", "),as=n("code"),Fr=a("num_hidden_layers"),Dr=a(" and "),rs=n("code"),Br=a("vocab_size"),Cr=a(" attributes in common. 
These attributes specify the number of attention heads or hidden layers to construct a model with."),Bs=f(),V=n("p"),Ar=a("Get a closer look at "),ft=n("a"),Pr=a("DistilBERT"),Mr=a(" by accessing "),ut=n("a"),Vr=a("DistilBertConfig"),Sr=a(" to inspect it\u2019s attributes:"),Cs=f(),_(xe.$$.fragment),As=f(),R=n("p"),ct=n("a"),Ir=a("DistilBertConfig"),Wr=a(" displays all the default attributes used to build a base "),mt=n("a"),Lr=a("DistilBertModel"),Or=a(". All attributes are customizable, creating space for experimentation. For example, you can customize a default model to:"),Ps=f(),Z=n("ul"),ze=n("li"),Nr=a("Try a different activation function with the "),os=n("code"),Rr=a("activation"),Yr=a(" parameter."),Qr=f(),Fe=n("li"),Hr=a("Use a higher dropout ratio for the attention probabilities with the "),ns=n("code"),Ur=a("attention_dropout"),Gr=a(" parameter."),Ms=f(),_(De.$$.fragment),Vs=f(),ee=n("p"),Jr=a("Pretrained model attributes can be modified in the "),dt=n("a"),Xr=a("from_pretrained()"),Kr=a(" function:"),Ss=f(),_(Be.$$.fragment),Is=f(),te=n("p"),Zr=a("Once you are satisfied with your model configuration, you can save it with "),ht=n("a"),eo=a("save_pretrained()"),to=a(". Your configuration file is stored as a JSON file in the specified save directory:"),Ws=f(),_(Ce.$$.fragment),Ls=f(),se=n("p"),so=a("To reuse the configuration file, load it with "),_t=n("a"),ao=a("from_pretrained()"),ro=a(":"),Os=f(),_(Ae.$$.fragment),Ns=f(),_(ae.$$.fragment),Rs=f(),Y=n("h2"),re=n("a"),is=n("span"),_(Pe.$$.fragment),oo=f(),ls=n("span"),no=a("Model"),Ys=f(),T=n("p"),io=a("The next step is to create a "),gt=n("a"),lo=a("model"),po=a(". The model - also loosely referred to as the architecture - defines what each layer is doing and what operations are happening. Attributes like "),ps=n("code"),fo=a("num_hidden_layers"),uo=a(" from the configuration are used to define the architecture. Every model shares the base class "),vt=n("a"),co=a("PreTrainedModel"),mo=a(" and a few common methods like resizing input embeddings and pruning self-attention heads. In addition, all models are also either a "),Me=n("a"),fs=n("code"),ho=a("torch.nn.Module"),_o=a(", "),Ve=n("a"),us=n("code"),go=a("tf.keras.Model"),vo=a(" or "),Se=n("a"),cs=n("code"),bo=a("flax.linen.Module"),yo=a(" subclass. This means models are compatible with each of their respective framework\u2019s usage."),Qs=f(),bt=n("p"),$o=a("Load your custom configuration attributes into the model:"),Hs=f(),_(Ie.$$.fragment),Us=f(),yt=n("p"),wo=a("This creates a model with random values instead of pretrained weights. You won\u2019t be able to use this model for anything useful yet until you train it. Training is a costly and time-consuming process. It is generally better to use a pretrained model to obtain better results faster, while using only a fraction of the resources required for training."),Gs=f(),oe=n("p"),ko=a("Create a pretrained model with "),$t=n("a"),jo=a("from_pretrained()"),qo=a(":"),Js=f(),_(We.$$.fragment),Xs=f(),wt=n("p"),Eo=a("When you load pretrained weights, the default model configuration is automatically loaded if the model is provided by \u{1F917} Transformers. 
However, you can still replace - some or all of - the default model configuration attributes with your own if you\u2019d like:"),Ks=f(),_(Le.$$.fragment),Zs=f(),Q=n("h3"),ne=n("a"),ms=n("span"),_(Oe.$$.fragment),To=f(),ds=n("span"),xo=a("Model heads"),ea=f(),ie=n("p"),zo=a("At this point, you have a base DistilBERT model which outputs the "),hs=n("em"),Fo=a("hidden states"),Do=a(". The hidden states are passed as inputs to a model head to produce the final output. \u{1F917} Transformers provides a different model head for each task as long as a model supports the task (i.e., you can\u2019t use DistilBERT for a sequence-to-sequence task like translation)."),ta=f(),le=n("p"),Bo=a("For example, "),kt=n("a"),Co=a("DistilBertForSequenceClassification"),Ao=a(" is a base DistilBERT model with a sequence classification head. The sequence classification head is a linear layer on top of the pooled outputs."),sa=f(),_(Ne.$$.fragment),aa=f(),pe=n("p"),Po=a("Easily reuse this checkpoint for another task by switching to a different model head. For a question answering task, you would use the "),jt=n("a"),Mo=a("DistilBertForQuestionAnswering"),Vo=a(" model head. The question answering head is similar to the sequence classification head except it is a linear layer on top of the hidden states output."),ra=f(),_(Re.$$.fragment),oa=f(),H=n("h2"),fe=n("a"),_s=n("span"),_(Ye.$$.fragment),So=f(),gs=n("span"),Io=a("Tokenizer"),na=f(),ue=n("p"),Wo=a("The last base class you need before using a model for textual data is a "),qt=n("a"),Lo=a("tokenizer"),Oo=a(" to convert raw text to tensors. There are two types of tokenizers you can use with \u{1F917} Transformers:"),ia=f(),ce=n("ul"),Et=n("li"),Tt=n("a"),No=a("PreTrainedTokenizer"),Ro=a(": a Python implementation of a tokenizer."),Yo=f(),S=n("li"),xt=n("a"),Qo=a("PreTrainedTokenizerFast"),Ho=a(": a tokenizer from our Rust-based "),Qe=n("a"),Uo=a("\u{1F917} Tokenizer"),Go=a(" library. This tokenizer type is significantly faster - especially during batch tokenization - due to it\u2019s Rust implementation. The fast tokenizer also offers additional methods like "),vs=n("em"),Jo=a("offset mapping"),Xo=a(" which maps tokens to their original words or characters."),la=f(),zt=n("p"),Ko=a("Both tokenizers support common methods such as encoding and decoding, adding new tokens, and managing special tokens."),pa=f(),_(me.$$.fragment),fa=f(),de=n("p"),Zo=a("If you trained your own tokenizer, you can create one from your "),bs=n("em"),en=a("vocabulary"),tn=a(" file:"),ua=f(),_(He.$$.fragment),ca=f(),he=n("p"),sn=a("It is important to remember the vocabulary from a custom tokenizer will be different from the vocabulary generated by a pretrained model\u2019s tokenizer. You need to use a pretrained model\u2019s vocabulary if you are using a pretrained model, otherwise the inputs won\u2019t make sense. Create a tokenizer with a pretrained model\u2019s vocabulary with the "),Ft=n("a"),an=a("DistilBertTokenizer"),rn=a(" class:"),ma=f(),_(Ue.$$.fragment),da=f(),_e=n("p"),on=a("Create a fast tokenizer with the "),Dt=n("a"),nn=a("DistilBertTokenizerFast"),ln=a(" class:"),ha=f(),_(Ge.$$.fragment),_a=f(),_(ge.$$.fragment),ga=f(),U=n("h2"),ve=n("a"),ys=n("span"),_(Je.$$.fragment),pn=f(),$s=n("span"),fn=a("Feature Extractor"),va=f(),A=n("p"),un=a("A feature extractor processes audio or image inputs. 
It inherits from the base "),Bt=n("a"),cn=a("FeatureExtractionMixin"),mn=a(" class, and may also inherit from the "),Ct=n("a"),dn=a("ImageFeatureExtractionMixin"),hn=a(" class for processing image features or the "),At=n("a"),_n=a("SequenceFeatureExtractor"),gn=a(" class for processing audio inputs."),ba=f(),I=n("p"),vn=a("Depending on whether you are working on an audio or vision task, create a feature extractor associated with the model you\u2019re using. For example, create a default "),Pt=n("a"),bn=a("ViTFeatureExtractor"),yn=a(" if you are using "),Mt=n("a"),$n=a("ViT"),wn=a(" for image classification:"),ya=f(),_(Xe.$$.fragment),$a=f(),_(be.$$.fragment),wa=f(),ye=n("p"),kn=a("Modify any of the "),Vt=n("a"),jn=a("ViTFeatureExtractor"),qn=a(" parameters to create your custom feature extractor:"),ka=f(),_(Ke.$$.fragment),ja=f(),$e=n("p"),En=a("For audio inputs, you can create a "),St=n("a"),Tn=a("Wav2Vec2FeatureExtractor"),xn=a(" and customize the parameters in a similar way:"),qa=f(),_(Ze.$$.fragment),Ea=f(),G=n("h2"),we=n("a"),ws=n("span"),_(et.$$.fragment),zn=f(),ks=n("span"),Fn=a("Processor"),Ta=f(),ke=n("p"),Dn=a("For models that support multimodal tasks, \u{1F917} Transformers offers a processor class that conveniently wraps a feature extractor and tokenizer into a single object. For example, let\u2019s use the "),It=n("a"),Bn=a("Wav2Vec2Processor"),Cn=a(" for an automatic speech recognition task (ASR). ASR transcribes audio to text, so you will need a feature extractor and a tokenizer."),xa=f(),Wt=n("p"),An=a("Create a feature extractor to handle the audio inputs:"),za=f(),_(tt.$$.fragment),Fa=f(),Lt=n("p"),Pn=a("Create a tokenizer to handle the text inputs:"),Da=f(),_(st.$$.fragment),Ba=f(),je=n("p"),Mn=a("Combine the feature extractor and tokenizer in "),Ot=n("a"),Vn=a("Wav2Vec2Processor"),Sn=a(":"),Ca=f(),_(at.$$.fragment),Aa=f(),Nt=n("p"),In=a("With two basic classes - configuration and model - and an additional preprocessing class (tokenizer, feature extractor, or processor), you can create any of the models supported by \u{1F917} Transformers. Each of these base classes are configurable, allowing you to use the specific attributes you want. You can easily setup a model for training or modify an existing pretrained model to fine-tune."),this.h()},l(e){const o=dl('[data-svelte="svelte-1phssyn"]',document.head);m=i(o,"META",{name:!0,content:!0}),o.forEach(s),j=u(e),d=i(e,"H1",{class:!0});var rt=l(d);k=i(rt,"A",{id:!0,class:!0,href:!0});var js=l(k);q=i(js,"SPAN",{});var qs=l(q);g(h.$$.fragment,qs),qs.forEach(s),js.forEach(s),w=u(rt),E=i(rt,"SPAN",{});var Es=l(E);L=r(Es,"Create a custom model"),Es.forEach(s),rt.forEach(s),J=u(e),z=i(e,"P",{});var qe=l(z);O=r(qe,"An "),x=i(qe,"A",{href:!0});var Ln=l(x);X=i(Ln,"CODE",{});var On=l(X);it=r(On,"AutoClass"),On.forEach(s),Ln.forEach(s),lt=r(qe," automatically infers the model architecture and downloads pretrained configuration and weights. Generally, we recommend using an "),Qt=i(qe,"CODE",{});var Nn=l(Qt);lr=r(Nn,"AutoClass"),Nn.forEach(s),pr=r(qe," to produce checkpoint-agnostic code. But users who want more control over specific model parameters can create a custom \u{1F917} Transformers model from just a few base classes. This could be particularly useful for anyone who is interested in studying, training or experimenting with a \u{1F917} Transformers model. In this guide, dive deeper into creating a custom model without an "),Ht=i(qe,"CODE",{});var Rn=l(Ht);fr=r(Rn,"AutoClass"),Rn.forEach(s),ur=r(qe,". 
Learn how to:"),qe.forEach(s),zs=u(e),D=i(e,"UL",{});var W=l(D);Ut=i(W,"LI",{});var Yn=l(Ut);cr=r(Yn,"Load and customize a model configuration."),Yn.forEach(s),mr=u(W),Gt=i(W,"LI",{});var Qn=l(Gt);dr=r(Qn,"Create a model architecture."),Qn.forEach(s),hr=u(W),Jt=i(W,"LI",{});var Hn=l(Jt);_r=r(Hn,"Create a slow and fast tokenizer for text."),Hn.forEach(s),gr=u(W),Xt=i(W,"LI",{});var Un=l(Xt);vr=r(Un,"Create a feature extractor for audio or image tasks."),Un.forEach(s),br=u(W),Kt=i(W,"LI",{});var Gn=l(Kt);yr=r(Gn,"Create a processor for multimodal tasks."),Gn.forEach(s),W.forEach(s),Fs=u(e),N=i(e,"H2",{class:!0});var Ma=l(N);K=i(Ma,"A",{id:!0,class:!0,href:!0});var Jn=l(K);Zt=i(Jn,"SPAN",{});var Xn=l(Zt);g(Te.$$.fragment,Xn),Xn.forEach(s),Jn.forEach(s),$r=u(Ma),es=i(Ma,"SPAN",{});var Kn=l(es);wr=r(Kn,"Configuration"),Kn.forEach(s),Ma.forEach(s),Ds=u(e),F=i(e,"P",{});var P=l(F);kr=r(P,"A "),pt=i(P,"A",{href:!0});var Zn=l(pt);jr=r(Zn,"configuration"),Zn.forEach(s),qr=r(P," refers to a model\u2019s specific attributes. Each model configuration has different attributes; for instance, all NLP models have the "),ts=i(P,"CODE",{});var ei=l(ts);Er=r(ei,"hidden_size"),ei.forEach(s),Tr=r(P,", "),ss=i(P,"CODE",{});var ti=l(ss);xr=r(ti,"num_attention_heads"),ti.forEach(s),zr=r(P,", "),as=i(P,"CODE",{});var si=l(as);Fr=r(si,"num_hidden_layers"),si.forEach(s),Dr=r(P," and "),rs=i(P,"CODE",{});var ai=l(rs);Br=r(ai,"vocab_size"),ai.forEach(s),Cr=r(P," attributes in common. These attributes specify the number of attention heads or hidden layers to construct a model with."),P.forEach(s),Bs=u(e),V=i(e,"P",{});var Rt=l(V);Ar=r(Rt,"Get a closer look at "),ft=i(Rt,"A",{href:!0});var ri=l(ft);Pr=r(ri,"DistilBERT"),ri.forEach(s),Mr=r(Rt," by accessing "),ut=i(Rt,"A",{href:!0});var oi=l(ut);Vr=r(oi,"DistilBertConfig"),oi.forEach(s),Sr=r(Rt," to inspect it\u2019s attributes:"),Rt.forEach(s),Cs=u(e),g(xe.$$.fragment,e),As=u(e),R=i(e,"P",{});var Ts=l(R);ct=i(Ts,"A",{href:!0});var ni=l(ct);Ir=r(ni,"DistilBertConfig"),ni.forEach(s),Wr=r(Ts," displays all the default attributes used to build a base "),mt=i(Ts,"A",{href:!0});var ii=l(mt);Lr=r(ii,"DistilBertModel"),ii.forEach(s),Or=r(Ts,". All attributes are customizable, creating space for experimentation. For example, you can customize a default model to:"),Ts.forEach(s),Ps=u(e),Z=i(e,"UL",{});var Va=l(Z);ze=i(Va,"LI",{});var Sa=l(ze);Nr=r(Sa,"Try a different activation function with the "),os=i(Sa,"CODE",{});var li=l(os);Rr=r(li,"activation"),li.forEach(s),Yr=r(Sa," parameter."),Sa.forEach(s),Qr=u(Va),Fe=i(Va,"LI",{});var Ia=l(Fe);Hr=r(Ia,"Use a higher dropout ratio for the attention probabilities with the "),ns=i(Ia,"CODE",{});var pi=l(ns);Ur=r(pi,"attention_dropout"),pi.forEach(s),Gr=r(Ia," parameter."),Ia.forEach(s),Va.forEach(s),Ms=u(e),g(De.$$.fragment,e),Vs=u(e),ee=i(e,"P",{});var Wa=l(ee);Jr=r(Wa,"Pretrained model attributes can be modified in the "),dt=i(Wa,"A",{href:!0});var fi=l(dt);Xr=r(fi,"from_pretrained()"),fi.forEach(s),Kr=r(Wa," function:"),Wa.forEach(s),Ss=u(e),g(Be.$$.fragment,e),Is=u(e),te=i(e,"P",{});var La=l(te);Zr=r(La,"Once you are satisfied with your model configuration, you can save it with "),ht=i(La,"A",{href:!0});var ui=l(ht);eo=r(ui,"save_pretrained()"),ui.forEach(s),to=r(La,". 
Your configuration file is stored as a JSON file in the specified save directory:"),La.forEach(s),Ws=u(e),g(Ce.$$.fragment,e),Ls=u(e),se=i(e,"P",{});var Oa=l(se);so=r(Oa,"To reuse the configuration file, load it with "),_t=i(Oa,"A",{href:!0});var ci=l(_t);ao=r(ci,"from_pretrained()"),ci.forEach(s),ro=r(Oa,":"),Oa.forEach(s),Os=u(e),g(Ae.$$.fragment,e),Ns=u(e),g(ae.$$.fragment,e),Rs=u(e),Y=i(e,"H2",{class:!0});var Na=l(Y);re=i(Na,"A",{id:!0,class:!0,href:!0});var mi=l(re);is=i(mi,"SPAN",{});var di=l(is);g(Pe.$$.fragment,di),di.forEach(s),mi.forEach(s),oo=u(Na),ls=i(Na,"SPAN",{});var hi=l(ls);no=r(hi,"Model"),hi.forEach(s),Na.forEach(s),Ys=u(e),T=i(e,"P",{});var B=l(T);io=r(B,"The next step is to create a "),gt=i(B,"A",{href:!0});var _i=l(gt);lo=r(_i,"model"),_i.forEach(s),po=r(B,". The model - also loosely referred to as the architecture - defines what each layer is doing and what operations are happening. Attributes like "),ps=i(B,"CODE",{});var gi=l(ps);fo=r(gi,"num_hidden_layers"),gi.forEach(s),uo=r(B," from the configuration are used to define the architecture. Every model shares the base class "),vt=i(B,"A",{href:!0});var vi=l(vt);co=r(vi,"PreTrainedModel"),vi.forEach(s),mo=r(B," and a few common methods like resizing input embeddings and pruning self-attention heads. In addition, all models are also either a "),Me=i(B,"A",{href:!0,rel:!0});var bi=l(Me);fs=i(bi,"CODE",{});var yi=l(fs);ho=r(yi,"torch.nn.Module"),yi.forEach(s),bi.forEach(s),_o=r(B,", "),Ve=i(B,"A",{href:!0,rel:!0});var $i=l(Ve);us=i($i,"CODE",{});var wi=l(us);go=r(wi,"tf.keras.Model"),wi.forEach(s),$i.forEach(s),vo=r(B," or "),Se=i(B,"A",{href:!0,rel:!0});var ki=l(Se);cs=i(ki,"CODE",{});var ji=l(cs);bo=r(ji,"flax.linen.Module"),ji.forEach(s),ki.forEach(s),yo=r(B," subclass. This means models are compatible with each of their respective framework\u2019s usage."),B.forEach(s),Qs=u(e),bt=i(e,"P",{});var qi=l(bt);$o=r(qi,"Load your custom configuration attributes into the model:"),qi.forEach(s),Hs=u(e),g(Ie.$$.fragment,e),Us=u(e),yt=i(e,"P",{});var Ei=l(yt);wo=r(Ei,"This creates a model with random values instead of pretrained weights. You won\u2019t be able to use this model for anything useful yet until you train it. Training is a costly and time-consuming process. It is generally better to use a pretrained model to obtain better results faster, while using only a fraction of the resources required for training."),Ei.forEach(s),Gs=u(e),oe=i(e,"P",{});var Ra=l(oe);ko=r(Ra,"Create a pretrained model with "),$t=i(Ra,"A",{href:!0});var Ti=l($t);jo=r(Ti,"from_pretrained()"),Ti.forEach(s),qo=r(Ra,":"),Ra.forEach(s),Js=u(e),g(We.$$.fragment,e),Xs=u(e),wt=i(e,"P",{});var xi=l(wt);Eo=r(xi,"When you load pretrained weights, the default model configuration is automatically loaded if the model is provided by \u{1F917} Transformers. However, you can still replace - some or all of - the default model configuration attributes with your own if you\u2019d like:"),xi.forEach(s),Ks=u(e),g(Le.$$.fragment,e),Zs=u(e),Q=i(e,"H3",{class:!0});var Ya=l(Q);ne=i(Ya,"A",{id:!0,class:!0,href:!0});var zi=l(ne);ms=i(zi,"SPAN",{});var Fi=l(ms);g(Oe.$$.fragment,Fi),Fi.forEach(s),zi.forEach(s),To=u(Ya),ds=i(Ya,"SPAN",{});var Di=l(ds);xo=r(Di,"Model heads"),Di.forEach(s),Ya.forEach(s),ea=u(e),ie=i(e,"P",{});var Qa=l(ie);zo=r(Qa,"At this point, you have a base DistilBERT model which outputs the "),hs=i(Qa,"EM",{});var Bi=l(hs);Fo=r(Bi,"hidden states"),Bi.forEach(s),Do=r(Qa,". The hidden states are passed as inputs to a model head to produce the final output. 
\u{1F917} Transformers provides a different model head for each task as long as a model supports the task (i.e., you can\u2019t use DistilBERT for a sequence-to-sequence task like translation)."),Qa.forEach(s),ta=u(e),le=i(e,"P",{});var Ha=l(le);Bo=r(Ha,"For example, "),kt=i(Ha,"A",{href:!0});var Ci=l(kt);Co=r(Ci,"DistilBertForSequenceClassification"),Ci.forEach(s),Ao=r(Ha," is a base DistilBERT model with a sequence classification head. The sequence classification head is a linear layer on top of the pooled outputs."),Ha.forEach(s),sa=u(e),g(Ne.$$.fragment,e),aa=u(e),pe=i(e,"P",{});var Ua=l(pe);Po=r(Ua,"Easily reuse this checkpoint for another task by switching to a different model head. For a question answering task, you would use the "),jt=i(Ua,"A",{href:!0});var Ai=l(jt);Mo=r(Ai,"DistilBertForQuestionAnswering"),Ai.forEach(s),Vo=r(Ua," model head. The question answering head is similar to the sequence classification head except it is a linear layer on top of the hidden states output."),Ua.forEach(s),ra=u(e),g(Re.$$.fragment,e),oa=u(e),H=i(e,"H2",{class:!0});var Ga=l(H);fe=i(Ga,"A",{id:!0,class:!0,href:!0});var Pi=l(fe);_s=i(Pi,"SPAN",{});var Mi=l(_s);g(Ye.$$.fragment,Mi),Mi.forEach(s),Pi.forEach(s),So=u(Ga),gs=i(Ga,"SPAN",{});var Vi=l(gs);Io=r(Vi,"Tokenizer"),Vi.forEach(s),Ga.forEach(s),na=u(e),ue=i(e,"P",{});var Ja=l(ue);Wo=r(Ja,"The last base class you need before using a model for textual data is a "),qt=i(Ja,"A",{href:!0});var Si=l(qt);Lo=r(Si,"tokenizer"),Si.forEach(s),Oo=r(Ja," to convert raw text to tensors. There are two types of tokenizers you can use with \u{1F917} Transformers:"),Ja.forEach(s),ia=u(e),ce=i(e,"UL",{});var Xa=l(ce);Et=i(Xa,"LI",{});var Wn=l(Et);Tt=i(Wn,"A",{href:!0});var Ii=l(Tt);No=r(Ii,"PreTrainedTokenizer"),Ii.forEach(s),Ro=r(Wn,": a Python implementation of a tokenizer."),Wn.forEach(s),Yo=u(Xa),S=i(Xa,"LI",{});var ot=l(S);xt=i(ot,"A",{href:!0});var Wi=l(xt);Qo=r(Wi,"PreTrainedTokenizerFast"),Wi.forEach(s),Ho=r(ot,": a tokenizer from our Rust-based "),Qe=i(ot,"A",{href:!0,rel:!0});var Li=l(Qe);Uo=r(Li,"\u{1F917} Tokenizer"),Li.forEach(s),Go=r(ot," library. This tokenizer type is significantly faster - especially during batch tokenization - due to it\u2019s Rust implementation. The fast tokenizer also offers additional methods like "),vs=i(ot,"EM",{});var Oi=l(vs);Jo=r(Oi,"offset mapping"),Oi.forEach(s),Xo=r(ot," which maps tokens to their original words or characters."),ot.forEach(s),Xa.forEach(s),la=u(e),zt=i(e,"P",{});var Ni=l(zt);Ko=r(Ni,"Both tokenizers support common methods such as encoding and decoding, adding new tokens, and managing special tokens."),Ni.forEach(s),pa=u(e),g(me.$$.fragment,e),fa=u(e),de=i(e,"P",{});var Ka=l(de);Zo=r(Ka,"If you trained your own tokenizer, you can create one from your "),bs=i(Ka,"EM",{});var Ri=l(bs);en=r(Ri,"vocabulary"),Ri.forEach(s),tn=r(Ka," file:"),Ka.forEach(s),ua=u(e),g(He.$$.fragment,e),ca=u(e),he=i(e,"P",{});var Za=l(he);sn=r(Za,"It is important to remember the vocabulary from a custom tokenizer will be different from the vocabulary generated by a pretrained model\u2019s tokenizer. You need to use a pretrained model\u2019s vocabulary if you are using a pretrained model, otherwise the inputs won\u2019t make sense. 
Create a tokenizer with a pretrained model\u2019s vocabulary with the "),Ft=i(Za,"A",{href:!0});var Yi=l(Ft);an=r(Yi,"DistilBertTokenizer"),Yi.forEach(s),rn=r(Za," class:"),Za.forEach(s),ma=u(e),g(Ue.$$.fragment,e),da=u(e),_e=i(e,"P",{});var er=l(_e);on=r(er,"Create a fast tokenizer with the "),Dt=i(er,"A",{href:!0});var Qi=l(Dt);nn=r(Qi,"DistilBertTokenizerFast"),Qi.forEach(s),ln=r(er," class:"),er.forEach(s),ha=u(e),g(Ge.$$.fragment,e),_a=u(e),g(ge.$$.fragment,e),ga=u(e),U=i(e,"H2",{class:!0});var tr=l(U);ve=i(tr,"A",{id:!0,class:!0,href:!0});var Hi=l(ve);ys=i(Hi,"SPAN",{});var Ui=l(ys);g(Je.$$.fragment,Ui),Ui.forEach(s),Hi.forEach(s),pn=u(tr),$s=i(tr,"SPAN",{});var Gi=l($s);fn=r(Gi,"Feature Extractor"),Gi.forEach(s),tr.forEach(s),va=u(e),A=i(e,"P",{});var Ee=l(A);un=r(Ee,"A feature extractor processes audio or image inputs. It inherits from the base "),Bt=i(Ee,"A",{href:!0});var Ji=l(Bt);cn=r(Ji,"FeatureExtractionMixin"),Ji.forEach(s),mn=r(Ee," class, and may also inherit from the "),Ct=i(Ee,"A",{href:!0});var Xi=l(Ct);dn=r(Xi,"ImageFeatureExtractionMixin"),Xi.forEach(s),hn=r(Ee," class for processing image features or the "),At=i(Ee,"A",{href:!0});var Ki=l(At);_n=r(Ki,"SequenceFeatureExtractor"),Ki.forEach(s),gn=r(Ee," class for processing audio inputs."),Ee.forEach(s),ba=u(e),I=i(e,"P",{});var Yt=l(I);vn=r(Yt,"Depending on whether you are working on an audio or vision task, create a feature extractor associated with the model you\u2019re using. For example, create a default "),Pt=i(Yt,"A",{href:!0});var Zi=l(Pt);bn=r(Zi,"ViTFeatureExtractor"),Zi.forEach(s),yn=r(Yt," if you are using "),Mt=i(Yt,"A",{href:!0});var el=l(Mt);$n=r(el,"ViT"),el.forEach(s),wn=r(Yt," for image classification:"),Yt.forEach(s),ya=u(e),g(Xe.$$.fragment,e),$a=u(e),g(be.$$.fragment,e),wa=u(e),ye=i(e,"P",{});var sr=l(ye);kn=r(sr,"Modify any of the "),Vt=i(sr,"A",{href:!0});var tl=l(Vt);jn=r(tl,"ViTFeatureExtractor"),tl.forEach(s),qn=r(sr," parameters to create your custom feature extractor:"),sr.forEach(s),ka=u(e),g(Ke.$$.fragment,e),ja=u(e),$e=i(e,"P",{});var ar=l($e);En=r(ar,"For audio inputs, you can create a "),St=i(ar,"A",{href:!0});var sl=l(St);Tn=r(sl,"Wav2Vec2FeatureExtractor"),sl.forEach(s),xn=r(ar," and customize the parameters in a similar way:"),ar.forEach(s),qa=u(e),g(Ze.$$.fragment,e),Ea=u(e),G=i(e,"H2",{class:!0});var rr=l(G);we=i(rr,"A",{id:!0,class:!0,href:!0});var al=l(we);ws=i(al,"SPAN",{});var rl=l(ws);g(et.$$.fragment,rl),rl.forEach(s),al.forEach(s),zn=u(rr),ks=i(rr,"SPAN",{});var ol=l(ks);Fn=r(ol,"Processor"),ol.forEach(s),rr.forEach(s),Ta=u(e),ke=i(e,"P",{});var or=l(ke);Dn=r(or,"For models that support multimodal tasks, \u{1F917} Transformers offers a processor class that conveniently wraps a feature extractor and tokenizer into a single object. For example, let\u2019s use the "),It=i(or,"A",{href:!0});var nl=l(It);Bn=r(nl,"Wav2Vec2Processor"),nl.forEach(s),Cn=r(or," for an automatic speech recognition task (ASR). 
ASR transcribes audio to text, so you will need a feature extractor and a tokenizer."),or.forEach(s),xa=u(e),Wt=i(e,"P",{});var il=l(Wt);An=r(il,"Create a feature extractor to handle the audio inputs:"),il.forEach(s),za=u(e),g(tt.$$.fragment,e),Fa=u(e),Lt=i(e,"P",{});var ll=l(Lt);Pn=r(ll,"Create a tokenizer to handle the text inputs:"),ll.forEach(s),Da=u(e),g(st.$$.fragment,e),Ba=u(e),je=i(e,"P",{});var nr=l(je);Mn=r(nr,"Combine the feature extractor and tokenizer in "),Ot=i(nr,"A",{href:!0});var pl=l(Ot);Vn=r(pl,"Wav2Vec2Processor"),pl.forEach(s),Sn=r(nr,":"),nr.forEach(s),Ca=u(e),g(at.$$.fragment,e),Aa=u(e),Nt=i(e,"P",{});var fl=l(Nt);In=r(fl,"With two basic classes - configuration and model - and an additional preprocessing class (tokenizer, feature extractor, or processor), you can create any of the models supported by \u{1F917} Transformers. Each of these base classes are configurable, allowing you to use the specific attributes you want. You can easily setup a model for training or modify an existing pretrained model to fine-tune."),fl.forEach(s),this.h()},h(){c(m,"name","hf:doc:metadata"),c(m,"content",JSON.stringify(yl)),c(k,"id","create-a-custom-model"),c(k,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(k,"href","#create-a-custom-model"),c(d,"class","relative group"),c(x,"href","model_doc/auto"),c(K,"id","configuration"),c(K,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(K,"href","#configuration"),c(N,"class","relative group"),c(pt,"href","main_classes/configuration"),c(ft,"href","model_doc/distilbert"),c(ut,"href","/docs/transformers/pr_16143/en/model_doc/distilbert#transformers.DistilBertConfig"),c(ct,"href","/docs/transformers/pr_16143/en/model_doc/distilbert#transformers.DistilBertConfig"),c(mt,"href","/docs/transformers/pr_16143/en/model_doc/distilbert#transformers.DistilBertModel"),c(dt,"href","/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig.from_pretrained"),c(ht,"href","/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig.save_pretrained"),c(_t,"href","/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig.from_pretrained"),c(re,"id","model"),c(re,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(re,"href","#model"),c(Y,"class","relative group"),c(gt,"href","main_classes/models"),c(vt,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel"),c(Me,"href","https://pytorch.org/docs/stable/generated/torch.nn.Module.html"),c(Me,"rel","nofollow"),c(Ve,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(Ve,"rel","nofollow"),c(Se,"href","https://flax.readthedocs.io/en/latest/flax.linen.html#module"),c(Se,"rel","nofollow"),c($t,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained"),c(ne,"id","model-heads"),c(ne,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(ne,"href","#model-heads"),c(Q,"class","relative 
group"),c(kt,"href","/docs/transformers/pr_16143/en/model_doc/distilbert#transformers.DistilBertForSequenceClassification"),c(jt,"href","/docs/transformers/pr_16143/en/model_doc/distilbert#transformers.DistilBertForQuestionAnswering"),c(fe,"id","tokenizer"),c(fe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(fe,"href","#tokenizer"),c(H,"class","relative group"),c(qt,"href","main_classes/tokenizer"),c(Tt,"href","/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer"),c(xt,"href","/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast"),c(Qe,"href","https://huggingface.co/docs/tokenizers/python/latest/"),c(Qe,"rel","nofollow"),c(Ft,"href","/docs/transformers/pr_16143/en/model_doc/distilbert#transformers.DistilBertTokenizer"),c(Dt,"href","/docs/transformers/pr_16143/en/model_doc/distilbert#transformers.DistilBertTokenizerFast"),c(ve,"id","feature-extractor"),c(ve,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(ve,"href","#feature-extractor"),c(U,"class","relative group"),c(Bt,"href","/docs/transformers/pr_16143/en/main_classes/feature_extractor#transformers.FeatureExtractionMixin"),c(Ct,"href","/docs/transformers/pr_16143/en/main_classes/feature_extractor#transformers.ImageFeatureExtractionMixin"),c(At,"href","/docs/transformers/pr_16143/en/main_classes/feature_extractor#transformers.SequenceFeatureExtractor"),c(Pt,"href","/docs/transformers/pr_16143/en/model_doc/vit#transformers.ViTFeatureExtractor"),c(Mt,"href","model_doc/vit"),c(Vt,"href","/docs/transformers/pr_16143/en/model_doc/vit#transformers.ViTFeatureExtractor"),c(St,"href","/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2FeatureExtractor"),c(we,"id","processor"),c(we,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(we,"href","#processor"),c(G,"class","relative 
group"),c(It,"href","/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor"),c(Ot,"href","/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor")},m(e,o){t(document.head,m),p(e,j,o),p(e,d,o),t(d,k),t(k,q),v(h,q,null),t(d,w),t(d,E),t(E,L),p(e,J,o),p(e,z,o),t(z,O),t(z,x),t(x,X),t(X,it),t(z,lt),t(z,Qt),t(Qt,lr),t(z,pr),t(z,Ht),t(Ht,fr),t(z,ur),p(e,zs,o),p(e,D,o),t(D,Ut),t(Ut,cr),t(D,mr),t(D,Gt),t(Gt,dr),t(D,hr),t(D,Jt),t(Jt,_r),t(D,gr),t(D,Xt),t(Xt,vr),t(D,br),t(D,Kt),t(Kt,yr),p(e,Fs,o),p(e,N,o),t(N,K),t(K,Zt),v(Te,Zt,null),t(N,$r),t(N,es),t(es,wr),p(e,Ds,o),p(e,F,o),t(F,kr),t(F,pt),t(pt,jr),t(F,qr),t(F,ts),t(ts,Er),t(F,Tr),t(F,ss),t(ss,xr),t(F,zr),t(F,as),t(as,Fr),t(F,Dr),t(F,rs),t(rs,Br),t(F,Cr),p(e,Bs,o),p(e,V,o),t(V,Ar),t(V,ft),t(ft,Pr),t(V,Mr),t(V,ut),t(ut,Vr),t(V,Sr),p(e,Cs,o),v(xe,e,o),p(e,As,o),p(e,R,o),t(R,ct),t(ct,Ir),t(R,Wr),t(R,mt),t(mt,Lr),t(R,Or),p(e,Ps,o),p(e,Z,o),t(Z,ze),t(ze,Nr),t(ze,os),t(os,Rr),t(ze,Yr),t(Z,Qr),t(Z,Fe),t(Fe,Hr),t(Fe,ns),t(ns,Ur),t(Fe,Gr),p(e,Ms,o),v(De,e,o),p(e,Vs,o),p(e,ee,o),t(ee,Jr),t(ee,dt),t(dt,Xr),t(ee,Kr),p(e,Ss,o),v(Be,e,o),p(e,Is,o),p(e,te,o),t(te,Zr),t(te,ht),t(ht,eo),t(te,to),p(e,Ws,o),v(Ce,e,o),p(e,Ls,o),p(e,se,o),t(se,so),t(se,_t),t(_t,ao),t(se,ro),p(e,Os,o),v(Ae,e,o),p(e,Ns,o),v(ae,e,o),p(e,Rs,o),p(e,Y,o),t(Y,re),t(re,is),v(Pe,is,null),t(Y,oo),t(Y,ls),t(ls,no),p(e,Ys,o),p(e,T,o),t(T,io),t(T,gt),t(gt,lo),t(T,po),t(T,ps),t(ps,fo),t(T,uo),t(T,vt),t(vt,co),t(T,mo),t(T,Me),t(Me,fs),t(fs,ho),t(T,_o),t(T,Ve),t(Ve,us),t(us,go),t(T,vo),t(T,Se),t(Se,cs),t(cs,bo),t(T,yo),p(e,Qs,o),p(e,bt,o),t(bt,$o),p(e,Hs,o),v(Ie,e,o),p(e,Us,o),p(e,yt,o),t(yt,wo),p(e,Gs,o),p(e,oe,o),t(oe,ko),t(oe,$t),t($t,jo),t(oe,qo),p(e,Js,o),v(We,e,o),p(e,Xs,o),p(e,wt,o),t(wt,Eo),p(e,Ks,o),v(Le,e,o),p(e,Zs,o),p(e,Q,o),t(Q,ne),t(ne,ms),v(Oe,ms,null),t(Q,To),t(Q,ds),t(ds,xo),p(e,ea,o),p(e,ie,o),t(ie,zo),t(ie,hs),t(hs,Fo),t(ie,Do),p(e,ta,o),p(e,le,o),t(le,Bo),t(le,kt),t(kt,Co),t(le,Ao),p(e,sa,o),v(Ne,e,o),p(e,aa,o),p(e,pe,o),t(pe,Po),t(pe,jt),t(jt,Mo),t(pe,Vo),p(e,ra,o),v(Re,e,o),p(e,oa,o),p(e,H,o),t(H,fe),t(fe,_s),v(Ye,_s,null),t(H,So),t(H,gs),t(gs,Io),p(e,na,o),p(e,ue,o),t(ue,Wo),t(ue,qt),t(qt,Lo),t(ue,Oo),p(e,ia,o),p(e,ce,o),t(ce,Et),t(Et,Tt),t(Tt,No),t(Et,Ro),t(ce,Yo),t(ce,S),t(S,xt),t(xt,Qo),t(S,Ho),t(S,Qe),t(Qe,Uo),t(S,Go),t(S,vs),t(vs,Jo),t(S,Xo),p(e,la,o),p(e,zt,o),t(zt,Ko),p(e,pa,o),v(me,e,o),p(e,fa,o),p(e,de,o),t(de,Zo),t(de,bs),t(bs,en),t(de,tn),p(e,ua,o),v(He,e,o),p(e,ca,o),p(e,he,o),t(he,sn),t(he,Ft),t(Ft,an),t(he,rn),p(e,ma,o),v(Ue,e,o),p(e,da,o),p(e,_e,o),t(_e,on),t(_e,Dt),t(Dt,nn),t(_e,ln),p(e,ha,o),v(Ge,e,o),p(e,_a,o),v(ge,e,o),p(e,ga,o),p(e,U,o),t(U,ve),t(ve,ys),v(Je,ys,null),t(U,pn),t(U,$s),t($s,fn),p(e,va,o),p(e,A,o),t(A,un),t(A,Bt),t(Bt,cn),t(A,mn),t(A,Ct),t(Ct,dn),t(A,hn),t(A,At),t(At,_n),t(A,gn),p(e,ba,o),p(e,I,o),t(I,vn),t(I,Pt),t(Pt,bn),t(I,yn),t(I,Mt),t(Mt,$n),t(I,wn),p(e,ya,o),v(Xe,e,o),p(e,$a,o),v(be,e,o),p(e,wa,o),p(e,ye,o),t(ye,kn),t(ye,Vt),t(Vt,jn),t(ye,qn),p(e,ka,o),v(Ke,e,o),p(e,ja,o),p(e,$e,o),t($e,En),t($e,St),t(St,Tn),t($e,xn),p(e,qa,o),v(Ze,e,o),p(e,Ea,o),p(e,G,o),t(G,we),t(we,ws),v(et,ws,null),t(G,zn),t(G,ks),t(ks,Fn),p(e,Ta,o),p(e,ke,o),t(ke,Dn),t(ke,It),t(It,Bn),t(ke,Cn),p(e,xa,o),p(e,Wt,o),t(Wt,An),p(e,za,o),v(tt,e,o),p(e,Fa,o),p(e,Lt,o),t(Lt,Pn),p(e,Da,o),v(st,e,o),p(e,Ba,o),p(e,je,o),t(je,Mn),t(je,Ot),t(Ot,Vn),t(je,Sn),p(e,Ca,o),v(at,e,o),p(e,Aa,o),p(e,Nt,o),t(Nt,In),Pa=!0},p(e,[o]){const rt={};o&2&&(rt.$$scope={dirty:o,ctx:e}),ae.$set(rt);const 
js={};o&2&&(js.$$scope={dirty:o,ctx:e}),me.$set(js);const qs={};o&2&&(qs.$$scope={dirty:o,ctx:e}),ge.$set(qs);const Es={};o&2&&(Es.$$scope={dirty:o,ctx:e}),be.$set(Es)},i(e){Pa||(b(h.$$.fragment,e),b(Te.$$.fragment,e),b(xe.$$.fragment,e),b(De.$$.fragment,e),b(Be.$$.fragment,e),b(Ce.$$.fragment,e),b(Ae.$$.fragment,e),b(ae.$$.fragment,e),b(Pe.$$.fragment,e),b(Ie.$$.fragment,e),b(We.$$.fragment,e),b(Le.$$.fragment,e),b(Oe.$$.fragment,e),b(Ne.$$.fragment,e),b(Re.$$.fragment,e),b(Ye.$$.fragment,e),b(me.$$.fragment,e),b(He.$$.fragment,e),b(Ue.$$.fragment,e),b(Ge.$$.fragment,e),b(ge.$$.fragment,e),b(Je.$$.fragment,e),b(Xe.$$.fragment,e),b(be.$$.fragment,e),b(Ke.$$.fragment,e),b(Ze.$$.fragment,e),b(et.$$.fragment,e),b(tt.$$.fragment,e),b(st.$$.fragment,e),b(at.$$.fragment,e),Pa=!0)},o(e){y(h.$$.fragment,e),y(Te.$$.fragment,e),y(xe.$$.fragment,e),y(De.$$.fragment,e),y(Be.$$.fragment,e),y(Ce.$$.fragment,e),y(Ae.$$.fragment,e),y(ae.$$.fragment,e),y(Pe.$$.fragment,e),y(Ie.$$.fragment,e),y(We.$$.fragment,e),y(Le.$$.fragment,e),y(Oe.$$.fragment,e),y(Ne.$$.fragment,e),y(Re.$$.fragment,e),y(Ye.$$.fragment,e),y(me.$$.fragment,e),y(He.$$.fragment,e),y(Ue.$$.fragment,e),y(Ge.$$.fragment,e),y(ge.$$.fragment,e),y(Je.$$.fragment,e),y(Xe.$$.fragment,e),y(be.$$.fragment,e),y(Ke.$$.fragment,e),y(Ze.$$.fragment,e),y(et.$$.fragment,e),y(tt.$$.fragment,e),y(st.$$.fragment,e),y(at.$$.fragment,e),Pa=!1},d(e){s(m),e&&s(j),e&&s(d),$(h),e&&s(J),e&&s(z),e&&s(zs),e&&s(D),e&&s(Fs),e&&s(N),$(Te),e&&s(Ds),e&&s(F),e&&s(Bs),e&&s(V),e&&s(Cs),$(xe,e),e&&s(As),e&&s(R),e&&s(Ps),e&&s(Z),e&&s(Ms),$(De,e),e&&s(Vs),e&&s(ee),e&&s(Ss),$(Be,e),e&&s(Is),e&&s(te),e&&s(Ws),$(Ce,e),e&&s(Ls),e&&s(se),e&&s(Os),$(Ae,e),e&&s(Ns),$(ae,e),e&&s(Rs),e&&s(Y),$(Pe),e&&s(Ys),e&&s(T),e&&s(Qs),e&&s(bt),e&&s(Hs),$(Ie,e),e&&s(Us),e&&s(yt),e&&s(Gs),e&&s(oe),e&&s(Js),$(We,e),e&&s(Xs),e&&s(wt),e&&s(Ks),$(Le,e),e&&s(Zs),e&&s(Q),$(Oe),e&&s(ea),e&&s(ie),e&&s(ta),e&&s(le),e&&s(sa),$(Ne,e),e&&s(aa),e&&s(pe),e&&s(ra),$(Re,e),e&&s(oa),e&&s(H),$(Ye),e&&s(na),e&&s(ue),e&&s(ia),e&&s(ce),e&&s(la),e&&s(zt),e&&s(pa),$(me,e),e&&s(fa),e&&s(de),e&&s(ua),$(He,e),e&&s(ca),e&&s(he),e&&s(ma),$(Ue,e),e&&s(da),e&&s(_e),e&&s(ha),$(Ge,e),e&&s(_a),$(ge,e),e&&s(ga),e&&s(U),$(Je),e&&s(va),e&&s(A),e&&s(ba),e&&s(I),e&&s(ya),$(Xe,e),e&&s($a),$(be,e),e&&s(wa),e&&s(ye),e&&s(ka),$(Ke,e),e&&s(ja),e&&s($e),e&&s(qa),$(Ze,e),e&&s(Ea),e&&s(G),$(et),e&&s(Ta),e&&s(ke),e&&s(xa),e&&s(Wt),e&&s(za),$(tt,e),e&&s(Fa),e&&s(Lt),e&&s(Da),$(st,e),e&&s(Ba),e&&s(je),e&&s(Ca),$(at,e),e&&s(Aa),e&&s(Nt)}}}const yl={local:"create-a-custom-model",sections:[{local:"configuration",title:"Configuration"},{local:"model",sections:[{local:"model-heads",title:"Model heads"}],title:"Model"},{local:"tokenizer",title:"Tokenizer"},{local:"feature-extractor",title:"Feature Extractor"},{local:"processor",title:"Processor"}],title:"Create a custom model"};function $l(M,m,j){let{fw:d}=m;return M.$$set=k=>{"fw"in k&&j(0,d=k.fw)},[d]}class xl extends ul{constructor(m){super();cl(this,m,$l,bl,ml,{fw:0})}}export{xl as default,yl as metadata};
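The compiled page above documents the custom-model workflow: customize a configuration, build or load a model from it, and pair it with a tokenizer. Below is a minimal Python sketch of that workflow, assuming only the transformers class names and parameters referenced in the page's strings (DistilBertConfig, DistilBertModel, DistilBertTokenizerFast, activation, attention_dropout); the checkpoint id used here is illustrative, not taken from the page.

# Sketch of the workflow described in the compiled page above; class names come from
# that page, the checkpoint id "distilbert-base-uncased" is an assumed example.
from transformers import DistilBertConfig, DistilBertModel, DistilBertTokenizerFast

# 1. Customize a configuration instead of using the defaults.
config = DistilBertConfig(activation="relu", attention_dropout=0.4)

# 2. Build a model architecture from the configuration (randomly initialized weights).
model = DistilBertModel(config)

# 3. Or start from pretrained weights and override selected configuration attributes.
pretrained = DistilBertModel.from_pretrained("distilbert-base-uncased", attention_dropout=0.4)

# 4. Pair the model with a fast tokenizer built from a pretrained vocabulary.
tokenizer = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")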
277
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages/installation.mdx-ccd851fb.js
import{S as Ri,i as Mi,s as Ni,e as r,k as p,w as u,t as n,M as Hi,c as l,d as o,m as h,a,x as v,h as s,b as m,N as Di,F as t,g as f,y as _,q as $,o as w,B as g}from"../chunks/vendor-4833417e.js";import{T as bl}from"../chunks/Tip-fffd6df1.js";import{I as $e}from"../chunks/IconCopyLink-4b81c553.js";import{C as P}from"../chunks/CodeBlock-6a3d1b46.js";import"../chunks/CopyButton-dacfbfaf.js";function qi(M){let d,T,c,E,k;return{c(){d=r("p"),T=n("You must keep the "),c=r("code"),E=n("transformers"),k=n(" folder if you want to keep using the library.")},l(y){d=l(y,"P",{});var b=a(d);T=s(b,"You must keep the "),c=l(b,"CODE",{});var A=a(c);E=s(A,"transformers"),A.forEach(o),k=s(b," folder if you want to keep using the library."),b.forEach(o)},m(y,b){f(y,d,b),t(d,T),t(d,c),t(c,E),t(d,k)},d(y){y&&o(d)}}}function Li(M){let d,T,c,E,k,y,b,A,F,S,x;return{c(){d=r("p"),T=n("\u{1F917} Transformers will use the shell environment variables "),c=r("code"),E=n("PYTORCH_TRANSFORMERS_CACHE"),k=n(" or "),y=r("code"),b=n("PYTORCH_PRETRAINED_BERT_CACHE"),A=n(" if you are coming from an earlier iteration of this library and have set those environment variables, unless you specify the shell environment variable "),F=r("code"),S=n("TRANSFORMERS_CACHE"),x=n(".")},l(j){d=l(j,"P",{});var O=a(d);T=s(O,"\u{1F917} Transformers will use the shell environment variables "),c=l(O,"CODE",{});var Y=a(c);E=s(Y,"PYTORCH_TRANSFORMERS_CACHE"),Y.forEach(o),k=s(O," or "),y=l(O,"CODE",{});var Et=a(y);b=s(Et,"PYTORCH_PRETRAINED_BERT_CACHE"),Et.forEach(o),A=s(O," if you are coming from an earlier iteration of this library and have set those environment variables, unless you specify the shell environment variable "),F=l(O,"CODE",{});var we=a(F);S=s(we,"TRANSFORMERS_CACHE"),we.forEach(o),x=s(O,"."),O.forEach(o)},m(j,O){f(j,d,O),t(d,T),t(d,c),t(c,E),t(d,k),t(d,y),t(y,b),t(d,A),t(d,F),t(F,S),t(d,x)},d(j){j&&o(d)}}}function zi(M){let d,T,c,E,k,y,b,A;return{c(){d=r("p"),T=n("Add "),c=r("a"),E=n("\u{1F917} Datasets"),k=n(" to your offline training workflow by setting the environment variable "),y=r("code"),b=n("HF_DATASETS_OFFLINE=1"),A=n("."),this.h()},l(F){d=l(F,"P",{});var S=a(d);T=s(S,"Add "),c=l(S,"A",{href:!0,rel:!0});var x=a(c);E=s(x,"\u{1F917} Datasets"),x.forEach(o),k=s(S," to your offline training workflow by setting the environment variable "),y=l(S,"CODE",{});var j=a(y);b=s(j,"HF_DATASETS_OFFLINE=1"),j.forEach(o),A=s(S,"."),S.forEach(o),this.h()},h(){m(c,"href","https://huggingface.co/docs/datasets/"),m(c,"rel","nofollow")},m(F,S){f(F,d,S),t(d,T),t(d,c),t(c,E),t(d,k),t(d,y),t(y,b),t(d,A)},d(F){F&&o(d)}}}function Yi(M){let d,T,c,E,k;return{c(){d=r("p"),T=n("See the "),c=r("a"),E=n("How to download files from the Hub"),k=n(" section for more details on downloading files stored on the Hub."),this.h()},l(y){d=l(y,"P",{});var b=a(d);T=s(b,"See the "),c=l(b,"A",{href:!0,rel:!0});var A=a(c);E=s(A,"How to download files from the Hub"),A.forEach(o),k=s(b," section for more details on downloading files stored on the Hub."),b.forEach(o),this.h()},h(){m(c,"href","https://huggingface.co/docs/hub/how-to-downstream"),m(c,"rel","nofollow")},m(y,b){f(y,d,b),t(d,T),t(d,c),t(c,E),t(d,k)},d(y){y&&o(d)}}}function Ui(M){let 
d,T,c,E,k,y,b,A,F,S,x,j,O,Y,Et,we,H,bt,ge,Tl,kl,Pl,Tt,ye,Al,Cl,Sl,kt,Ee,Fl,Ol,Go,U,te,ao,be,Il,no,xl,Vo,D,jl,Te,Rl,Ml,ke,Nl,Hl,Xo,Pt,Dl,Jo,Pe,Ko,At,ql,Qo,Ae,Zo,Ct,Ll,er,Ce,tr,St,zl,or,Se,rr,Ft,Yl,lr,Fe,ar,Ot,Ul,nr,Oe,sr,It,Bl,ir,Ie,fr,xt,Wl,pr,xe,hr,B,oe,so,je,Gl,io,Vl,mr,jt,Xl,dr,Re,cr,C,Jl,fo,Kl,Ql,po,Zl,ea,ho,ta,oa,mo,ra,la,co,aa,na,Me,sa,ia,ur,Rt,fa,vr,Ne,_r,W,re,uo,He,pa,vo,ha,$r,Mt,ma,wr,le,De,da,_o,ca,ua,va,$o,_a,gr,Nt,$a,yr,qe,Er,q,wa,wo,ga,ya,go,Ea,ba,br,ae,Tr,Ht,Ta,kr,Le,Pr,ne,ka,yo,Pa,Aa,Ar,G,se,Eo,ze,Ca,bo,Sa,Cr,ie,Fa,To,Oa,Ia,Sr,Ye,Fr,V,fe,ko,Ue,xa,Po,ja,Or,R,Ra,Ao,Ma,Na,Co,Ha,Da,So,qa,La,Ir,L,Be,za,Fo,Ya,Ua,Ba,X,Wa,Oo,Ga,Va,Io,Xa,Ja,Ka,J,Qa,xo,Za,en,jo,tn,on,xr,pe,jr,K,he,Ro,We,rn,Mo,ln,Rr,me,an,No,nn,sn,Mr,de,Nr,Dt,fn,Hr,Ge,Dr,qt,pn,qr,Ve,Lr,Lt,hn,zr,Q,ce,Ho,Xe,mn,Do,dn,Yr,zt,cn,Ur,z,Je,Ke,un,Qe,vn,_n,$n,qo,Yt,is,wn,Ze,Z,gn,Ut,yn,En,Bt,bn,Tn,kn,ee,et,tt,Pn,Wt,An,Cn,Sn,ot,Fn,rt,lt,On,Gt,In,xn,jn,at,Rn,nt,st,Mn,Vt,Nn,Hn,Dn,it,qn,ft,pt,Ln,ht,zn,Yn,Un,mt,dt,ct,Bn,Lo,Wn,Gn,Vn,ut,Xn,vt,N,Jn,_t,zo,Kn,Qn,Yo,Zn,es,$t,ts,os,rs,wt,Br,Xt,ls,Wr,gt,Gr,ue,Vr;return y=new $e({}),be=new $e({}),Pe=new P({props:{code:"python -m venv .env",highlighted:'python -m venv .<span class="hljs-built_in">env</span>'}}),Ae=new P({props:{code:"source .env/bin/activate",highlighted:'<span class="hljs-built_in">source</span> .<span class="hljs-built_in">env</span>/bin/activate'}}),Ce=new P({props:{code:"pip install transformers",highlighted:"pip install transformers"}}),Se=new P({props:{code:"pip install transformers[torch]",highlighted:"pip install transformers[torch]"}}),Fe=new P({props:{code:"pip install transformers[tf-cpu]",highlighted:"pip install transformers[tf-cpu]"}}),Oe=new P({props:{code:"pip install transformers[flax]",highlighted:"pip install transformers[flax]"}}),Ie=new P({props:{code:`python -c "from transformers import pipeline; print(pipeline('sentiment-analysis')('we love you'))"`,highlighted:'python -c <span class="hljs-string">&quot;from transformers import pipeline; print(pipeline(&#x27;sentiment-analysis&#x27;)(&#x27;we love you&#x27;))&quot;</span>'}}),xe=new P({props:{code:"[{'label': 'POSITIVE', 'score': 0.9998704791069031}]",highlighted:'[{<span class="hljs-string">&#x27;label&#x27;</span>: <span class="hljs-string">&#x27;POSITIVE&#x27;</span>, <span class="hljs-string">&#x27;score&#x27;</span>: 0.9998704791069031}]'}}),je=new $e({}),Re=new P({props:{code:"pip install git+https://github.com/huggingface/transformers",highlighted:"pip install git+https://github.com/huggingface/transformers"}}),Ne=new P({props:{code:`python -c "from transformers import pipeline; print(pipeline('sentiment-analysis')('I love you'))"`,highlighted:'python -c <span class="hljs-string">&quot;from transformers import pipeline; print(pipeline(&#x27;sentiment-analysis&#x27;)(&#x27;I love you&#x27;))&quot;</span>'}}),He=new $e({}),qe=new P({props:{code:`git clone https://github.com/huggingface/transformers.git cd transformers pip install -e .`,highlighted:`git <span class="hljs-built_in">clone</span> https://github.com/huggingface/transformers.git <span class="hljs-built_in">cd</span> transformers pip install -e .`}}),ae=new bl({props:{warning:"&lcub;true}",$$slots:{default:[qi]},$$scope:{ctx:M}}}),Le=new P({props:{code:`cd ~/transformers/ git pull`,highlighted:`<span class="hljs-built_in">cd</span> ~/transformers/ git pull`}}),ze=new $e({}),Ye=new P({props:{code:"conda install -c huggingface transformers",highlighted:"conda install -c huggingface transformers"}}),Ue=new $e({}),pe=new 
bl({props:{$$slots:{default:[Li]},$$scope:{ctx:M}}}),We=new $e({}),de=new bl({props:{$$slots:{default:[zi]},$$scope:{ctx:M}}}),Ge=new P({props:{code:"python examples/pytorch/translation/run_translation.py --model_name_or_path t5-small --dataset_name wmt16 --dataset_config ro-en ...",highlighted:"python examples/pytorch/translation/run_translation.py --model_name_or_path t5-small --dataset_name wmt16 --dataset_config ro-en ..."}}),Ve=new P({props:{code:`HF_DATASETS_OFFLINE=1 TRANSFORMERS_OFFLINE=1 \\ python examples/pytorch/translation/run_translation.py --model_name_or_path t5-small --dataset_name wmt16 --dataset_config ro-en ...`,highlighted:`HF_DATASETS_OFFLINE=1 TRANSFORMERS_OFFLINE=1 \\ python examples/pytorch/translation/run_translation.py --model_name_or_path t5-small --dataset_name wmt16 --dataset_config ro-en ...`}}),Xe=new $e({}),ot=new P({props:{code:`from transformers import AutoTokenizer, AutoModelForSeq2SeqLM tokenizer = AutoTokenizer.from_pretrained("bigscience/T0_3B") model = AutoModelForSeq2SeqLM.from_pretrained("bigscience/T0_3B")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer, AutoModelForSeq2SeqLM <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;bigscience/T0_3B&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForSeq2SeqLM.from_pretrained(<span class="hljs-string">&quot;bigscience/T0_3B&quot;</span>)`}}),at=new P({props:{code:`tokenizer.save_pretrained("./your/path/bigscience_t0") model.save_pretrained("./your/path/bigscience_t0")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.save_pretrained(<span class="hljs-string">&quot;./your/path/bigscience_t0&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model.save_pretrained(<span class="hljs-string">&quot;./your/path/bigscience_t0&quot;</span>)`}}),it=new P({props:{code:`tokenizer = AutoTokenizer.from_pretrained("./your/path/bigscience_t0") model = AutoModel.from_pretrained("./your/path/bigscience_t0")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;./your/path/bigscience_t0&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModel.from_pretrained(<span class="hljs-string">&quot;./your/path/bigscience_t0&quot;</span>)`}}),ut=new P({props:{code:"python -m pip install huggingface_hub",highlighted:"python -m pip install huggingface_hub"}}),wt=new P({props:{code:`from huggingface_hub import hf_hub_download hf_hub_download(repo_id="bigscience/T0_3B", filename="config.json", cache_dir="./your/path/bigscience_t0")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> huggingface_hub <span class="hljs-keyword">import</span> hf_hub_download <span class="hljs-meta">&gt;&gt;&gt; </span>hf_hub_download(repo_id=<span class="hljs-string">&quot;bigscience/T0_3B&quot;</span>, filename=<span class="hljs-string">&quot;config.json&quot;</span>, cache_dir=<span class="hljs-string">&quot;./your/path/bigscience_t0&quot;</span>)`}}),gt=new P({props:{code:`from transformers import AutoConfig config = AutoConfig.from_pretrained("./your/path/bigscience_t0/config.json")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig <span class="hljs-meta">&gt;&gt;&gt; 
</span>config = AutoConfig.from_pretrained(<span class="hljs-string">&quot;./your/path/bigscience_t0/config.json&quot;</span>)`}}),ue=new bl({props:{$$slots:{default:[Yi]},$$scope:{ctx:M}}}),{c(){d=r("meta"),T=p(),c=r("h1"),E=r("a"),k=r("span"),u(y.$$.fragment),b=p(),A=r("span"),F=n("Installation"),S=p(),x=r("p"),j=n("Install \u{1F917} Transformers for whichever deep learning library you\u2019re working with, setup your cache, and optionally configure \u{1F917} Transformers to run offline."),O=p(),Y=r("p"),Et=n("\u{1F917} Transformers is tested on Python 3.6+, PyTorch 1.1.0+, TensorFlow 2.0+, and Flax. Follow the installation instructions below for the deep learning library you are using:"),we=p(),H=r("ul"),bt=r("li"),ge=r("a"),Tl=n("PyTorch"),kl=n(" installation instructions."),Pl=p(),Tt=r("li"),ye=r("a"),Al=n("TensorFlow 2.0"),Cl=n(" installation instructions."),Sl=p(),kt=r("li"),Ee=r("a"),Fl=n("Flax"),Ol=n(" installation instructions."),Go=p(),U=r("h2"),te=r("a"),ao=r("span"),u(be.$$.fragment),Il=p(),no=r("span"),xl=n("Install with pip"),Vo=p(),D=r("p"),jl=n("You should install \u{1F917} Transformers in a "),Te=r("a"),Rl=n("virtual environment"),Ml=n(". If you\u2019re unfamiliar with Python virtual environments, take a look at this "),ke=r("a"),Nl=n("guide"),Hl=n(". A virtual environment makes it easier to manage different projects, and avoid compatibility issues between dependencies."),Xo=p(),Pt=r("p"),Dl=n("Start by creating a virtual environment in your project directory:"),Jo=p(),u(Pe.$$.fragment),Ko=p(),At=r("p"),ql=n("Activate the virtual environment:"),Qo=p(),u(Ae.$$.fragment),Zo=p(),Ct=r("p"),Ll=n("Now you\u2019re ready to install \u{1F917} Transformers with the following command:"),er=p(),u(Ce.$$.fragment),tr=p(),St=r("p"),zl=n("For CPU-support only, you can conveniently install \u{1F917} Transformers and a deep learning library in one line. For example, install \u{1F917} Transformers and PyTorch with:"),or=p(),u(Se.$$.fragment),rr=p(),Ft=r("p"),Yl=n("\u{1F917} Transformers and TensorFlow 2.0:"),lr=p(),u(Fe.$$.fragment),ar=p(),Ot=r("p"),Ul=n("\u{1F917} Transformers and Flax:"),nr=p(),u(Oe.$$.fragment),sr=p(),It=r("p"),Bl=n("Finally, check if \u{1F917} Transformers has been properly installed by running the following command. It will download a pretrained model:"),ir=p(),u(Ie.$$.fragment),fr=p(),xt=r("p"),Wl=n("Then print out the label and score:"),pr=p(),u(xe.$$.fragment),hr=p(),B=r("h2"),oe=r("a"),so=r("span"),u(je.$$.fragment),Gl=p(),io=r("span"),Vl=n("Install from source"),mr=p(),jt=r("p"),Xl=n("Install \u{1F917} Transformers from source with the following command:"),dr=p(),u(Re.$$.fragment),cr=p(),C=r("p"),Jl=n("This command installs the bleeding edge "),fo=r("code"),Kl=n("master"),Ql=n(" version rather than the latest "),po=r("code"),Zl=n("stable"),ea=n(" version. The "),ho=r("code"),ta=n("master"),oa=n(" version is useful for staying up-to-date with the latest developments. For instance, if a bug has been fixed since the last official release but a new release hasn\u2019t been rolled out yet. However, this means the "),mo=r("code"),ra=n("master"),la=n(" version may not always be stable. We strive to keep the "),co=r("code"),aa=n("master"),na=n(" version operational, and most issues are usually resolved within a few hours or a day. 
If you run into a problem, please open an "),Me=r("a"),sa=n("Issue"),ia=n(" so we can fix it even sooner!"),ur=p(),Rt=r("p"),fa=n("Check if \u{1F917} Transformers has been properly installed by running the following command:"),vr=p(),u(Ne.$$.fragment),_r=p(),W=r("h2"),re=r("a"),uo=r("span"),u(He.$$.fragment),pa=p(),vo=r("span"),ha=n("Editable install"),$r=p(),Mt=r("p"),ma=n("You will need an editable install if you\u2019d like to:"),wr=p(),le=r("ul"),De=r("li"),da=n("Use the "),_o=r("code"),ca=n("master"),ua=n(" version of the source code."),va=p(),$o=r("li"),_a=n("Contribute to \u{1F917} Transformers and need to test changes in the code."),gr=p(),Nt=r("p"),$a=n("Clone the repository and install \u{1F917} Transformers with the following commands:"),yr=p(),u(qe.$$.fragment),Er=p(),q=r("p"),wa=n("These commands will link the folder you cloned the repository to and your Python library paths. Python will now look inside the folder you cloned to in addition to the normal library paths. For example, if your Python packages are typically installed in "),wo=r("code"),ga=n("~/anaconda3/envs/main/lib/python3.7/site-packages/"),ya=n(", Python will also search the folder you cloned to: "),go=r("code"),Ea=n("~/transformers/"),ba=n("."),br=p(),u(ae.$$.fragment),Tr=p(),Ht=r("p"),Ta=n("Now you can easily update your clone to the latest version of \u{1F917} Transformers with the following command:"),kr=p(),u(Le.$$.fragment),Pr=p(),ne=r("p"),ka=n("Your Python environment will find the "),yo=r("code"),Pa=n("master"),Aa=n(" version of \u{1F917} Transformers on the next run."),Ar=p(),G=r("h2"),se=r("a"),Eo=r("span"),u(ze.$$.fragment),Ca=p(),bo=r("span"),Sa=n("Install with conda"),Cr=p(),ie=r("p"),Fa=n("Install from the conda channel "),To=r("code"),Oa=n("huggingface"),Ia=n(":"),Sr=p(),u(Ye.$$.fragment),Fr=p(),V=r("h2"),fe=r("a"),ko=r("span"),u(Ue.$$.fragment),xa=p(),Po=r("span"),ja=n("Cache setup"),Or=p(),R=r("p"),Ra=n("Pretrained models are downloaded and locally cached at: "),Ao=r("code"),Ma=n("~/.cache/huggingface/transformers/"),Na=n(". This is the default directory given by the shell environment variable "),Co=r("code"),Ha=n("TRANSFORMERS_CACHE"),Da=n(". On Windows, the default directory is given by "),So=r("code"),qa=n("C:\\Users\\username\\.cache\\huggingface\\transformers"),La=n(". You can change the shell environment variables shown below - in order of priority - to specify a different cache directory:"),Ir=p(),L=r("ol"),Be=r("li"),za=n("Shell environment variable (default): "),Fo=r("code"),Ya=n("TRANSFORMERS_CACHE"),Ua=n("."),Ba=p(),X=r("li"),Wa=n("Shell environment variable: "),Oo=r("code"),Ga=n("HF_HOME"),Va=n(" + "),Io=r("code"),Xa=n("transformers/"),Ja=n("."),Ka=p(),J=r("li"),Qa=n("Shell environment variable: "),xo=r("code"),Za=n("XDG_CACHE_HOME"),en=n(" + "),jo=r("code"),tn=n("/huggingface/transformers"),on=n("."),xr=p(),u(pe.$$.fragment),jr=p(),K=r("h2"),he=r("a"),Ro=r("span"),u(We.$$.fragment),rn=p(),Mo=r("span"),ln=n("Offline mode"),Rr=p(),me=r("p"),an=n("\u{1F917} Transformers is able to run in a firewalled or offline environment by only using local files. 
Set the environment variable "),No=r("code"),nn=n("TRANSFORMERS_OFFLINE=1"),sn=n(" to enable this behavior."),Mr=p(),u(de.$$.fragment),Nr=p(),Dt=r("p"),fn=n("For example, you would typically run a program on a normal network firewalled to external instances with the following command:"),Hr=p(),u(Ge.$$.fragment),Dr=p(),qt=r("p"),pn=n("Run this same program in an offline instance with:"),qr=p(),u(Ve.$$.fragment),Lr=p(),Lt=r("p"),hn=n("The script should now run without hanging or waiting to timeout because it knows it should only look for local files."),zr=p(),Q=r("h3"),ce=r("a"),Ho=r("span"),u(Xe.$$.fragment),mn=p(),Do=r("span"),dn=n("Fetch models and tokenizers to use offline"),Yr=p(),zt=r("p"),cn=n("Another option for using \u{1F917} Transformers offline is to download the files ahead of time, and then point to their local path when you need to use them offline. There are three ways to do this:"),Ur=p(),z=r("ul"),Je=r("li"),Ke=r("p"),un=n("Download a file through the user interface on the "),Qe=r("a"),vn=n("Model Hub"),_n=n(" by clicking on the \u2193 icon."),$n=p(),qo=r("p"),Yt=r("img"),wn=p(),Ze=r("li"),Z=r("p"),gn=n("Use the "),Ut=r("a"),yn=n("PreTrainedModel.from_pretrained()"),En=n(" and "),Bt=r("a"),bn=n("PreTrainedModel.save_pretrained()"),Tn=n(" workflow:"),kn=p(),ee=r("ol"),et=r("li"),tt=r("p"),Pn=n("Download your files ahead of time with "),Wt=r("a"),An=n("PreTrainedModel.from_pretrained()"),Cn=n(":"),Sn=p(),u(ot.$$.fragment),Fn=p(),rt=r("li"),lt=r("p"),On=n("Save your files to a specified directory with "),Gt=r("a"),In=n("PreTrainedModel.save_pretrained()"),xn=n(":"),jn=p(),u(at.$$.fragment),Rn=p(),nt=r("li"),st=r("p"),Mn=n("Now when you\u2019re offline, reload your files with "),Vt=r("a"),Nn=n("PreTrainedModel.from_pretrained()"),Hn=n(" from the specified directory:"),Dn=p(),u(it.$$.fragment),qn=p(),ft=r("li"),pt=r("p"),Ln=n("Programmatically download files with the "),ht=r("a"),zn=n("huggingface_hub"),Yn=n(" library:"),Un=p(),mt=r("ol"),dt=r("li"),ct=r("p"),Bn=n("Install the "),Lo=r("code"),Wn=n("huggingface_hub"),Gn=n(" library in your virtual environment:"),Vn=p(),u(ut.$$.fragment),Xn=p(),vt=r("li"),N=r("p"),Jn=n("Use the "),_t=r("a"),zo=r("code"),Kn=n("hf_hub_download"),Qn=n(" function to download a file to a specific path. For example, the following command downloads the "),Yo=r("code"),Zn=n("config.json"),es=n(" file from the "),$t=r("a"),ts=n("T0"),os=n(" model to your desired path:"),rs=p(),u(wt.$$.fragment),Br=p(),Xt=r("p"),ls=n("Once your file is downloaded and locally cached, specify it\u2019s local path to load and use it:"),Wr=p(),u(gt.$$.fragment),Gr=p(),u(ue.$$.fragment),this.h()},l(e){const i=Hi('[data-svelte="svelte-1phssyn"]',document.head);d=l(i,"META",{name:!0,content:!0}),i.forEach(o),T=h(e),c=l(e,"H1",{class:!0});var yt=a(c);E=l(yt,"A",{id:!0,class:!0,href:!0});var Uo=a(E);k=l(Uo,"SPAN",{});var Bo=a(k);v(y.$$.fragment,Bo),Bo.forEach(o),Uo.forEach(o),b=h(yt),A=l(yt,"SPAN",{});var Wo=a(A);F=s(Wo,"Installation"),Wo.forEach(o),yt.forEach(o),S=h(e),x=l(e,"P",{});var fs=a(x);j=s(fs,"Install \u{1F917} Transformers for whichever deep learning library you\u2019re working with, setup your cache, and optionally configure \u{1F917} Transformers to run offline."),fs.forEach(o),O=h(e),Y=l(e,"P",{});var ps=a(Y);Et=s(ps,"\u{1F917} Transformers is tested on Python 3.6+, PyTorch 1.1.0+, TensorFlow 2.0+, and Flax. 
Follow the installation instructions below for the deep learning library you are using:"),ps.forEach(o),we=h(e),H=l(e,"UL",{});var Jt=a(H);bt=l(Jt,"LI",{});var as=a(bt);ge=l(as,"A",{href:!0,rel:!0});var hs=a(ge);Tl=s(hs,"PyTorch"),hs.forEach(o),kl=s(as," installation instructions."),as.forEach(o),Pl=h(Jt),Tt=l(Jt,"LI",{});var ns=a(Tt);ye=l(ns,"A",{href:!0,rel:!0});var ms=a(ye);Al=s(ms,"TensorFlow 2.0"),ms.forEach(o),Cl=s(ns," installation instructions."),ns.forEach(o),Sl=h(Jt),kt=l(Jt,"LI",{});var ss=a(kt);Ee=l(ss,"A",{href:!0,rel:!0});var ds=a(Ee);Fl=s(ds,"Flax"),ds.forEach(o),Ol=s(ss," installation instructions."),ss.forEach(o),Jt.forEach(o),Go=h(e),U=l(e,"H2",{class:!0});var Xr=a(U);te=l(Xr,"A",{id:!0,class:!0,href:!0});var cs=a(te);ao=l(cs,"SPAN",{});var us=a(ao);v(be.$$.fragment,us),us.forEach(o),cs.forEach(o),Il=h(Xr),no=l(Xr,"SPAN",{});var vs=a(no);xl=s(vs,"Install with pip"),vs.forEach(o),Xr.forEach(o),Vo=h(e),D=l(e,"P",{});var Kt=a(D);jl=s(Kt,"You should install \u{1F917} Transformers in a "),Te=l(Kt,"A",{href:!0,rel:!0});var _s=a(Te);Rl=s(_s,"virtual environment"),_s.forEach(o),Ml=s(Kt,". If you\u2019re unfamiliar with Python virtual environments, take a look at this "),ke=l(Kt,"A",{href:!0,rel:!0});var $s=a(ke);Nl=s($s,"guide"),$s.forEach(o),Hl=s(Kt,". A virtual environment makes it easier to manage different projects, and avoid compatibility issues between dependencies."),Kt.forEach(o),Xo=h(e),Pt=l(e,"P",{});var ws=a(Pt);Dl=s(ws,"Start by creating a virtual environment in your project directory:"),ws.forEach(o),Jo=h(e),v(Pe.$$.fragment,e),Ko=h(e),At=l(e,"P",{});var gs=a(At);ql=s(gs,"Activate the virtual environment:"),gs.forEach(o),Qo=h(e),v(Ae.$$.fragment,e),Zo=h(e),Ct=l(e,"P",{});var ys=a(Ct);Ll=s(ys,"Now you\u2019re ready to install \u{1F917} Transformers with the following command:"),ys.forEach(o),er=h(e),v(Ce.$$.fragment,e),tr=h(e),St=l(e,"P",{});var Es=a(St);zl=s(Es,"For CPU-support only, you can conveniently install \u{1F917} Transformers and a deep learning library in one line. For example, install \u{1F917} Transformers and PyTorch with:"),Es.forEach(o),or=h(e),v(Se.$$.fragment,e),rr=h(e),Ft=l(e,"P",{});var bs=a(Ft);Yl=s(bs,"\u{1F917} Transformers and TensorFlow 2.0:"),bs.forEach(o),lr=h(e),v(Fe.$$.fragment,e),ar=h(e),Ot=l(e,"P",{});var Ts=a(Ot);Ul=s(Ts,"\u{1F917} Transformers and Flax:"),Ts.forEach(o),nr=h(e),v(Oe.$$.fragment,e),sr=h(e),It=l(e,"P",{});var ks=a(It);Bl=s(ks,"Finally, check if \u{1F917} Transformers has been properly installed by running the following command. It will download a pretrained model:"),ks.forEach(o),ir=h(e),v(Ie.$$.fragment,e),fr=h(e),xt=l(e,"P",{});var Ps=a(xt);Wl=s(Ps,"Then print out the label and score:"),Ps.forEach(o),pr=h(e),v(xe.$$.fragment,e),hr=h(e),B=l(e,"H2",{class:!0});var Jr=a(B);oe=l(Jr,"A",{id:!0,class:!0,href:!0});var As=a(oe);so=l(As,"SPAN",{});var Cs=a(so);v(je.$$.fragment,Cs),Cs.forEach(o),As.forEach(o),Gl=h(Jr),io=l(Jr,"SPAN",{});var Ss=a(io);Vl=s(Ss,"Install from source"),Ss.forEach(o),Jr.forEach(o),mr=h(e),jt=l(e,"P",{});var Fs=a(jt);Xl=s(Fs,"Install \u{1F917} Transformers from source with the following command:"),Fs.forEach(o),dr=h(e),v(Re.$$.fragment,e),cr=h(e),C=l(e,"P",{});var I=a(C);Jl=s(I,"This command installs the bleeding edge "),fo=l(I,"CODE",{});var Os=a(fo);Kl=s(Os,"master"),Os.forEach(o),Ql=s(I," version rather than the latest "),po=l(I,"CODE",{});var Is=a(po);Zl=s(Is,"stable"),Is.forEach(o),ea=s(I," version. 
The "),ho=l(I,"CODE",{});var xs=a(ho);ta=s(xs,"master"),xs.forEach(o),oa=s(I," version is useful for staying up-to-date with the latest developments. For instance, if a bug has been fixed since the last official release but a new release hasn\u2019t been rolled out yet. However, this means the "),mo=l(I,"CODE",{});var js=a(mo);ra=s(js,"master"),js.forEach(o),la=s(I," version may not always be stable. We strive to keep the "),co=l(I,"CODE",{});var Rs=a(co);aa=s(Rs,"master"),Rs.forEach(o),na=s(I," version operational, and most issues are usually resolved within a few hours or a day. If you run into a problem, please open an "),Me=l(I,"A",{href:!0,rel:!0});var Ms=a(Me);sa=s(Ms,"Issue"),Ms.forEach(o),ia=s(I," so we can fix it even sooner!"),I.forEach(o),ur=h(e),Rt=l(e,"P",{});var Ns=a(Rt);fa=s(Ns,"Check if \u{1F917} Transformers has been properly installed by running the following command:"),Ns.forEach(o),vr=h(e),v(Ne.$$.fragment,e),_r=h(e),W=l(e,"H2",{class:!0});var Kr=a(W);re=l(Kr,"A",{id:!0,class:!0,href:!0});var Hs=a(re);uo=l(Hs,"SPAN",{});var Ds=a(uo);v(He.$$.fragment,Ds),Ds.forEach(o),Hs.forEach(o),pa=h(Kr),vo=l(Kr,"SPAN",{});var qs=a(vo);ha=s(qs,"Editable install"),qs.forEach(o),Kr.forEach(o),$r=h(e),Mt=l(e,"P",{});var Ls=a(Mt);ma=s(Ls,"You will need an editable install if you\u2019d like to:"),Ls.forEach(o),wr=h(e),le=l(e,"UL",{});var Qr=a(le);De=l(Qr,"LI",{});var Zr=a(De);da=s(Zr,"Use the "),_o=l(Zr,"CODE",{});var zs=a(_o);ca=s(zs,"master"),zs.forEach(o),ua=s(Zr," version of the source code."),Zr.forEach(o),va=h(Qr),$o=l(Qr,"LI",{});var Ys=a($o);_a=s(Ys,"Contribute to \u{1F917} Transformers and need to test changes in the code."),Ys.forEach(o),Qr.forEach(o),gr=h(e),Nt=l(e,"P",{});var Us=a(Nt);$a=s(Us,"Clone the repository and install \u{1F917} Transformers with the following commands:"),Us.forEach(o),yr=h(e),v(qe.$$.fragment,e),Er=h(e),q=l(e,"P",{});var Qt=a(q);wa=s(Qt,"These commands will link the folder you cloned the repository to and your Python library paths. Python will now look inside the folder you cloned to in addition to the normal library paths. 
For example, if your Python packages are typically installed in "),wo=l(Qt,"CODE",{});var Bs=a(wo);ga=s(Bs,"~/anaconda3/envs/main/lib/python3.7/site-packages/"),Bs.forEach(o),ya=s(Qt,", Python will also search the folder you cloned to: "),go=l(Qt,"CODE",{});var Ws=a(go);Ea=s(Ws,"~/transformers/"),Ws.forEach(o),ba=s(Qt,"."),Qt.forEach(o),br=h(e),v(ae.$$.fragment,e),Tr=h(e),Ht=l(e,"P",{});var Gs=a(Ht);Ta=s(Gs,"Now you can easily update your clone to the latest version of \u{1F917} Transformers with the following command:"),Gs.forEach(o),kr=h(e),v(Le.$$.fragment,e),Pr=h(e),ne=l(e,"P",{});var el=a(ne);ka=s(el,"Your Python environment will find the "),yo=l(el,"CODE",{});var Vs=a(yo);Pa=s(Vs,"master"),Vs.forEach(o),Aa=s(el," version of \u{1F917} Transformers on the next run."),el.forEach(o),Ar=h(e),G=l(e,"H2",{class:!0});var tl=a(G);se=l(tl,"A",{id:!0,class:!0,href:!0});var Xs=a(se);Eo=l(Xs,"SPAN",{});var Js=a(Eo);v(ze.$$.fragment,Js),Js.forEach(o),Xs.forEach(o),Ca=h(tl),bo=l(tl,"SPAN",{});var Ks=a(bo);Sa=s(Ks,"Install with conda"),Ks.forEach(o),tl.forEach(o),Cr=h(e),ie=l(e,"P",{});var ol=a(ie);Fa=s(ol,"Install from the conda channel "),To=l(ol,"CODE",{});var Qs=a(To);Oa=s(Qs,"huggingface"),Qs.forEach(o),Ia=s(ol,":"),ol.forEach(o),Sr=h(e),v(Ye.$$.fragment,e),Fr=h(e),V=l(e,"H2",{class:!0});var rl=a(V);fe=l(rl,"A",{id:!0,class:!0,href:!0});var Zs=a(fe);ko=l(Zs,"SPAN",{});var ei=a(ko);v(Ue.$$.fragment,ei),ei.forEach(o),Zs.forEach(o),xa=h(rl),Po=l(rl,"SPAN",{});var ti=a(Po);ja=s(ti,"Cache setup"),ti.forEach(o),rl.forEach(o),Or=h(e),R=l(e,"P",{});var ve=a(R);Ra=s(ve,"Pretrained models are downloaded and locally cached at: "),Ao=l(ve,"CODE",{});var oi=a(Ao);Ma=s(oi,"~/.cache/huggingface/transformers/"),oi.forEach(o),Na=s(ve,". This is the default directory given by the shell environment variable "),Co=l(ve,"CODE",{});var ri=a(Co);Ha=s(ri,"TRANSFORMERS_CACHE"),ri.forEach(o),Da=s(ve,". On Windows, the default directory is given by "),So=l(ve,"CODE",{});var li=a(So);qa=s(li,"C:\\Users\\username\\.cache\\huggingface\\transformers"),li.forEach(o),La=s(ve,". You can change the shell environment variables shown below - in order of priority - to specify a different cache directory:"),ve.forEach(o),Ir=h(e),L=l(e,"OL",{});var Zt=a(L);Be=l(Zt,"LI",{});var ll=a(Be);za=s(ll,"Shell environment variable (default): "),Fo=l(ll,"CODE",{});var ai=a(Fo);Ya=s(ai,"TRANSFORMERS_CACHE"),ai.forEach(o),Ua=s(ll,"."),ll.forEach(o),Ba=h(Zt),X=l(Zt,"LI",{});var eo=a(X);Wa=s(eo,"Shell environment variable: "),Oo=l(eo,"CODE",{});var ni=a(Oo);Ga=s(ni,"HF_HOME"),ni.forEach(o),Va=s(eo," + "),Io=l(eo,"CODE",{});var si=a(Io);Xa=s(si,"transformers/"),si.forEach(o),Ja=s(eo,"."),eo.forEach(o),Ka=h(Zt),J=l(Zt,"LI",{});var to=a(J);Qa=s(to,"Shell environment variable: "),xo=l(to,"CODE",{});var ii=a(xo);Za=s(ii,"XDG_CACHE_HOME"),ii.forEach(o),en=s(to," + "),jo=l(to,"CODE",{});var fi=a(jo);tn=s(fi,"/huggingface/transformers"),fi.forEach(o),on=s(to,"."),to.forEach(o),Zt.forEach(o),xr=h(e),v(pe.$$.fragment,e),jr=h(e),K=l(e,"H2",{class:!0});var al=a(K);he=l(al,"A",{id:!0,class:!0,href:!0});var pi=a(he);Ro=l(pi,"SPAN",{});var hi=a(Ro);v(We.$$.fragment,hi),hi.forEach(o),pi.forEach(o),rn=h(al),Mo=l(al,"SPAN",{});var mi=a(Mo);ln=s(mi,"Offline mode"),mi.forEach(o),al.forEach(o),Rr=h(e),me=l(e,"P",{});var nl=a(me);an=s(nl,"\u{1F917} Transformers is able to run in a firewalled or offline environment by only using local files. 
Set the environment variable "),No=l(nl,"CODE",{});var di=a(No);nn=s(di,"TRANSFORMERS_OFFLINE=1"),di.forEach(o),sn=s(nl," to enable this behavior."),nl.forEach(o),Mr=h(e),v(de.$$.fragment,e),Nr=h(e),Dt=l(e,"P",{});var ci=a(Dt);fn=s(ci,"For example, you would typically run a program on a normal network firewalled to external instances with the following command:"),ci.forEach(o),Hr=h(e),v(Ge.$$.fragment,e),Dr=h(e),qt=l(e,"P",{});var ui=a(qt);pn=s(ui,"Run this same program in an offline instance with:"),ui.forEach(o),qr=h(e),v(Ve.$$.fragment,e),Lr=h(e),Lt=l(e,"P",{});var vi=a(Lt);hn=s(vi,"The script should now run without hanging or waiting to timeout because it knows it should only look for local files."),vi.forEach(o),zr=h(e),Q=l(e,"H3",{class:!0});var sl=a(Q);ce=l(sl,"A",{id:!0,class:!0,href:!0});var _i=a(ce);Ho=l(_i,"SPAN",{});var $i=a(Ho);v(Xe.$$.fragment,$i),$i.forEach(o),_i.forEach(o),mn=h(sl),Do=l(sl,"SPAN",{});var wi=a(Do);dn=s(wi,"Fetch models and tokenizers to use offline"),wi.forEach(o),sl.forEach(o),Yr=h(e),zt=l(e,"P",{});var gi=a(zt);cn=s(gi,"Another option for using \u{1F917} Transformers offline is to download the files ahead of time, and then point to their local path when you need to use them offline. There are three ways to do this:"),gi.forEach(o),Ur=h(e),z=l(e,"UL",{});var oo=a(z);Je=l(oo,"LI",{});var il=a(Je);Ke=l(il,"P",{});var fl=a(Ke);un=s(fl,"Download a file through the user interface on the "),Qe=l(fl,"A",{href:!0,rel:!0});var yi=a(Qe);vn=s(yi,"Model Hub"),yi.forEach(o),_n=s(fl," by clicking on the \u2193 icon."),fl.forEach(o),$n=h(il),qo=l(il,"P",{});var Ei=a(qo);Yt=l(Ei,"IMG",{src:!0,alt:!0}),Ei.forEach(o),il.forEach(o),wn=h(oo),Ze=l(oo,"LI",{});var pl=a(Ze);Z=l(pl,"P",{});var ro=a(Z);gn=s(ro,"Use the "),Ut=l(ro,"A",{href:!0});var bi=a(Ut);yn=s(bi,"PreTrainedModel.from_pretrained()"),bi.forEach(o),En=s(ro," and "),Bt=l(ro,"A",{href:!0});var Ti=a(Bt);bn=s(Ti,"PreTrainedModel.save_pretrained()"),Ti.forEach(o),Tn=s(ro," workflow:"),ro.forEach(o),kn=h(pl),ee=l(pl,"OL",{});var lo=a(ee);et=l(lo,"LI",{});var hl=a(et);tt=l(hl,"P",{});var ml=a(tt);Pn=s(ml,"Download your files ahead of time with "),Wt=l(ml,"A",{href:!0});var ki=a(Wt);An=s(ki,"PreTrainedModel.from_pretrained()"),ki.forEach(o),Cn=s(ml,":"),ml.forEach(o),Sn=h(hl),v(ot.$$.fragment,hl),hl.forEach(o),Fn=h(lo),rt=l(lo,"LI",{});var dl=a(rt);lt=l(dl,"P",{});var cl=a(lt);On=s(cl,"Save your files to a specified directory with "),Gt=l(cl,"A",{href:!0});var Pi=a(Gt);In=s(Pi,"PreTrainedModel.save_pretrained()"),Pi.forEach(o),xn=s(cl,":"),cl.forEach(o),jn=h(dl),v(at.$$.fragment,dl),dl.forEach(o),Rn=h(lo),nt=l(lo,"LI",{});var ul=a(nt);st=l(ul,"P",{});var vl=a(st);Mn=s(vl,"Now when you\u2019re offline, reload your files with "),Vt=l(vl,"A",{href:!0});var Ai=a(Vt);Nn=s(Ai,"PreTrainedModel.from_pretrained()"),Ai.forEach(o),Hn=s(vl," from the specified directory:"),vl.forEach(o),Dn=h(ul),v(it.$$.fragment,ul),ul.forEach(o),lo.forEach(o),pl.forEach(o),qn=h(oo),ft=l(oo,"LI",{});var _l=a(ft);pt=l(_l,"P",{});var $l=a(pt);Ln=s($l,"Programmatically download files with the "),ht=l($l,"A",{href:!0,rel:!0});var Ci=a(ht);zn=s(Ci,"huggingface_hub"),Ci.forEach(o),Yn=s($l," library:"),$l.forEach(o),Un=h(_l),mt=l(_l,"OL",{});var wl=a(mt);dt=l(wl,"LI",{});var gl=a(dt);ct=l(gl,"P",{});var yl=a(ct);Bn=s(yl,"Install the "),Lo=l(yl,"CODE",{});var Si=a(Lo);Wn=s(Si,"huggingface_hub"),Si.forEach(o),Gn=s(yl," library in your virtual environment:"),yl.forEach(o),Vn=h(gl),v(ut.$$.fragment,gl),gl.forEach(o),Xn=h(wl),vt=l(wl,"LI",{});var 
El=a(vt);N=l(El,"P",{});var _e=a(N);Jn=s(_e,"Use the "),_t=l(_e,"A",{href:!0,rel:!0});var Fi=a(_t);zo=l(Fi,"CODE",{});var Oi=a(zo);Kn=s(Oi,"hf_hub_download"),Oi.forEach(o),Fi.forEach(o),Qn=s(_e," function to download a file to a specific path. For example, the following command downloads the "),Yo=l(_e,"CODE",{});var Ii=a(Yo);Zn=s(Ii,"config.json"),Ii.forEach(o),es=s(_e," file from the "),$t=l(_e,"A",{href:!0,rel:!0});var xi=a($t);ts=s(xi,"T0"),xi.forEach(o),os=s(_e," model to your desired path:"),_e.forEach(o),rs=h(El),v(wt.$$.fragment,El),El.forEach(o),wl.forEach(o),_l.forEach(o),oo.forEach(o),Br=h(e),Xt=l(e,"P",{});var ji=a(Xt);ls=s(ji,"Once your file is downloaded and locally cached, specify it\u2019s local path to load and use it:"),ji.forEach(o),Wr=h(e),v(gt.$$.fragment,e),Gr=h(e),v(ue.$$.fragment,e),this.h()},h(){m(d,"name","hf:doc:metadata"),m(d,"content",JSON.stringify(Bi)),m(E,"id","installation"),m(E,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(E,"href","#installation"),m(c,"class","relative group"),m(ge,"href","https://pytorch.org/get-started/locally/"),m(ge,"rel","nofollow"),m(ye,"href","https://www.tensorflow.org/install/pip"),m(ye,"rel","nofollow"),m(Ee,"href","https://flax.readthedocs.io/en/latest/"),m(Ee,"rel","nofollow"),m(te,"id","install-with-pip"),m(te,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(te,"href","#install-with-pip"),m(U,"class","relative group"),m(Te,"href","https://docs.python.org/3/library/venv.html"),m(Te,"rel","nofollow"),m(ke,"href","https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/"),m(ke,"rel","nofollow"),m(oe,"id","install-from-source"),m(oe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(oe,"href","#install-from-source"),m(B,"class","relative group"),m(Me,"href","https://github.com/huggingface/transformers/issues"),m(Me,"rel","nofollow"),m(re,"id","editable-install"),m(re,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(re,"href","#editable-install"),m(W,"class","relative group"),m(se,"id","install-with-conda"),m(se,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(se,"href","#install-with-conda"),m(G,"class","relative group"),m(fe,"id","cache-setup"),m(fe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(fe,"href","#cache-setup"),m(V,"class","relative group"),m(he,"id","offline-mode"),m(he,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(he,"href","#offline-mode"),m(K,"class","relative group"),m(ce,"id","fetch-models-and-tokenizers-to-use-offline"),m(ce,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full"),m(ce,"href","#fetch-models-and-tokenizers-to-use-offline"),m(Q,"class","relative group"),m(Qe,"href","https://huggingface.co/models"),m(Qe,"rel","nofollow"),Di(Yt.src,is="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/download-icon.png")||m(Yt,"src",is),m(Yt,"alt","download-icon"),m(Ut,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained"),m(Bt,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.save_pretrained"),m(Wt,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained"),m(Gt,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.save_pretrained"),m(Vt,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained"),m(ht,"href","https://github.com/huggingface/huggingface_hub/tree/main/src/huggingface_hub"),m(ht,"rel","nofollow"),m(_t,"href","https://huggingface.co/docs/hub/adding-a-library#download-files-from-the-hub"),m(_t,"rel","nofollow"),m($t,"href","https://huggingface.co/bigscience/T0_3B"),m($t,"rel","nofollow")},m(e,i){t(document.head,d),f(e,T,i),f(e,c,i),t(c,E),t(E,k),_(y,k,null),t(c,b),t(c,A),t(A,F),f(e,S,i),f(e,x,i),t(x,j),f(e,O,i),f(e,Y,i),t(Y,Et),f(e,we,i),f(e,H,i),t(H,bt),t(bt,ge),t(ge,Tl),t(bt,kl),t(H,Pl),t(H,Tt),t(Tt,ye),t(ye,Al),t(Tt,Cl),t(H,Sl),t(H,kt),t(kt,Ee),t(Ee,Fl),t(kt,Ol),f(e,Go,i),f(e,U,i),t(U,te),t(te,ao),_(be,ao,null),t(U,Il),t(U,no),t(no,xl),f(e,Vo,i),f(e,D,i),t(D,jl),t(D,Te),t(Te,Rl),t(D,Ml),t(D,ke),t(ke,Nl),t(D,Hl),f(e,Xo,i),f(e,Pt,i),t(Pt,Dl),f(e,Jo,i),_(Pe,e,i),f(e,Ko,i),f(e,At,i),t(At,ql),f(e,Qo,i),_(Ae,e,i),f(e,Zo,i),f(e,Ct,i),t(Ct,Ll),f(e,er,i),_(Ce,e,i),f(e,tr,i),f(e,St,i),t(St,zl),f(e,or,i),_(Se,e,i),f(e,rr,i),f(e,Ft,i),t(Ft,Yl),f(e,lr,i),_(Fe,e,i),f(e,ar,i),f(e,Ot,i),t(Ot,Ul),f(e,nr,i),_(Oe,e,i),f(e,sr,i),f(e,It,i),t(It,Bl),f(e,ir,i),_(Ie,e,i),f(e,fr,i),f(e,xt,i),t(xt,Wl),f(e,pr,i),_(xe,e,i),f(e,hr,i),f(e,B,i),t(B,oe),t(oe,so),_(je,so,null),t(B,Gl),t(B,io),t(io,Vl),f(e,mr,i),f(e,jt,i),t(jt,Xl),f(e,dr,i),_(Re,e,i),f(e,cr,i),f(e,C,i),t(C,Jl),t(C,fo),t(fo,Kl),t(C,Ql),t(C,po),t(po,Zl),t(C,ea),t(C,ho),t(ho,ta),t(C,oa),t(C,mo),t(mo,ra),t(C,la),t(C,co),t(co,aa),t(C,na),t(C,Me),t(Me,sa),t(C,ia),f(e,ur,i),f(e,Rt,i),t(Rt,fa),f(e,vr,i),_(Ne,e,i),f(e,_r,i),f(e,W,i),t(W,re),t(re,uo),_(He,uo,null),t(W,pa),t(W,vo),t(vo,ha),f(e,$r,i),f(e,Mt,i),t(Mt,ma),f(e,wr,i),f(e,le,i),t(le,De),t(De,da),t(De,_o),t(_o,ca),t(De,ua),t(le,va),t(le,$o),t($o,_a),f(e,gr,i),f(e,Nt,i),t(Nt,$a),f(e,yr,i),_(qe,e,i),f(e,Er,i),f(e,q,i),t(q,wa),t(q,wo),t(wo,ga),t(q,ya),t(q,go),t(go,Ea),t(q,ba),f(e,br,i),_(ae,e,i),f(e,Tr,i),f(e,Ht,i),t(Ht,Ta),f(e,kr,i),_(Le,e,i),f(e,Pr,i),f(e,ne,i),t(ne,ka),t(ne,yo),t(yo,Pa),t(ne,Aa),f(e,Ar,i),f(e,G,i),t(G,se),t(se,Eo),_(ze,Eo,null),t(G,Ca),t(G,bo),t(bo,Sa),f(e,Cr,i),f(e,ie,i),t(ie,Fa),t(ie,To),t(To,Oa),t(ie,Ia),f(e,Sr,i),_(Ye,e,i),f(e,Fr,i),f(e,V,i),t(V,fe),t(fe,ko),_(Ue,ko,null),t(V,xa),t(V,Po),t(Po,ja),f(e,Or,i),f(e,R,i),t(R,Ra),t(R,Ao),t(Ao,Ma),t(R,Na),t(R,Co),t(Co,Ha),t(R,Da),t(R,So),t(So,qa),t(R,La),f(e,Ir,i),f(e,L,i),t(L,Be),t(Be,za),t(Be,Fo),t(Fo,Ya),t(Be,Ua),t(L,Ba),t(L,X),t(X,Wa),t(X,Oo),t(Oo,Ga),t(X,Va),t(X,Io),t(Io,Xa),t(X,Ja),t(L,Ka),t(L,J),t(J,Qa),t(J,xo),t(xo,Za),t(J,en),t(J,jo),t(jo,tn),t(J,on),f(e,xr,i),_(pe,e,i),f(e,jr,i),f(e,K,i),t(K,he),t(he,Ro),_(We,Ro,null),t(K,rn),t(K,Mo),t(Mo,ln),f(e,Rr,i),f(e,me,i),t(me,an),t(me,No),t(No,nn),t(me,sn),f(e,Mr,i),_(de,e,i),f(e,Nr,i),f(e,Dt,i),t(Dt,fn)
,f(e,Hr,i),_(Ge,e,i),f(e,Dr,i),f(e,qt,i),t(qt,pn),f(e,qr,i),_(Ve,e,i),f(e,Lr,i),f(e,Lt,i),t(Lt,hn),f(e,zr,i),f(e,Q,i),t(Q,ce),t(ce,Ho),_(Xe,Ho,null),t(Q,mn),t(Q,Do),t(Do,dn),f(e,Yr,i),f(e,zt,i),t(zt,cn),f(e,Ur,i),f(e,z,i),t(z,Je),t(Je,Ke),t(Ke,un),t(Ke,Qe),t(Qe,vn),t(Ke,_n),t(Je,$n),t(Je,qo),t(qo,Yt),t(z,wn),t(z,Ze),t(Ze,Z),t(Z,gn),t(Z,Ut),t(Ut,yn),t(Z,En),t(Z,Bt),t(Bt,bn),t(Z,Tn),t(Ze,kn),t(Ze,ee),t(ee,et),t(et,tt),t(tt,Pn),t(tt,Wt),t(Wt,An),t(tt,Cn),t(et,Sn),_(ot,et,null),t(ee,Fn),t(ee,rt),t(rt,lt),t(lt,On),t(lt,Gt),t(Gt,In),t(lt,xn),t(rt,jn),_(at,rt,null),t(ee,Rn),t(ee,nt),t(nt,st),t(st,Mn),t(st,Vt),t(Vt,Nn),t(st,Hn),t(nt,Dn),_(it,nt,null),t(z,qn),t(z,ft),t(ft,pt),t(pt,Ln),t(pt,ht),t(ht,zn),t(pt,Yn),t(ft,Un),t(ft,mt),t(mt,dt),t(dt,ct),t(ct,Bn),t(ct,Lo),t(Lo,Wn),t(ct,Gn),t(dt,Vn),_(ut,dt,null),t(mt,Xn),t(mt,vt),t(vt,N),t(N,Jn),t(N,_t),t(_t,zo),t(zo,Kn),t(N,Qn),t(N,Yo),t(Yo,Zn),t(N,es),t(N,$t),t($t,ts),t(N,os),t(vt,rs),_(wt,vt,null),f(e,Br,i),f(e,Xt,i),t(Xt,ls),f(e,Wr,i),_(gt,e,i),f(e,Gr,i),_(ue,e,i),Vr=!0},p(e,[i]){const yt={};i&2&&(yt.$$scope={dirty:i,ctx:e}),ae.$set(yt);const Uo={};i&2&&(Uo.$$scope={dirty:i,ctx:e}),pe.$set(Uo);const Bo={};i&2&&(Bo.$$scope={dirty:i,ctx:e}),de.$set(Bo);const Wo={};i&2&&(Wo.$$scope={dirty:i,ctx:e}),ue.$set(Wo)},i(e){Vr||($(y.$$.fragment,e),$(be.$$.fragment,e),$(Pe.$$.fragment,e),$(Ae.$$.fragment,e),$(Ce.$$.fragment,e),$(Se.$$.fragment,e),$(Fe.$$.fragment,e),$(Oe.$$.fragment,e),$(Ie.$$.fragment,e),$(xe.$$.fragment,e),$(je.$$.fragment,e),$(Re.$$.fragment,e),$(Ne.$$.fragment,e),$(He.$$.fragment,e),$(qe.$$.fragment,e),$(ae.$$.fragment,e),$(Le.$$.fragment,e),$(ze.$$.fragment,e),$(Ye.$$.fragment,e),$(Ue.$$.fragment,e),$(pe.$$.fragment,e),$(We.$$.fragment,e),$(de.$$.fragment,e),$(Ge.$$.fragment,e),$(Ve.$$.fragment,e),$(Xe.$$.fragment,e),$(ot.$$.fragment,e),$(at.$$.fragment,e),$(it.$$.fragment,e),$(ut.$$.fragment,e),$(wt.$$.fragment,e),$(gt.$$.fragment,e),$(ue.$$.fragment,e),Vr=!0)},o(e){w(y.$$.fragment,e),w(be.$$.fragment,e),w(Pe.$$.fragment,e),w(Ae.$$.fragment,e),w(Ce.$$.fragment,e),w(Se.$$.fragment,e),w(Fe.$$.fragment,e),w(Oe.$$.fragment,e),w(Ie.$$.fragment,e),w(xe.$$.fragment,e),w(je.$$.fragment,e),w(Re.$$.fragment,e),w(Ne.$$.fragment,e),w(He.$$.fragment,e),w(qe.$$.fragment,e),w(ae.$$.fragment,e),w(Le.$$.fragment,e),w(ze.$$.fragment,e),w(Ye.$$.fragment,e),w(Ue.$$.fragment,e),w(pe.$$.fragment,e),w(We.$$.fragment,e),w(de.$$.fragment,e),w(Ge.$$.fragment,e),w(Ve.$$.fragment,e),w(Xe.$$.fragment,e),w(ot.$$.fragment,e),w(at.$$.fragment,e),w(it.$$.fragment,e),w(ut.$$.fragment,e),w(wt.$$.fragment,e),w(gt.$$.fragment,e),w(ue.$$.fragment,e),Vr=!1},d(e){o(d),e&&o(T),e&&o(c),g(y),e&&o(S),e&&o(x),e&&o(O),e&&o(Y),e&&o(we),e&&o(H),e&&o(Go),e&&o(U),g(be),e&&o(Vo),e&&o(D),e&&o(Xo),e&&o(Pt),e&&o(Jo),g(Pe,e),e&&o(Ko),e&&o(At),e&&o(Qo),g(Ae,e),e&&o(Zo),e&&o(Ct),e&&o(er),g(Ce,e),e&&o(tr),e&&o(St),e&&o(or),g(Se,e),e&&o(rr),e&&o(Ft),e&&o(lr),g(Fe,e),e&&o(ar),e&&o(Ot),e&&o(nr),g(Oe,e),e&&o(sr),e&&o(It),e&&o(ir),g(Ie,e),e&&o(fr),e&&o(xt),e&&o(pr),g(xe,e),e&&o(hr),e&&o(B),g(je),e&&o(mr),e&&o(jt),e&&o(dr),g(Re,e),e&&o(cr),e&&o(C),e&&o(ur),e&&o(Rt),e&&o(vr),g(Ne,e),e&&o(_r),e&&o(W),g(He),e&&o($r),e&&o(Mt),e&&o(wr),e&&o(le),e&&o(gr),e&&o(Nt),e&&o(yr),g(qe,e),e&&o(Er),e&&o(q),e&&o(br),g(ae,e),e&&o(Tr),e&&o(Ht),e&&o(kr),g(Le,e),e&&o(Pr),e&&o(ne),e&&o(Ar),e&&o(G),g(ze),e&&o(Cr),e&&o(ie),e&&o(Sr),g(Ye,e),e&&o(Fr),e&&o(V),g(Ue),e&&o(Or),e&&o(R),e&&o(Ir),e&&o(L),e&&o(xr),g(pe,e),e&&o(jr),e&&o(K),g(We),e&&o(Rr),e&&o(me),e&&o(Mr),g(de,e),e&&o(Nr),e&&o(Dt),e&&o(Hr),g(Ge,e),e&&o(Dr),e&&o(qt),e&&o(qr
),g(Ve,e),e&&o(Lr),e&&o(Lt),e&&o(zr),e&&o(Q),g(Xe),e&&o(Yr),e&&o(zt),e&&o(Ur),e&&o(z),g(ot),g(at),g(it),g(ut),g(wt),e&&o(Br),e&&o(Xt),e&&o(Wr),g(gt,e),e&&o(Gr),g(ue,e)}}}const Bi={local:"installation",sections:[{local:"install-with-pip",title:"Install with pip"},{local:"install-from-source",title:"Install from source"},{local:"editable-install",title:"Editable install"},{local:"install-with-conda",title:"Install with conda"},{local:"cache-setup",title:"Cache setup"},{local:"offline-mode",sections:[{local:"fetch-models-and-tokenizers-to-use-offline",title:"Fetch models and tokenizers to use offline"}],title:"Offline mode"}],title:"Installation"};function Wi(M,d,T){let{fw:c}=d;return M.$$set=E=>{"fw"in E&&T(0,c=E.fw)},[c]}class Qi extends Ri{constructor(d){super();Mi(this,d,Wi,Ui,Ni,{fw:0})}}export{Qi as default,Bi as metadata};
278
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js
import{S as W,i as X,s as Z,l as A,g,q as j,o as C,d as _,e as k,t as V,k as D,c as y,a as x,h as $,m as S,b as v,F as w,j as L,G as F,H as O,f as m,I as P,J as Q,K as R}from"../chunks/vendor-4833417e.js";import{b as H}from"../chunks/paths-4b3c6e7e.js";function N(n,e,s){const a=n.slice();return a[3]=e[s],a}function T(n,e,s){const a=n.slice();return a[6]=e[s],a}function Y(n,e,s){const a=n.slice();return a[9]=e[s],a}function ee(n){let e,s,a,l,t,i,h,r,o,f,c=n[0],d=[];for(let u=0;u<c.length;u+=1)d[u]=M(N(n,c,u));const q=n[2].default,b=O(q,n,n[1],null);return{c(){e=k("style"),s=V(`body, html { padding: 0; margin: 0; }`),a=D(),l=k("div"),t=k("aside"),i=k("ul");for(let u=0;u<d.length;u+=1)d[u].c();h=D(),r=k("div"),o=k("div"),b&&b.c(),this.h()},l(u){e=y(u,"STYLE",{});var E=x(e);s=$(E,`body, html { padding: 0; margin: 0; }`),E.forEach(_),a=S(u),l=y(u,"DIV",{style:!0});var p=x(l);t=y(p,"ASIDE",{style:!0});var I=x(t);i=y(I,"UL",{class:!0});var G=x(i);for(let U=0;U<d.length;U+=1)d[U].l(G);G.forEach(_),I.forEach(_),h=S(p),r=y(p,"DIV",{style:!0,class:!0});var J=x(r);o=y(J,"DIV",{class:!0});var K=x(o);b&&b.l(K),K.forEach(_),J.forEach(_),p.forEach(_),this.h()},h(){v(i,"class","pt-2 flex flex-col pl-3"),m(t,"width","270px"),m(t,"min-width","270px"),m(t,"max-width","270px"),m(t,"border-right","1px solid gray"),m(t,"height","100vh"),m(t,"position","fixed"),m(t,"overflow-y","auto"),m(t,"display","flex"),m(t,"flex-direction","column"),v(o,"class","prose prose-doc dark:prose-light max-w-4xl mx-auto break-words relative"),m(r,"margin-left","270px"),v(r,"class","px-4 pt-3"),m(l,"width","100vh"),m(l,"height","100vh"),m(l,"margin","0"),m(l,"padding","0"),m(l,"display","flex"),m(l,"flex-direction","row")},m(u,E){g(u,e,E),w(e,s),g(u,a,E),g(u,l,E),w(l,t),w(t,i);for(let p=0;p<d.length;p+=1)d[p].m(i,null);w(l,h),w(l,r),w(r,o),b&&b.m(o,null),f=!0},p(u,E){if(E&1){c=u[0];let p;for(p=0;p<c.length;p+=1){const I=N(u,c,p);d[p]?d[p].p(I,E):(d[p]=M(I),d[p].c(),d[p].m(i,null))}for(;p<d.length;p+=1)d[p].d(1);d.length=c.length}b&&b.p&&(!f||E&2)&&P(b,q,u,u[1],f?R(q,u[1],E,null):Q(u[1]),null)},i(u){f||(j(b,u),f=!0)},o(u){C(b,u),f=!1},d(u){u&&_(e),u&&_(a),u&&_(l),F(d,u),b&&b.d(u)}}}function te(n){let e;const s=n[2].default,a=O(s,n,n[1],null);return{c(){a&&a.c()},l(l){a&&a.l(l)},m(l,t){a&&a.m(l,t),e=!0},p(l,t){a&&a.p&&(!e||t&2)&&P(a,s,l,l[1],e?R(s,l[1],t,null):Q(l[1]),null)},i(l){e||(j(a,l),e=!0)},o(l){C(a,l),e=!1},d(l){a&&a.d(l)}}}function le(n){let e,s=n[6].title+"",a,l;return{c(){e=k("a"),a=V(s),this.h()},l(t){e=y(t,"A",{role:!0,class:!0,href:!0});var i=x(e);a=$(i,s),i.forEach(_),this.h()},h(){v(e,"role","navigation"),v(e,"class","block text-gray-500 pr-2 hover:text-black dark:hover:text-gray-300 py-1 transform transition-all hover:translate-x-px first:mt-1 last:mb-4 pl-2 ml-2"),v(e,"href",l=H+"/"+n[6].local.replace(/\bindex$/,""))},m(t,i){g(t,e,i),w(e,a)},p(t,i){i&1&&s!==(s=t[6].title+"")&&L(a,s),i&1&&l!==(l=H+"/"+t[6].local.replace(/\bindex$/,""))&&v(e,"href",l)},d(t){t&&_(e)}}}function re(n){let e,s=n[6].title+"",a,l,t,i=n[6].sections,h=[];for(let r=0;r<i.length;r+=1)h[r]=z(Y(n,i,r));return{c(){e=k("h3"),a=V(s),l=D(),t=k("ul");for(let r=0;r<h.length;r+=1)h[r].c();this.h()},l(r){e=y(r,"H3",{class:!0});var o=x(e);a=$(o,s),o.forEach(_),l=S(r),t=y(r,"UL",{class:!0});var f=x(t);for(let c=0;c<h.length;c+=1)h[c].l(f);f.forEach(_),this.h()},h(){v(e,"class","flex group-hover:after:content-['\u25B6'] after:absolute after:right-4 after:text-gray-500 after:transition after:duration-100 after:ease-in after:transform 
after:rotate-90"),v(t,"class","pt-2 flex flex-col pl-3")},m(r,o){g(r,e,o),w(e,a),g(r,l,o),g(r,t,o);for(let f=0;f<h.length;f+=1)h[f].m(t,null)},p(r,o){if(o&1&&s!==(s=r[6].title+"")&&L(a,s),o&1){i=r[6].sections;let f;for(f=0;f<i.length;f+=1){const c=Y(r,i,f);h[f]?h[f].p(c,o):(h[f]=z(c),h[f].c(),h[f].m(t,null))}for(;f<h.length;f+=1)h[f].d(1);h.length=i.length}},d(r){r&&_(e),r&&_(l),r&&_(t),F(h,r)}}}function z(n){let e,s=n[9].title+"",a,l;return{c(){e=k("a"),a=V(s),this.h()},l(t){e=y(t,"A",{role:!0,class:!0,href:!0});var i=x(e);a=$(i,s),i.forEach(_),this.h()},h(){v(e,"role","navigation"),v(e,"class","block text-gray-500 pr-2 hover:text-black dark:hover:text-gray-300 py-1 transform transition-all hover:translate-x-px first:mt-1 last:mb-4 pl-2 ml-2"),v(e,"href",l=H+"/"+n[9].local.replace(/\bindex$/,""))},m(t,i){g(t,e,i),w(e,a)},p(t,i){i&1&&s!==(s=t[9].title+"")&&L(a,s),i&1&&l!==(l=H+"/"+t[9].local.replace(/\bindex$/,""))&&v(e,"href",l)},d(t){t&&_(e)}}}function B(n){let e;function s(t,i){return"sections"in t[6]?re:le}let a=s(n),l=a(n);return{c(){l.c(),e=A()},l(t){l.l(t),e=A()},m(t,i){l.m(t,i),g(t,e,i)},p(t,i){a===(a=s(t))&&l?l.p(t,i):(l.d(1),l=a(t),l&&(l.c(),l.m(e.parentNode,e)))},d(t){l.d(t),t&&_(e)}}}function M(n){let e,s=n[3].title+"",a,l,t,i,h=n[3].sections,r=[];for(let o=0;o<h.length;o+=1)r[o]=B(T(n,h,o));return{c(){e=k("h3"),a=V(s),l=D(),t=k("div");for(let o=0;o<r.length;o+=1)r[o].c();i=D(),this.h()},l(o){e=y(o,"H3",{class:!0});var f=x(e);a=$(f,s),f.forEach(_),l=S(o),t=y(o,"DIV",{class:!0});var c=x(t);for(let d=0;d<r.length;d+=1)r[d].l(c);i=S(c),c.forEach(_),this.h()},h(){v(e,"class","flex group-hover:after:content-['\u25B6'] after:absolute after:right-4 after:text-gray-500 after:transition after:duration-100 after:ease-in after:transform after:rotate-90"),v(t,"class","pl-4")},m(o,f){g(o,e,f),w(e,a),g(o,l,f),g(o,t,f);for(let c=0;c<r.length;c+=1)r[c].m(t,null);w(t,i)},p(o,f){if(f&1&&s!==(s=o[3].title+"")&&L(a,s),f&1){h=o[3].sections;let c;for(c=0;c<h.length;c+=1){const d=T(o,h,c);r[c]?r[c].p(d,f):(r[c]=B(d),r[c].c(),r[c].m(t,i))}for(;c<r.length;c+=1)r[c].d(1);r.length=h.length}},d(o){o&&_(e),o&&_(l),o&&_(t),F(r,o)}}}function ae(n){let e,s,a,l;const t=[te,ee],i=[];function h(r,o){return 0}return e=h(),s=i[e]=t[e](n),{c(){s.c(),a=A()},l(r){s.l(r),a=A()},m(r,o){i[e].m(r,o),g(r,a,o),l=!0},p(r,[o]){s.p(r,o)},i(r){l||(j(s),l=!0)},o(r){C(s),l=!1},d(r){i[e].d(r),r&&_(a)}}}async function ne(n){return{}}function se(n,e,s){let{$$slots:a={},$$scope:l}=e,{toc:t}=e;return n.$$set=i=>{"toc"in i&&s(0,t=i.toc),"$$scope"in i&&s(1,l=i.$$scope)},[t,l,a]}class fe extends W{constructor(e){super();X(this,e,se,ae,Z,{toc:0})}}export{fe as default,ne as load};
279
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages/model_doc/clip.mdx-b328d557.js
import{S as r1,i as a1,s as i1,e as n,k as l,w as g,t as a,M as l1,c as s,d as o,m as d,a as r,x as _,h as i,b as c,F as e,g as m,y as v,q as P,o as T,B as I}from"../../chunks/vendor-4833417e.js";import{T as ce}from"../../chunks/Tip-fffd6df1.js";import{D as w}from"../../chunks/Docstring-4f315ed9.js";import{C as A}from"../../chunks/CodeBlock-6a3d1b46.js";import{I as D}from"../../chunks/IconCopyLink-4b81c553.js";import"../../chunks/CopyButton-dacfbfaf.js";function d1(x){let p,L,u,b,C;return{c(){p=n("p"),L=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),u=n("code"),b=a("Module"),C=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(y){p=s(y,"P",{});var f=r(p);L=i(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),u=s(f,"CODE",{});var k=r(u);b=i(k,"Module"),k.forEach(o),C=i(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(o)},m(y,f){m(y,p,f),e(p,L),e(p,u),e(u,b),e(p,C)},d(y){y&&o(p)}}}function c1(x){let p,L,u,b,C;return{c(){p=n("p"),L=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),u=n("code"),b=a("Module"),C=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(y){p=s(y,"P",{});var f=r(p);L=i(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),u=s(f,"CODE",{});var k=r(u);b=i(k,"Module"),k.forEach(o),C=i(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(o)},m(y,f){m(y,p,f),e(p,L),e(p,u),e(u,b),e(p,C)},d(y){y&&o(p)}}}function p1(x){let p,L,u,b,C;return{c(){p=n("p"),L=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),u=n("code"),b=a("Module"),C=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(y){p=s(y,"P",{});var f=r(p);L=i(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),u=s(f,"CODE",{});var k=r(u);b=i(k,"Module"),k.forEach(o),C=i(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(o)},m(y,f){m(y,p,f),e(p,L),e(p,u),e(u,b),e(p,C)},d(y){y&&o(p)}}}function h1(x){let p,L,u,b,C;return{c(){p=n("p"),L=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),u=n("code"),b=a("Module"),C=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(y){p=s(y,"P",{});var f=r(p);L=i(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),u=s(f,"CODE",{});var k=r(u);b=i(k,"Module"),k.forEach(o),C=i(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(o)},m(y,f){m(y,p,f),e(p,L),e(p,u),e(u,b),e(p,C)},d(y){y&&o(p)}}}function m1(x){let 
p,L,u,b,C;return{c(){p=n("p"),L=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),u=n("code"),b=a("Module"),C=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(y){p=s(y,"P",{});var f=r(p);L=i(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),u=s(f,"CODE",{});var k=r(u);b=i(k,"Module"),k.forEach(o),C=i(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(o)},m(y,f){m(y,p,f),e(p,L),e(p,u),e(u,b),e(p,C)},d(y){y&&o(p)}}}function f1(x){let p,L,u,b,C,y,f,k,Ho,at,M,ue,ye,be,Ko,Ce,Go,Xt,pe,Me,Le,Zt,H,he,je,it,Qt,Fe,ke,Jo,Yt,O,ge,we,$e,Xo,xe,Zo,eo,me,Qo,qe,_e;return{c(){p=n("p"),L=a("TF 2.0 models accepts two formats as inputs:"),u=l(),b=n("ul"),C=n("li"),y=a("having all inputs as keyword arguments (like PyTorch models), or"),f=l(),k=n("li"),Ho=a("having all inputs as a list, tuple or dict in the first positional arguments."),at=l(),M=n("p"),ue=a("This second option is useful when using "),ye=n("code"),be=a("tf.keras.Model.fit"),Ko=a(` method which currently requires having all the tensors in the first argument of the model call function: `),Ce=n("code"),Go=a("model(inputs)"),Xt=a("."),pe=l(),Me=n("p"),Le=a(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Zt=l(),H=n("ul"),he=n("li"),je=a("a single Tensor with "),it=n("code"),Qt=a("input_ids"),Fe=a(" only and nothing else: "),ke=n("code"),Jo=a("model(input_ids)"),Yt=l(),O=n("li"),ge=a(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),we=n("code"),$e=a("model([input_ids, attention_mask])"),Xo=a(" or "),xe=n("code"),Zo=a("model([input_ids, attention_mask, token_type_ids])"),eo=l(),me=n("li"),Qo=a(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),qe=n("code"),_e=a('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l($){p=s($,"P",{});var j=r(p);L=i(j,"TF 2.0 models accepts two formats as inputs:"),j.forEach(o),u=d($),b=s($,"UL",{});var to=r(b);C=s(to,"LI",{});var Qs=r(C);y=i(Qs,"having all inputs as keyword arguments (like PyTorch models), or"),Qs.forEach(o),f=d(to),k=s(to,"LI",{});var Yo=r(k);Ho=i(Yo,"having all inputs as a list, tuple or dict in the first positional arguments."),Yo.forEach(o),to.forEach(o),at=d($),M=s($,"P",{});var E=r(M);ue=i(E,"This second option is useful when using "),ye=s(E,"CODE",{});var Ys=r(ye);be=i(Ys,"tf.keras.Model.fit"),Ys.forEach(o),Ko=i(E,` method which currently requires having all the tensors in the first argument of the model call function: `),Ce=s(E,"CODE",{});var lt=r(Ce);Go=i(lt,"model(inputs)"),lt.forEach(o),Xt=i(E,"."),E.forEach(o),pe=d($),Me=s($,"P",{});var er=r(Me);Le=i(er,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),er.forEach(o),Zt=d($),H=s($,"UL",{});var De=r(H);he=s(De,"LI",{});var ve=r(he);je=i(ve,"a single Tensor with "),it=s(ve,"CODE",{});var tr=r(it);Qt=i(tr,"input_ids"),tr.forEach(o),Fe=i(ve," only and nothing else: "),ke=s(ve,"CODE",{});var or=r(ke);Jo=i(or,"model(input_ids)"),or.forEach(o),ve.forEach(o),Yt=d(De),O=s(De,"LI",{});var Pe=r(O);ge=i(Pe,`a list of 
varying length with one or several input Tensors IN THE ORDER given in the docstring: `),we=s(Pe,"CODE",{});var nr=r(we);$e=i(nr,"model([input_ids, attention_mask])"),nr.forEach(o),Xo=i(Pe," or "),xe=s(Pe,"CODE",{});var sr=r(xe);Zo=i(sr,"model([input_ids, attention_mask, token_type_ids])"),sr.forEach(o),Pe.forEach(o),eo=d(De),me=s(De,"LI",{});var Oe=r(me);Qo=i(Oe,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),qe=s(Oe,"CODE",{});var rr=r(qe);_e=i(rr,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),rr.forEach(o),Oe.forEach(o),De.forEach(o)},m($,j){m($,p,j),e(p,L),m($,u,j),m($,b,j),e(b,C),e(C,y),e(b,f),e(b,k),e(k,Ho),m($,at,j),m($,M,j),e(M,ue),e(M,ye),e(ye,be),e(M,Ko),e(M,Ce),e(Ce,Go),e(M,Xt),m($,pe,j),m($,Me,j),e(Me,Le),m($,Zt,j),m($,H,j),e(H,he),e(he,je),e(he,it),e(it,Qt),e(he,Fe),e(he,ke),e(ke,Jo),e(H,Yt),e(H,O),e(O,ge),e(O,we),e(we,$e),e(O,Xo),e(O,xe),e(xe,Zo),e(H,eo),e(H,me),e(me,Qo),e(me,qe),e(qe,_e)},d($){$&&o(p),$&&o(u),$&&o(b),$&&o(at),$&&o(M),$&&o(pe),$&&o(Me),$&&o(Zt),$&&o(H)}}}function u1(x){let p,L,u,b,C;return{c(){p=n("p"),L=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),u=n("code"),b=a("Module"),C=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(y){p=s(y,"P",{});var f=r(p);L=i(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),u=s(f,"CODE",{});var k=r(u);b=i(k,"Module"),k.forEach(o),C=i(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(o)},m(y,f){m(y,p,f),e(p,L),e(p,u),e(u,b),e(p,C)},d(y){y&&o(p)}}}function g1(x){let p,L,u,b,C;return{c(){p=n("p"),L=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),u=n("code"),b=a("Module"),C=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(y){p=s(y,"P",{});var f=r(p);L=i(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),u=s(f,"CODE",{});var k=r(u);b=i(k,"Module"),k.forEach(o),C=i(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(o)},m(y,f){m(y,p,f),e(p,L),e(p,u),e(u,b),e(p,C)},d(y){y&&o(p)}}}function _1(x){let p,L,u,b,C;return{c(){p=n("p"),L=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),u=n("code"),b=a("Module"),C=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(y){p=s(y,"P",{});var f=r(p);L=i(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),u=s(f,"CODE",{});var k=r(u);b=i(k,"Module"),k.forEach(o),C=i(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(o)},m(y,f){m(y,p,f),e(p,L),e(p,u),e(u,b),e(p,C)},d(y){y&&o(p)}}}function v1(x){let p,L,u,b,C;return{c(){p=n("p"),L=a("Although the recipe for forward pass needs to be defined within this function, one should call the 
"),u=n("code"),b=a("Module"),C=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(y){p=s(y,"P",{});var f=r(p);L=i(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),u=s(f,"CODE",{});var k=r(u);b=i(k,"Module"),k.forEach(o),C=i(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(o)},m(y,f){m(y,p,f),e(p,L),e(p,u),e(u,b),e(p,C)},d(y){y&&o(p)}}}function P1(x){let p,L,u,b,C;return{c(){p=n("p"),L=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),u=n("code"),b=a("Module"),C=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(y){p=s(y,"P",{});var f=r(p);L=i(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),u=s(f,"CODE",{});var k=r(u);b=i(k,"Module"),k.forEach(o),C=i(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(o)},m(y,f){m(y,p,f),e(p,L),e(p,u),e(u,b),e(p,C)},d(y){y&&o(p)}}}function T1(x){let p,L,u,b,C;return{c(){p=n("p"),L=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),u=n("code"),b=a("Module"),C=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(y){p=s(y,"P",{});var f=r(p);L=i(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),u=s(f,"CODE",{});var k=r(u);b=i(k,"Module"),k.forEach(o),C=i(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(o)},m(y,f){m(y,p,f),e(p,L),e(p,u),e(u,b),e(p,C)},d(y){y&&o(p)}}}function I1(x){let p,L,u,b,C;return{c(){p=n("p"),L=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),u=n("code"),b=a("Module"),C=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(y){p=s(y,"P",{});var f=r(p);L=i(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),u=s(f,"CODE",{});var k=r(u);b=i(k,"Module"),k.forEach(o),C=i(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(o)},m(y,f){m(y,p,f),e(p,L),e(p,u),e(u,b),e(p,C)},d(y){y&&o(p)}}}function y1(x){let p,L,u,b,C;return{c(){p=n("p"),L=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),u=n("code"),b=a("Module"),C=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(y){p=s(y,"P",{});var f=r(p);L=i(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),u=s(f,"CODE",{});var k=r(u);b=i(k,"Module"),k.forEach(o),C=i(f,` instance afterwards instead of this since the former takes care of running the pre and 
post processing steps while the latter silently ignores them.`),f.forEach(o)},m(y,f){m(y,p,f),e(p,L),e(p,u),e(u,b),e(p,C)},d(y){y&&o(p)}}}function b1(x){let p,L,u,b,C,y,f,k,Ho,at,M,ue,ye,be,Ko,Ce,Go,Xt,pe,Me,Le,Zt,H,he,je,it,Qt,Fe,ke,Jo,Yt,O,ge,we,$e,Xo,xe,Zo,eo,me,Qo,qe,_e,$,j,to,Qs,Yo,E,Ys,lt,er,De,ve,tr,or,Pe,nr,sr,Oe,rr,xd,ar,Ed,zd,ir,Md,jd,nl,en,sl,Ae,Fd,tn,qd,Dd,on,Od,Ad,rl,dt,oo,pa,nn,Nd,ha,Vd,al,fe,sn,Sd,no,lr,Wd,Bd,dr,Ud,Rd,Hd,ct,Kd,cr,Gd,Jd,pr,Xd,Zd,Qd,so,rn,Yd,an,ec,hr,tc,oc,il,pt,ro,ma,ln,nc,fa,sc,ll,V,dn,rc,ht,ac,mr,ic,lc,cn,dc,cc,pc,mt,hc,fr,mc,fc,ur,uc,gc,_c,ua,vc,Pc,pn,dl,ft,ao,ga,hn,Tc,_a,Ic,cl,S,mn,yc,ut,bc,gr,Cc,Lc,fn,kc,wc,$c,gt,xc,_r,Ec,zc,vr,Mc,jc,Fc,va,qc,Dc,un,pl,_t,io,Pa,gn,Oc,Ta,Ac,hl,F,_n,Nc,Ia,Vc,Sc,vn,Wc,Pr,Bc,Uc,Rc,Te,Pn,Hc,ya,Kc,Gc,ba,Tr,Jc,Ca,Xc,Zc,La,Qc,Yc,lo,Tn,ep,In,tp,ka,op,np,sp,co,yn,rp,wa,ap,ip,Ir,bn,ml,vt,po,$a,Cn,lp,xa,dp,fl,W,Ln,cp,kn,pp,Ea,hp,mp,fp,wn,up,yr,gp,_p,vp,Ie,$n,Pp,za,Tp,Ip,Ma,br,yp,ja,bp,Cp,Fa,Lp,kp,ho,xn,wp,qa,$p,ul,Pt,mo,Da,En,xp,Oa,Ep,gl,B,zn,zp,Aa,Mp,jp,Mn,Fp,Cr,qp,Dp,Op,fo,jn,Ap,Fn,Np,Na,Vp,Sp,Wp,uo,qn,Bp,Tt,Up,Va,Rp,Hp,Sa,Kp,Gp,_l,It,go,Wa,Dn,Jp,Ba,Xp,vl,U,On,Zp,Ua,Qp,Yp,K,Lr,eh,th,kr,oh,nh,wr,sh,rh,Ra,ah,ih,$r,lh,dh,ch,_o,An,ph,Nn,hh,xr,mh,fh,uh,vo,Vn,gh,Sn,_h,Er,vh,Ph,Pl,yt,Po,Ha,Wn,Th,Ka,Ih,Tl,R,Bn,yh,Un,bh,Rn,Ch,Lh,kh,G,Hn,wh,bt,$h,zr,xh,Eh,Ga,zh,Mh,jh,To,Fh,Ja,qh,Dh,Kn,Oh,J,Gn,Ah,Ct,Nh,Mr,Vh,Sh,Xa,Wh,Bh,Uh,Io,Rh,Za,Hh,Kh,Jn,Gh,X,Xn,Jh,Lt,Xh,jr,Zh,Qh,Qa,Yh,em,tm,yo,om,Ya,nm,sm,Zn,Il,kt,bo,ei,Qn,rm,ti,am,yl,wt,Yn,im,Z,es,lm,$t,dm,Fr,cm,pm,oi,hm,mm,fm,Co,um,ni,gm,_m,ts,bl,xt,Lo,si,os,vm,ri,Pm,Cl,Et,ns,Tm,Q,ss,Im,zt,ym,qr,bm,Cm,ai,Lm,km,wm,ko,$m,ii,xm,Em,rs,Ll,Mt,wo,li,as,zm,di,Mm,kl,q,is,jm,ls,Fm,Dr,qm,Dm,Om,ds,Am,cs,Nm,Vm,Sm,$o,Wm,Y,ps,Bm,jt,Um,Or,Rm,Hm,ci,Km,Gm,Jm,xo,Xm,pi,Zm,Qm,hs,Ym,ee,ms,ef,Ft,tf,Ar,of,nf,hi,sf,rf,af,Eo,lf,mi,df,cf,fs,pf,te,us,hf,qt,mf,Nr,ff,uf,fi,gf,_f,vf,zo,Pf,ui,Tf,If,gs,wl,Dt,Mo,gi,_s,yf,_i,bf,$l,Ot,vs,Cf,oe,Ps,Lf,At,kf,Vr,wf,$f,vi,xf,Ef,zf,jo,Mf,Pi,jf,Ff,Ts,xl,Nt,Fo,Ti,Is,qf,Ii,Df,El,Vt,ys,Of,ne,bs,Af,St,Nf,Sr,Vf,Sf,yi,Wf,Bf,Uf,qo,Rf,bi,Hf,Kf,Cs,zl,Wt,Do,Ci,Ls,Gf,Li,Jf,Ml,z,ks,Xf,ws,Zf,Wr,Qf,Yf,eu,$s,tu,xs,ou,nu,su,ki,ru,au,Ee,wi,Es,iu,lu,$i,zs,du,cu,xi,Ms,pu,hu,Ei,js,mu,fu,se,Fs,uu,Bt,gu,zi,_u,vu,Mi,Pu,Tu,Iu,Oo,yu,ji,bu,Cu,qs,Lu,Ne,Ds,ku,Fi,wu,$u,Os,xu,Ve,As,Eu,qi,zu,Mu,Ns,jl,Ut,Ao,Di,Vs,ju,Oi,Fu,Fl,Rt,Ss,qu,re,Ws,Du,Ht,Ou,Ai,Au,Nu,Ni,Vu,Su,Wu,No,Bu,Vi,Uu,Ru,Bs,ql,Kt,Vo,Si,Us,Hu,Wi,Ku,Dl,Gt,Rs,Gu,ae,Hs,Ju,Jt,Xu,Bi,Zu,Qu,Ui,Yu,eg,tg,So,og,Ri,ng,sg,Ks,Ol;return y=new D({}),be=new D({}),$e=new D({}),en=new A({props:{code:`from PIL import Image import requests from transformers import CLIPProcessor, CLIPModel model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32") processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32") url = "http://images.cocodataset.org/val2017/000000039769.jpg" image = Image.open(requests.get(url, stream=True).raw) inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True) outputs = model(**inputs) logits_per_image = outputs.logits_per_image # this is the image-text similarity score probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> requests <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> 
transformers <span class="hljs-keyword">import</span> CLIPProcessor, CLIPModel <span class="hljs-meta">&gt;&gt;&gt; </span>model = CLIPModel.from_pretrained(<span class="hljs-string">&quot;openai/clip-vit-base-patch32&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>processor = CLIPProcessor.from_pretrained(<span class="hljs-string">&quot;openai/clip-vit-base-patch32&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>url = <span class="hljs-string">&quot;http://images.cocodataset.org/val2017/000000039769.jpg&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>image = Image.<span class="hljs-built_in">open</span>(requests.get(url, stream=<span class="hljs-literal">True</span>).raw) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = processor(text=[<span class="hljs-string">&quot;a photo of a cat&quot;</span>, <span class="hljs-string">&quot;a photo of a dog&quot;</span>], images=image, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>, padding=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits_per_image = outputs.logits_per_image <span class="hljs-comment"># this is the image-text similarity score</span> <span class="hljs-meta">&gt;&gt;&gt; </span>probs = logits_per_image.softmax(dim=<span class="hljs-number">1</span>) <span class="hljs-comment"># we can take the softmax to get the label probabilities</span>`}}),nn=new D({}),sn=new w({props:{name:"class transformers.CLIPConfig",anchor:"transformers.CLIPConfig",parameters:[{name:"text_config_dict",val:" = None"},{name:"vision_config_dict",val:" = None"},{name:"projection_dim",val:" = 512"},{name:"logit_scale_init_value",val:" = 2.6592"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/clip/configuration_clip.py#L209",parametersDescription:[{anchor:"transformers.CLIPConfig.text_config_dict",description:`<strong>text_config_dict</strong> (<code>dict</code>, <em>optional</em>) &#x2014; Dictionary of configuration options used to initialize <a href="/docs/transformers/pr_16143/en/model_doc/clip#transformers.CLIPTextConfig">CLIPTextConfig</a>.`,name:"text_config_dict"},{anchor:"transformers.CLIPConfig.vision_config_dict",description:`<strong>vision_config_dict</strong> (<code>dict</code>, <em>optional</em>) &#x2014; Dictionary of configuration options used to initialize <a href="/docs/transformers/pr_16143/en/model_doc/clip#transformers.CLIPVisionConfig">CLIPVisionConfig</a>.`,name:"vision_config_dict"},{anchor:"transformers.CLIPConfig.projection_dim",description:`<strong>projection_dim</strong> (<code>int</code>, <em>optional</em>, defaults to 512) &#x2014; Dimentionality of text and vision projection layers.`,name:"projection_dim"},{anchor:"transformers.CLIPConfig.logit_scale_init_value",description:`<strong>logit_scale_init_value</strong> (<code>float</code>, <em>optional</em>, defaults to 2.6592) &#x2014; The inital value of the <em>logit_scale</em> paramter. 
Default is used as per the original CLIP implementation.`,name:"logit_scale_init_value"},{anchor:"transformers.CLIPConfig.kwargs",description:`<strong>kwargs</strong> (<em>optional</em>) &#x2014; Dictionary of keyword arguments.`,name:"kwargs"}]}}),rn=new w({props:{name:"from_text_vision_configs",anchor:"transformers.CLIPConfig.from_text_vision_configs",parameters:[{name:"text_config",val:": CLIPTextConfig"},{name:"vision_config",val:": CLIPVisionConfig"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/clip/configuration_clip.py#L258",returnDescription:` <p>An instance of a configuration object</p> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/model_doc/clip#transformers.CLIPConfig" >CLIPConfig</a></p> `}}),ln=new D({}),dn=new w({props:{name:"class transformers.CLIPTextConfig",anchor:"transformers.CLIPTextConfig",parameters:[{name:"vocab_size",val:" = 49408"},{name:"hidden_size",val:" = 512"},{name:"intermediate_size",val:" = 2048"},{name:"num_hidden_layers",val:" = 12"},{name:"num_attention_heads",val:" = 8"},{name:"max_position_embeddings",val:" = 77"},{name:"hidden_act",val:" = 'quick_gelu'"},{name:"layer_norm_eps",val:" = 1e-05"},{name:"dropout",val:" = 0.0"},{name:"attention_dropout",val:" = 0.0"},{name:"initializer_range",val:" = 0.02"},{name:"initializer_factor",val:" = 1.0"},{name:"pad_token_id",val:" = 1"},{name:"bos_token_id",val:" = 0"},{name:"eos_token_id",val:" = 2"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/clip/configuration_clip.py#L31",parametersDescription:[{anchor:"transformers.CLIPTextConfig.vocab_size",description:`<strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 49408) &#x2014; Vocabulary size of the CLIP text model. Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed when calling <a href="/docs/transformers/pr_16143/en/model_doc/clip#transformers.CLIPModel">CLIPModel</a>.`,name:"vocab_size"},{anchor:"transformers.CLIPTextConfig.hidden_size",description:`<strong>hidden_size</strong> (<code>int</code>, <em>optional</em>, defaults to 512) &#x2014; Dimensionality of the encoder layers and the pooler layer.`,name:"hidden_size"},{anchor:"transformers.CLIPTextConfig.intermediate_size",description:`<strong>intermediate_size</strong> (<code>int</code>, <em>optional</em>, defaults to 2048) &#x2014; Dimensionality of the &#x201C;intermediate&#x201D; (i.e., feed-forward) layer in the Transformer encoder.`,name:"intermediate_size"},{anchor:"transformers.CLIPTextConfig.num_hidden_layers",description:`<strong>num_hidden_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of hidden layers in the Transformer encoder.`,name:"num_hidden_layers"},{anchor:"transformers.CLIPTextConfig.num_attention_heads",description:`<strong>num_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.`,name:"num_attention_heads"},{anchor:"transformers.CLIPTextConfig.max_position_embeddings",description:`<strong>max_position_embeddings</strong> (<code>int</code>, <em>optional</em>, defaults to 77) &#x2014; The maximum sequence length that this model might ever be used with. 
Typically set this to something large just in case (e.g., 512 or 1024 or 2048).`,name:"max_position_embeddings"},{anchor:"transformers.CLIPTextConfig.hidden_act",description:`<strong>hidden_act</strong> (<code>str</code> or <code>function</code>, <em>optional</em>, defaults to <code>&quot;quick_gelu&quot;</code>) &#x2014; The non-linear activation function (function or string) in the encoder and pooler. If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;selu&quot;</code> and <code>&quot;gelu_new&quot;</code> \`<code>&quot;quick_gelu&quot;</code> are supported. layer_norm_eps (<code>float</code>, <em>optional</em>, defaults to 1e-5): The epsilon used by the layer normalization layers.`,name:"hidden_act"},{anchor:"transformers.CLIPTextConfig.attention_dropout",description:`<strong>attention_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout ratio for the attention probabilities.`,name:"attention_dropout"},{anchor:"transformers.CLIPTextConfig.dropout",description:`<strong>dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler.`,name:"dropout"},{anchor:"transformers.CLIPTextConfig.initializer_range",description:`<strong>initializer_range</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices.`,name:"initializer_range"},{anchor:"transformers.CLIPTextConfig.initializer_factor",description:`<strong>initializer_factor</strong> (\`float&#x201C;, <em>optional</em>, defaults to 1) &#x2014; A factor for initializing all weight matrices (should be kept to 1, used internally for initialization testing).`,name:"initializer_factor"}]}}),pn=new A({props:{code:`from transformers import CLIPTextModel, CLIPTextConfig # Initializing a CLIPTextModel with openai/clip-vit-base-patch32 style configuration configuration = CLIPTextConfig() # Initializing a CLIPTextConfig from the openai/clip-vit-base-patch32 style configuration model = CLIPTextModel(configuration) # Accessing the model configuration configuration = model.config`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> CLIPTextModel, CLIPTextConfig <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a CLIPTextModel with openai/clip-vit-base-patch32 style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = CLIPTextConfig() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a CLIPTextConfig from the openai/clip-vit-base-patch32 style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = CLIPTextModel(configuration) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = model.config`}}),hn=new D({}),mn=new w({props:{name:"class transformers.CLIPVisionConfig",anchor:"transformers.CLIPVisionConfig",parameters:[{name:"hidden_size",val:" = 768"},{name:"intermediate_size",val:" = 3072"},{name:"num_hidden_layers",val:" = 12"},{name:"num_attention_heads",val:" = 12"},{name:"image_size",val:" = 224"},{name:"patch_size",val:" = 32"},{name:"hidden_act",val:" = 'quick_gelu'"},{name:"layer_norm_eps",val:" = 
1e-05"},{name:"dropout",val:" = 0.0"},{name:"attention_dropout",val:" = 0.0"},{name:"initializer_range",val:" = 0.02"},{name:"initializer_factor",val:" = 1.0"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/clip/configuration_clip.py#L122",parametersDescription:[{anchor:"transformers.CLIPVisionConfig.hidden_size",description:`<strong>hidden_size</strong> (<code>int</code>, <em>optional</em>, defaults to 768) &#x2014; Dimensionality of the encoder layers and the pooler layer.`,name:"hidden_size"},{anchor:"transformers.CLIPVisionConfig.intermediate_size",description:`<strong>intermediate_size</strong> (<code>int</code>, <em>optional</em>, defaults to 3072) &#x2014; Dimensionality of the &#x201C;intermediate&#x201D; (i.e., feed-forward) layer in the Transformer encoder.`,name:"intermediate_size"},{anchor:"transformers.CLIPVisionConfig.num_hidden_layers",description:`<strong>num_hidden_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of hidden layers in the Transformer encoder.`,name:"num_hidden_layers"},{anchor:"transformers.CLIPVisionConfig.num_attention_heads",description:`<strong>num_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.`,name:"num_attention_heads"},{anchor:"transformers.CLIPVisionConfig.image_size",description:`<strong>image_size</strong> (<code>int</code>, <em>optional</em>, defaults to 224) &#x2014; The size (resolution) of each image.`,name:"image_size"},{anchor:"transformers.CLIPVisionConfig.patch_size",description:`<strong>patch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 32) &#x2014; The size (resolution) of each patch.`,name:"patch_size"},{anchor:"transformers.CLIPVisionConfig.hidden_act",description:`<strong>hidden_act</strong> (<code>str</code> or <code>function</code>, <em>optional</em>, defaults to <code>&quot;quick_gelu&quot;</code>) &#x2014; The non-linear activation function (function or string) in the encoder and pooler. If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;selu&quot;</code> and <code>&quot;gelu_new&quot;</code> \`<code>&quot;quick_gelu&quot;</code> are supported. 
layer_norm_eps (<code>float</code>, <em>optional</em>, defaults to 1e-5): The epsilon used by the layer normalization layers.`,name:"hidden_act"},{anchor:"transformers.CLIPVisionConfig.dropout",description:`<strong>dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler.`,name:"dropout"},{anchor:"transformers.CLIPVisionConfig.attention_dropout",description:`<strong>attention_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout ratio for the attention probabilities.`,name:"attention_dropout"},{anchor:"transformers.CLIPVisionConfig.initializer_range",description:`<strong>initializer_range</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices.`,name:"initializer_range"},{anchor:"transformers.CLIPVisionConfig.initializer_factor",description:`<strong>initializer_factor</strong> (\`float&#x201C;, <em>optional</em>, defaults to 1) &#x2014; A factor for initializing all weight matrices (should be kept to 1, used internally for initialization testing).`,name:"initializer_factor"}]}}),un=new A({props:{code:`from transformers import CLIPVisionModel, CLIPVisionConfig # Initializing a CLIPVisionModel with openai/clip-vit-base-patch32 style configuration configuration = CLIPVisionConfig() # Initializing a CLIPVisionModel model from the openai/clip-vit-base-patch32 style configuration model = CLIPVisionModel(configuration) # Accessing the model configuration configuration = model.config`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> CLIPVisionModel, CLIPVisionConfig <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a CLIPVisionModel with openai/clip-vit-base-patch32 style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = CLIPVisionConfig() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a CLIPVisionModel model from the openai/clip-vit-base-patch32 style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = CLIPVisionModel(configuration) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = model.config`}}),gn=new D({}),_n=new w({props:{name:"class transformers.CLIPTokenizer",anchor:"transformers.CLIPTokenizer",parameters:[{name:"vocab_file",val:""},{name:"merges_file",val:""},{name:"errors",val:" = 'replace'"},{name:"unk_token",val:" = '<|endoftext|>'"},{name:"bos_token",val:" = '<|startoftext|>'"},{name:"eos_token",val:" = '<|endoftext|>'"},{name:"pad_token",val:" = '<|endoftext|>'"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/clip/tokenization_clip.py#L100",parametersDescription:[{anchor:"transformers.CLIPTokenizer.vocab_file",description:`<strong>vocab_file</strong> (<code>str</code>) &#x2014; Path to the vocabulary file.`,name:"vocab_file"},{anchor:"transformers.CLIPTokenizer.merges_file",description:`<strong>merges_file</strong> (<code>str</code>) &#x2014; Path to the merges file.`,name:"merges_file"},{anchor:"transformers.CLIPTokenizer.errors",description:`<strong>errors</strong> (<code>str</code>, 
<em>optional</em>, defaults to <code>&quot;replace&quot;</code>) &#x2014; Paradigm to follow when decoding bytes to UTF-8. See <a href="https://docs.python.org/3/library/stdtypes.html#bytes.decode" rel="nofollow">bytes.decode</a> for more information.`,name:"errors"},{anchor:"transformers.CLIPTokenizer.unk_token",description:`<strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&lt;|endoftext|&gt;</code>) &#x2014; The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.`,name:"unk_token"},{anchor:"transformers.CLIPTokenizer.bos_token",description:`<strong>bos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&lt;|endoftext|&gt;</code>) &#x2014; The beginning of sequence token.`,name:"bos_token"},{anchor:"transformers.CLIPTokenizer.eos_token",description:`<strong>eos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&lt;|endoftext|&gt;</code>) &#x2014; The end of sequence token.`,name:"eos_token"}]}}),Pn=new w({props:{name:"build_inputs_with_special_tokens",anchor:"transformers.CLIPTokenizer.build_inputs_with_special_tokens",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/clip/tokenization_clip.py#L186",parametersDescription:[{anchor:"transformers.CLIPTokenizer.build_inputs_with_special_tokens.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs to which the special tokens will be added.`,name:"token_ids_0"},{anchor:"transformers.CLIPTokenizer.build_inputs_with_special_tokens.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of <a href="../glossary#input-ids">input IDs</a> with the appropriate special tokens.</p> `,returnType:` <p><code>List[int]</code></p> `}}),Tn=new w({props:{name:"get_special_tokens_mask",anchor:"transformers.CLIPTokenizer.get_special_tokens_mask",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"},{name:"already_has_special_tokens",val:": bool = False"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/clip/tokenization_clip.py#L213",parametersDescription:[{anchor:"transformers.CLIPTokenizer.get_special_tokens_mask.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.`,name:"token_ids_0"},{anchor:"transformers.CLIPTokenizer.get_special_tokens_mask.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"},{anchor:"transformers.CLIPTokenizer.get_special_tokens_mask.already_has_special_tokens",description:`<strong>already_has_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the token list is already formatted with special tokens for the model.`,name:"already_has_special_tokens"}],returnDescription:` <p>A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.</p> `,returnType:` <p><code>List[int]</code></p> `}}),yn=new 
w({props:{name:"create_token_type_ids_from_sequences",anchor:"transformers.CLIPTokenizer.create_token_type_ids_from_sequences",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/clip/tokenization_clip.py#L241",parametersDescription:[{anchor:"transformers.CLIPTokenizer.create_token_type_ids_from_sequences.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.`,name:"token_ids_0"},{anchor:"transformers.CLIPTokenizer.create_token_type_ids_from_sequences.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of zeros.</p> `,returnType:` <p><code>List[int]</code></p> `}}),bn=new w({props:{name:"save_vocabulary",anchor:"transformers.CLIPTokenizer.save_vocabulary",parameters:[{name:"save_directory",val:": str"},{name:"filename_prefix",val:": typing.Optional[str] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/clip/tokenization_clip.py#L336"}}),Cn=new D({}),Ln=new w({props:{name:"class transformers.CLIPTokenizerFast",anchor:"transformers.CLIPTokenizerFast",parameters:[{name:"vocab_file",val:" = None"},{name:"merges_file",val:" = None"},{name:"tokenizer_file",val:" = None"},{name:"unk_token",val:" = '<|endoftext|>'"},{name:"bos_token",val:" = '<|startoftext|>'"},{name:"eos_token",val:" = '<|endoftext|>'"},{name:"pad_token",val:" = '<|endoftext|>'"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/clip/tokenization_clip_fast.py#L48",parametersDescription:[{anchor:"transformers.CLIPTokenizerFast.vocab_file",description:`<strong>vocab_file</strong> (<code>str</code>) &#x2014; Path to the vocabulary file.`,name:"vocab_file"},{anchor:"transformers.CLIPTokenizerFast.merges_file",description:`<strong>merges_file</strong> (<code>str</code>) &#x2014; Path to the merges file.`,name:"merges_file"},{anchor:"transformers.CLIPTokenizerFast.unk_token",description:`<strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&lt;|endoftext|&gt;</code>) &#x2014; The unknown token. 
A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.`,name:"unk_token"},{anchor:"transformers.CLIPTokenizerFast.bos_token",description:`<strong>bos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&lt;|endoftext|&gt;</code>) &#x2014; The beginning of sequence token.`,name:"bos_token"},{anchor:"transformers.CLIPTokenizerFast.eos_token",description:`<strong>eos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&lt;|endoftext|&gt;</code>) &#x2014; The end of sequence token.`,name:"eos_token"}]}}),$n=new w({props:{name:"build_inputs_with_special_tokens",anchor:"transformers.CLIPTokenizerFast.build_inputs_with_special_tokens",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/clip/tokenization_clip_fast.py#L121",parametersDescription:[{anchor:"transformers.CLIPTokenizerFast.build_inputs_with_special_tokens.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs to which the special tokens will be added.`,name:"token_ids_0"},{anchor:"transformers.CLIPTokenizerFast.build_inputs_with_special_tokens.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of <a href="../glossary#input-ids">input IDs</a> with the appropriate special tokens.</p> `,returnType:` <p><code>List[int]</code></p> `}}),xn=new w({props:{name:"create_token_type_ids_from_sequences",anchor:"transformers.CLIPTokenizerFast.create_token_type_ids_from_sequences",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/clip/tokenization_clip_fast.py#L148",parametersDescription:[{anchor:"transformers.CLIPTokenizerFast.create_token_type_ids_from_sequences.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.`,name:"token_ids_0"},{anchor:"transformers.CLIPTokenizerFast.create_token_type_ids_from_sequences.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of zeros.</p> `,returnType:` <p><code>List[int]</code></p> `}}),En=new D({}),zn=new w({props:{name:"class transformers.CLIPFeatureExtractor",anchor:"transformers.CLIPFeatureExtractor",parameters:[{name:"do_resize",val:" = True"},{name:"size",val:" = 224"},{name:"resample",val:" = 3"},{name:"do_center_crop",val:" = True"},{name:"crop_size",val:" = 224"},{name:"do_normalize",val:" = True"},{name:"image_mean",val:" = None"},{name:"image_std",val:" = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/clip/feature_extraction_clip.py#L31",parametersDescription:[{anchor:"transformers.CLIPFeatureExtractor.do_resize",description:`<strong>do_resize</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether to resize the input to a certain <code>size</code>.`,name:"do_resize"},{anchor:"transformers.CLIPFeatureExtractor.size",description:`<strong>size</strong> (<code>int</code>, 
<em>optional</em>, defaults to 224) &#x2014; Resize the input to the given size. Only has an effect if <code>do_resize</code> is set to <code>True</code>.`,name:"size"},{anchor:"transformers.CLIPFeatureExtractor.resample",description:`<strong>resample</strong> (<code>int</code>, <em>optional</em>, defaults to <code>PIL.Image.BICUBIC</code>) &#x2014; An optional resampling filter. This can be one of <code>PIL.Image.NEAREST</code>, <code>PIL.Image.BOX</code>, <code>PIL.Image.BILINEAR</code>, <code>PIL.Image.HAMMING</code>, <code>PIL.Image.BICUBIC</code> or <code>PIL.Image.LANCZOS</code>. Only has an effect if <code>do_resize</code> is set to <code>True</code>.`,name:"resample"},{anchor:"transformers.CLIPFeatureExtractor.do_center_crop",description:`<strong>do_center_crop</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether to crop the input at the center. If the input size is smaller than <code>crop_size</code> along any edge, the image is padded with 0&#x2019;s and then center cropped.`,name:"do_center_crop"},{anchor:"transformers.CLIPFeatureExtractor.crop_size",description:`<strong>crop_size</strong> (<code>int</code>, <em>optional</em>, defaults to 224) &#x2014; Desired output size when applying center-cropping. Only has an effect if <code>do_center_crop</code> is set to <code>True</code>.`,name:"crop_size"},{anchor:"transformers.CLIPFeatureExtractor.do_normalize",description:`<strong>do_normalize</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to normalize the input with <code>image_mean</code> and <code>image_std</code>.`,name:"do_normalize"},{anchor:"transformers.CLIPFeatureExtractor.image_mean",description:`<strong>image_mean</strong> (<code>List[int]</code>, defaults to <code>[0.485, 0.456, 0.406]</code>) &#x2014; The sequence of means for each channel, to be used when normalizing images.`,name:"image_mean"},{anchor:"transformers.CLIPFeatureExtractor.image_std",description:`<strong>image_std</strong> (<code>List[int]</code>, defaults to <code>[0.229, 0.224, 0.225]</code>) &#x2014; The sequence of standard deviations for each channel, to be used when normalizing images.`,name:"image_std"}]}}),jn=new w({props:{name:"center_crop",anchor:"transformers.CLIPFeatureExtractor.center_crop",parameters:[{name:"image",val:""},{name:"size",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/clip/feature_extraction_clip.py#L159",parametersDescription:[{anchor:"transformers.CLIPFeatureExtractor.center_crop.image",description:`<strong>image</strong> (<code>PIL.Image.Image</code> or <code>np.ndarray</code> or <code>torch.Tensor</code>) &#x2014; The image to resize.`,name:"image"},{anchor:"transformers.CLIPFeatureExtractor.center_crop.size",description:`<strong>size</strong> (<code>int</code> or <code>Tuple[int, int]</code>) &#x2014; The size to which crop the image.`,name:"size"}]}}),qn=new w({props:{name:"resize",anchor:"transformers.CLIPFeatureExtractor.resize",parameters:[{name:"image",val:""},{name:"size",val:""},{name:"resample",val:" = 3"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/clip/feature_extraction_clip.py#L185",parametersDescription:[{anchor:"transformers.CLIPFeatureExtractor.resize.image",description:`<strong>image</strong> (<code>PIL.Image.Image</code> or <code>np.ndarray</code> or <code>torch.Tensor</code>) &#x2014; The image to 
resize.`,name:"image"},{anchor:"transformers.CLIPFeatureExtractor.resize.size",description:`<strong>size</strong> (<code>int</code> or <code>Tuple[int, int]</code>) &#x2014; The size to use for resizing the image. If <code>int</code> it will be resized to match the shorter side`,name:"size"},{anchor:"transformers.CLIPFeatureExtractor.resize.resample",description:`<strong>resample</strong> (<code>int</code>, <em>optional</em>, defaults to <code>PIL.Image.BILINEAR</code>) &#x2014; The filter to user for resampling.`,name:"resample"}]}}),Dn=new D({}),On=new w({props:{name:"class transformers.CLIPProcessor",anchor:"transformers.CLIPProcessor",parameters:[{name:"feature_extractor",val:""},{name:"tokenizer",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/clip/processing_clip.py#L22",parametersDescription:[{anchor:"transformers.CLIPProcessor.feature_extractor",description:`<strong>feature_extractor</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/clip#transformers.CLIPFeatureExtractor">CLIPFeatureExtractor</a>) &#x2014; The feature extractor is a required input.`,name:"feature_extractor"},{anchor:"transformers.CLIPProcessor.tokenizer",description:`<strong>tokenizer</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/clip#transformers.CLIPTokenizerFast">CLIPTokenizerFast</a>) &#x2014; The tokenizer is a required input.`,name:"tokenizer"}]}}),An=new w({props:{name:"batch_decode",anchor:"transformers.CLIPProcessor.batch_decode",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/clip/processing_clip.py#L95"}}),Vn=new w({props:{name:"decode",anchor:"transformers.CLIPProcessor.decode",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/clip/processing_clip.py#L102"}}),Wn=new D({}),Bn=new w({props:{name:"class transformers.CLIPModel",anchor:"transformers.CLIPModel",parameters:[{name:"config",val:": CLIPConfig"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/clip/modeling_clip.py#L842",parametersDescription:[{anchor:"transformers.CLIPModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/clip#transformers.CLIPConfig">CLIPConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Hn=new w({props:{name:"forward",anchor:"transformers.CLIPModel.forward",parameters:[{name:"input_ids",val:": typing.Optional[torch.LongTensor] = None"},{name:"pixel_values",val:": typing.Optional[torch.FloatTensor] = None"},{name:"attention_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"position_ids",val:": typing.Optional[torch.LongTensor] = None"},{name:"return_loss",val:": typing.Optional[bool] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/clip/modeling_clip.py#L957",parametersDescription:[{anchor:"transformers.CLIPModel.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/clip#transformers.CLIPTokenizer">CLIPTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.CLIPModel.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.CLIPModel.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.CLIPModel.forward.pixel_values",description:`<strong>pixel_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_channels, height, width)</code>) &#x2014; Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/clip#transformers.CLIPFeatureExtractor">CLIPFeatureExtractor</a>. 
See <code>CLIPFeatureExtractor.__call__()</code>for details.`,name:"pixel_values"},{anchor:"transformers.CLIPModel.forward.return_loss",description:`<strong>return_loss</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the contrastive loss.`,name:"return_loss"},{anchor:"transformers.CLIPModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.CLIPModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.CLIPModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <code>transformers.models.clip.modeling_clip.CLIPOutput</code>or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<code>&lt;class 'transformers.models.clip.configuration_clip.CLIPConfig'&gt;</code>) and inputs.</p> <ul> <li><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>return_loss</code> is <code>True</code>) \u2014 Contrastive loss for image-text similarity.</li> <li><strong>logits_per_image:(<code>torch.FloatTensor</code></strong> of shape <code>(image_batch_size, text_batch_size)</code>) \u2014 The scaled dot product scores between <code>image_embeds</code> and <code>text_embeds</code>. This represents the image-text similarity scores.</li> <li><strong>logits_per_text:(<code>torch.FloatTensor</code></strong> of shape <code>(text_batch_size, image_batch_size)</code>) \u2014 The scaled dot product scores between <code>text_embeds</code> and <code>image_embeds</code>. 
This represents the text-image similarity scores.</li> <li><strong>text_embeds(<code>torch.FloatTensor</code></strong> of shape <code>(batch_size, output_dim</code>) \u2014 The text embeddings obtained by applying the projection layer to the pooled output of <a href="/docs/transformers/pr_16143/en/model_doc/clip#transformers.CLIPTextModel" >CLIPTextModel</a>.</li> <li><strong>image_embeds(<code>torch.FloatTensor</code></strong> of shape <code>(batch_size, output_dim</code>) \u2014 The image embeddings obtained by applying the projection layer to the pooled output of <a href="/docs/transformers/pr_16143/en/model_doc/clip#transformers.CLIPVisionModel" >CLIPVisionModel</a>.</li> <li><strong>text_model_output(<code>BaseModelOutputWithPooling</code>):</strong> The output of the <a href="/docs/transformers/pr_16143/en/model_doc/clip#transformers.CLIPTextModel" >CLIPTextModel</a>.</li> <li><strong>vision_model_output(<code>BaseModelOutputWithPooling</code>):</strong> The output of the <a href="/docs/transformers/pr_16143/en/model_doc/clip#transformers.CLIPVisionModel" >CLIPVisionModel</a>.</li> </ul> `,returnType:` <p><code>transformers.models.clip.modeling_clip.CLIPOutput</code>or <code>tuple(torch.FloatTensor)</code></p> `}}),To=new ce({props:{$$slots:{default:[d1]},$$scope:{ctx:x}}}),Kn=new A({props:{code:`from PIL import Image import requests from transformers import CLIPProcessor, CLIPModel model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32") processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32") url = "http://images.cocodataset.org/val2017/000000039769.jpg" image = Image.open(requests.get(url, stream=True).raw) inputs = processor( text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True ) outputs = model(**inputs) logits_per_image = outputs.logits_per_image # this is the image-text similarity score probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> requests <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> CLIPProcessor, CLIPModel <span class="hljs-meta">&gt;&gt;&gt; </span>model = CLIPModel.from_pretrained(<span class="hljs-string">&quot;openai/clip-vit-base-patch32&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>processor = CLIPProcessor.from_pretrained(<span class="hljs-string">&quot;openai/clip-vit-base-patch32&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>url = <span class="hljs-string">&quot;http://images.cocodataset.org/val2017/000000039769.jpg&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>image = Image.<span class="hljs-built_in">open</span>(requests.get(url, stream=<span class="hljs-literal">True</span>).raw) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = processor( <span class="hljs-meta">... </span> text=[<span class="hljs-string">&quot;a photo of a cat&quot;</span>, <span class="hljs-string">&quot;a photo of a dog&quot;</span>], images=image, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>, padding=<span class="hljs-literal">True</span> <span class="hljs-meta">... 
</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits_per_image = outputs.logits_per_image <span class="hljs-comment"># this is the image-text similarity score</span> <span class="hljs-meta">&gt;&gt;&gt; </span>probs = logits_per_image.softmax(dim=<span class="hljs-number">1</span>) <span class="hljs-comment"># we can take the softmax to get the label probabilities</span>`}}),Gn=new w({props:{name:"get_text_features",anchor:"transformers.CLIPModel.get_text_features",parameters:[{name:"input_ids",val:": typing.Optional[torch.Tensor] = None"},{name:"attention_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"position_ids",val:": typing.Optional[torch.Tensor] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/clip/modeling_clip.py#L875",parametersDescription:[{anchor:"transformers.CLIPModel.get_text_features.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/clip#transformers.CLIPTokenizer">CLIPTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.CLIPModel.get_text_features.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.CLIPModel.get_text_features.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.CLIPModel.get_text_features.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.CLIPModel.get_text_features.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.CLIPModel.get_text_features.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>The text embeddings obtained by applying the projection layer to the pooled output of <a href="/docs/transformers/pr_16143/en/model_doc/clip#transformers.CLIPTextModel" >CLIPTextModel</a>.</p> `,returnType:` <p>text_features (<code>torch.FloatTensor</code> of shape <code>(batch_size, output_dim</code>)</p> `}}),Io=new ce({props:{$$slots:{default:[c1]},$$scope:{ctx:x}}}),Jn=new A({props:{code:`from transformers import CLIPTokenizer, CLIPModel model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32") tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32") inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt") text_features = model.get_text_features(**inputs)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> CLIPTokenizer, CLIPModel <span class="hljs-meta">&gt;&gt;&gt; </span>model = CLIPModel.from_pretrained(<span class="hljs-string">&quot;openai/clip-vit-base-patch32&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = CLIPTokenizer.from_pretrained(<span class="hljs-string">&quot;openai/clip-vit-base-patch32&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer([<span class="hljs-string">&quot;a photo of a cat&quot;</span>, <span class="hljs-string">&quot;a photo of a dog&quot;</span>], padding=<span class="hljs-literal">True</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>text_features = model.get_text_features(**inputs)`}}),Xn=new w({props:{name:"get_image_features",anchor:"transformers.CLIPModel.get_image_features",parameters:[{name:"pixel_values",val:": typing.Optional[torch.FloatTensor] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/clip/modeling_clip.py#L915",parametersDescription:[{anchor:"transformers.CLIPModel.get_image_features.pixel_values",description:`<strong>pixel_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_channels, height, width)</code>) &#x2014; Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/clip#transformers.CLIPFeatureExtractor">CLIPFeatureExtractor</a>. See <code>CLIPFeatureExtractor.__call__()</code>for details.`,name:"pixel_values"},{anchor:"transformers.CLIPModel.get_image_features.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.CLIPModel.get_image_features.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.CLIPModel.get_image_features.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>The image embeddings obtained by applying the projection layer to the pooled output of <a href="/docs/transformers/pr_16143/en/model_doc/clip#transformers.CLIPVisionModel" >CLIPVisionModel</a>.</p> `,returnType:` <p>image_features (<code>torch.FloatTensor</code> of shape <code>(batch_size, output_dim</code>)</p> `}}),yo=new ce({props:{$$slots:{default:[p1]},$$scope:{ctx:x}}}),Zn=new A({props:{code:`from PIL import Image import requests from transformers import CLIPProcessor, CLIPModel model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32") processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32") url = "http://images.cocodataset.org/val2017/000000039769.jpg" image = Image.open(requests.get(url, stream=True).raw) inputs = processor(images=image, return_tensors="pt") image_features = model.get_image_features(**inputs)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> requests <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> CLIPProcessor, CLIPModel <span class="hljs-meta">&gt;&gt;&gt; </span>model = CLIPModel.from_pretrained(<span class="hljs-string">&quot;openai/clip-vit-base-patch32&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>processor = CLIPProcessor.from_pretrained(<span class="hljs-string">&quot;openai/clip-vit-base-patch32&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>url = <span class="hljs-string">&quot;http://images.cocodataset.org/val2017/000000039769.jpg&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>image = Image.<span class="hljs-built_in">open</span>(requests.get(url, stream=<span class="hljs-literal">True</span>).raw) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = processor(images=image, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>image_features = model.get_image_features(**inputs)`}}),Qn=new D({}),Yn=new w({props:{name:"class transformers.CLIPTextModel",anchor:"transformers.CLIPTextModel",parameters:[{name:"config",val:": CLIPTextConfig"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/clip/modeling_clip.py#L678"}}),es=new w({props:{name:"forward",anchor:"transformers.CLIPTextModel.forward",parameters:[{name:"input_ids",val:": typing.Optional[torch.Tensor] = None"},{name:"attention_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"position_ids",val:": typing.Optional[torch.Tensor] = None"},{name:"output_attentions",val:": typing.Optional[bool] = 
None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/clip/modeling_clip.py#L693",parametersDescription:[{anchor:"transformers.CLIPTextModel.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/clip#transformers.CLIPTokenizer">CLIPTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.CLIPTextModel.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.CLIPTextModel.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.CLIPTextModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.CLIPTextModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.CLIPTextModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPooling" >transformers.modeling_outputs.BaseModelOutputWithPooling</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<code>&lt;class 'transformers.models.clip.configuration_clip.CLIPTextConfig'&gt;</code>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>pooler_output</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, hidden_size)</code>) \u2014 Last layer hidden-state of the first token of the sequence (classification token) after further processing through the layers used for the auxiliary pretraining task. E.g. for BERT-family of models, this returns the classification token after processing through a linear layer and a tanh activation function. The linear layer weights are trained from the next sentence prediction (classification) objective during pretraining.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPooling" >transformers.modeling_outputs.BaseModelOutputWithPooling</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Co=new ce({props:{$$slots:{default:[h1]},$$scope:{ctx:x}}}),ts=new A({props:{code:`from transformers import CLIPTokenizer, CLIPTextModel model = CLIPTextModel.from_pretrained("openai/clip-vit-base-patch32") tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32") inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt") outputs = model(**inputs) last_hidden_state = outputs.last_hidden_state pooled_output = outputs.pooler_output # pooled (EOS token) states`,highlighted:`<span 
class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> CLIPTokenizer, CLIPTextModel <span class="hljs-meta">&gt;&gt;&gt; </span>model = CLIPTextModel.from_pretrained(<span class="hljs-string">&quot;openai/clip-vit-base-patch32&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = CLIPTokenizer.from_pretrained(<span class="hljs-string">&quot;openai/clip-vit-base-patch32&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer([<span class="hljs-string">&quot;a photo of a cat&quot;</span>, <span class="hljs-string">&quot;a photo of a dog&quot;</span>], padding=<span class="hljs-literal">True</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_state = outputs.last_hidden_state <span class="hljs-meta">&gt;&gt;&gt; </span>pooled_output = outputs.pooler_output <span class="hljs-comment"># pooled (EOS token) states</span>`}}),os=new D({}),ns=new w({props:{name:"class transformers.CLIPVisionModel",anchor:"transformers.CLIPVisionModel",parameters:[{name:"config",val:": CLIPVisionConfig"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/clip/modeling_clip.py#L789"}}),ss=new w({props:{name:"forward",anchor:"transformers.CLIPVisionModel.forward",parameters:[{name:"pixel_values",val:": typing.Optional[torch.FloatTensor] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/clip/modeling_clip.py#L802",parametersDescription:[{anchor:"transformers.CLIPVisionModel.forward.pixel_values",description:`<strong>pixel_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_channels, height, width)</code>) &#x2014; Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/clip#transformers.CLIPFeatureExtractor">CLIPFeatureExtractor</a>. See <code>CLIPFeatureExtractor.__call__()</code>for details.`,name:"pixel_values"},{anchor:"transformers.CLIPVisionModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.CLIPVisionModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.CLIPVisionModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPooling" >transformers.modeling_outputs.BaseModelOutputWithPooling</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<code>&lt;class 'transformers.models.clip.configuration_clip.CLIPVisionConfig'&gt;</code>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>pooler_output</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, hidden_size)</code>) \u2014 Last layer hidden-state of the first token of the sequence (classification token) after further processing through the layers used for the auxiliary pretraining task. E.g. for BERT-family of models, this returns the classification token after processing through a linear layer and a tanh activation function. The linear layer weights are trained from the next sentence prediction (classification) objective during pretraining.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPooling" >transformers.modeling_outputs.BaseModelOutputWithPooling</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),ko=new ce({props:{$$slots:{default:[m1]},$$scope:{ctx:x}}}),rs=new A({props:{code:`from PIL import Image import requests from transformers import CLIPProcessor, CLIPVisionModel model = CLIPVisionModel.from_pretrained("openai/clip-vit-base-patch32") processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32") url = "http://images.cocodataset.org/val2017/000000039769.jpg" image = Image.open(requests.get(url, stream=True).raw) inputs = processor(images=image, return_tensors="pt") outputs = model(**inputs) last_hidden_state = 
outputs.last_hidden_state pooled_output = outputs.pooler_output # pooled CLS states`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> requests <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> CLIPProcessor, CLIPVisionModel <span class="hljs-meta">&gt;&gt;&gt; </span>model = CLIPVisionModel.from_pretrained(<span class="hljs-string">&quot;openai/clip-vit-base-patch32&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>processor = CLIPProcessor.from_pretrained(<span class="hljs-string">&quot;openai/clip-vit-base-patch32&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>url = <span class="hljs-string">&quot;http://images.cocodataset.org/val2017/000000039769.jpg&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>image = Image.<span class="hljs-built_in">open</span>(requests.get(url, stream=<span class="hljs-literal">True</span>).raw) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = processor(images=image, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_state = outputs.last_hidden_state <span class="hljs-meta">&gt;&gt;&gt; </span>pooled_output = outputs.pooler_output <span class="hljs-comment"># pooled CLS states</span>`}}),as=new D({}),is=new w({props:{name:"class transformers.TFCLIPModel",anchor:"transformers.TFCLIPModel",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/clip/modeling_tf_clip.py#L1204",parametersDescription:[{anchor:"transformers.TFCLIPModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/clip#transformers.CLIPConfig">CLIPConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),$o=new ce({props:{$$slots:{default:[f1]},$$scope:{ctx:x}}}),ps=new w({props:{name:"call",anchor:"transformers.TFCLIPModel.call",parameters:[{name:"input_ids",val:": typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None"},{name:"pixel_values",val:": typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None"},{name:"attention_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"position_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"return_loss",val:": typing.Optional[bool] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"training",val:": bool = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/clip/modeling_tf_clip.py#L1334",parametersDescription:[{anchor:"transformers.TFCLIPModel.call.input_ids",description:`<strong>input_ids</strong> (<code>np.ndarray</code>, <code>tf.Tensor</code>, <code>List[tf.Tensor]</code> \`<code>Dict[str, tf.Tensor]</code> or <code>Dict[str, np.ndarray]</code> and each example must have the shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFCLIPModel.call.pixel_values",description:`<strong>pixel_values</strong> (<code>np.ndarray</code>, <code>tf.Tensor</code>, <code>List[tf.Tensor]</code> <code>Dict[str, tf.Tensor]</code> or <code>Dict[str, np.ndarray]</code> and each example must have the shape <code>(batch_size, num_channels, height, width)</code>) &#x2014; Pixel values. Pixel values can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/clip#transformers.CLIPFeatureExtractor">CLIPFeatureExtractor</a>. 
See <code>CLIPFeatureExtractor.__call__()</code>for details.`,name:"pixel_values"},{anchor:"transformers.TFCLIPModel.call.attention_mask",description:`<strong>attention_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFCLIPModel.call.position_ids",description:`<strong>position_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFCLIPModel.call.return_loss",description:`<strong>return_loss</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the contrastive loss.`,name:"return_loss"},{anchor:"transformers.TFCLIPModel.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFCLIPModel.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFCLIPModel.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. 
This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFCLIPModel.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to \`False&#x201C;) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"}],returnDescription:` <p>A <code>transformers.models.clip.modeling_tf_clip.TFCLIPOutput</code>or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<code>&lt;class 'transformers.models.clip.configuration_clip.CLIPConfig'&gt;</code>) and inputs.</p> <ul> <li><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>return_loss</code> is <code>True</code>) \u2014 Contrastive loss for image-text similarity.</li> <li><strong>logits_per_image:(<code>tf.Tensor</code></strong> of shape <code>(image_batch_size, text_batch_size)</code>) \u2014 The scaled dot product scores between <code>image_embeds</code> and <code>text_embeds</code>. This represents the image-text similarity scores.</li> <li><strong>logits_per_text:(<code>tf.Tensor</code></strong> of shape <code>(text_batch_size, image_batch_size)</code>) \u2014 The scaled dot product scores between <code>text_embeds</code> and <code>image_embeds</code>. This represents the text-image similarity scores.</li> <li><strong>text_embeds(<code>tf.Tensor</code></strong> of shape <code>(batch_size, output_dim</code>) \u2014 The text embeddings obtained by applying the projection layer to the pooled output of <a href="/docs/transformers/pr_16143/en/model_doc/clip#transformers.TFCLIPTextModel" >TFCLIPTextModel</a>.</li> <li><strong>image_embeds(<code>tf.Tensor</code></strong> of shape <code>(batch_size, output_dim</code>) \u2014 The image embeddings obtained by applying the projection layer to the pooled output of <a href="/docs/transformers/pr_16143/en/model_doc/clip#transformers.TFCLIPVisionModel" >TFCLIPVisionModel</a>.</li> <li><strong>text_model_output(<code>TFBaseModelOutputWithPooling</code>):</strong> The output of the <a href="/docs/transformers/pr_16143/en/model_doc/clip#transformers.TFCLIPTextModel" >TFCLIPTextModel</a>.</li> <li><strong>vision_model_output(<code>TFBaseModelOutputWithPooling</code>):</strong> The output of the <a href="/docs/transformers/pr_16143/en/model_doc/clip#transformers.TFCLIPVisionModel" >TFCLIPVisionModel</a>.</li> </ul> `,returnType:` <p><code>transformers.models.clip.modeling_tf_clip.TFCLIPOutput</code>or <code>tuple(tf.Tensor)</code></p> `}}),xo=new ce({props:{$$slots:{default:[u1]},$$scope:{ctx:x}}}),hs=new A({props:{code:`import tensorflow as tf from PIL import Image import requests from transformers import CLIPProcessor, TFCLIPModel model = TFCLIPModel.from_pretrained("openai/clip-vit-base-patch32") processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32") url = "http://images.cocodataset.org/val2017/000000039769.jpg" image = Image.open(requests.get(url, stream=True).raw) inputs = processor( text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="tf", padding=True ) outputs = model(**inputs) logits_per_image = outputs.logits_per_image # this is the image-text similarity score probs = tf.nn.softmax(logits_per_image, axis=1) # we can take the softmax to get the 
label probabilities`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> requests <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> CLIPProcessor, TFCLIPModel <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFCLIPModel.from_pretrained(<span class="hljs-string">&quot;openai/clip-vit-base-patch32&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>processor = CLIPProcessor.from_pretrained(<span class="hljs-string">&quot;openai/clip-vit-base-patch32&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>url = <span class="hljs-string">&quot;http://images.cocodataset.org/val2017/000000039769.jpg&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>image = Image.<span class="hljs-built_in">open</span>(requests.get(url, stream=<span class="hljs-literal">True</span>).raw) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = processor( <span class="hljs-meta">... </span> text=[<span class="hljs-string">&quot;a photo of a cat&quot;</span>, <span class="hljs-string">&quot;a photo of a dog&quot;</span>], images=image, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>, padding=<span class="hljs-literal">True</span> <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits_per_image = outputs.logits_per_image <span class="hljs-comment"># this is the image-text similarity score</span> <span class="hljs-meta">&gt;&gt;&gt; </span>probs = tf.nn.softmax(logits_per_image, axis=<span class="hljs-number">1</span>) <span class="hljs-comment"># we can take the softmax to get the label probabilities</span>`}}),ms=new w({props:{name:"get_text_features",anchor:"transformers.TFCLIPModel.get_text_features",parameters:[{name:"input_ids",val:": typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None"},{name:"attention_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"position_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"training",val:": bool = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/clip/modeling_tf_clip.py#L1250",parametersDescription:[{anchor:"transformers.TFCLIPModel.get_text_features.input_ids",description:`<strong>input_ids</strong> (<code>np.ndarray</code>, <code>tf.Tensor</code>, <code>List[tf.Tensor]</code> \`<code>Dict[str, tf.Tensor]</code> or <code>Dict[str, np.ndarray]</code> and each example must have the 
shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFCLIPModel.get_text_features.attention_mask",description:`<strong>attention_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFCLIPModel.get_text_features.position_ids",description:`<strong>position_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFCLIPModel.get_text_features.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFCLIPModel.get_text_features.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFCLIPModel.get_text_features.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. 
This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFCLIPModel.get_text_features.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"}],returnDescription:` <p>The text embeddings obtained by applying the projection layer to the pooled output of <a href="/docs/transformers/pr_16143/en/model_doc/clip#transformers.TFCLIPTextModel" >TFCLIPTextModel</a>.</p> `,returnType:` <p>text_features (<code>tf.Tensor</code> of shape <code>(batch_size, output_dim)</code>)</p> `}}),Eo=new ce({props:{$$slots:{default:[g1]},$$scope:{ctx:x}}}),fs=new A({props:{code:`from transformers import CLIPTokenizer, TFCLIPModel model = TFCLIPModel.from_pretrained("openai/clip-vit-base-patch32") tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32") inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="tf") text_features = model.get_text_features(**inputs)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> CLIPTokenizer, TFCLIPModel <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFCLIPModel.from_pretrained(<span class="hljs-string">&quot;openai/clip-vit-base-patch32&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = CLIPTokenizer.from_pretrained(<span class="hljs-string">&quot;openai/clip-vit-base-patch32&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer([<span class="hljs-string">&quot;a photo of a cat&quot;</span>, <span class="hljs-string">&quot;a photo of a dog&quot;</span>], padding=<span class="hljs-literal">True</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>text_features = model.get_text_features(**inputs)`}}),us=new w({props:{name:"get_image_features",anchor:"transformers.TFCLIPModel.get_image_features",parameters:[{name:"pixel_values",val:": typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"training",val:": bool = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/clip/modeling_tf_clip.py#L1291",parametersDescription:[{anchor:"transformers.TFCLIPModel.get_image_features.pixel_values",description:`<strong>pixel_values</strong> (<code>np.ndarray</code>, <code>tf.Tensor</code>, <code>List[tf.Tensor]</code> \`<code>Dict[str, tf.Tensor]</code> or <code>Dict[str, np.ndarray]</code> and each example must have the shape <code>(batch_size, num_channels, height, width)</code>) &#x2014; Pixel values.
Pixel values can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/clip#transformers.CLIPFeatureExtractor">CLIPFeatureExtractor</a>. See <code>CLIPFeatureExtractor.__call__()</code>for details. output_attentions (<code>bool</code>, <em>optional</em>): Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"pixel_values"},{anchor:"transformers.TFCLIPModel.get_image_features.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFCLIPModel.get_image_features.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFCLIPModel.get_image_features.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to \`False&#x201C;) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"}],returnDescription:` <p>The image embeddings obtained by applying the projection layer to the pooled output of <a href="/docs/transformers/pr_16143/en/model_doc/clip#transformers.TFCLIPVisionModel" >TFCLIPVisionModel</a>.</p> `,returnType:` <p>image_features (<code>tf.Tensor</code> of shape <code>(batch_size, output_dim</code>)</p> `}}),zo=new ce({props:{$$slots:{default:[_1]},$$scope:{ctx:x}}}),gs=new A({props:{code:`from PIL import Image import requests from transformers import CLIPProcessor, TFCLIPModel model = TFCLIPModel.from_pretrained("openai/clip-vit-base-patch32") processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32") url = "http://images.cocodataset.org/val2017/000000039769.jpg" image = Image.open(requests.get(url, stream=True).raw) inputs = processor(images=image, return_tensors="tf") image_features = model.get_image_features(**inputs)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> requests <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> CLIPProcessor, TFCLIPModel <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFCLIPModel.from_pretrained(<span class="hljs-string">&quot;openai/clip-vit-base-patch32&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>processor = CLIPProcessor.from_pretrained(<span class="hljs-string">&quot;openai/clip-vit-base-patch32&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>url = <span class="hljs-string">&quot;http://images.cocodataset.org/val2017/000000039769.jpg&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>image 
= Image.<span class="hljs-built_in">open</span>(requests.get(url, stream=<span class="hljs-literal">True</span>).raw) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = processor(images=image, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>image_features = model.get_image_features(**inputs)`}}),_s=new D({}),vs=new w({props:{name:"class transformers.TFCLIPTextModel",anchor:"transformers.TFCLIPTextModel",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/clip/modeling_tf_clip.py#L1041"}}),Ps=new w({props:{name:"call",anchor:"transformers.TFCLIPTextModel.call",parameters:[{name:"input_ids",val:": typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None"},{name:"attention_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"position_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"training",val:": typing.Optional[bool] = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/clip/modeling_tf_clip.py#L1049",parametersDescription:[{anchor:"transformers.TFCLIPTextModel.call.input_ids",description:`<strong>input_ids</strong> (<code>np.ndarray</code>, <code>tf.Tensor</code>, <code>List[tf.Tensor]</code> \`<code>Dict[str, tf.Tensor]</code> or <code>Dict[str, np.ndarray]</code> and each example must have the shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFCLIPTextModel.call.attention_mask",description:`<strong>attention_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFCLIPTextModel.call.position_ids",description:`<strong>position_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFCLIPTextModel.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFCLIPTextModel.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFCLIPTextModel.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFCLIPTextModel.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to \`False&#x201C;) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFBaseModelOutputWithPooling" >transformers.modeling_tf_outputs.TFBaseModelOutputWithPooling</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<code>&lt;class 'transformers.models.clip.configuration_clip.CLIPTextConfig'&gt;</code>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>pooler_output</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, hidden_size)</code>) \u2014 Last layer hidden-state of the first token of the sequence (classification token) further processed by a Linear layer and a Tanh activation function. 
The Linear layer weights are trained from the next sentence prediction (classification) objective during pretraining.</p> <p>This output is usually <em>not</em> a good summary of the semantic content of the input, you\u2019re often better with averaging or pooling the sequence of hidden-states for the whole input sequence.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFBaseModelOutputWithPooling" >transformers.modeling_tf_outputs.TFBaseModelOutputWithPooling</a> or <code>tuple(tf.Tensor)</code></p> `}}),jo=new ce({props:{$$slots:{default:[v1]},$$scope:{ctx:x}}}),Ts=new A({props:{code:`from transformers import CLIPTokenizer, TFCLIPTextModel model = TFCLIPTextModel.from_pretrained("openai/clip-vit-base-patch32") tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32") inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="tf") outputs = model(**inputs) last_hidden_state = outputs.last_hidden_state pooled_output = outputs.pooler_output # pooled (EOS token) states`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> CLIPTokenizer, TFCLIPTextModel <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFCLIPTextModel.from_pretrained(<span class="hljs-string">&quot;openai/clip-vit-base-patch32&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = CLIPTokenizer.from_pretrained(<span class="hljs-string">&quot;openai/clip-vit-base-patch32&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer([<span class="hljs-string">&quot;a photo of a cat&quot;</span>, <span class="hljs-string">&quot;a photo of a dog&quot;</span>], padding=<span class="hljs-literal">True</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_state = outputs.last_hidden_state <span class="hljs-meta">&gt;&gt;&gt; </span>pooled_output = outputs.pooler_output <span class="hljs-comment"># pooled (EOS token) states</span>`}}),Is=new D({}),ys=new w({props:{name:"class transformers.TFCLIPVisionModel",anchor:"transformers.TFCLIPVisionModel",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/clip/modeling_tf_clip.py#L1105"}}),bs=new 
w({props:{name:"call",anchor:"transformers.TFCLIPVisionModel.call",parameters:[{name:"pixel_values",val:": typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"training",val:": typing.Optional[bool] = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/clip/modeling_tf_clip.py#L1146",parametersDescription:[{anchor:"transformers.TFCLIPVisionModel.call.pixel_values",description:`<strong>pixel_values</strong> (<code>np.ndarray</code>, <code>tf.Tensor</code>, <code>List[tf.Tensor]</code> \`<code>Dict[str, tf.Tensor]</code> or <code>Dict[str, np.ndarray]</code> and each example must have the shape <code>(batch_size, num_channels, height, width)</code>) &#x2014; Pixel values. Pixel values can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/clip#transformers.CLIPFeatureExtractor">CLIPFeatureExtractor</a>. See <code>CLIPFeatureExtractor.__call__()</code>for details. output_attentions (<code>bool</code>, <em>optional</em>): Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"pixel_values"},{anchor:"transformers.TFCLIPVisionModel.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFCLIPVisionModel.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. 
This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFCLIPVisionModel.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to \`False&#x201C;) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFBaseModelOutputWithPooling" >transformers.modeling_tf_outputs.TFBaseModelOutputWithPooling</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<code>&lt;class 'transformers.models.clip.configuration_clip.CLIPVisionConfig'&gt;</code>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>pooler_output</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, hidden_size)</code>) \u2014 Last layer hidden-state of the first token of the sequence (classification token) further processed by a Linear layer and a Tanh activation function. The Linear layer weights are trained from the next sentence prediction (classification) objective during pretraining.</p> <p>This output is usually <em>not</em> a good summary of the semantic content of the input, you\u2019re often better with averaging or pooling the sequence of hidden-states for the whole input sequence.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFBaseModelOutputWithPooling" >transformers.modeling_tf_outputs.TFBaseModelOutputWithPooling</a> or <code>tuple(tf.Tensor)</code></p> `}}),qo=new ce({props:{$$slots:{default:[P1]},$$scope:{ctx:x}}}),Cs=new A({props:{code:`from PIL import Image import requests from transformers import CLIPProcessor, TFCLIPVisionModel model = TFCLIPVisionModel.from_pretrained("openai/clip-vit-base-patch32") processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32") url = "http://images.cocodataset.org/val2017/000000039769.jpg" image = Image.open(requests.get(url, stream=True).raw) inputs = processor(images=image, return_tensors="tf") outputs = model(**inputs) last_hidden_state 
= outputs.last_hidden_state pooled_output = outputs.pooler_output # pooled CLS states`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> requests <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> CLIPProcessor, TFCLIPVisionModel <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFCLIPVisionModel.from_pretrained(<span class="hljs-string">&quot;openai/clip-vit-base-patch32&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>processor = CLIPProcessor.from_pretrained(<span class="hljs-string">&quot;openai/clip-vit-base-patch32&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>url = <span class="hljs-string">&quot;http://images.cocodataset.org/val2017/000000039769.jpg&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>image = Image.<span class="hljs-built_in">open</span>(requests.get(url, stream=<span class="hljs-literal">True</span>).raw) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = processor(images=image, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_state = outputs.last_hidden_state <span class="hljs-meta">&gt;&gt;&gt; </span>pooled_output = outputs.pooler_output <span class="hljs-comment"># pooled CLS states</span>`}}),Ls=new D({}),ks=new w({props:{name:"class transformers.FlaxCLIPModel",anchor:"transformers.FlaxCLIPModel",parameters:[{name:"config",val:": CLIPConfig"},{name:"input_shape",val:": typing.Optional[typing.Tuple] = None"},{name:"seed",val:": int = 0"},{name:"dtype",val:": dtype = <class 'jax._src.numpy.lax_numpy.float32'>"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/clip/modeling_flax_clip.py#L1108",parametersDescription:[{anchor:"transformers.FlaxCLIPModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/clip#transformers.CLIPConfig">CLIPConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"},{anchor:"transformers.FlaxCLIPModel.dtype",description:`<strong>dtype</strong> (<code>jax.numpy.dtype</code>, <em>optional</em>, defaults to <code>jax.numpy.float32</code>) &#x2014; The data type of the computation. Can be one of <code>jax.numpy.float32</code>, <code>jax.numpy.float16</code> (on GPUs) and <code>jax.numpy.bfloat16</code> (on TPUs).</p> <p>This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. 
If specified all the computation will be performed with the given <code>dtype</code>.</p> <p><strong>Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.</strong></p> <p>If you wish to change the dtype of the model parameters, see <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.to_fp16">to_fp16()</a> and <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.to_bf16">to_bf16()</a>.`,name:"dtype"}]}}),Fs=new w({props:{name:"__call__",anchor:"transformers.FlaxCLIPPreTrainedModel.__call__",parameters:[{name:"input_ids",val:""},{name:"pixel_values",val:""},{name:"attention_mask",val:" = None"},{name:"position_ids",val:" = None"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"},{name:"train",val:": bool = False"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/clip/modeling_flax_clip.py#L737",parametersDescription:[{anchor:"transformers.FlaxCLIPPreTrainedModel.__call__.input_ids",description:`<strong>input_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/clip#transformers.CLIPTokenizer">CLIPTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FlaxCLIPPreTrainedModel.__call__.attention_mask",description:`<strong>attention_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FlaxCLIPPreTrainedModel.__call__.position_ids",description:`<strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.FlaxCLIPPreTrainedModel.__call__.pixel_values",description:`<strong>pixel_values</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, num_channels, height, width)</code>) &#x2014; Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/clip#transformers.CLIPFeatureExtractor">CLIPFeatureExtractor</a>. 
See <code>CLIPFeatureExtractor.__call__()</code>for details.`,name:"pixel_values"},{anchor:"transformers.FlaxCLIPPreTrainedModel.__call__.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.FlaxCLIPPreTrainedModel.__call__.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FlaxCLIPPreTrainedModel.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <code>transformers.models.clip.modeling_flax_clip.FlaxCLIPOutput</code>or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<code>&lt;class 'transformers.models.clip.configuration_clip.CLIPConfig'&gt;</code>) and inputs.</p> <ul> <li><strong>logits_per_image:(<code>jnp.ndarray</code></strong> of shape <code>(image_batch_size, text_batch_size)</code>) \u2014 The scaled dot product scores between <code>image_embeds</code> and <code>text_embeds</code>. This represents the image-text similarity scores.</li> <li><strong>logits_per_text:(<code>jnp.ndarray</code></strong> of shape <code>(text_batch_size, image_batch_size)</code>) \u2014 The scaled dot product scores between <code>text_embeds</code> and <code>image_embeds</code>. 
This represents the text-image similarity scores.</li> <li><strong>text_embeds(<code>jnp.ndarray</code></strong> of shape <code>(batch_size, output_dim</code>) \u2014 The text embeddings obtained by applying the projection layer to the pooled output of <a href="/docs/transformers/pr_16143/en/model_doc/clip#transformers.FlaxCLIPTextModel" >FlaxCLIPTextModel</a>.</li> <li><strong>image_embeds(<code>jnp.ndarray</code></strong> of shape <code>(batch_size, output_dim</code>) \u2014 The image embeddings obtained by applying the projection layer to the pooled output of <a href="/docs/transformers/pr_16143/en/model_doc/clip#transformers.FlaxCLIPVisionModel" >FlaxCLIPVisionModel</a>.</li> <li><strong>text_model_output(<code>FlaxBaseModelOutputWithPooling</code>):</strong> The output of the <a href="/docs/transformers/pr_16143/en/model_doc/clip#transformers.FlaxCLIPTextModel" >FlaxCLIPTextModel</a>.</li> <li><strong>vision_model_output(<code>FlaxBaseModelOutputWithPooling</code>):</strong> The output of the <a href="/docs/transformers/pr_16143/en/model_doc/clip#transformers.FlaxCLIPVisionModel" >FlaxCLIPVisionModel</a>.</li> </ul> `,returnType:` <p><code>transformers.models.clip.modeling_flax_clip.FlaxCLIPOutput</code>or <code>tuple(torch.FloatTensor)</code></p> `}}),Oo=new ce({props:{$$slots:{default:[T1]},$$scope:{ctx:x}}}),qs=new A({props:{code:`import jax from PIL import Image import requests from transformers import CLIPProcessor, FlaxCLIPModel model = FlaxCLIPModel.from_pretrained("openai/clip-vit-base-patch32") processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32") url = "http://images.cocodataset.org/val2017/000000039769.jpg" image = Image.open(requests.get(url, stream=True).raw) inputs = processor( text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="np", padding=True ) outputs = model(**inputs) logits_per_image = outputs.logits_per_image # this is the image-text similarity score probs = jax.nn.softmax(logits_per_image, axis=1) # we can take the softmax to get the label probabilities`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> jax <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> requests <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> CLIPProcessor, FlaxCLIPModel <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxCLIPModel.from_pretrained(<span class="hljs-string">&quot;openai/clip-vit-base-patch32&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>processor = CLIPProcessor.from_pretrained(<span class="hljs-string">&quot;openai/clip-vit-base-patch32&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>url = <span class="hljs-string">&quot;http://images.cocodataset.org/val2017/000000039769.jpg&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>image = Image.<span class="hljs-built_in">open</span>(requests.get(url, stream=<span class="hljs-literal">True</span>).raw) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = processor( <span class="hljs-meta">... 
</span> text=[<span class="hljs-string">&quot;a photo of a cat&quot;</span>, <span class="hljs-string">&quot;a photo of a dog&quot;</span>], images=image, return_tensors=<span class="hljs-string">&quot;np&quot;</span>, padding=<span class="hljs-literal">True</span> <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits_per_image = outputs.logits_per_image <span class="hljs-comment"># this is the image-text similarity score</span> <span class="hljs-meta">&gt;&gt;&gt; </span>probs = jax.nn.softmax(logits_per_image, axis=<span class="hljs-number">1</span>) <span class="hljs-comment"># we can take the softmax to get the label probabilities</span>`}}),Ds=new w({props:{name:"get_text_features",anchor:"transformers.FlaxCLIPPreTrainedModel.get_text_features",parameters:[{name:"input_ids",val:""},{name:"attention_mask",val:" = None"},{name:"position_ids",val:" = None"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"},{name:"train",val:" = False"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/clip/modeling_flax_clip.py#L782",parametersDescription:[{anchor:"transformers.FlaxCLIPPreTrainedModel.get_text_features.input_ids",description:`<strong>input_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/clip#transformers.CLIPTokenizer">CLIPTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"}],returnDescription:` <p>The text embeddings obtained by applying the projection layer to the pooled output of <a href="/docs/transformers/pr_16143/en/model_doc/clip#transformers.FlaxCLIPTextModel" >FlaxCLIPTextModel</a>.</p> `,returnType:` <p>text_features (<code>jnp.ndarray</code> of shape <code>(batch_size, output_dim</code>)</p> `}}),Os=new A({props:{code:`from transformers import CLIPTokenizer, FlaxCLIPModel model = FlaxCLIPModel.from_pretrained("openai/clip-vit-base-patch32") tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32") inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="np") text_features = model.get_text_features(**inputs)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> CLIPTokenizer, FlaxCLIPModel <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxCLIPModel.from_pretrained(<span class="hljs-string">&quot;openai/clip-vit-base-patch32&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = CLIPTokenizer.from_pretrained(<span class="hljs-string">&quot;openai/clip-vit-base-patch32&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer([<span class="hljs-string">&quot;a photo of a cat&quot;</span>, <span class="hljs-string">&quot;a photo of a dog&quot;</span>], padding=<span class="hljs-literal">True</span>, 
return_tensors=<span class="hljs-string">&quot;np&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>text_features = model.get_text_features(**inputs)`}}),As=new w({props:{name:"get_image_features",anchor:"transformers.FlaxCLIPPreTrainedModel.get_image_features",parameters:[{name:"pixel_values",val:""},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"},{name:"train",val:" = False"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/clip/modeling_flax_clip.py#L849",parametersDescription:[{anchor:"transformers.FlaxCLIPPreTrainedModel.get_image_features.pixel_values",description:`<strong>pixel_values</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, num_channels, height, width)</code>) &#x2014; Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/clip#transformers.CLIPFeatureExtractor">CLIPFeatureExtractor</a>. See <code>CLIPFeatureExtractor.__call__()</code>for details.`,name:"pixel_values"}],returnDescription:` <p>The image embeddings obtained by applying the projection layer to the pooled output of <a href="/docs/transformers/pr_16143/en/model_doc/clip#transformers.FlaxCLIPVisionModel" >FlaxCLIPVisionModel</a></p> `,returnType:` <p>image_features (<code>jnp.ndarray</code> of shape <code>(batch_size, output_dim</code>)</p> `}}),Ns=new A({props:{code:`from PIL import Image import requests from transformers import CLIPProcessor, FlaxCLIPModel model = FlaxCLIPModel.from_pretrained("openai/clip-vit-base-patch32") processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32") url = "http://images.cocodataset.org/val2017/000000039769.jpg" image = Image.open(requests.get(url, stream=True).raw) inputs = processor(images=image, return_tensors="np") image_features = model.get_image_features(**inputs)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> requests <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> CLIPProcessor, FlaxCLIPModel <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxCLIPModel.from_pretrained(<span class="hljs-string">&quot;openai/clip-vit-base-patch32&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>processor = CLIPProcessor.from_pretrained(<span class="hljs-string">&quot;openai/clip-vit-base-patch32&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>url = <span class="hljs-string">&quot;http://images.cocodataset.org/val2017/000000039769.jpg&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>image = Image.<span class="hljs-built_in">open</span>(requests.get(url, stream=<span class="hljs-literal">True</span>).raw) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = processor(images=image, return_tensors=<span class="hljs-string">&quot;np&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>image_features = model.get_image_features(**inputs)`}}),Vs=new D({}),Ss=new w({props:{name:"class transformers.FlaxCLIPTextModel",anchor:"transformers.FlaxCLIPTextModel",parameters:[{name:"config",val:": CLIPTextConfig"},{name:"input_shape",val:" = (1, 1)"},{name:"seed",val:": int = 0"},{name:"dtype",val:": dtype = <class 
'jax._src.numpy.lax_numpy.float32'>"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/clip/modeling_flax_clip.py#L929"}}),Ws=new w({props:{name:"__call__",anchor:"transformers.FlaxCLIPTextPreTrainedModel.__call__",parameters:[{name:"input_ids",val:""},{name:"attention_mask",val:" = None"},{name:"position_ids",val:" = None"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"},{name:"train",val:": bool = False"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/clip/modeling_flax_clip.py#L604",parametersDescription:[{anchor:"transformers.FlaxCLIPTextPreTrainedModel.__call__.input_ids",description:`<strong>input_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/clip#transformers.CLIPTokenizer">CLIPTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FlaxCLIPTextPreTrainedModel.__call__.attention_mask",description:`<strong>attention_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FlaxCLIPTextPreTrainedModel.__call__.position_ids",description:`<strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.FlaxCLIPTextPreTrainedModel.__call__.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.FlaxCLIPTextPreTrainedModel.__call__.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FlaxCLIPTextPreTrainedModel.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPooling" >transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPooling</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<code>&lt;class 'transformers.models.clip.configuration_clip.CLIPTextConfig'&gt;</code>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>pooler_output</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, hidden_size)</code>) \u2014 Last layer hidden-state of the first token of the sequence (classification token) further processed by a Linear layer and a Tanh activation function. The Linear layer weights are trained from the next sentence prediction (classification) objective during pretraining.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPooling" >transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPooling</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),No=new ce({props:{$$slots:{default:[I1]},$$scope:{ctx:x}}}),Bs=new A({props:{code:`from transformers import CLIPTokenizer, FlaxCLIPTextModel model = FlaxCLIPTextModel.from_pretrained("openai/clip-vit-base-patch32") tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32") inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="np") outputs = model(**inputs) last_hidden_state = outputs.last_hidden_state pooler_output = outputs.pooler_output # pooled (EOS token) states`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> CLIPTokenizer, 
FlaxCLIPTextModel <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxCLIPTextModel.from_pretrained(<span class="hljs-string">&quot;openai/clip-vit-base-patch32&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = CLIPTokenizer.from_pretrained(<span class="hljs-string">&quot;openai/clip-vit-base-patch32&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer([<span class="hljs-string">&quot;a photo of a cat&quot;</span>, <span class="hljs-string">&quot;a photo of a dog&quot;</span>], padding=<span class="hljs-literal">True</span>, return_tensors=<span class="hljs-string">&quot;np&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_state = outputs.last_hidden_state <span class="hljs-meta">&gt;&gt;&gt; </span>pooler_output = outputs.pooler_output <span class="hljs-comment"># pooled (EOS token) states</span>`}}),Us=new D({}),Rs=new w({props:{name:"class transformers.FlaxCLIPVisionModel",anchor:"transformers.FlaxCLIPVisionModel",parameters:[{name:"config",val:": CLIPVisionConfig"},{name:"input_shape",val:": typing.Optional[typing.Tuple] = None"},{name:"seed",val:": int = 0"},{name:"dtype",val:": dtype = <class 'jax._src.numpy.lax_numpy.float32'>"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/clip/modeling_flax_clip.py#L982"}}),Hs=new w({props:{name:"__call__",anchor:"transformers.FlaxCLIPVisionPreTrainedModel.__call__",parameters:[{name:"pixel_values",val:""},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"},{name:"train",val:": bool = False"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/clip/modeling_flax_clip.py#L673",parametersDescription:[{anchor:"transformers.FlaxCLIPVisionPreTrainedModel.__call__.pixel_values",description:`<strong>pixel_values</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, num_channels, height, width)</code>) &#x2014; Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/clip#transformers.CLIPFeatureExtractor">CLIPFeatureExtractor</a>. See <code>CLIPFeatureExtractor.__call__()</code>for details.`,name:"pixel_values"},{anchor:"transformers.FlaxCLIPVisionPreTrainedModel.__call__.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.FlaxCLIPVisionPreTrainedModel.__call__.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FlaxCLIPVisionPreTrainedModel.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPooling" >transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPooling</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<code>&lt;class 'transformers.models.clip.configuration_clip.CLIPVisionConfig'&gt;</code>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>pooler_output</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, hidden_size)</code>) \u2014 Last layer hidden-state of the first token of the sequence (classification token) further processed by a Linear layer and a Tanh activation function. The Linear layer weights are trained from the next sentence prediction (classification) objective during pretraining.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPooling" >transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPooling</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),So=new ce({props:{$$slots:{default:[y1]},$$scope:{ctx:x}}}),Ks=new A({props:{code:`from PIL import Image import requests from transformers import CLIPProcessor, FlaxCLIPVisionModel model = FlaxCLIPVisionModel.from_pretrained("openai/clip-vit-base-patch32") processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32") url = "http://images.cocodataset.org/val2017/000000039769.jpg" image = Image.open(requests.get(url, stream=True).raw) inputs = processor(images=image, return_tensors="np") outputs = model(**inputs) last_hidden_state = outputs.last_hidden_state pooler_output = outputs.pooler_output # pooled CLS states`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span 
class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> requests <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> CLIPProcessor, FlaxCLIPVisionModel <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxCLIPVisionModel.from_pretrained(<span class="hljs-string">&quot;openai/clip-vit-base-patch32&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>processor = CLIPProcessor.from_pretrained(<span class="hljs-string">&quot;openai/clip-vit-base-patch32&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>url = <span class="hljs-string">&quot;http://images.cocodataset.org/val2017/000000039769.jpg&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>image = Image.<span class="hljs-built_in">open</span>(requests.get(url, stream=<span class="hljs-literal">True</span>).raw) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = processor(images=image, return_tensors=<span class="hljs-string">&quot;np&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_state = outputs.last_hidden_state <span class="hljs-meta">&gt;&gt;&gt; </span>pooler_output = outputs.pooler_output <span class="hljs-comment"># pooled CLS states</span>`}}),{c(){p=n("meta"),L=l(),u=n("h1"),b=n("a"),C=n("span"),g(y.$$.fragment),f=l(),k=n("span"),Ho=a("CLIP"),at=l(),M=n("h2"),ue=n("a"),ye=n("span"),g(be.$$.fragment),Ko=l(),Ce=n("span"),Go=a("Overview"),Xt=l(),pe=n("p"),Me=a("The CLIP model was proposed in "),Le=n("a"),Zt=a("Learning Transferable Visual Models From Natural Language Supervision"),H=a(` by Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, Ilya Sutskever. CLIP (Contrastive Language-Image Pre-Training) is a neural network trained on a variety of (image, text) pairs. It can be instructed in natural language to predict the most relevant text snippet, given an image, without directly optimizing for the task, similarly to the zero-shot capabilities of GPT-2 and 3.`),he=l(),je=n("p"),it=a("The abstract from the paper is the following:"),Qt=l(),Fe=n("p"),ke=n("em"),Jo=a(`State-of-the-art computer vision systems are trained to predict a fixed set of predetermined object categories. This restricted form of supervision limits their generality and usability since additional labeled data is needed to specify any other visual concept. Learning directly from raw text about images is a promising alternative which leverages a much broader source of supervision. We demonstrate that the simple pre-training task of predicting which caption goes with which image is an efficient and scalable way to learn SOTA image representations from scratch on a dataset of 400 million (image, text) pairs collected from the internet. After pre-training, natural language is used to reference learned visual concepts (or describe new ones) enabling zero-shot transfer of the model to downstream tasks. We study the performance of this approach by benchmarking on over 30 different existing computer vision datasets, spanning tasks such as OCR, action recognition in videos, geo-localization, and many types of fine-grained object classification. 
The model transfers non-trivially to most tasks and is often competitive with a fully supervised baseline without the need for any dataset specific training. For instance, we match the accuracy of the original ResNet-50 on ImageNet zero-shot without needing to use any of the 1.28 million training examples it was trained on. We release our code and pre-trained model weights at this https URL.`),Yt=l(),O=n("h2"),ge=n("a"),we=n("span"),g($e.$$.fragment),Xo=l(),xe=n("span"),Zo=a("Usage"),eo=l(),me=n("p"),Qo=a(`CLIP is a multi-modal vision and language model. It can be used for image-text similarity and for zero-shot image classification. CLIP uses a ViT like transformer to get visual features and a causal language model to get the text features. Both the text and visual features are then projected to a latent space with identical dimension. The dot product between the projected image and text features is then used as a similar score.`),qe=l(),_e=n("p"),$=a(`To feed images to the Transformer encoder, each image is split into a sequence of fixed-size non-overlapping patches, which are then linearly embedded. A [CLS] token is added to serve as representation of an entire image. The authors also add absolute position embeddings, and feed the resulting sequence of vectors to a standard Transformer encoder. The `),j=n("a"),to=a("CLIPFeatureExtractor"),Qs=a(" can be used to resize (or rescale) and normalize images for the model."),Yo=l(),E=n("p"),Ys=a("The "),lt=n("a"),er=a("CLIPTokenizer"),De=a(" is used to encode the text. The "),ve=n("a"),tr=a("CLIPProcessor"),or=a(` wraps `),Pe=n("a"),nr=a("CLIPFeatureExtractor"),sr=a(" and "),Oe=n("a"),rr=a("CLIPTokenizer"),xd=a(` into a single instance to both encode the text and prepare the images. The following example shows how to get the image-text similarity scores using `),ar=n("a"),Ed=a("CLIPProcessor"),zd=a(" and "),ir=n("a"),Md=a("CLIPModel"),jd=a("."),nl=l(),g(en.$$.fragment),sl=l(),Ae=n("p"),Fd=a("This model was contributed by "),tn=n("a"),qd=a("valhalla"),Dd=a(". The original code can be found "),on=n("a"),Od=a("here"),Ad=a("."),rl=l(),dt=n("h2"),oo=n("a"),pa=n("span"),g(nn.$$.fragment),Nd=l(),ha=n("span"),Vd=a("CLIPConfig"),al=l(),fe=n("div"),g(sn.$$.fragment),Sd=l(),no=n("p"),lr=n("a"),Wd=a("CLIPConfig"),Bd=a(" is the configuration class to store the configuration of a "),dr=n("a"),Ud=a("CLIPModel"),Rd=a(`. It is used to instantiate CLIP model according to the specified arguments, defining the text model and vision model configs.`),Hd=l(),ct=n("p"),Kd=a("Configuration objects inherit from "),cr=n("a"),Gd=a("PretrainedConfig"),Jd=a(` and can be used to control the model outputs. Read the documentation from `),pr=n("a"),Xd=a("PretrainedConfig"),Zd=a(" for more information."),Qd=l(),so=n("div"),g(rn.$$.fragment),Yd=l(),an=n("p"),ec=a("Instantiate a "),hr=n("a"),tc=a("CLIPConfig"),oc=a(` (or a derived class) from clip text model configuration and clip vision model configuration.`),il=l(),pt=n("h2"),ro=n("a"),ma=n("span"),g(ln.$$.fragment),nc=l(),fa=n("span"),sc=a("CLIPTextConfig"),ll=l(),V=n("div"),g(dn.$$.fragment),rc=l(),ht=n("p"),ac=a("This is the configuration class to store the configuration of a "),mr=n("a"),ic=a("CLIPModel"),lc=a(`. It is used to instantiate an CLIP model according to the specified arguments, defining the model architecture. 
Instantiating a configuration with the defaults will yield a similar configuration to that of the CLIP `),cn=n("a"),dc=a("openai/clip-vit-base-patch32"),cc=a(" architecture."),pc=l(),mt=n("p"),hc=a("Configuration objects inherit from "),fr=n("a"),mc=a("PretrainedConfig"),fc=a(` and can be used to control the model outputs. Read the documentation from `),ur=n("a"),uc=a("PretrainedConfig"),gc=a(" for more information."),_c=l(),ua=n("p"),vc=a("Example:"),Pc=l(),g(pn.$$.fragment),dl=l(),ft=n("h2"),ao=n("a"),ga=n("span"),g(hn.$$.fragment),Tc=l(),_a=n("span"),Ic=a("CLIPVisionConfig"),cl=l(),S=n("div"),g(mn.$$.fragment),yc=l(),ut=n("p"),bc=a("This is the configuration class to store the configuration of a "),gr=n("a"),Cc=a("CLIPModel"),Lc=a(`. It is used to instantiate an CLIP model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the CLIP `),fn=n("a"),kc=a("openai/clip-vit-base-patch32"),wc=a(" architecture."),$c=l(),gt=n("p"),xc=a("Configuration objects inherit from "),_r=n("a"),Ec=a("PretrainedConfig"),zc=a(` and can be used to control the model outputs. Read the documentation from `),vr=n("a"),Mc=a("PretrainedConfig"),jc=a(" for more information."),Fc=l(),va=n("p"),qc=a("Example:"),Dc=l(),g(un.$$.fragment),pl=l(),_t=n("h2"),io=n("a"),Pa=n("span"),g(gn.$$.fragment),Oc=l(),Ta=n("span"),Ac=a("CLIPTokenizer"),hl=l(),F=n("div"),g(_n.$$.fragment),Nc=l(),Ia=n("p"),Vc=a("Construct a CLIP tokenizer. Based on byte-level Byte-Pair-Encoding."),Sc=l(),vn=n("p"),Wc=a("This tokenizer inherits from "),Pr=n("a"),Bc=a("PreTrainedTokenizer"),Uc=a(` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),Rc=l(),Te=n("div"),g(Pn.$$.fragment),Hc=l(),ya=n("p"),Kc=a(`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A CLIP sequence has the following format:`),Gc=l(),ba=n("ul"),Tr=n("li"),Jc=a("single sequence: "),Ca=n("code"),Xc=a("<|startoftext|> X <|endoftext|>"),Zc=l(),La=n("p"),Qc=a("Pairs of sequences are not the expected use case, but they will be handled without a separator."),Yc=l(),lo=n("div"),g(Tn.$$.fragment),ep=l(),In=n("p"),tp=a(`Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `),ka=n("code"),op=a("prepare_for_model"),np=a(" method."),sp=l(),co=n("div"),g(yn.$$.fragment),rp=l(),wa=n("p"),ap=a(`Create a mask from the two sequences passed. CLIP does not make use of token type ids, therefore a list of zeros is returned.`),ip=l(),Ir=n("div"),g(bn.$$.fragment),ml=l(),vt=n("h2"),po=n("a"),$a=n("span"),g(Cn.$$.fragment),lp=l(),xa=n("span"),dp=a("CLIPTokenizerFast"),fl=l(),W=n("div"),g(Ln.$$.fragment),cp=l(),kn=n("p"),pp=a("Construct a \u201Cfast\u201D CLIP tokenizer (backed by HuggingFace\u2019s "),Ea=n("em"),hp=a("tokenizers"),mp=a(` library). Based on byte-level Byte-Pair-Encoding.`),fp=l(),wn=n("p"),up=a("This tokenizer inherits from "),yr=n("a"),gp=a("PreTrainedTokenizerFast"),_p=a(` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),vp=l(),Ie=n("div"),g($n.$$.fragment),Pp=l(),za=n("p"),Tp=a(`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. 
A CLIP sequence has the following format:`),Ip=l(),Ma=n("ul"),br=n("li"),yp=a("single sequence: "),ja=n("code"),bp=a("<|startoftext|> X <|endoftext|>"),Cp=l(),Fa=n("p"),Lp=a("Pairs of sequences are not the expected use case, but they will be handled without a separator."),kp=l(),ho=n("div"),g(xn.$$.fragment),wp=l(),qa=n("p"),$p=a(`Create a mask from the two sequences passed. CLIP does not make use of token type ids, therefore a list of zeros is returned.`),ul=l(),Pt=n("h2"),mo=n("a"),Da=n("span"),g(En.$$.fragment),xp=l(),Oa=n("span"),Ep=a("CLIPFeatureExtractor"),gl=l(),B=n("div"),g(zn.$$.fragment),zp=l(),Aa=n("p"),Mp=a("Constructs a CLIP feature extractor."),jp=l(),Mn=n("p"),Fp=a("This feature extractor inherits from "),Cr=n("a"),qp=a("FeatureExtractionMixin"),Dp=a(` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),Op=l(),fo=n("div"),g(jn.$$.fragment),Ap=l(),Fn=n("p"),Np=a("Crops "),Na=n("code"),Vp=a("image"),Sp=a(` to the given size using a center crop. Note that if the image is too small to be cropped to the size is given, it will be padded (so the returned result has the size asked).`),Wp=l(),uo=n("div"),g(qn.$$.fragment),Bp=l(),Tt=n("p"),Up=a("Resizes "),Va=n("code"),Rp=a("image"),Hp=a(". Note that this will trigger a conversion of "),Sa=n("code"),Kp=a("image"),Gp=a(" to a PIL Image."),_l=l(),It=n("h2"),go=n("a"),Wa=n("span"),g(Dn.$$.fragment),Jp=l(),Ba=n("span"),Xp=a("CLIPProcessor"),vl=l(),U=n("div"),g(On.$$.fragment),Zp=l(),Ua=n("p"),Qp=a("Constructs a CLIP processor which wraps a CLIP feature extractor and a CLIP tokenizer into a single processor."),Yp=l(),K=n("p"),Lr=n("a"),eh=a("CLIPProcessor"),th=a(" offers all the functionalities of "),kr=n("a"),oh=a("CLIPFeatureExtractor"),nh=a(" and "),wr=n("a"),sh=a("CLIPTokenizerFast"),rh=a(`. See the `),Ra=n("code"),ah=a("__call__()"),ih=a("and "),$r=n("a"),lh=a("decode()"),dh=a(" for more information."),ch=l(),_o=n("div"),g(An.$$.fragment),ph=l(),Nn=n("p"),hh=a("This method forwards all its arguments to CLIPTokenizerFast\u2019s "),xr=n("a"),mh=a("batch_decode()"),fh=a(`. Please refer to the docstring of this method for more information.`),uh=l(),vo=n("div"),g(Vn.$$.fragment),gh=l(),Sn=n("p"),_h=a("This method forwards all its arguments to CLIPTokenizerFast\u2019s "),Er=n("a"),vh=a("decode()"),Ph=a(`. Please refer to the docstring of this method for more information.`),Pl=l(),yt=n("h2"),Po=n("a"),Ha=n("span"),g(Wn.$$.fragment),Th=l(),Ka=n("span"),Ih=a("CLIPModel"),Tl=l(),R=n("div"),g(Bn.$$.fragment),yh=l(),Un=n("p"),bh=a("This model is a PyTorch "),Rn=n("a"),Ch=a("torch.nn.Module"),Lh=a(` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),kh=l(),G=n("div"),g(Hn.$$.fragment),wh=l(),bt=n("p"),$h=a("The "),zr=n("a"),xh=a("CLIPModel"),Eh=a(" forward method, overrides the "),Ga=n("code"),zh=a("__call__"),Mh=a(" special method."),jh=l(),g(To.$$.fragment),Fh=l(),Ja=n("p"),qh=a("Examples:"),Dh=l(),g(Kn.$$.fragment),Oh=l(),J=n("div"),g(Gn.$$.fragment),Ah=l(),Ct=n("p"),Nh=a("The "),Mr=n("a"),Vh=a("CLIPModel"),Sh=a(" forward method, overrides the "),Xa=n("code"),Wh=a("__call__"),Bh=a(" special method."),Uh=l(),g(Io.$$.fragment),Rh=l(),Za=n("p"),Hh=a("Examples:"),Kh=l(),g(Jn.$$.fragment),Gh=l(),X=n("div"),g(Xn.$$.fragment),Jh=l(),Lt=n("p"),Xh=a("The "),jr=n("a"),Zh=a("CLIPModel"),Qh=a(" forward method, overrides the "),Qa=n("code"),Yh=a("__call__"),em=a(" special method."),tm=l(),g(yo.$$.fragment),om=l(),Ya=n("p"),nm=a("Examples:"),sm=l(),g(Zn.$$.fragment),Il=l(),kt=n("h2"),bo=n("a"),ei=n("span"),g(Qn.$$.fragment),rm=l(),ti=n("span"),am=a("CLIPTextModel"),yl=l(),wt=n("div"),g(Yn.$$.fragment),im=l(),Z=n("div"),g(es.$$.fragment),lm=l(),$t=n("p"),dm=a("The "),Fr=n("a"),cm=a("CLIPTextModel"),pm=a(" forward method, overrides the "),oi=n("code"),hm=a("__call__"),mm=a(" special method."),fm=l(),g(Co.$$.fragment),um=l(),ni=n("p"),gm=a("Examples:"),_m=l(),g(ts.$$.fragment),bl=l(),xt=n("h2"),Lo=n("a"),si=n("span"),g(os.$$.fragment),vm=l(),ri=n("span"),Pm=a("CLIPVisionModel"),Cl=l(),Et=n("div"),g(ns.$$.fragment),Tm=l(),Q=n("div"),g(ss.$$.fragment),Im=l(),zt=n("p"),ym=a("The "),qr=n("a"),bm=a("CLIPVisionModel"),Cm=a(" forward method, overrides the "),ai=n("code"),Lm=a("__call__"),km=a(" special method."),wm=l(),g(ko.$$.fragment),$m=l(),ii=n("p"),xm=a("Examples:"),Em=l(),g(rs.$$.fragment),Ll=l(),Mt=n("h2"),wo=n("a"),li=n("span"),g(as.$$.fragment),zm=l(),di=n("span"),Mm=a("TFCLIPModel"),kl=l(),q=n("div"),g(is.$$.fragment),jm=l(),ls=n("p"),Fm=a("This model inherits from "),Dr=n("a"),qm=a("TFPreTrainedModel"),Dm=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Om=l(),ds=n("p"),Am=a("This model is also a "),cs=n("a"),Nm=a("tf.keras.Model"),Vm=a(` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Sm=l(),g($o.$$.fragment),Wm=l(),Y=n("div"),g(ps.$$.fragment),Bm=l(),jt=n("p"),Um=a("The "),Or=n("a"),Rm=a("TFCLIPModel"),Hm=a(" forward method, overrides the "),ci=n("code"),Km=a("__call__"),Gm=a(" special method."),Jm=l(),g(xo.$$.fragment),Xm=l(),pi=n("p"),Zm=a("Examples:"),Qm=l(),g(hs.$$.fragment),Ym=l(),ee=n("div"),g(ms.$$.fragment),ef=l(),Ft=n("p"),tf=a("The "),Ar=n("a"),of=a("TFCLIPModel"),nf=a(" forward method, overrides the "),hi=n("code"),sf=a("__call__"),rf=a(" special method."),af=l(),g(Eo.$$.fragment),lf=l(),mi=n("p"),df=a("Examples:"),cf=l(),g(fs.$$.fragment),pf=l(),te=n("div"),g(us.$$.fragment),hf=l(),qt=n("p"),mf=a("The "),Nr=n("a"),ff=a("TFCLIPModel"),uf=a(" forward method, overrides the "),fi=n("code"),gf=a("__call__"),_f=a(" special method."),vf=l(),g(zo.$$.fragment),Pf=l(),ui=n("p"),Tf=a("Examples:"),If=l(),g(gs.$$.fragment),wl=l(),Dt=n("h2"),Mo=n("a"),gi=n("span"),g(_s.$$.fragment),yf=l(),_i=n("span"),bf=a("TFCLIPTextModel"),$l=l(),Ot=n("div"),g(vs.$$.fragment),Cf=l(),oe=n("div"),g(Ps.$$.fragment),Lf=l(),At=n("p"),kf=a("The "),Vr=n("a"),wf=a("TFCLIPTextModel"),$f=a(" forward method, overrides the "),vi=n("code"),xf=a("__call__"),Ef=a(" special method."),zf=l(),g(jo.$$.fragment),Mf=l(),Pi=n("p"),jf=a("Examples:"),Ff=l(),g(Ts.$$.fragment),xl=l(),Nt=n("h2"),Fo=n("a"),Ti=n("span"),g(Is.$$.fragment),qf=l(),Ii=n("span"),Df=a("TFCLIPVisionModel"),El=l(),Vt=n("div"),g(ys.$$.fragment),Of=l(),ne=n("div"),g(bs.$$.fragment),Af=l(),St=n("p"),Nf=a("The "),Sr=n("a"),Vf=a("TFCLIPVisionModel"),Sf=a(" forward method, overrides the "),yi=n("code"),Wf=a("__call__"),Bf=a(" special method."),Uf=l(),g(qo.$$.fragment),Rf=l(),bi=n("p"),Hf=a("Examples:"),Kf=l(),g(Cs.$$.fragment),zl=l(),Wt=n("h2"),Do=n("a"),Ci=n("span"),g(Ls.$$.fragment),Gf=l(),Li=n("span"),Jf=a("FlaxCLIPModel"),Ml=l(),z=n("div"),g(ks.$$.fragment),Xf=l(),ws=n("p"),Zf=a("This model inherits from "),Wr=n("a"),Qf=a("FlaxPreTrainedModel"),Yf=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models)`),eu=l(),$s=n("p"),tu=a("This model is also a Flax Linen "),xs=n("a"),ou=a("flax.linen.Module"),nu=a(` subclass. 
Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior.`),su=l(),ki=n("p"),ru=a("Finally, this model supports inherent JAX features such as:"),au=l(),Ee=n("ul"),wi=n("li"),Es=n("a"),iu=a("Just-In-Time (JIT) compilation"),lu=l(),$i=n("li"),zs=n("a"),du=a("Automatic Differentiation"),cu=l(),xi=n("li"),Ms=n("a"),pu=a("Vectorization"),hu=l(),Ei=n("li"),js=n("a"),mu=a("Parallelization"),fu=l(),se=n("div"),g(Fs.$$.fragment),uu=l(),Bt=n("p"),gu=a("The "),zi=n("code"),_u=a("FlaxCLIPPreTrainedModel"),vu=a("forward method, overrides the "),Mi=n("code"),Pu=a("__call__"),Tu=a(" special method."),Iu=l(),g(Oo.$$.fragment),yu=l(),ji=n("p"),bu=a("Example:"),Cu=l(),g(qs.$$.fragment),Lu=l(),Ne=n("div"),g(Ds.$$.fragment),ku=l(),Fi=n("p"),wu=a("Examples:"),$u=l(),g(Os.$$.fragment),xu=l(),Ve=n("div"),g(As.$$.fragment),Eu=l(),qi=n("p"),zu=a("Examples:"),Mu=l(),g(Ns.$$.fragment),jl=l(),Ut=n("h2"),Ao=n("a"),Di=n("span"),g(Vs.$$.fragment),ju=l(),Oi=n("span"),Fu=a("FlaxCLIPTextModel"),Fl=l(),Rt=n("div"),g(Ss.$$.fragment),qu=l(),re=n("div"),g(Ws.$$.fragment),Du=l(),Ht=n("p"),Ou=a("The "),Ai=n("code"),Au=a("FlaxCLIPTextPreTrainedModel"),Nu=a("forward method, overrides the "),Ni=n("code"),Vu=a("__call__"),Su=a(" special method."),Wu=l(),g(No.$$.fragment),Bu=l(),Vi=n("p"),Uu=a("Example:"),Ru=l(),g(Bs.$$.fragment),ql=l(),Kt=n("h2"),Vo=n("a"),Si=n("span"),g(Us.$$.fragment),Hu=l(),Wi=n("span"),Ku=a("FlaxCLIPVisionModel"),Dl=l(),Gt=n("div"),g(Rs.$$.fragment),Gu=l(),ae=n("div"),g(Hs.$$.fragment),Ju=l(),Jt=n("p"),Xu=a("The "),Bi=n("code"),Zu=a("FlaxCLIPVisionPreTrainedModel"),Qu=a("forward method, overrides the "),Ui=n("code"),Yu=a("__call__"),eg=a(" special method."),tg=l(),g(So.$$.fragment),og=l(),Ri=n("p"),ng=a("Example:"),sg=l(),g(Ks.$$.fragment),this.h()},l(t){const h=l1('[data-svelte="svelte-1phssyn"]',document.head);p=s(h,"META",{name:!0,content:!0}),h.forEach(o),L=d(t),u=s(t,"H1",{class:!0});var Gs=r(u);b=s(Gs,"A",{id:!0,class:!0,href:!0});var Hi=r(b);C=s(Hi,"SPAN",{});var Ki=r(C);_(y.$$.fragment,Ki),Ki.forEach(o),Hi.forEach(o),f=d(Gs),k=s(Gs,"SPAN",{});var Gi=r(k);Ho=i(Gi,"CLIP"),Gi.forEach(o),Gs.forEach(o),at=d(t),M=s(t,"H2",{class:!0});var Js=r(M);ue=s(Js,"A",{id:!0,class:!0,href:!0});var Ji=r(ue);ye=s(Ji,"SPAN",{});var Xi=r(ye);_(be.$$.fragment,Xi),Xi.forEach(o),Ji.forEach(o),Ko=d(Js),Ce=s(Js,"SPAN",{});var Zi=r(Ce);Go=i(Zi,"Overview"),Zi.forEach(o),Js.forEach(o),Xt=d(t),pe=s(t,"P",{});var Xs=r(pe);Me=i(Xs,"The CLIP model was proposed in "),Le=s(Xs,"A",{href:!0,rel:!0});var Qi=r(Le);Zt=i(Qi,"Learning Transferable Visual Models From Natural Language Supervision"),Qi.forEach(o),H=i(Xs,` by Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, Ilya Sutskever. CLIP (Contrastive Language-Image Pre-Training) is a neural network trained on a variety of (image, text) pairs. It can be instructed in natural language to predict the most relevant text snippet, given an image, without directly optimizing for the task, similarly to the zero-shot capabilities of GPT-2 and 3.`),Xs.forEach(o),he=d(t),je=s(t,"P",{});var Yi=r(je);it=i(Yi,"The abstract from the paper is the following:"),Yi.forEach(o),Qt=d(t),Fe=s(t,"P",{});var el=r(Fe);ke=s(el,"EM",{});var tl=r(ke);Jo=i(tl,`State-of-the-art computer vision systems are trained to predict a fixed set of predetermined object categories. 
This restricted form of supervision limits their generality and usability since additional labeled data is needed to specify any other visual concept. Learning directly from raw text about images is a promising alternative which leverages a much broader source of supervision. We demonstrate that the simple pre-training task of predicting which caption goes with which image is an efficient and scalable way to learn SOTA image representations from scratch on a dataset of 400 million (image, text) pairs collected from the internet. After pre-training, natural language is used to reference learned visual concepts (or describe new ones) enabling zero-shot transfer of the model to downstream tasks. We study the performance of this approach by benchmarking on over 30 different existing computer vision datasets, spanning tasks such as OCR, action recognition in videos, geo-localization, and many types of fine-grained object classification. The model transfers non-trivially to most tasks and is often competitive with a fully supervised baseline without the need for any dataset specific training. For instance, we match the accuracy of the original ResNet-50 on ImageNet zero-shot without needing to use any of the 1.28 million training examples it was trained on. We release our code and pre-trained model weights at this https URL.`),tl.forEach(o),el.forEach(o),Yt=d(t),O=s(t,"H2",{class:!0});var Zs=r(O);ge=s(Zs,"A",{id:!0,class:!0,href:!0});var ig=r(ge);we=s(ig,"SPAN",{});var lg=r(we);_($e.$$.fragment,lg),lg.forEach(o),ig.forEach(o),Xo=d(Zs),xe=s(Zs,"SPAN",{});var dg=r(xe);Zo=i(dg,"Usage"),dg.forEach(o),Zs.forEach(o),eo=d(t),me=s(t,"P",{});var cg=r(me);Qo=i(cg,`CLIP is a multi-modal vision and language model. It can be used for image-text similarity and for zero-shot image classification. CLIP uses a ViT like transformer to get visual features and a causal language model to get the text features. Both the text and visual features are then projected to a latent space with identical dimension. The dot product between the projected image and text features is then used as a similar score.`),cg.forEach(o),qe=d(t),_e=s(t,"P",{});var Al=r(_e);$=i(Al,`To feed images to the Transformer encoder, each image is split into a sequence of fixed-size non-overlapping patches, which are then linearly embedded. A [CLS] token is added to serve as representation of an entire image. The authors also add absolute position embeddings, and feed the resulting sequence of vectors to a standard Transformer encoder. The `),j=s(Al,"A",{href:!0});var pg=r(j);to=i(pg,"CLIPFeatureExtractor"),pg.forEach(o),Qs=i(Al," can be used to resize (or rescale) and normalize images for the model."),Al.forEach(o),Yo=d(t),E=s(t,"P",{});var ie=r(E);Ys=i(ie,"The "),lt=s(ie,"A",{href:!0});var hg=r(lt);er=i(hg,"CLIPTokenizer"),hg.forEach(o),De=i(ie," is used to encode the text. The "),ve=s(ie,"A",{href:!0});var mg=r(ve);tr=i(mg,"CLIPProcessor"),mg.forEach(o),or=i(ie,` wraps `),Pe=s(ie,"A",{href:!0});var fg=r(Pe);nr=i(fg,"CLIPFeatureExtractor"),fg.forEach(o),sr=i(ie," and "),Oe=s(ie,"A",{href:!0});var ug=r(Oe);rr=i(ug,"CLIPTokenizer"),ug.forEach(o),xd=i(ie,` into a single instance to both encode the text and prepare the images. 
The following example shows how to get the image-text similarity scores using `),ar=s(ie,"A",{href:!0});var gg=r(ar);Ed=i(gg,"CLIPProcessor"),gg.forEach(o),zd=i(ie," and "),ir=s(ie,"A",{href:!0});var _g=r(ir);Md=i(_g,"CLIPModel"),_g.forEach(o),jd=i(ie,"."),ie.forEach(o),nl=d(t),_(en.$$.fragment,t),sl=d(t),Ae=s(t,"P",{});var Br=r(Ae);Fd=i(Br,"This model was contributed by "),tn=s(Br,"A",{href:!0,rel:!0});var vg=r(tn);qd=i(vg,"valhalla"),vg.forEach(o),Dd=i(Br,". The original code can be found "),on=s(Br,"A",{href:!0,rel:!0});var Pg=r(on);Od=i(Pg,"here"),Pg.forEach(o),Ad=i(Br,"."),Br.forEach(o),rl=d(t),dt=s(t,"H2",{class:!0});var Nl=r(dt);oo=s(Nl,"A",{id:!0,class:!0,href:!0});var Tg=r(oo);pa=s(Tg,"SPAN",{});var Ig=r(pa);_(nn.$$.fragment,Ig),Ig.forEach(o),Tg.forEach(o),Nd=d(Nl),ha=s(Nl,"SPAN",{});var yg=r(ha);Vd=i(yg,"CLIPConfig"),yg.forEach(o),Nl.forEach(o),al=d(t),fe=s(t,"DIV",{class:!0});var Wo=r(fe);_(sn.$$.fragment,Wo),Sd=d(Wo),no=s(Wo,"P",{});var ol=r(no);lr=s(ol,"A",{href:!0});var bg=r(lr);Wd=i(bg,"CLIPConfig"),bg.forEach(o),Bd=i(ol," is the configuration class to store the configuration of a "),dr=s(ol,"A",{href:!0});var Cg=r(dr);Ud=i(Cg,"CLIPModel"),Cg.forEach(o),Rd=i(ol,`. It is used to instantiate CLIP model according to the specified arguments, defining the text model and vision model configs.`),ol.forEach(o),Hd=d(Wo),ct=s(Wo,"P",{});var Ur=r(ct);Kd=i(Ur,"Configuration objects inherit from "),cr=s(Ur,"A",{href:!0});var Lg=r(cr);Gd=i(Lg,"PretrainedConfig"),Lg.forEach(o),Jd=i(Ur,` and can be used to control the model outputs. Read the documentation from `),pr=s(Ur,"A",{href:!0});var kg=r(pr);Xd=i(kg,"PretrainedConfig"),kg.forEach(o),Zd=i(Ur," for more information."),Ur.forEach(o),Qd=d(Wo),so=s(Wo,"DIV",{class:!0});var Vl=r(so);_(rn.$$.fragment,Vl),Yd=d(Vl),an=s(Vl,"P",{});var Sl=r(an);ec=i(Sl,"Instantiate a "),hr=s(Sl,"A",{href:!0});var wg=r(hr);tc=i(wg,"CLIPConfig"),wg.forEach(o),oc=i(Sl,` (or a derived class) from clip text model configuration and clip vision model configuration.`),Sl.forEach(o),Vl.forEach(o),Wo.forEach(o),il=d(t),pt=s(t,"H2",{class:!0});var Wl=r(pt);ro=s(Wl,"A",{id:!0,class:!0,href:!0});var $g=r(ro);ma=s($g,"SPAN",{});var xg=r(ma);_(ln.$$.fragment,xg),xg.forEach(o),$g.forEach(o),nc=d(Wl),fa=s(Wl,"SPAN",{});var Eg=r(fa);sc=i(Eg,"CLIPTextConfig"),Eg.forEach(o),Wl.forEach(o),ll=d(t),V=s(t,"DIV",{class:!0});var Se=r(V);_(dn.$$.fragment,Se),rc=d(Se),ht=s(Se,"P",{});var Rr=r(ht);ac=i(Rr,"This is the configuration class to store the configuration of a "),mr=s(Rr,"A",{href:!0});var zg=r(mr);ic=i(zg,"CLIPModel"),zg.forEach(o),lc=i(Rr,`. It is used to instantiate an CLIP model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the CLIP `),cn=s(Rr,"A",{href:!0,rel:!0});var Mg=r(cn);dc=i(Mg,"openai/clip-vit-base-patch32"),Mg.forEach(o),cc=i(Rr," architecture."),Rr.forEach(o),pc=d(Se),mt=s(Se,"P",{});var Hr=r(mt);hc=i(Hr,"Configuration objects inherit from "),fr=s(Hr,"A",{href:!0});var jg=r(fr);mc=i(jg,"PretrainedConfig"),jg.forEach(o),fc=i(Hr,` and can be used to control the model outputs. 
Read the documentation from `),ur=s(Hr,"A",{href:!0});var Fg=r(ur);uc=i(Fg,"PretrainedConfig"),Fg.forEach(o),gc=i(Hr," for more information."),Hr.forEach(o),_c=d(Se),ua=s(Se,"P",{});var qg=r(ua);vc=i(qg,"Example:"),qg.forEach(o),Pc=d(Se),_(pn.$$.fragment,Se),Se.forEach(o),dl=d(t),ft=s(t,"H2",{class:!0});var Bl=r(ft);ao=s(Bl,"A",{id:!0,class:!0,href:!0});var Dg=r(ao);ga=s(Dg,"SPAN",{});var Og=r(ga);_(hn.$$.fragment,Og),Og.forEach(o),Dg.forEach(o),Tc=d(Bl),_a=s(Bl,"SPAN",{});var Ag=r(_a);Ic=i(Ag,"CLIPVisionConfig"),Ag.forEach(o),Bl.forEach(o),cl=d(t),S=s(t,"DIV",{class:!0});var We=r(S);_(mn.$$.fragment,We),yc=d(We),ut=s(We,"P",{});var Kr=r(ut);bc=i(Kr,"This is the configuration class to store the configuration of a "),gr=s(Kr,"A",{href:!0});var Ng=r(gr);Cc=i(Ng,"CLIPModel"),Ng.forEach(o),Lc=i(Kr,`. It is used to instantiate an CLIP model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the CLIP `),fn=s(Kr,"A",{href:!0,rel:!0});var Vg=r(fn);kc=i(Vg,"openai/clip-vit-base-patch32"),Vg.forEach(o),wc=i(Kr," architecture."),Kr.forEach(o),$c=d(We),gt=s(We,"P",{});var Gr=r(gt);xc=i(Gr,"Configuration objects inherit from "),_r=s(Gr,"A",{href:!0});var Sg=r(_r);Ec=i(Sg,"PretrainedConfig"),Sg.forEach(o),zc=i(Gr,` and can be used to control the model outputs. Read the documentation from `),vr=s(Gr,"A",{href:!0});var Wg=r(vr);Mc=i(Wg,"PretrainedConfig"),Wg.forEach(o),jc=i(Gr," for more information."),Gr.forEach(o),Fc=d(We),va=s(We,"P",{});var Bg=r(va);qc=i(Bg,"Example:"),Bg.forEach(o),Dc=d(We),_(un.$$.fragment,We),We.forEach(o),pl=d(t),_t=s(t,"H2",{class:!0});var Ul=r(_t);io=s(Ul,"A",{id:!0,class:!0,href:!0});var Ug=r(io);Pa=s(Ug,"SPAN",{});var Rg=r(Pa);_(gn.$$.fragment,Rg),Rg.forEach(o),Ug.forEach(o),Oc=d(Ul),Ta=s(Ul,"SPAN",{});var Hg=r(Ta);Ac=i(Hg,"CLIPTokenizer"),Hg.forEach(o),Ul.forEach(o),hl=d(t),F=s(t,"DIV",{class:!0});var le=r(F);_(_n.$$.fragment,le),Nc=d(le),Ia=s(le,"P",{});var Kg=r(Ia);Vc=i(Kg,"Construct a CLIP tokenizer. Based on byte-level Byte-Pair-Encoding."),Kg.forEach(o),Sc=d(le),vn=s(le,"P",{});var Rl=r(vn);Wc=i(Rl,"This tokenizer inherits from "),Pr=s(Rl,"A",{href:!0});var Gg=r(Pr);Bc=i(Gg,"PreTrainedTokenizer"),Gg.forEach(o),Uc=i(Rl,` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),Rl.forEach(o),Rc=d(le),Te=s(le,"DIV",{class:!0});var Bo=r(Te);_(Pn.$$.fragment,Bo),Hc=d(Bo),ya=s(Bo,"P",{});var Jg=r(ya);Kc=i(Jg,`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A CLIP sequence has the following format:`),Jg.forEach(o),Gc=d(Bo),ba=s(Bo,"UL",{});var Xg=r(ba);Tr=s(Xg,"LI",{});var rg=r(Tr);Jc=i(rg,"single sequence: "),Ca=s(rg,"CODE",{});var Zg=r(Ca);Xc=i(Zg,"<|startoftext|> X <|endoftext|>"),Zg.forEach(o),rg.forEach(o),Xg.forEach(o),Zc=d(Bo),La=s(Bo,"P",{});var Qg=r(La);Qc=i(Qg,"Pairs of sequences are not the expected use case, but they will be handled without a separator."),Qg.forEach(o),Bo.forEach(o),Yc=d(le),lo=s(le,"DIV",{class:!0});var Hl=r(lo);_(Tn.$$.fragment,Hl),ep=d(Hl),In=s(Hl,"P",{});var Kl=r(In);tp=i(Kl,`Retrieve sequence ids from a token list that has no special tokens added. 
This method is called when adding special tokens using the tokenizer `),ka=s(Kl,"CODE",{});var Yg=r(ka);op=i(Yg,"prepare_for_model"),Yg.forEach(o),np=i(Kl," method."),Kl.forEach(o),Hl.forEach(o),sp=d(le),co=s(le,"DIV",{class:!0});var Gl=r(co);_(yn.$$.fragment,Gl),rp=d(Gl),wa=s(Gl,"P",{});var e_=r(wa);ap=i(e_,`Create a mask from the two sequences passed. CLIP does not make use of token type ids, therefore a list of zeros is returned.`),e_.forEach(o),Gl.forEach(o),ip=d(le),Ir=s(le,"DIV",{class:!0});var t_=r(Ir);_(bn.$$.fragment,t_),t_.forEach(o),le.forEach(o),ml=d(t),vt=s(t,"H2",{class:!0});var Jl=r(vt);po=s(Jl,"A",{id:!0,class:!0,href:!0});var o_=r(po);$a=s(o_,"SPAN",{});var n_=r($a);_(Cn.$$.fragment,n_),n_.forEach(o),o_.forEach(o),lp=d(Jl),xa=s(Jl,"SPAN",{});var s_=r(xa);dp=i(s_,"CLIPTokenizerFast"),s_.forEach(o),Jl.forEach(o),fl=d(t),W=s(t,"DIV",{class:!0});var Be=r(W);_(Ln.$$.fragment,Be),cp=d(Be),kn=s(Be,"P",{});var Xl=r(kn);pp=i(Xl,"Construct a \u201Cfast\u201D CLIP tokenizer (backed by HuggingFace\u2019s "),Ea=s(Xl,"EM",{});var r_=r(Ea);hp=i(r_,"tokenizers"),r_.forEach(o),mp=i(Xl,` library). Based on byte-level Byte-Pair-Encoding.`),Xl.forEach(o),fp=d(Be),wn=s(Be,"P",{});var Zl=r(wn);up=i(Zl,"This tokenizer inherits from "),yr=s(Zl,"A",{href:!0});var a_=r(yr);gp=i(a_,"PreTrainedTokenizerFast"),a_.forEach(o),_p=i(Zl,` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),Zl.forEach(o),vp=d(Be),Ie=s(Be,"DIV",{class:!0});var Uo=r(Ie);_($n.$$.fragment,Uo),Pp=d(Uo),za=s(Uo,"P",{});var i_=r(za);Tp=i(i_,`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A CLIP sequence has the following format:`),i_.forEach(o),Ip=d(Uo),Ma=s(Uo,"UL",{});var l_=r(Ma);br=s(l_,"LI",{});var ag=r(br);yp=i(ag,"single sequence: "),ja=s(ag,"CODE",{});var d_=r(ja);bp=i(d_,"<|startoftext|> X <|endoftext|>"),d_.forEach(o),ag.forEach(o),l_.forEach(o),Cp=d(Uo),Fa=s(Uo,"P",{});var c_=r(Fa);Lp=i(c_,"Pairs of sequences are not the expected use case, but they will be handled without a separator."),c_.forEach(o),Uo.forEach(o),kp=d(Be),ho=s(Be,"DIV",{class:!0});var Ql=r(ho);_(xn.$$.fragment,Ql),wp=d(Ql),qa=s(Ql,"P",{});var p_=r(qa);$p=i(p_,`Create a mask from the two sequences passed. CLIP does not make use of token type ids, therefore a list of zeros is returned.`),p_.forEach(o),Ql.forEach(o),Be.forEach(o),ul=d(t),Pt=s(t,"H2",{class:!0});var Yl=r(Pt);mo=s(Yl,"A",{id:!0,class:!0,href:!0});var h_=r(mo);Da=s(h_,"SPAN",{});var m_=r(Da);_(En.$$.fragment,m_),m_.forEach(o),h_.forEach(o),xp=d(Yl),Oa=s(Yl,"SPAN",{});var f_=r(Oa);Ep=i(f_,"CLIPFeatureExtractor"),f_.forEach(o),Yl.forEach(o),gl=d(t),B=s(t,"DIV",{class:!0});var Ue=r(B);_(zn.$$.fragment,Ue),zp=d(Ue),Aa=s(Ue,"P",{});var u_=r(Aa);Mp=i(u_,"Constructs a CLIP feature extractor."),u_.forEach(o),jp=d(Ue),Mn=s(Ue,"P",{});var ed=r(Mn);Fp=i(ed,"This feature extractor inherits from "),Cr=s(ed,"A",{href:!0});var g_=r(Cr);qp=i(g_,"FeatureExtractionMixin"),g_.forEach(o),Dp=i(ed,` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),ed.forEach(o),Op=d(Ue),fo=s(Ue,"DIV",{class:!0});var td=r(fo);_(jn.$$.fragment,td),Ap=d(td),Fn=s(td,"P",{});var od=r(Fn);Np=i(od,"Crops "),Na=s(od,"CODE",{});var __=r(Na);Vp=i(__,"image"),__.forEach(o),Sp=i(od,` to the given size using a center crop. 
Note that if the image is too small to be cropped to the size is given, it will be padded (so the returned result has the size asked).`),od.forEach(o),td.forEach(o),Wp=d(Ue),uo=s(Ue,"DIV",{class:!0});var nd=r(uo);_(qn.$$.fragment,nd),Bp=d(nd),Tt=s(nd,"P",{});var Jr=r(Tt);Up=i(Jr,"Resizes "),Va=s(Jr,"CODE",{});var v_=r(Va);Rp=i(v_,"image"),v_.forEach(o),Hp=i(Jr,". Note that this will trigger a conversion of "),Sa=s(Jr,"CODE",{});var P_=r(Sa);Kp=i(P_,"image"),P_.forEach(o),Gp=i(Jr," to a PIL Image."),Jr.forEach(o),nd.forEach(o),Ue.forEach(o),_l=d(t),It=s(t,"H2",{class:!0});var sd=r(It);go=s(sd,"A",{id:!0,class:!0,href:!0});var T_=r(go);Wa=s(T_,"SPAN",{});var I_=r(Wa);_(Dn.$$.fragment,I_),I_.forEach(o),T_.forEach(o),Jp=d(sd),Ba=s(sd,"SPAN",{});var y_=r(Ba);Xp=i(y_,"CLIPProcessor"),y_.forEach(o),sd.forEach(o),vl=d(t),U=s(t,"DIV",{class:!0});var Re=r(U);_(On.$$.fragment,Re),Zp=d(Re),Ua=s(Re,"P",{});var b_=r(Ua);Qp=i(b_,"Constructs a CLIP processor which wraps a CLIP feature extractor and a CLIP tokenizer into a single processor."),b_.forEach(o),Yp=d(Re),K=s(Re,"P",{});var ze=r(K);Lr=s(ze,"A",{href:!0});var C_=r(Lr);eh=i(C_,"CLIPProcessor"),C_.forEach(o),th=i(ze," offers all the functionalities of "),kr=s(ze,"A",{href:!0});var L_=r(kr);oh=i(L_,"CLIPFeatureExtractor"),L_.forEach(o),nh=i(ze," and "),wr=s(ze,"A",{href:!0});var k_=r(wr);sh=i(k_,"CLIPTokenizerFast"),k_.forEach(o),rh=i(ze,`. See the `),Ra=s(ze,"CODE",{});var w_=r(Ra);ah=i(w_,"__call__()"),w_.forEach(o),ih=i(ze,"and "),$r=s(ze,"A",{href:!0});var $_=r($r);lh=i($_,"decode()"),$_.forEach(o),dh=i(ze," for more information."),ze.forEach(o),ch=d(Re),_o=s(Re,"DIV",{class:!0});var rd=r(_o);_(An.$$.fragment,rd),ph=d(rd),Nn=s(rd,"P",{});var ad=r(Nn);hh=i(ad,"This method forwards all its arguments to CLIPTokenizerFast\u2019s "),xr=s(ad,"A",{href:!0});var x_=r(xr);mh=i(x_,"batch_decode()"),x_.forEach(o),fh=i(ad,`. Please refer to the docstring of this method for more information.`),ad.forEach(o),rd.forEach(o),uh=d(Re),vo=s(Re,"DIV",{class:!0});var id=r(vo);_(Vn.$$.fragment,id),gh=d(id),Sn=s(id,"P",{});var ld=r(Sn);_h=i(ld,"This method forwards all its arguments to CLIPTokenizerFast\u2019s "),Er=s(ld,"A",{href:!0});var E_=r(Er);vh=i(E_,"decode()"),E_.forEach(o),Ph=i(ld,`. Please refer to the docstring of this method for more information.`),ld.forEach(o),id.forEach(o),Re.forEach(o),Pl=d(t),yt=s(t,"H2",{class:!0});var dd=r(yt);Po=s(dd,"A",{id:!0,class:!0,href:!0});var z_=r(Po);Ha=s(z_,"SPAN",{});var M_=r(Ha);_(Wn.$$.fragment,M_),M_.forEach(o),z_.forEach(o),Th=d(dd),Ka=s(dd,"SPAN",{});var j_=r(Ka);Ih=i(j_,"CLIPModel"),j_.forEach(o),dd.forEach(o),Tl=d(t),R=s(t,"DIV",{class:!0});var He=r(R);_(Bn.$$.fragment,He),yh=d(He),Un=s(He,"P",{});var cd=r(Un);bh=i(cd,"This model is a PyTorch "),Rn=s(cd,"A",{href:!0,rel:!0});var F_=r(Rn);Ch=i(F_,"torch.nn.Module"),F_.forEach(o),Lh=i(cd,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),cd.forEach(o),kh=d(He),G=s(He,"DIV",{class:!0});var Ke=r(G);_(Hn.$$.fragment,Ke),wh=d(Ke),bt=s(Ke,"P",{});var Xr=r(bt);$h=i(Xr,"The "),zr=s(Xr,"A",{href:!0});var q_=r(zr);xh=i(q_,"CLIPModel"),q_.forEach(o),Eh=i(Xr," forward method, overrides the "),Ga=s(Xr,"CODE",{});var D_=r(Ga);zh=i(D_,"__call__"),D_.forEach(o),Mh=i(Xr," special method."),Xr.forEach(o),jh=d(Ke),_(To.$$.fragment,Ke),Fh=d(Ke),Ja=s(Ke,"P",{});var O_=r(Ja);qh=i(O_,"Examples:"),O_.forEach(o),Dh=d(Ke),_(Kn.$$.fragment,Ke),Ke.forEach(o),Oh=d(He),J=s(He,"DIV",{class:!0});var Ge=r(J);_(Gn.$$.fragment,Ge),Ah=d(Ge),Ct=s(Ge,"P",{});var Zr=r(Ct);Nh=i(Zr,"The "),Mr=s(Zr,"A",{href:!0});var A_=r(Mr);Vh=i(A_,"CLIPModel"),A_.forEach(o),Sh=i(Zr," forward method, overrides the "),Xa=s(Zr,"CODE",{});var N_=r(Xa);Wh=i(N_,"__call__"),N_.forEach(o),Bh=i(Zr," special method."),Zr.forEach(o),Uh=d(Ge),_(Io.$$.fragment,Ge),Rh=d(Ge),Za=s(Ge,"P",{});var V_=r(Za);Hh=i(V_,"Examples:"),V_.forEach(o),Kh=d(Ge),_(Jn.$$.fragment,Ge),Ge.forEach(o),Gh=d(He),X=s(He,"DIV",{class:!0});var Je=r(X);_(Xn.$$.fragment,Je),Jh=d(Je),Lt=s(Je,"P",{});var Qr=r(Lt);Xh=i(Qr,"The "),jr=s(Qr,"A",{href:!0});var S_=r(jr);Zh=i(S_,"CLIPModel"),S_.forEach(o),Qh=i(Qr," forward method, overrides the "),Qa=s(Qr,"CODE",{});var W_=r(Qa);Yh=i(W_,"__call__"),W_.forEach(o),em=i(Qr," special method."),Qr.forEach(o),tm=d(Je),_(yo.$$.fragment,Je),om=d(Je),Ya=s(Je,"P",{});var B_=r(Ya);nm=i(B_,"Examples:"),B_.forEach(o),sm=d(Je),_(Zn.$$.fragment,Je),Je.forEach(o),He.forEach(o),Il=d(t),kt=s(t,"H2",{class:!0});var pd=r(kt);bo=s(pd,"A",{id:!0,class:!0,href:!0});var U_=r(bo);ei=s(U_,"SPAN",{});var R_=r(ei);_(Qn.$$.fragment,R_),R_.forEach(o),U_.forEach(o),rm=d(pd),ti=s(pd,"SPAN",{});var H_=r(ti);am=i(H_,"CLIPTextModel"),H_.forEach(o),pd.forEach(o),yl=d(t),wt=s(t,"DIV",{class:!0});var hd=r(wt);_(Yn.$$.fragment,hd),im=d(hd),Z=s(hd,"DIV",{class:!0});var Xe=r(Z);_(es.$$.fragment,Xe),lm=d(Xe),$t=s(Xe,"P",{});var Yr=r($t);dm=i(Yr,"The "),Fr=s(Yr,"A",{href:!0});var K_=r(Fr);cm=i(K_,"CLIPTextModel"),K_.forEach(o),pm=i(Yr," forward method, overrides the "),oi=s(Yr,"CODE",{});var G_=r(oi);hm=i(G_,"__call__"),G_.forEach(o),mm=i(Yr," special method."),Yr.forEach(o),fm=d(Xe),_(Co.$$.fragment,Xe),um=d(Xe),ni=s(Xe,"P",{});var J_=r(ni);gm=i(J_,"Examples:"),J_.forEach(o),_m=d(Xe),_(ts.$$.fragment,Xe),Xe.forEach(o),hd.forEach(o),bl=d(t),xt=s(t,"H2",{class:!0});var md=r(xt);Lo=s(md,"A",{id:!0,class:!0,href:!0});var X_=r(Lo);si=s(X_,"SPAN",{});var Z_=r(si);_(os.$$.fragment,Z_),Z_.forEach(o),X_.forEach(o),vm=d(md),ri=s(md,"SPAN",{});var Q_=r(ri);Pm=i(Q_,"CLIPVisionModel"),Q_.forEach(o),md.forEach(o),Cl=d(t),Et=s(t,"DIV",{class:!0});var fd=r(Et);_(ns.$$.fragment,fd),Tm=d(fd),Q=s(fd,"DIV",{class:!0});var Ze=r(Q);_(ss.$$.fragment,Ze),Im=d(Ze),zt=s(Ze,"P",{});var ea=r(zt);ym=i(ea,"The "),qr=s(ea,"A",{href:!0});var Y_=r(qr);bm=i(Y_,"CLIPVisionModel"),Y_.forEach(o),Cm=i(ea," forward method, overrides the "),ai=s(ea,"CODE",{});var ev=r(ai);Lm=i(ev,"__call__"),ev.forEach(o),km=i(ea," special method."),ea.forEach(o),wm=d(Ze),_(ko.$$.fragment,Ze),$m=d(Ze),ii=s(Ze,"P",{});var tv=r(ii);xm=i(tv,"Examples:"),tv.forEach(o),Em=d(Ze),_(rs.$$.fragment,Ze),Ze.forEach(o),fd.forEach(o),Ll=d(t),Mt=s(t,"H2",{class:!0});var ud=r(Mt);wo=s(ud,"A",{id:!0,class:!0,href:!0});var ov=r(wo);li=s(ov,"SPAN",{});var nv=r(li);_(as.$$.fragment,nv),nv.forEach(o),ov.forEach(o),zm=d(ud),di=s(ud,"SPAN",{});var 
sv=r(di);Mm=i(sv,"TFCLIPModel"),sv.forEach(o),ud.forEach(o),kl=d(t),q=s(t,"DIV",{class:!0});var de=r(q);_(is.$$.fragment,de),jm=d(de),ls=s(de,"P",{});var gd=r(ls);Fm=i(gd,"This model inherits from "),Dr=s(gd,"A",{href:!0});var rv=r(Dr);qm=i(rv,"TFPreTrainedModel"),rv.forEach(o),Dm=i(gd,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),gd.forEach(o),Om=d(de),ds=s(de,"P",{});var _d=r(ds);Am=i(_d,"This model is also a "),cs=s(_d,"A",{href:!0,rel:!0});var av=r(cs);Nm=i(av,"tf.keras.Model"),av.forEach(o),Vm=i(_d,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),_d.forEach(o),Sm=d(de),_($o.$$.fragment,de),Wm=d(de),Y=s(de,"DIV",{class:!0});var Qe=r(Y);_(ps.$$.fragment,Qe),Bm=d(Qe),jt=s(Qe,"P",{});var ta=r(jt);Um=i(ta,"The "),Or=s(ta,"A",{href:!0});var iv=r(Or);Rm=i(iv,"TFCLIPModel"),iv.forEach(o),Hm=i(ta," forward method, overrides the "),ci=s(ta,"CODE",{});var lv=r(ci);Km=i(lv,"__call__"),lv.forEach(o),Gm=i(ta," special method."),ta.forEach(o),Jm=d(Qe),_(xo.$$.fragment,Qe),Xm=d(Qe),pi=s(Qe,"P",{});var dv=r(pi);Zm=i(dv,"Examples:"),dv.forEach(o),Qm=d(Qe),_(hs.$$.fragment,Qe),Qe.forEach(o),Ym=d(de),ee=s(de,"DIV",{class:!0});var Ye=r(ee);_(ms.$$.fragment,Ye),ef=d(Ye),Ft=s(Ye,"P",{});var oa=r(Ft);tf=i(oa,"The "),Ar=s(oa,"A",{href:!0});var cv=r(Ar);of=i(cv,"TFCLIPModel"),cv.forEach(o),nf=i(oa," forward method, overrides the "),hi=s(oa,"CODE",{});var pv=r(hi);sf=i(pv,"__call__"),pv.forEach(o),rf=i(oa," special method."),oa.forEach(o),af=d(Ye),_(Eo.$$.fragment,Ye),lf=d(Ye),mi=s(Ye,"P",{});var hv=r(mi);df=i(hv,"Examples:"),hv.forEach(o),cf=d(Ye),_(fs.$$.fragment,Ye),Ye.forEach(o),pf=d(de),te=s(de,"DIV",{class:!0});var et=r(te);_(us.$$.fragment,et),hf=d(et),qt=s(et,"P",{});var na=r(qt);mf=i(na,"The "),Nr=s(na,"A",{href:!0});var mv=r(Nr);ff=i(mv,"TFCLIPModel"),mv.forEach(o),uf=i(na," forward method, overrides the "),fi=s(na,"CODE",{});var fv=r(fi);gf=i(fv,"__call__"),fv.forEach(o),_f=i(na," special method."),na.forEach(o),vf=d(et),_(zo.$$.fragment,et),Pf=d(et),ui=s(et,"P",{});var uv=r(ui);Tf=i(uv,"Examples:"),uv.forEach(o),If=d(et),_(gs.$$.fragment,et),et.forEach(o),de.forEach(o),wl=d(t),Dt=s(t,"H2",{class:!0});var vd=r(Dt);Mo=s(vd,"A",{id:!0,class:!0,href:!0});var gv=r(Mo);gi=s(gv,"SPAN",{});var _v=r(gi);_(_s.$$.fragment,_v),_v.forEach(o),gv.forEach(o),yf=d(vd),_i=s(vd,"SPAN",{});var vv=r(_i);bf=i(vv,"TFCLIPTextModel"),vv.forEach(o),vd.forEach(o),$l=d(t),Ot=s(t,"DIV",{class:!0});var Pd=r(Ot);_(vs.$$.fragment,Pd),Cf=d(Pd),oe=s(Pd,"DIV",{class:!0});var tt=r(oe);_(Ps.$$.fragment,tt),Lf=d(tt),At=s(tt,"P",{});var sa=r(At);kf=i(sa,"The "),Vr=s(sa,"A",{href:!0});var Pv=r(Vr);wf=i(Pv,"TFCLIPTextModel"),Pv.forEach(o),$f=i(sa," forward method, overrides the "),vi=s(sa,"CODE",{});var Tv=r(vi);xf=i(Tv,"__call__"),Tv.forEach(o),Ef=i(sa," special method."),sa.forEach(o),zf=d(tt),_(jo.$$.fragment,tt),Mf=d(tt),Pi=s(tt,"P",{});var Iv=r(Pi);jf=i(Iv,"Examples:"),Iv.forEach(o),Ff=d(tt),_(Ts.$$.fragment,tt),tt.forEach(o),Pd.forEach(o),xl=d(t),Nt=s(t,"H2",{class:!0});var Td=r(Nt);Fo=s(Td,"A",{id:!0,class:!0,href:!0});var yv=r(Fo);Ti=s(yv,"SPAN",{});var bv=r(Ti);_(Is.$$.fragment,bv),bv.forEach(o),yv.forEach(o),qf=d(Td),Ii=s(Td,"SPAN",{});var Cv=r(Ii);Df=i(Cv,"TFCLIPVisionModel"),Cv.forEach(o),Td.forEach(o),El=d(t),Vt=s(t,"DIV",{class:!0});var 
Id=r(Vt);_(ys.$$.fragment,Id),Of=d(Id),ne=s(Id,"DIV",{class:!0});var ot=r(ne);_(bs.$$.fragment,ot),Af=d(ot),St=s(ot,"P",{});var ra=r(St);Nf=i(ra,"The "),Sr=s(ra,"A",{href:!0});var Lv=r(Sr);Vf=i(Lv,"TFCLIPVisionModel"),Lv.forEach(o),Sf=i(ra," forward method, overrides the "),yi=s(ra,"CODE",{});var kv=r(yi);Wf=i(kv,"__call__"),kv.forEach(o),Bf=i(ra," special method."),ra.forEach(o),Uf=d(ot),_(qo.$$.fragment,ot),Rf=d(ot),bi=s(ot,"P",{});var wv=r(bi);Hf=i(wv,"Examples:"),wv.forEach(o),Kf=d(ot),_(Cs.$$.fragment,ot),ot.forEach(o),Id.forEach(o),zl=d(t),Wt=s(t,"H2",{class:!0});var yd=r(Wt);Do=s(yd,"A",{id:!0,class:!0,href:!0});var $v=r(Do);Ci=s($v,"SPAN",{});var xv=r(Ci);_(Ls.$$.fragment,xv),xv.forEach(o),$v.forEach(o),Gf=d(yd),Li=s(yd,"SPAN",{});var Ev=r(Li);Jf=i(Ev,"FlaxCLIPModel"),Ev.forEach(o),yd.forEach(o),Ml=d(t),z=s(t,"DIV",{class:!0});var N=r(z);_(ks.$$.fragment,N),Xf=d(N),ws=s(N,"P",{});var bd=r(ws);Zf=i(bd,"This model inherits from "),Wr=s(bd,"A",{href:!0});var zv=r(Wr);Qf=i(zv,"FlaxPreTrainedModel"),zv.forEach(o),Yf=i(bd,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models)`),bd.forEach(o),eu=d(N),$s=s(N,"P",{});var Cd=r($s);tu=i(Cd,"This model is also a Flax Linen "),xs=s(Cd,"A",{href:!0,rel:!0});var Mv=r(xs);ou=i(Mv,"flax.linen.Module"),Mv.forEach(o),nu=i(Cd,` subclass. Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior.`),Cd.forEach(o),su=d(N),ki=s(N,"P",{});var jv=r(ki);ru=i(jv,"Finally, this model supports inherent JAX features such as:"),jv.forEach(o),au=d(N),Ee=s(N,"UL",{});var Ro=r(Ee);wi=s(Ro,"LI",{});var Fv=r(wi);Es=s(Fv,"A",{href:!0,rel:!0});var qv=r(Es);iu=i(qv,"Just-In-Time (JIT) compilation"),qv.forEach(o),Fv.forEach(o),lu=d(Ro),$i=s(Ro,"LI",{});var Dv=r($i);zs=s(Dv,"A",{href:!0,rel:!0});var Ov=r(zs);du=i(Ov,"Automatic Differentiation"),Ov.forEach(o),Dv.forEach(o),cu=d(Ro),xi=s(Ro,"LI",{});var Av=r(xi);Ms=s(Av,"A",{href:!0,rel:!0});var Nv=r(Ms);pu=i(Nv,"Vectorization"),Nv.forEach(o),Av.forEach(o),hu=d(Ro),Ei=s(Ro,"LI",{});var Vv=r(Ei);js=s(Vv,"A",{href:!0,rel:!0});var Sv=r(js);mu=i(Sv,"Parallelization"),Sv.forEach(o),Vv.forEach(o),Ro.forEach(o),fu=d(N),se=s(N,"DIV",{class:!0});var nt=r(se);_(Fs.$$.fragment,nt),uu=d(nt),Bt=s(nt,"P",{});var aa=r(Bt);gu=i(aa,"The "),zi=s(aa,"CODE",{});var Wv=r(zi);_u=i(Wv,"FlaxCLIPPreTrainedModel"),Wv.forEach(o),vu=i(aa,"forward method, overrides the "),Mi=s(aa,"CODE",{});var Bv=r(Mi);Pu=i(Bv,"__call__"),Bv.forEach(o),Tu=i(aa," special method."),aa.forEach(o),Iu=d(nt),_(Oo.$$.fragment,nt),yu=d(nt),ji=s(nt,"P",{});var Uv=r(ji);bu=i(Uv,"Example:"),Uv.forEach(o),Cu=d(nt),_(qs.$$.fragment,nt),nt.forEach(o),Lu=d(N),Ne=s(N,"DIV",{class:!0});var ia=r(Ne);_(Ds.$$.fragment,ia),ku=d(ia),Fi=s(ia,"P",{});var Rv=r(Fi);wu=i(Rv,"Examples:"),Rv.forEach(o),$u=d(ia),_(Os.$$.fragment,ia),ia.forEach(o),xu=d(N),Ve=s(N,"DIV",{class:!0});var la=r(Ve);_(As.$$.fragment,la),Eu=d(la),qi=s(la,"P",{});var Hv=r(qi);zu=i(Hv,"Examples:"),Hv.forEach(o),Mu=d(la),_(Ns.$$.fragment,la),la.forEach(o),N.forEach(o),jl=d(t),Ut=s(t,"H2",{class:!0});var Ld=r(Ut);Ao=s(Ld,"A",{id:!0,class:!0,href:!0});var Kv=r(Ao);Di=s(Kv,"SPAN",{});var Gv=r(Di);_(Vs.$$.fragment,Gv),Gv.forEach(o),Kv.forEach(o),ju=d(Ld),Oi=s(Ld,"SPAN",{});var Jv=r(Oi);Fu=i(Jv,"FlaxCLIPTextModel"),Jv.forEach(o),Ld.forEach(o),Fl=d(t),Rt=s(t,"DIV",{class:!0});var 
kd=r(Rt);_(Ss.$$.fragment,kd),qu=d(kd),re=s(kd,"DIV",{class:!0});var st=r(re);_(Ws.$$.fragment,st),Du=d(st),Ht=s(st,"P",{});var da=r(Ht);Ou=i(da,"The "),Ai=s(da,"CODE",{});var Xv=r(Ai);Au=i(Xv,"FlaxCLIPTextPreTrainedModel"),Xv.forEach(o),Nu=i(da,"forward method, overrides the "),Ni=s(da,"CODE",{});var Zv=r(Ni);Vu=i(Zv,"__call__"),Zv.forEach(o),Su=i(da," special method."),da.forEach(o),Wu=d(st),_(No.$$.fragment,st),Bu=d(st),Vi=s(st,"P",{});var Qv=r(Vi);Uu=i(Qv,"Example:"),Qv.forEach(o),Ru=d(st),_(Bs.$$.fragment,st),st.forEach(o),kd.forEach(o),ql=d(t),Kt=s(t,"H2",{class:!0});var wd=r(Kt);Vo=s(wd,"A",{id:!0,class:!0,href:!0});var Yv=r(Vo);Si=s(Yv,"SPAN",{});var e1=r(Si);_(Us.$$.fragment,e1),e1.forEach(o),Yv.forEach(o),Hu=d(wd),Wi=s(wd,"SPAN",{});var t1=r(Wi);Ku=i(t1,"FlaxCLIPVisionModel"),t1.forEach(o),wd.forEach(o),Dl=d(t),Gt=s(t,"DIV",{class:!0});var $d=r(Gt);_(Rs.$$.fragment,$d),Gu=d($d),ae=s($d,"DIV",{class:!0});var rt=r(ae);_(Hs.$$.fragment,rt),Ju=d(rt),Jt=s(rt,"P",{});var ca=r(Jt);Xu=i(ca,"The "),Bi=s(ca,"CODE",{});var o1=r(Bi);Zu=i(o1,"FlaxCLIPVisionPreTrainedModel"),o1.forEach(o),Qu=i(ca,"forward method, overrides the "),Ui=s(ca,"CODE",{});var n1=r(Ui);Yu=i(n1,"__call__"),n1.forEach(o),eg=i(ca," special method."),ca.forEach(o),tg=d(rt),_(So.$$.fragment,rt),og=d(rt),Ri=s(rt,"P",{});var s1=r(Ri);ng=i(s1,"Example:"),s1.forEach(o),sg=d(rt),_(Ks.$$.fragment,rt),rt.forEach(o),$d.forEach(o),this.h()},h(){c(p,"name","hf:doc:metadata"),c(p,"content",JSON.stringify(C1)),c(b,"id","clip"),c(b,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(b,"href","#clip"),c(u,"class","relative group"),c(ue,"id","overview"),c(ue,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(ue,"href","#overview"),c(M,"class","relative group"),c(Le,"href","https://arxiv.org/abs/2103.00020"),c(Le,"rel","nofollow"),c(ge,"id","usage"),c(ge,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(ge,"href","#usage"),c(O,"class","relative group"),c(j,"href","/docs/transformers/pr_16143/en/model_doc/clip#transformers.CLIPFeatureExtractor"),c(lt,"href","/docs/transformers/pr_16143/en/model_doc/clip#transformers.CLIPTokenizer"),c(ve,"href","/docs/transformers/pr_16143/en/model_doc/clip#transformers.CLIPProcessor"),c(Pe,"href","/docs/transformers/pr_16143/en/model_doc/clip#transformers.CLIPFeatureExtractor"),c(Oe,"href","/docs/transformers/pr_16143/en/model_doc/clip#transformers.CLIPTokenizer"),c(ar,"href","/docs/transformers/pr_16143/en/model_doc/clip#transformers.CLIPProcessor"),c(ir,"href","/docs/transformers/pr_16143/en/model_doc/clip#transformers.CLIPModel"),c(tn,"href","https://huggingface.co/valhalla"),c(tn,"rel","nofollow"),c(on,"href","https://github.com/openai/CLIP"),c(on,"rel","nofollow"),c(oo,"id","transformers.CLIPConfig"),c(oo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(oo,"href","#transformers.CLIPConfig"),c(dt,"class","relative 
group"),c(lr,"href","/docs/transformers/pr_16143/en/model_doc/clip#transformers.CLIPConfig"),c(dr,"href","/docs/transformers/pr_16143/en/model_doc/clip#transformers.CLIPModel"),c(cr,"href","/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig"),c(pr,"href","/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig"),c(hr,"href","/docs/transformers/pr_16143/en/model_doc/clip#transformers.CLIPConfig"),c(so,"class","docstring"),c(fe,"class","docstring"),c(ro,"id","transformers.CLIPTextConfig"),c(ro,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(ro,"href","#transformers.CLIPTextConfig"),c(pt,"class","relative group"),c(mr,"href","/docs/transformers/pr_16143/en/model_doc/clip#transformers.CLIPModel"),c(cn,"href","https://huggingface.co/openai/clip-vit-base-patch32"),c(cn,"rel","nofollow"),c(fr,"href","/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig"),c(ur,"href","/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig"),c(V,"class","docstring"),c(ao,"id","transformers.CLIPVisionConfig"),c(ao,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(ao,"href","#transformers.CLIPVisionConfig"),c(ft,"class","relative group"),c(gr,"href","/docs/transformers/pr_16143/en/model_doc/clip#transformers.CLIPModel"),c(fn,"href","https://huggingface.co/openai/clip-vit-base-patch32"),c(fn,"rel","nofollow"),c(_r,"href","/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig"),c(vr,"href","/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig"),c(S,"class","docstring"),c(io,"id","transformers.CLIPTokenizer"),c(io,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(io,"href","#transformers.CLIPTokenizer"),c(_t,"class","relative group"),c(Pr,"href","/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer"),c(Te,"class","docstring"),c(lo,"class","docstring"),c(co,"class","docstring"),c(Ir,"class","docstring"),c(F,"class","docstring"),c(po,"id","transformers.CLIPTokenizerFast"),c(po,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(po,"href","#transformers.CLIPTokenizerFast"),c(vt,"class","relative group"),c(yr,"href","/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast"),c(Ie,"class","docstring"),c(ho,"class","docstring"),c(W,"class","docstring"),c(mo,"id","transformers.CLIPFeatureExtractor"),c(mo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(mo,"href","#transformers.CLIPFeatureExtractor"),c(Pt,"class","relative group"),c(Cr,"href","/docs/transformers/pr_16143/en/main_classes/feature_extractor#transformers.FeatureExtractionMixin"),c(fo,"class","docstring"),c(uo,"class","docstring"),c(B,"class","docstring"),c(go,"id","transformers.CLIPProcessor"),c(go,"class","header-link block pr-1.5 text-lg no-hover:hidden 
with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(go,"href","#transformers.CLIPProcessor"),c(It,"class","relative group"),c(Lr,"href","/docs/transformers/pr_16143/en/model_doc/clip#transformers.CLIPProcessor"),c(kr,"href","/docs/transformers/pr_16143/en/model_doc/clip#transformers.CLIPFeatureExtractor"),c(wr,"href","/docs/transformers/pr_16143/en/model_doc/clip#transformers.CLIPTokenizerFast"),c($r,"href","/docs/transformers/pr_16143/en/model_doc/clip#transformers.CLIPProcessor.decode"),c(xr,"href","/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.batch_decode"),c(_o,"class","docstring"),c(Er,"href","/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.decode"),c(vo,"class","docstring"),c(U,"class","docstring"),c(Po,"id","transformers.CLIPModel"),c(Po,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Po,"href","#transformers.CLIPModel"),c(yt,"class","relative group"),c(Rn,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(Rn,"rel","nofollow"),c(zr,"href","/docs/transformers/pr_16143/en/model_doc/clip#transformers.CLIPModel"),c(G,"class","docstring"),c(Mr,"href","/docs/transformers/pr_16143/en/model_doc/clip#transformers.CLIPModel"),c(J,"class","docstring"),c(jr,"href","/docs/transformers/pr_16143/en/model_doc/clip#transformers.CLIPModel"),c(X,"class","docstring"),c(R,"class","docstring"),c(bo,"id","transformers.CLIPTextModel"),c(bo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(bo,"href","#transformers.CLIPTextModel"),c(kt,"class","relative group"),c(Fr,"href","/docs/transformers/pr_16143/en/model_doc/clip#transformers.CLIPTextModel"),c(Z,"class","docstring"),c(wt,"class","docstring"),c(Lo,"id","transformers.CLIPVisionModel"),c(Lo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Lo,"href","#transformers.CLIPVisionModel"),c(xt,"class","relative group"),c(qr,"href","/docs/transformers/pr_16143/en/model_doc/clip#transformers.CLIPVisionModel"),c(Q,"class","docstring"),c(Et,"class","docstring"),c(wo,"id","transformers.TFCLIPModel"),c(wo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(wo,"href","#transformers.TFCLIPModel"),c(Mt,"class","relative group"),c(Dr,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel"),c(cs,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(cs,"rel","nofollow"),c(Or,"href","/docs/transformers/pr_16143/en/model_doc/clip#transformers.TFCLIPModel"),c(Y,"class","docstring"),c(Ar,"href","/docs/transformers/pr_16143/en/model_doc/clip#transformers.TFCLIPModel"),c(ee,"class","docstring"),c(Nr,"href","/docs/transformers/pr_16143/en/model_doc/clip#transformers.TFCLIPModel"),c(te,"class","docstring"),c(q,"class","docstring"),c(Mo,"id","transformers.TFCLIPTextModel"),c(Mo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full"),c(Mo,"href","#transformers.TFCLIPTextModel"),c(Dt,"class","relative group"),c(Vr,"href","/docs/transformers/pr_16143/en/model_doc/clip#transformers.TFCLIPTextModel"),c(oe,"class","docstring"),c(Ot,"class","docstring"),c(Fo,"id","transformers.TFCLIPVisionModel"),c(Fo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Fo,"href","#transformers.TFCLIPVisionModel"),c(Nt,"class","relative group"),c(Sr,"href","/docs/transformers/pr_16143/en/model_doc/clip#transformers.TFCLIPVisionModel"),c(ne,"class","docstring"),c(Vt,"class","docstring"),c(Do,"id","transformers.FlaxCLIPModel"),c(Do,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Do,"href","#transformers.FlaxCLIPModel"),c(Wt,"class","relative group"),c(Wr,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel"),c(xs,"href","https://flax.readthedocs.io/en/latest/flax.linen.html#module"),c(xs,"rel","nofollow"),c(Es,"href","https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit"),c(Es,"rel","nofollow"),c(zs,"href","https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation"),c(zs,"rel","nofollow"),c(Ms,"href","https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap"),c(Ms,"rel","nofollow"),c(js,"href","https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap"),c(js,"rel","nofollow"),c(se,"class","docstring"),c(Ne,"class","docstring"),c(Ve,"class","docstring"),c(z,"class","docstring"),c(Ao,"id","transformers.FlaxCLIPTextModel"),c(Ao,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Ao,"href","#transformers.FlaxCLIPTextModel"),c(Ut,"class","relative group"),c(re,"class","docstring"),c(Rt,"class","docstring"),c(Vo,"id","transformers.FlaxCLIPVisionModel"),c(Vo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Vo,"href","#transformers.FlaxCLIPVisionModel"),c(Kt,"class","relative 
group"),c(ae,"class","docstring"),c(Gt,"class","docstring")},m(t,h){e(document.head,p),m(t,L,h),m(t,u,h),e(u,b),e(b,C),v(y,C,null),e(u,f),e(u,k),e(k,Ho),m(t,at,h),m(t,M,h),e(M,ue),e(ue,ye),v(be,ye,null),e(M,Ko),e(M,Ce),e(Ce,Go),m(t,Xt,h),m(t,pe,h),e(pe,Me),e(pe,Le),e(Le,Zt),e(pe,H),m(t,he,h),m(t,je,h),e(je,it),m(t,Qt,h),m(t,Fe,h),e(Fe,ke),e(ke,Jo),m(t,Yt,h),m(t,O,h),e(O,ge),e(ge,we),v($e,we,null),e(O,Xo),e(O,xe),e(xe,Zo),m(t,eo,h),m(t,me,h),e(me,Qo),m(t,qe,h),m(t,_e,h),e(_e,$),e(_e,j),e(j,to),e(_e,Qs),m(t,Yo,h),m(t,E,h),e(E,Ys),e(E,lt),e(lt,er),e(E,De),e(E,ve),e(ve,tr),e(E,or),e(E,Pe),e(Pe,nr),e(E,sr),e(E,Oe),e(Oe,rr),e(E,xd),e(E,ar),e(ar,Ed),e(E,zd),e(E,ir),e(ir,Md),e(E,jd),m(t,nl,h),v(en,t,h),m(t,sl,h),m(t,Ae,h),e(Ae,Fd),e(Ae,tn),e(tn,qd),e(Ae,Dd),e(Ae,on),e(on,Od),e(Ae,Ad),m(t,rl,h),m(t,dt,h),e(dt,oo),e(oo,pa),v(nn,pa,null),e(dt,Nd),e(dt,ha),e(ha,Vd),m(t,al,h),m(t,fe,h),v(sn,fe,null),e(fe,Sd),e(fe,no),e(no,lr),e(lr,Wd),e(no,Bd),e(no,dr),e(dr,Ud),e(no,Rd),e(fe,Hd),e(fe,ct),e(ct,Kd),e(ct,cr),e(cr,Gd),e(ct,Jd),e(ct,pr),e(pr,Xd),e(ct,Zd),e(fe,Qd),e(fe,so),v(rn,so,null),e(so,Yd),e(so,an),e(an,ec),e(an,hr),e(hr,tc),e(an,oc),m(t,il,h),m(t,pt,h),e(pt,ro),e(ro,ma),v(ln,ma,null),e(pt,nc),e(pt,fa),e(fa,sc),m(t,ll,h),m(t,V,h),v(dn,V,null),e(V,rc),e(V,ht),e(ht,ac),e(ht,mr),e(mr,ic),e(ht,lc),e(ht,cn),e(cn,dc),e(ht,cc),e(V,pc),e(V,mt),e(mt,hc),e(mt,fr),e(fr,mc),e(mt,fc),e(mt,ur),e(ur,uc),e(mt,gc),e(V,_c),e(V,ua),e(ua,vc),e(V,Pc),v(pn,V,null),m(t,dl,h),m(t,ft,h),e(ft,ao),e(ao,ga),v(hn,ga,null),e(ft,Tc),e(ft,_a),e(_a,Ic),m(t,cl,h),m(t,S,h),v(mn,S,null),e(S,yc),e(S,ut),e(ut,bc),e(ut,gr),e(gr,Cc),e(ut,Lc),e(ut,fn),e(fn,kc),e(ut,wc),e(S,$c),e(S,gt),e(gt,xc),e(gt,_r),e(_r,Ec),e(gt,zc),e(gt,vr),e(vr,Mc),e(gt,jc),e(S,Fc),e(S,va),e(va,qc),e(S,Dc),v(un,S,null),m(t,pl,h),m(t,_t,h),e(_t,io),e(io,Pa),v(gn,Pa,null),e(_t,Oc),e(_t,Ta),e(Ta,Ac),m(t,hl,h),m(t,F,h),v(_n,F,null),e(F,Nc),e(F,Ia),e(Ia,Vc),e(F,Sc),e(F,vn),e(vn,Wc),e(vn,Pr),e(Pr,Bc),e(vn,Uc),e(F,Rc),e(F,Te),v(Pn,Te,null),e(Te,Hc),e(Te,ya),e(ya,Kc),e(Te,Gc),e(Te,ba),e(ba,Tr),e(Tr,Jc),e(Tr,Ca),e(Ca,Xc),e(Te,Zc),e(Te,La),e(La,Qc),e(F,Yc),e(F,lo),v(Tn,lo,null),e(lo,ep),e(lo,In),e(In,tp),e(In,ka),e(ka,op),e(In,np),e(F,sp),e(F,co),v(yn,co,null),e(co,rp),e(co,wa),e(wa,ap),e(F,ip),e(F,Ir),v(bn,Ir,null),m(t,ml,h),m(t,vt,h),e(vt,po),e(po,$a),v(Cn,$a,null),e(vt,lp),e(vt,xa),e(xa,dp),m(t,fl,h),m(t,W,h),v(Ln,W,null),e(W,cp),e(W,kn),e(kn,pp),e(kn,Ea),e(Ea,hp),e(kn,mp),e(W,fp),e(W,wn),e(wn,up),e(wn,yr),e(yr,gp),e(wn,_p),e(W,vp),e(W,Ie),v($n,Ie,null),e(Ie,Pp),e(Ie,za),e(za,Tp),e(Ie,Ip),e(Ie,Ma),e(Ma,br),e(br,yp),e(br,ja),e(ja,bp),e(Ie,Cp),e(Ie,Fa),e(Fa,Lp),e(W,kp),e(W,ho),v(xn,ho,null),e(ho,wp),e(ho,qa),e(qa,$p),m(t,ul,h),m(t,Pt,h),e(Pt,mo),e(mo,Da),v(En,Da,null),e(Pt,xp),e(Pt,Oa),e(Oa,Ep),m(t,gl,h),m(t,B,h),v(zn,B,null),e(B,zp),e(B,Aa),e(Aa,Mp),e(B,jp),e(B,Mn),e(Mn,Fp),e(Mn,Cr),e(Cr,qp),e(Mn,Dp),e(B,Op),e(B,fo),v(jn,fo,null),e(fo,Ap),e(fo,Fn),e(Fn,Np),e(Fn,Na),e(Na,Vp),e(Fn,Sp),e(B,Wp),e(B,uo),v(qn,uo,null),e(uo,Bp),e(uo,Tt),e(Tt,Up),e(Tt,Va),e(Va,Rp),e(Tt,Hp),e(Tt,Sa),e(Sa,Kp),e(Tt,Gp),m(t,_l,h),m(t,It,h),e(It,go),e(go,Wa),v(Dn,Wa,null),e(It,Jp),e(It,Ba),e(Ba,Xp),m(t,vl,h),m(t,U,h),v(On,U,null),e(U,Zp),e(U,Ua),e(Ua,Qp),e(U,Yp),e(U,K),e(K,Lr),e(Lr,eh),e(K,th),e(K,kr),e(kr,oh),e(K,nh),e(K,wr),e(wr,sh),e(K,rh),e(K,Ra),e(Ra,ah),e(K,ih),e(K,$r),e($r,lh),e(K,dh),e(U,ch),e(U,_o),v(An,_o,null),e(_o,ph),e(_o,Nn),e(Nn,hh),e(Nn,xr),e(xr,mh),e(Nn,fh),e(U,uh),e(U,vo),v(Vn,vo,null),e(vo,gh),e(vo,Sn),e(Sn,_h),e(Sn,Er),e(Er,vh),e(Sn,Ph),m(t,Pl,h),m(t,yt,h),e(yt,Po),e(Po,Ha),v(Wn,Ha,null),
e(yt,Th),e(yt,Ka),e(Ka,Ih),m(t,Tl,h),m(t,R,h),v(Bn,R,null),e(R,yh),e(R,Un),e(Un,bh),e(Un,Rn),e(Rn,Ch),e(Un,Lh),e(R,kh),e(R,G),v(Hn,G,null),e(G,wh),e(G,bt),e(bt,$h),e(bt,zr),e(zr,xh),e(bt,Eh),e(bt,Ga),e(Ga,zh),e(bt,Mh),e(G,jh),v(To,G,null),e(G,Fh),e(G,Ja),e(Ja,qh),e(G,Dh),v(Kn,G,null),e(R,Oh),e(R,J),v(Gn,J,null),e(J,Ah),e(J,Ct),e(Ct,Nh),e(Ct,Mr),e(Mr,Vh),e(Ct,Sh),e(Ct,Xa),e(Xa,Wh),e(Ct,Bh),e(J,Uh),v(Io,J,null),e(J,Rh),e(J,Za),e(Za,Hh),e(J,Kh),v(Jn,J,null),e(R,Gh),e(R,X),v(Xn,X,null),e(X,Jh),e(X,Lt),e(Lt,Xh),e(Lt,jr),e(jr,Zh),e(Lt,Qh),e(Lt,Qa),e(Qa,Yh),e(Lt,em),e(X,tm),v(yo,X,null),e(X,om),e(X,Ya),e(Ya,nm),e(X,sm),v(Zn,X,null),m(t,Il,h),m(t,kt,h),e(kt,bo),e(bo,ei),v(Qn,ei,null),e(kt,rm),e(kt,ti),e(ti,am),m(t,yl,h),m(t,wt,h),v(Yn,wt,null),e(wt,im),e(wt,Z),v(es,Z,null),e(Z,lm),e(Z,$t),e($t,dm),e($t,Fr),e(Fr,cm),e($t,pm),e($t,oi),e(oi,hm),e($t,mm),e(Z,fm),v(Co,Z,null),e(Z,um),e(Z,ni),e(ni,gm),e(Z,_m),v(ts,Z,null),m(t,bl,h),m(t,xt,h),e(xt,Lo),e(Lo,si),v(os,si,null),e(xt,vm),e(xt,ri),e(ri,Pm),m(t,Cl,h),m(t,Et,h),v(ns,Et,null),e(Et,Tm),e(Et,Q),v(ss,Q,null),e(Q,Im),e(Q,zt),e(zt,ym),e(zt,qr),e(qr,bm),e(zt,Cm),e(zt,ai),e(ai,Lm),e(zt,km),e(Q,wm),v(ko,Q,null),e(Q,$m),e(Q,ii),e(ii,xm),e(Q,Em),v(rs,Q,null),m(t,Ll,h),m(t,Mt,h),e(Mt,wo),e(wo,li),v(as,li,null),e(Mt,zm),e(Mt,di),e(di,Mm),m(t,kl,h),m(t,q,h),v(is,q,null),e(q,jm),e(q,ls),e(ls,Fm),e(ls,Dr),e(Dr,qm),e(ls,Dm),e(q,Om),e(q,ds),e(ds,Am),e(ds,cs),e(cs,Nm),e(ds,Vm),e(q,Sm),v($o,q,null),e(q,Wm),e(q,Y),v(ps,Y,null),e(Y,Bm),e(Y,jt),e(jt,Um),e(jt,Or),e(Or,Rm),e(jt,Hm),e(jt,ci),e(ci,Km),e(jt,Gm),e(Y,Jm),v(xo,Y,null),e(Y,Xm),e(Y,pi),e(pi,Zm),e(Y,Qm),v(hs,Y,null),e(q,Ym),e(q,ee),v(ms,ee,null),e(ee,ef),e(ee,Ft),e(Ft,tf),e(Ft,Ar),e(Ar,of),e(Ft,nf),e(Ft,hi),e(hi,sf),e(Ft,rf),e(ee,af),v(Eo,ee,null),e(ee,lf),e(ee,mi),e(mi,df),e(ee,cf),v(fs,ee,null),e(q,pf),e(q,te),v(us,te,null),e(te,hf),e(te,qt),e(qt,mf),e(qt,Nr),e(Nr,ff),e(qt,uf),e(qt,fi),e(fi,gf),e(qt,_f),e(te,vf),v(zo,te,null),e(te,Pf),e(te,ui),e(ui,Tf),e(te,If),v(gs,te,null),m(t,wl,h),m(t,Dt,h),e(Dt,Mo),e(Mo,gi),v(_s,gi,null),e(Dt,yf),e(Dt,_i),e(_i,bf),m(t,$l,h),m(t,Ot,h),v(vs,Ot,null),e(Ot,Cf),e(Ot,oe),v(Ps,oe,null),e(oe,Lf),e(oe,At),e(At,kf),e(At,Vr),e(Vr,wf),e(At,$f),e(At,vi),e(vi,xf),e(At,Ef),e(oe,zf),v(jo,oe,null),e(oe,Mf),e(oe,Pi),e(Pi,jf),e(oe,Ff),v(Ts,oe,null),m(t,xl,h),m(t,Nt,h),e(Nt,Fo),e(Fo,Ti),v(Is,Ti,null),e(Nt,qf),e(Nt,Ii),e(Ii,Df),m(t,El,h),m(t,Vt,h),v(ys,Vt,null),e(Vt,Of),e(Vt,ne),v(bs,ne,null),e(ne,Af),e(ne,St),e(St,Nf),e(St,Sr),e(Sr,Vf),e(St,Sf),e(St,yi),e(yi,Wf),e(St,Bf),e(ne,Uf),v(qo,ne,null),e(ne,Rf),e(ne,bi),e(bi,Hf),e(ne,Kf),v(Cs,ne,null),m(t,zl,h),m(t,Wt,h),e(Wt,Do),e(Do,Ci),v(Ls,Ci,null),e(Wt,Gf),e(Wt,Li),e(Li,Jf),m(t,Ml,h),m(t,z,h),v(ks,z,null),e(z,Xf),e(z,ws),e(ws,Zf),e(ws,Wr),e(Wr,Qf),e(ws,Yf),e(z,eu),e(z,$s),e($s,tu),e($s,xs),e(xs,ou),e($s,nu),e(z,su),e(z,ki),e(ki,ru),e(z,au),e(z,Ee),e(Ee,wi),e(wi,Es),e(Es,iu),e(Ee,lu),e(Ee,$i),e($i,zs),e(zs,du),e(Ee,cu),e(Ee,xi),e(xi,Ms),e(Ms,pu),e(Ee,hu),e(Ee,Ei),e(Ei,js),e(js,mu),e(z,fu),e(z,se),v(Fs,se,null),e(se,uu),e(se,Bt),e(Bt,gu),e(Bt,zi),e(zi,_u),e(Bt,vu),e(Bt,Mi),e(Mi,Pu),e(Bt,Tu),e(se,Iu),v(Oo,se,null),e(se,yu),e(se,ji),e(ji,bu),e(se,Cu),v(qs,se,null),e(z,Lu),e(z,Ne),v(Ds,Ne,null),e(Ne,ku),e(Ne,Fi),e(Fi,wu),e(Ne,$u),v(Os,Ne,null),e(z,xu),e(z,Ve),v(As,Ve,null),e(Ve,Eu),e(Ve,qi),e(qi,zu),e(Ve,Mu),v(Ns,Ve,null),m(t,jl,h),m(t,Ut,h),e(Ut,Ao),e(Ao,Di),v(Vs,Di,null),e(Ut,ju),e(Ut,Oi),e(Oi,Fu),m(t,Fl,h),m(t,Rt,h),v(Ss,Rt,null),e(Rt,qu),e(Rt,re),v(Ws,re,null),e(re,Du),e(re,Ht),e(Ht,Ou),e(Ht,Ai),e(Ai,Au),e(Ht,Nu),e(Ht,Ni),e(Ni,Vu),e(Ht,Su),e(re,Wu
),v(No,re,null),e(re,Bu),e(re,Vi),e(Vi,Uu),e(re,Ru),v(Bs,re,null),m(t,ql,h),m(t,Kt,h),e(Kt,Vo),e(Vo,Si),v(Us,Si,null),e(Kt,Hu),e(Kt,Wi),e(Wi,Ku),m(t,Dl,h),m(t,Gt,h),v(Rs,Gt,null),e(Gt,Gu),e(Gt,ae),v(Hs,ae,null),e(ae,Ju),e(ae,Jt),e(Jt,Xu),e(Jt,Bi),e(Bi,Zu),e(Jt,Qu),e(Jt,Ui),e(Ui,Yu),e(Jt,eg),e(ae,tg),v(So,ae,null),e(ae,og),e(ae,Ri),e(Ri,ng),e(ae,sg),v(Ks,ae,null),Ol=!0},p(t,[h]){const Gs={};h&2&&(Gs.$$scope={dirty:h,ctx:t}),To.$set(Gs);const Hi={};h&2&&(Hi.$$scope={dirty:h,ctx:t}),Io.$set(Hi);const Ki={};h&2&&(Ki.$$scope={dirty:h,ctx:t}),yo.$set(Ki);const Gi={};h&2&&(Gi.$$scope={dirty:h,ctx:t}),Co.$set(Gi);const Js={};h&2&&(Js.$$scope={dirty:h,ctx:t}),ko.$set(Js);const Ji={};h&2&&(Ji.$$scope={dirty:h,ctx:t}),$o.$set(Ji);const Xi={};h&2&&(Xi.$$scope={dirty:h,ctx:t}),xo.$set(Xi);const Zi={};h&2&&(Zi.$$scope={dirty:h,ctx:t}),Eo.$set(Zi);const Xs={};h&2&&(Xs.$$scope={dirty:h,ctx:t}),zo.$set(Xs);const Qi={};h&2&&(Qi.$$scope={dirty:h,ctx:t}),jo.$set(Qi);const Yi={};h&2&&(Yi.$$scope={dirty:h,ctx:t}),qo.$set(Yi);const el={};h&2&&(el.$$scope={dirty:h,ctx:t}),Oo.$set(el);const tl={};h&2&&(tl.$$scope={dirty:h,ctx:t}),No.$set(tl);const Zs={};h&2&&(Zs.$$scope={dirty:h,ctx:t}),So.$set(Zs)},i(t){Ol||(P(y.$$.fragment,t),P(be.$$.fragment,t),P($e.$$.fragment,t),P(en.$$.fragment,t),P(nn.$$.fragment,t),P(sn.$$.fragment,t),P(rn.$$.fragment,t),P(ln.$$.fragment,t),P(dn.$$.fragment,t),P(pn.$$.fragment,t),P(hn.$$.fragment,t),P(mn.$$.fragment,t),P(un.$$.fragment,t),P(gn.$$.fragment,t),P(_n.$$.fragment,t),P(Pn.$$.fragment,t),P(Tn.$$.fragment,t),P(yn.$$.fragment,t),P(bn.$$.fragment,t),P(Cn.$$.fragment,t),P(Ln.$$.fragment,t),P($n.$$.fragment,t),P(xn.$$.fragment,t),P(En.$$.fragment,t),P(zn.$$.fragment,t),P(jn.$$.fragment,t),P(qn.$$.fragment,t),P(Dn.$$.fragment,t),P(On.$$.fragment,t),P(An.$$.fragment,t),P(Vn.$$.fragment,t),P(Wn.$$.fragment,t),P(Bn.$$.fragment,t),P(Hn.$$.fragment,t),P(To.$$.fragment,t),P(Kn.$$.fragment,t),P(Gn.$$.fragment,t),P(Io.$$.fragment,t),P(Jn.$$.fragment,t),P(Xn.$$.fragment,t),P(yo.$$.fragment,t),P(Zn.$$.fragment,t),P(Qn.$$.fragment,t),P(Yn.$$.fragment,t),P(es.$$.fragment,t),P(Co.$$.fragment,t),P(ts.$$.fragment,t),P(os.$$.fragment,t),P(ns.$$.fragment,t),P(ss.$$.fragment,t),P(ko.$$.fragment,t),P(rs.$$.fragment,t),P(as.$$.fragment,t),P(is.$$.fragment,t),P($o.$$.fragment,t),P(ps.$$.fragment,t),P(xo.$$.fragment,t),P(hs.$$.fragment,t),P(ms.$$.fragment,t),P(Eo.$$.fragment,t),P(fs.$$.fragment,t),P(us.$$.fragment,t),P(zo.$$.fragment,t),P(gs.$$.fragment,t),P(_s.$$.fragment,t),P(vs.$$.fragment,t),P(Ps.$$.fragment,t),P(jo.$$.fragment,t),P(Ts.$$.fragment,t),P(Is.$$.fragment,t),P(ys.$$.fragment,t),P(bs.$$.fragment,t),P(qo.$$.fragment,t),P(Cs.$$.fragment,t),P(Ls.$$.fragment,t),P(ks.$$.fragment,t),P(Fs.$$.fragment,t),P(Oo.$$.fragment,t),P(qs.$$.fragment,t),P(Ds.$$.fragment,t),P(Os.$$.fragment,t),P(As.$$.fragment,t),P(Ns.$$.fragment,t),P(Vs.$$.fragment,t),P(Ss.$$.fragment,t),P(Ws.$$.fragment,t),P(No.$$.fragment,t),P(Bs.$$.fragment,t),P(Us.$$.fragment,t),P(Rs.$$.fragment,t),P(Hs.$$.fragment,t),P(So.$$.fragment,t),P(Ks.$$.fragment,t),Ol=!0)},o(t){T(y.$$.fragment,t),T(be.$$.fragment,t),T($e.$$.fragment,t),T(en.$$.fragment,t),T(nn.$$.fragment,t),T(sn.$$.fragment,t),T(rn.$$.fragment,t),T(ln.$$.fragment,t),T(dn.$$.fragment,t),T(pn.$$.fragment,t),T(hn.$$.fragment,t),T(mn.$$.fragment,t),T(un.$$.fragment,t),T(gn.$$.fragment,t),T(_n.$$.fragment,t),T(Pn.$$.fragment,t),T(Tn.$$.fragment,t),T(yn.$$.fragment,t),T(bn.$$.fragment,t),T(Cn.$$.fragment,t),T(Ln.$$.fragment,t),T($n.$$.fragment,t),T(xn.$$.fragment,t),T(En.$$.fragment,t),
T(zn.$$.fragment,t),T(jn.$$.fragment,t),T(qn.$$.fragment,t),T(Dn.$$.fragment,t),T(On.$$.fragment,t),T(An.$$.fragment,t),T(Vn.$$.fragment,t),T(Wn.$$.fragment,t),T(Bn.$$.fragment,t),T(Hn.$$.fragment,t),T(To.$$.fragment,t),T(Kn.$$.fragment,t),T(Gn.$$.fragment,t),T(Io.$$.fragment,t),T(Jn.$$.fragment,t),T(Xn.$$.fragment,t),T(yo.$$.fragment,t),T(Zn.$$.fragment,t),T(Qn.$$.fragment,t),T(Yn.$$.fragment,t),T(es.$$.fragment,t),T(Co.$$.fragment,t),T(ts.$$.fragment,t),T(os.$$.fragment,t),T(ns.$$.fragment,t),T(ss.$$.fragment,t),T(ko.$$.fragment,t),T(rs.$$.fragment,t),T(as.$$.fragment,t),T(is.$$.fragment,t),T($o.$$.fragment,t),T(ps.$$.fragment,t),T(xo.$$.fragment,t),T(hs.$$.fragment,t),T(ms.$$.fragment,t),T(Eo.$$.fragment,t),T(fs.$$.fragment,t),T(us.$$.fragment,t),T(zo.$$.fragment,t),T(gs.$$.fragment,t),T(_s.$$.fragment,t),T(vs.$$.fragment,t),T(Ps.$$.fragment,t),T(jo.$$.fragment,t),T(Ts.$$.fragment,t),T(Is.$$.fragment,t),T(ys.$$.fragment,t),T(bs.$$.fragment,t),T(qo.$$.fragment,t),T(Cs.$$.fragment,t),T(Ls.$$.fragment,t),T(ks.$$.fragment,t),T(Fs.$$.fragment,t),T(Oo.$$.fragment,t),T(qs.$$.fragment,t),T(Ds.$$.fragment,t),T(Os.$$.fragment,t),T(As.$$.fragment,t),T(Ns.$$.fragment,t),T(Vs.$$.fragment,t),T(Ss.$$.fragment,t),T(Ws.$$.fragment,t),T(No.$$.fragment,t),T(Bs.$$.fragment,t),T(Us.$$.fragment,t),T(Rs.$$.fragment,t),T(Hs.$$.fragment,t),T(So.$$.fragment,t),T(Ks.$$.fragment,t),Ol=!1},d(t){o(p),t&&o(L),t&&o(u),I(y),t&&o(at),t&&o(M),I(be),t&&o(Xt),t&&o(pe),t&&o(he),t&&o(je),t&&o(Qt),t&&o(Fe),t&&o(Yt),t&&o(O),I($e),t&&o(eo),t&&o(me),t&&o(qe),t&&o(_e),t&&o(Yo),t&&o(E),t&&o(nl),I(en,t),t&&o(sl),t&&o(Ae),t&&o(rl),t&&o(dt),I(nn),t&&o(al),t&&o(fe),I(sn),I(rn),t&&o(il),t&&o(pt),I(ln),t&&o(ll),t&&o(V),I(dn),I(pn),t&&o(dl),t&&o(ft),I(hn),t&&o(cl),t&&o(S),I(mn),I(un),t&&o(pl),t&&o(_t),I(gn),t&&o(hl),t&&o(F),I(_n),I(Pn),I(Tn),I(yn),I(bn),t&&o(ml),t&&o(vt),I(Cn),t&&o(fl),t&&o(W),I(Ln),I($n),I(xn),t&&o(ul),t&&o(Pt),I(En),t&&o(gl),t&&o(B),I(zn),I(jn),I(qn),t&&o(_l),t&&o(It),I(Dn),t&&o(vl),t&&o(U),I(On),I(An),I(Vn),t&&o(Pl),t&&o(yt),I(Wn),t&&o(Tl),t&&o(R),I(Bn),I(Hn),I(To),I(Kn),I(Gn),I(Io),I(Jn),I(Xn),I(yo),I(Zn),t&&o(Il),t&&o(kt),I(Qn),t&&o(yl),t&&o(wt),I(Yn),I(es),I(Co),I(ts),t&&o(bl),t&&o(xt),I(os),t&&o(Cl),t&&o(Et),I(ns),I(ss),I(ko),I(rs),t&&o(Ll),t&&o(Mt),I(as),t&&o(kl),t&&o(q),I(is),I($o),I(ps),I(xo),I(hs),I(ms),I(Eo),I(fs),I(us),I(zo),I(gs),t&&o(wl),t&&o(Dt),I(_s),t&&o($l),t&&o(Ot),I(vs),I(Ps),I(jo),I(Ts),t&&o(xl),t&&o(Nt),I(Is),t&&o(El),t&&o(Vt),I(ys),I(bs),I(qo),I(Cs),t&&o(zl),t&&o(Wt),I(Ls),t&&o(Ml),t&&o(z),I(ks),I(Fs),I(Oo),I(qs),I(Ds),I(Os),I(As),I(Ns),t&&o(jl),t&&o(Ut),I(Vs),t&&o(Fl),t&&o(Rt),I(Ss),I(Ws),I(No),I(Bs),t&&o(ql),t&&o(Kt),I(Us),t&&o(Dl),t&&o(Gt),I(Rs),I(Hs),I(So),I(Ks)}}}const 
C1={local:"clip",sections:[{local:"overview",title:"Overview"},{local:"usage",title:"Usage"},{local:"transformers.CLIPConfig",title:"CLIPConfig"},{local:"transformers.CLIPTextConfig",title:"CLIPTextConfig"},{local:"transformers.CLIPVisionConfig",title:"CLIPVisionConfig"},{local:"transformers.CLIPTokenizer",title:"CLIPTokenizer"},{local:"transformers.CLIPTokenizerFast",title:"CLIPTokenizerFast"},{local:"transformers.CLIPFeatureExtractor",title:"CLIPFeatureExtractor"},{local:"transformers.CLIPProcessor",title:"CLIPProcessor"},{local:"transformers.CLIPModel",title:"CLIPModel"},{local:"transformers.CLIPTextModel",title:"CLIPTextModel"},{local:"transformers.CLIPVisionModel",title:"CLIPVisionModel"},{local:"transformers.TFCLIPModel",title:"TFCLIPModel"},{local:"transformers.TFCLIPTextModel",title:"TFCLIPTextModel"},{local:"transformers.TFCLIPVisionModel",title:"TFCLIPVisionModel"},{local:"transformers.FlaxCLIPModel",title:"FlaxCLIPModel"},{local:"transformers.FlaxCLIPTextModel",title:"FlaxCLIPTextModel"},{local:"transformers.FlaxCLIPVisionModel",title:"FlaxCLIPVisionModel"}],title:"CLIP"};function L1(x,p,L){let{fw:u}=p;return x.$$set=b=>{"fw"in b&&L(0,u=b.fw)},[u]}class M1 extends r1{constructor(p){super();a1(this,p,L1,b1,i1,{fw:0})}}export{M1 as default,C1 as metadata};
280
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages/model_doc/mluke.mdx-0a6a8f21.js
import{S as tn,i as nn,s as on,e as o,k as p,w as E,t as r,M as sn,c as s,d as n,m as u,a as i,x as $,h as a,b as l,F as t,g as d,y as A,L as rn,q as P,o as N,B as I}from"../../chunks/vendor-4833417e.js";import{D as xt}from"../../chunks/Docstring-4f315ed9.js";import{C as en}from"../../chunks/CodeBlock-6a3d1b46.js";import{I as zt}from"../../chunks/IconCopyLink-4b81c553.js";import"../../chunks/CopyButton-dacfbfaf.js";function an(Se){let g,V,h,f,le,U,Ke,ce,De,_e,b,T,de,W,Re,pe,Fe,ge,k,Oe,S,Be,je,K,Xe,Ce,ke,J,Ye,ye,G,He,ve,Q,ue,Ve,be,Z,Je,we,D,Te,L,Ge,ee,Qe,Ze,Le,R,qe,q,et,te,tt,nt,xe,y,ot,F,st,it,O,rt,at,ze,w,x,me,B,lt,he,ct,Me,m,j,dt,_,pt,ne,ut,mt,oe,ht,ft,X,_t,gt,kt,C,yt,se,vt,bt,wt,z,Y,Tt,fe,Lt,qt,ie,H,Ee;return U=new zt({}),W=new zt({}),D=new en({props:{code:`from transformers import LukeModel model = LukeModel.from_pretrained("studio-ousia/mluke-base")`,highlighted:`<span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> LukeModel model = LukeModel.from_pretrained(<span class="hljs-string">&quot;studio-ousia/mluke-base&quot;</span>)`}}),R=new en({props:{code:`from transformers import MLukeTokenizer tokenizer = MLukeTokenizer.from_pretrained("studio-ousia/mluke-base")`,highlighted:`<span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MLukeTokenizer tokenizer = MLukeTokenizer.from_pretrained(<span class="hljs-string">&quot;studio-ousia/mluke-base&quot;</span>)`}}),B=new zt({}),j=new xt({props:{name:"class transformers.MLukeTokenizer",anchor:"transformers.MLukeTokenizer",parameters:[{name:"vocab_file",val:""},{name:"entity_vocab_file",val:""},{name:"bos_token",val:" = '<s>'"},{name:"eos_token",val:" = '</s>'"},{name:"sep_token",val:" = '</s>'"},{name:"cls_token",val:" = '<s>'"},{name:"unk_token",val:" = '<unk>'"},{name:"pad_token",val:" = '<pad>'"},{name:"mask_token",val:" = '<mask>'"},{name:"task",val:" = None"},{name:"max_entity_length",val:" = 32"},{name:"max_mention_length",val:" = 30"},{name:"entity_token_1",val:" = '<ent>'"},{name:"entity_token_2",val:" = '<ent2>'"},{name:"entity_unk_token",val:" = '[UNK]'"},{name:"entity_pad_token",val:" = '[PAD]'"},{name:"entity_mask_token",val:" = '[MASK]'"},{name:"entity_mask2_token",val:" = '[MASK2]'"},{name:"sp_model_kwargs",val:": typing.Union[typing.Dict[str, typing.Any], NoneType] = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/mluke/tokenization_mluke.py#L150",parametersDescription:[{anchor:"transformers.MLukeTokenizer.vocab_file",description:`<strong>vocab_file</strong> (<code>str</code>) &#x2014; Path to the vocabulary file.`,name:"vocab_file"},{anchor:"transformers.MLukeTokenizer.entity_vocab_file",description:`<strong>entity_vocab_file</strong> (<code>str</code>) &#x2014; Path to the entity vocabulary file.`,name:"entity_vocab_file"},{anchor:"transformers.MLukeTokenizer.bos_token",description:`<strong>bos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;s&gt;&quot;</code>) &#x2014; The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. 
The token used is the <code>cls_token</code>.</p> </div>`,name:"bos_token"},{anchor:"transformers.MLukeTokenizer.eos_token",description:`<strong>eos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;/s&gt;&quot;</code>) &#x2014; The end of sequence token.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the <code>sep_token</code>.</p> </div>`,name:"eos_token"},{anchor:"transformers.MLukeTokenizer.sep_token",description:`<strong>sep_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;/s&gt;&quot;</code>) &#x2014; The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens.`,name:"sep_token"},{anchor:"transformers.MLukeTokenizer.cls_token",description:`<strong>cls_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;s&gt;&quot;</code>) &#x2014; The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens.`,name:"cls_token"},{anchor:"transformers.MLukeTokenizer.unk_token",description:`<strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;unk&gt;&quot;</code>) &#x2014; The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.`,name:"unk_token"},{anchor:"transformers.MLukeTokenizer.pad_token",description:`<strong>pad_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;pad&gt;&quot;</code>) &#x2014; The token used for padding, for example when batching sequences of different lengths.`,name:"pad_token"},{anchor:"transformers.MLukeTokenizer.mask_token",description:`<strong>mask_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;mask&gt;&quot;</code>) &#x2014; The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict.`,name:"mask_token"},{anchor:"transformers.MLukeTokenizer.task",description:`<strong>task</strong> (<code>str</code>, <em>optional</em>) &#x2014; Task for which you want to prepare sequences. One of <code>&quot;entity_classification&quot;</code>, <code>&quot;entity_pair_classification&quot;</code>, or <code>&quot;entity_span_classification&quot;</code>. 
If you specify this argument, the entity sequence is automatically created based on the given entity span(s).`,name:"task"},{anchor:"transformers.MLukeTokenizer.max_entity_length",description:`<strong>max_entity_length</strong> (<code>int</code>, <em>optional</em>, defaults to 32) &#x2014; The maximum length of <code>entity_ids</code>.`,name:"max_entity_length"},{anchor:"transformers.MLukeTokenizer.max_mention_length",description:`<strong>max_mention_length</strong> (<code>int</code>, <em>optional</em>, defaults to 30) &#x2014; The maximum number of tokens inside an entity span.`,name:"max_mention_length"},{anchor:"transformers.MLukeTokenizer.entity_token_1",description:`<strong>entity_token_1</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&lt;ent&gt;</code>) &#x2014; The special token used to represent an entity span in a word token sequence. This token is only used when <code>task</code> is set to <code>&quot;entity_classification&quot;</code> or <code>&quot;entity_pair_classification&quot;</code>.`,name:"entity_token_1"},{anchor:"transformers.MLukeTokenizer.entity_token_2",description:`<strong>entity_token_2</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&lt;ent2&gt;</code>) &#x2014; The special token used to represent an entity span in a word token sequence. This token is only used when <code>task</code> is set to <code>&quot;entity_pair_classification&quot;</code>.`,name:"entity_token_2"},{anchor:"transformers.MLukeTokenizer.additional_special_tokens",description:`<strong>additional_special_tokens</strong> (<code>List[str]</code>, <em>optional</em>, defaults to <code>[&quot;&lt;s&gt;NOTUSED&quot;, &quot;&lt;/s&gt;NOTUSED&quot;]</code>) &#x2014; Additional special tokens used by the tokenizer.`,name:"additional_special_tokens"},{anchor:"transformers.MLukeTokenizer.sp_model_kwargs",description:`<strong>sp_model_kwargs</strong> (<code>dict</code>, <em>optional</em>) &#x2014; Will be passed to the <code>SentencePieceProcessor.__init__()</code> method. The <a href="https://github.com/google/sentencepiece/tree/master/python" rel="nofollow">Python wrapper for SentencePiece</a> can be used, among other things, to set:</p> <ul> <li> <p><code>enable_sampling</code>: Enable subword regularization.</p> </li> <li> <p><code>nbest_size</code>: Sampling parameters for unigram. 
Invalid for BPE-Dropout.</p> <ul> <li><code>nbest_size = {0,1}</code>: No sampling is performed.</li> <li><code>nbest_size &gt; 1</code>: samples from the nbest_size results.</li> <li><code>nbest_size &lt; 0</code>: assuming that nbest_size is infinite and samples from the all hypothesis (lattice) using forward-filtering-and-backward-sampling algorithm.</li> </ul> </li> <li> <p><code>alpha</code>: Smoothing parameter for unigram sampling, and dropout probability of merge operations for BPE-dropout.</p> </li> </ul>`,name:"sp_model_kwargs"},{anchor:"transformers.MLukeTokenizer.sp_model",description:`<strong>sp_model</strong> (<code>SentencePieceProcessor</code>) &#x2014; The <em>SentencePiece</em> processor that is used for every conversion (string, tokens and IDs).`,name:"sp_model"}]}}),Y=new xt({props:{name:"__call__",anchor:"transformers.MLukeTokenizer.__call__",parameters:[{name:"text",val:": typing.Union[str, typing.List[str]]"},{name:"text_pair",val:": typing.Union[str, typing.List[str], NoneType] = None"},{name:"entity_spans",val:": typing.Union[typing.List[typing.Tuple[int, int]], typing.List[typing.List[typing.Tuple[int, int]]], NoneType] = None"},{name:"entity_spans_pair",val:": typing.Union[typing.List[typing.Tuple[int, int]], typing.List[typing.List[typing.Tuple[int, int]]], NoneType] = None"},{name:"entities",val:": typing.Union[typing.List[str], typing.List[typing.List[str]], NoneType] = None"},{name:"entities_pair",val:": typing.Union[typing.List[str], typing.List[typing.List[str]], NoneType] = None"},{name:"add_special_tokens",val:": bool = True"},{name:"padding",val:": typing.Union[bool, str, transformers.file_utils.PaddingStrategy] = False"},{name:"truncation",val:": typing.Union[bool, str, transformers.tokenization_utils_base.TruncationStrategy] = False"},{name:"max_length",val:": typing.Optional[int] = None"},{name:"max_entity_length",val:": typing.Optional[int] = None"},{name:"stride",val:": int = 0"},{name:"is_split_into_words",val:": typing.Optional[bool] = False"},{name:"pad_to_multiple_of",val:": typing.Optional[int] = None"},{name:"return_tensors",val:": typing.Union[str, transformers.file_utils.TensorType, NoneType] = None"},{name:"return_token_type_ids",val:": typing.Optional[bool] = None"},{name:"return_attention_mask",val:": typing.Optional[bool] = None"},{name:"return_overflowing_tokens",val:": bool = False"},{name:"return_special_tokens_mask",val:": bool = False"},{name:"return_offsets_mapping",val:": bool = False"},{name:"return_length",val:": bool = False"},{name:"verbose",val:": bool = True"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/mluke/tokenization_mluke.py#L366",parametersDescription:[{anchor:"transformers.MLukeTokenizer.__call__.text",description:`<strong>text</strong> (<code>str</code>, <code>List[str]</code>, <code>List[List[str]]</code>) &#x2014; The sequence or batch of sequences to be encoded. Each sequence must be a string. Note that this tokenizer does not support tokenization based on pretokenized strings.`,name:"text"},{anchor:"transformers.MLukeTokenizer.__call__.text_pair",description:`<strong>text_pair</strong> (<code>str</code>, <code>List[str]</code>, <code>List[List[str]]</code>) &#x2014; The sequence or batch of sequences to be encoded. Each sequence must be a string. 
Note that this tokenizer does not support tokenization based on pretokenized strings.`,name:"text_pair"},{anchor:"transformers.MLukeTokenizer.__call__.entity_spans",description:`<strong>entity_spans</strong> (<code>List[Tuple[int, int]]</code>, <code>List[List[Tuple[int, int]]]</code>, <em>optional</em>) &#x2014; The sequence or batch of sequences of entity spans to be encoded. Each sequence consists of tuples each with two integers denoting character-based start and end positions of entities. If you specify <code>&quot;entity_classification&quot;</code> or <code>&quot;entity_pair_classification&quot;</code> as the <code>task</code> argument in the constructor, the length of each sequence must be 1 or 2, respectively. If you specify <code>entities</code>, the length of each sequence must be equal to the length of each sequence of <code>entities</code>.`,name:"entity_spans"},{anchor:"transformers.MLukeTokenizer.__call__.entity_spans_pair",description:`<strong>entity_spans_pair</strong> (<code>List[Tuple[int, int]]</code>, <code>List[List[Tuple[int, int]]]</code>, <em>optional</em>) &#x2014; The sequence or batch of sequences of entity spans to be encoded. Each sequence consists of tuples each with two integers denoting character-based start and end positions of entities. If you specify the <code>task</code> argument in the constructor, this argument is ignored. If you specify <code>entities_pair</code>, the length of each sequence must be equal to the length of each sequence of <code>entities_pair</code>.`,name:"entity_spans_pair"},{anchor:"transformers.MLukeTokenizer.__call__.entities",description:`<strong>entities</strong> (<code>List[str]</code>, <code>List[List[str]]</code>, <em>optional</em>) &#x2014; The sequence or batch of sequences of entities to be encoded. Each sequence consists of strings representing entities, i.e., special entities (e.g., [MASK]) or entity titles of Wikipedia (e.g., Los Angeles). This argument is ignored if you specify the <code>task</code> argument in the constructor. The length of each sequence must be equal to the length of each sequence of <code>entity_spans</code>. If you specify <code>entity_spans</code> without specifying this argument, the entity sequence or the batch of entity sequences is automatically constructed by filling it with the [MASK] entity.`,name:"entities"},{anchor:"transformers.MLukeTokenizer.__call__.entities_pair",description:`<strong>entities_pair</strong> (<code>List[str]</code>, <code>List[List[str]]</code>, <em>optional</em>) &#x2014; The sequence or batch of sequences of entities to be encoded. Each sequence consists of strings representing entities, i.e., special entities (e.g., [MASK]) or entity titles of Wikipedia (e.g., Los Angeles). This argument is ignored if you specify the <code>task</code> argument in the constructor. The length of each sequence must be equal to the length of each sequence of <code>entity_spans_pair</code>. 
If you specify <code>entity_spans_pair</code> without specifying this argument, the entity sequence or the batch of entity sequences is automatically constructed by filling it with the [MASK] entity.`,name:"entities_pair"},{anchor:"transformers.MLukeTokenizer.__call__.max_entity_length",description:`<strong>max_entity_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; The maximum length of <code>entity_ids</code>.`,name:"max_entity_length"},{anchor:"transformers.MLukeTokenizer.__call__.add_special_tokens",description:`<strong>add_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to encode the sequences with the special tokens relative to their model.`,name:"add_special_tokens"},{anchor:"transformers.MLukeTokenizer.__call__.padding",description:`<strong>padding</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/file_utils#transformers.file_utils.PaddingStrategy">PaddingStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls padding. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest&apos;</code>: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided).</li> <li><code>&apos;max_length&apos;</code>: Pad to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided.</li> <li><code>False</code> or <code>&apos;do_not_pad&apos;</code> (default): No padding (i.e., can output a batch with sequences of different lengths).</li> </ul>`,name:"padding"},{anchor:"transformers.MLukeTokenizer.__call__.truncation",description:`<strong>truncation</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.tokenization_utils_base.TruncationStrategy">TruncationStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls truncation. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_second&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. 
This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>False</code> or <code>&apos;do_not_truncate&apos;</code> (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size).</li> </ul>`,name:"truncation"},{anchor:"transformers.MLukeTokenizer.__call__.max_length",description:`<strong>max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; Controls the maximum length to use by one of the truncation/padding parameters.</p> <p>If left unset or set to <code>None</code>, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated.`,name:"max_length"},{anchor:"transformers.MLukeTokenizer.__call__.stride",description:`<strong>stride</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If set to a number along with <code>max_length</code>, the overflowing tokens returned when <code>return_overflowing_tokens=True</code> will contain some tokens from the end of the truncated sequence returned to provide some overlap between truncated and overflowing sequences. The value of this argument defines the number of overlapping tokens.`,name:"stride"},{anchor:"transformers.MLukeTokenizer.__call__.is_split_into_words",description:`<strong>is_split_into_words</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the input is already pre-tokenized (e.g., split into words). If set to <code>True</code>, the tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace) which it will tokenize. This is useful for NER or token classification.`,name:"is_split_into_words"},{anchor:"transformers.MLukeTokenizer.__call__.pad_to_multiple_of",description:`<strong>pad_to_multiple_of</strong> (<code>int</code>, <em>optional</em>) &#x2014; If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability &gt;= 7.5 (Volta).`,name:"pad_to_multiple_of"},{anchor:"transformers.MLukeTokenizer.__call__.return_tensors",description:`<strong>return_tensors</strong> (<code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>) &#x2014; If set, will return tensors instead of list of python integers. Acceptable values are:</p> <ul> <li><code>&apos;tf&apos;</code>: Return TensorFlow <code>tf.constant</code> objects.</li> <li><code>&apos;pt&apos;</code>: Return PyTorch <code>torch.Tensor</code> objects.</li> <li><code>&apos;np&apos;</code>: Return Numpy <code>np.ndarray</code> objects.</li> </ul>`,name:"return_tensors"},{anchor:"transformers.MLukeTokenizer.__call__.return_token_type_ids",description:`<strong>return_token_type_ids</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to return token type IDs. 
If left to the default, will return the token type IDs according to the specific tokenizer&#x2019;s default, defined by the <code>return_outputs</code> attribute.</p> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"return_token_type_ids"},{anchor:"transformers.MLukeTokenizer.__call__.return_attention_mask",description:`<strong>return_attention_mask</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to return the attention mask. If left to the default, will return the attention mask according to the specific tokenizer&#x2019;s default, defined by the <code>return_outputs</code> attribute.</p> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"return_attention_mask"},{anchor:"transformers.MLukeTokenizer.__call__.return_overflowing_tokens",description:`<strong>return_overflowing_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return overflowing token sequences. If a pair of sequences of input ids (or a batch of pairs) is provided with <code>truncation_strategy = longest_first</code> or <code>True</code>, an error is raised instead of returning overflowing tokens.`,name:"return_overflowing_tokens"},{anchor:"transformers.MLukeTokenizer.__call__.return_special_tokens_mask",description:`<strong>return_special_tokens_mask</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return special tokens mask information.`,name:"return_special_tokens_mask"},{anchor:"transformers.MLukeTokenizer.__call__.return_offsets_mapping",description:`<strong>return_offsets_mapping</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return <code>(char_start, char_end)</code> for each token.</p> <p>This is only available on fast tokenizers inheriting from <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast">PreTrainedTokenizerFast</a>, if using Python&#x2019;s tokenizer, this method will raise <code>NotImplementedError</code>.`,name:"return_offsets_mapping"},{anchor:"transformers.MLukeTokenizer.__call__.return_length",description:`<strong>return_length</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the lengths of the encoded inputs.`,name:"return_length"},{anchor:"transformers.MLukeTokenizer.__call__.verbose",description:`<strong>verbose</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to print more information and warnings. 
**kwargs &#x2014; passed to the <code>self.tokenize()</code> method`,name:"verbose"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a> with the following fields:</p> <ul> <li> <p><strong>input_ids</strong> \u2014 List of token ids to be fed to a model.</p> <p><a href="../glossary#input-ids">What are input IDs?</a></p> </li> <li> <p><strong>token_type_ids</strong> \u2014 List of token type ids to be fed to a model (when <code>return_token_type_ids=True</code> or if <em>\u201Ctoken_type_ids\u201D</em> is in <code>self.model_input_names</code>).</p> <p><a href="../glossary#token-type-ids">What are token type IDs?</a></p> </li> <li> <p><strong>attention_mask</strong> \u2014 List of indices specifying which tokens should be attended to by the model (when <code>return_attention_mask=True</code> or if <em>\u201Cattention_mask\u201D</em> is in <code>self.model_input_names</code>).</p> <p><a href="../glossary#attention-mask">What are attention masks?</a></p> </li> <li> <p><strong>entity_ids</strong> \u2014 List of entity ids to be fed to a model.</p> <p><a href="../glossary#input-ids">What are input IDs?</a></p> </li> <li> <p><strong>entity_position_ids</strong> \u2014 List of entity positions in the input sequence to be fed to a model.</p> </li> <li> <p><strong>entity_token_type_ids</strong> \u2014 List of entity token type ids to be fed to a model (when <code>return_token_type_ids=True</code> or if <em>\u201Centity_token_type_ids\u201D</em> is in <code>self.model_input_names</code>).</p> <p><a href="../glossary#token-type-ids">What are token type IDs?</a></p> </li> <li> <p><strong>entity_attention_mask</strong> \u2014 List of indices specifying which entities should be attended to by the model (when <code>return_attention_mask=True</code> or if <em>\u201Centity_attention_mask\u201D</em> is in <code>self.model_input_names</code>).</p> <p><a href="../glossary#attention-mask">What are attention masks?</a></p> </li> <li> <p><strong>entity_start_positions</strong> \u2014 List of the start positions of entities in the word token sequence (when <code>task="entity_span_classification"</code>).</p> </li> <li> <p><strong>entity_end_positions</strong> \u2014 List of the end positions of entities in the word token sequence (when <code>task="entity_span_classification"</code>).</p> </li> <li> <p><strong>overflowing_tokens</strong> \u2014 List of overflowing tokens sequences (when a <code>max_length</code> is specified and <code>return_overflowing_tokens=True</code>).</p> </li> <li> <p><strong>num_truncated_tokens</strong> \u2014 Number of tokens truncated (when a <code>max_length</code> is specified and <code>return_overflowing_tokens=True</code>).</p> </li> <li> <p><strong>special_tokens_mask</strong> \u2014 List of 0s and 1s, with 1 specifying added special tokens and 0 specifying regular sequence tokens (when <code>add_special_tokens=True</code> and <code>return_special_tokens_mask=True</code>).</p> </li> <li> <p><strong>length</strong> \u2014 The length of the inputs (when <code>return_length=True</code>)</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a></p> `}}),H=new xt({props:{name:"save_vocabulary",anchor:"transformers.MLukeTokenizer.save_vocabulary",parameters:[{name:"save_directory",val:": str"},{name:"filename_prefix",val:": typing.Optional[str] = 
None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/mluke/tokenization_mluke.py#L1493"}}),{c(){g=o("meta"),V=p(),h=o("h1"),f=o("a"),le=o("span"),E(U.$$.fragment),Ke=p(),ce=o("span"),De=r("mLUKE"),_e=p(),b=o("h2"),T=o("a"),de=o("span"),E(W.$$.fragment),Re=p(),pe=o("span"),Fe=r("Overview"),ge=p(),k=o("p"),Oe=r("The mLUKE model was proposed in "),S=o("a"),Be=r("mLUKE: The Power of Entity Representations in Multilingual Pretrained Language Models"),je=r(` by Ryokan Ri, Ikuya Yamada, and Yoshimasa Tsuruoka. It\u2019s a multilingual extension of the `),K=o("a"),Xe=r("LUKE model"),Ce=r(" trained on the basis of XLM-RoBERTa."),ke=p(),J=o("p"),Ye=r(`It is based on XLM-RoBERTa and adds entity embeddings, which helps improve performance on various downstream tasks involving reasoning about entities such as named entity recognition, extractive question answering, relation classification, cloze-style knowledge completion.`),ye=p(),G=o("p"),He=r("The abstract from the paper is the following:"),ve=p(),Q=o("p"),ue=o("em"),Ve=r(`Recent studies have shown that multilingual pretrained language models can be effectively improved with cross-lingual alignment information from Wikipedia entities. However, existing methods only exploit entity information in pretraining and do not explicitly use entities in downstream tasks. In this study, we explore the effectiveness of leveraging entity representations for downstream cross-lingual tasks. We train a multilingual language model with 24 languages with entity representations and show the model consistently outperforms word-based pretrained models in various cross-lingual transfer tasks. We also analyze the model and the key insight is that incorporating entity representations into the input allows us to extract more language-agnostic features. We also evaluate the model with a multilingual cloze prompt task with the mLAMA dataset. We show that entity-based prompt elicits correct factual knowledge more likely than using only word representations.`),be=p(),Z=o("p"),Je=r("One can directly plug in the weights of mLUKE into a LUKE model, like so:"),we=p(),E(D.$$.fragment),Te=p(),L=o("p"),Ge=r("Note that mLUKE has its own tokenizer, "),ee=o("a"),Qe=r("MLukeTokenizer"),Ze=r(". You can initialize it as follows:"),Le=p(),E(R.$$.fragment),qe=p(),q=o("p"),et=r("As mLUKE\u2019s architecture is equivalent to that of LUKE, one can refer to "),te=o("a"),tt=r("LUKE\u2019s documentation page"),nt=r(` for all tips, code examples and notebooks.`),xe=p(),y=o("p"),ot=r("This model was contributed by "),F=o("a"),st=r("ryo0634"),it=r(". The original code can be found "),O=o("a"),rt=r("here"),at=r("."),ze=p(),w=o("h2"),x=o("a"),me=o("span"),E(B.$$.fragment),lt=p(),he=o("span"),ct=r("MLukeTokenizer"),Me=p(),m=o("div"),E(j.$$.fragment),dt=p(),_=o("p"),pt=r("Adapted from "),ne=o("a"),ut=r("XLMRobertaTokenizer"),mt=r(" and "),oe=o("a"),ht=r("LukeTokenizer"),ft=r(`. Based on `),X=o("a"),_t=r("SentencePiece"),gt=r("."),kt=p(),C=o("p"),yt=r("This tokenizer inherits from "),se=o("a"),vt=r("PreTrainedTokenizer"),bt=r(` which contains most of the main methods. 
Users should refer to this superclass for more information regarding those methods.`),wt=p(),z=o("div"),E(Y.$$.fragment),Tt=p(),fe=o("p"),Lt=r(`Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of sequences, depending on the task you want to prepare them for.`),qt=p(),ie=o("div"),E(H.$$.fragment),this.h()},l(e){const c=sn('[data-svelte="svelte-1phssyn"]',document.head);g=s(c,"META",{name:!0,content:!0}),c.forEach(n),V=u(e),h=s(e,"H1",{class:!0});var $e=i(h);f=s($e,"A",{id:!0,class:!0,href:!0});var Mt=i(f);le=s(Mt,"SPAN",{});var Et=i(le);$(U.$$.fragment,Et),Et.forEach(n),Mt.forEach(n),Ke=u($e),ce=s($e,"SPAN",{});var $t=i(ce);De=a($t,"mLUKE"),$t.forEach(n),$e.forEach(n),_e=u(e),b=s(e,"H2",{class:!0});var Ae=i(b);T=s(Ae,"A",{id:!0,class:!0,href:!0});var At=i(T);de=s(At,"SPAN",{});var Pt=i(de);$(W.$$.fragment,Pt),Pt.forEach(n),At.forEach(n),Re=u(Ae),pe=s(Ae,"SPAN",{});var Nt=i(pe);Fe=a(Nt,"Overview"),Nt.forEach(n),Ae.forEach(n),ge=u(e),k=s(e,"P",{});var re=i(k);Oe=a(re,"The mLUKE model was proposed in "),S=s(re,"A",{href:!0,rel:!0});var It=i(S);Be=a(It,"mLUKE: The Power of Entity Representations in Multilingual Pretrained Language Models"),It.forEach(n),je=a(re,` by Ryokan Ri, Ikuya Yamada, and Yoshimasa Tsuruoka. It\u2019s a multilingual extension of the `),K=s(re,"A",{href:!0,rel:!0});var Ut=i(K);Xe=a(Ut,"LUKE model"),Ut.forEach(n),Ce=a(re," trained on the basis of XLM-RoBERTa."),re.forEach(n),ke=u(e),J=s(e,"P",{});var Wt=i(J);Ye=a(Wt,`It is based on XLM-RoBERTa and adds entity embeddings, which helps improve performance on various downstream tasks involving reasoning about entities such as named entity recognition, extractive question answering, relation classification, cloze-style knowledge completion.`),Wt.forEach(n),ye=u(e),G=s(e,"P",{});var St=i(G);He=a(St,"The abstract from the paper is the following:"),St.forEach(n),ve=u(e),Q=s(e,"P",{});var Kt=i(Q);ue=s(Kt,"EM",{});var Dt=i(ue);Ve=a(Dt,`Recent studies have shown that multilingual pretrained language models can be effectively improved with cross-lingual alignment information from Wikipedia entities. However, existing methods only exploit entity information in pretraining and do not explicitly use entities in downstream tasks. In this study, we explore the effectiveness of leveraging entity representations for downstream cross-lingual tasks. We train a multilingual language model with 24 languages with entity representations and show the model consistently outperforms word-based pretrained models in various cross-lingual transfer tasks. We also analyze the model and the key insight is that incorporating entity representations into the input allows us to extract more language-agnostic features. We also evaluate the model with a multilingual cloze prompt task with the mLAMA dataset. We show that entity-based prompt elicits correct factual knowledge more likely than using only word representations.`),Dt.forEach(n),Kt.forEach(n),be=u(e),Z=s(e,"P",{});var Rt=i(Z);Je=a(Rt,"One can directly plug in the weights of mLUKE into a LUKE model, like so:"),Rt.forEach(n),we=u(e),$(D.$$.fragment,e),Te=u(e),L=s(e,"P",{});var Pe=i(L);Ge=a(Pe,"Note that mLUKE has its own tokenizer, "),ee=s(Pe,"A",{href:!0});var Ft=i(ee);Qe=a(Ft,"MLukeTokenizer"),Ft.forEach(n),Ze=a(Pe,". 
You can initialize it as follows:"),Pe.forEach(n),Le=u(e),$(R.$$.fragment,e),qe=u(e),q=s(e,"P",{});var Ne=i(q);et=a(Ne,"As mLUKE\u2019s architecture is equivalent to that of LUKE, one can refer to "),te=s(Ne,"A",{href:!0});var Ot=i(te);tt=a(Ot,"LUKE\u2019s documentation page"),Ot.forEach(n),nt=a(Ne,` for all tips, code examples and notebooks.`),Ne.forEach(n),xe=u(e),y=s(e,"P",{});var ae=i(y);ot=a(ae,"This model was contributed by "),F=s(ae,"A",{href:!0,rel:!0});var Bt=i(F);st=a(Bt,"ryo0634"),Bt.forEach(n),it=a(ae,". The original code can be found "),O=s(ae,"A",{href:!0,rel:!0});var jt=i(O);rt=a(jt,"here"),jt.forEach(n),at=a(ae,"."),ae.forEach(n),ze=u(e),w=s(e,"H2",{class:!0});var Ie=i(w);x=s(Ie,"A",{id:!0,class:!0,href:!0});var Xt=i(x);me=s(Xt,"SPAN",{});var Ct=i(me);$(B.$$.fragment,Ct),Ct.forEach(n),Xt.forEach(n),lt=u(Ie),he=s(Ie,"SPAN",{});var Yt=i(he);ct=a(Yt,"MLukeTokenizer"),Yt.forEach(n),Ie.forEach(n),Me=u(e),m=s(e,"DIV",{class:!0});var v=i(m);$(j.$$.fragment,v),dt=u(v),_=s(v,"P",{});var M=i(_);pt=a(M,"Adapted from "),ne=s(M,"A",{href:!0});var Ht=i(ne);ut=a(Ht,"XLMRobertaTokenizer"),Ht.forEach(n),mt=a(M," and "),oe=s(M,"A",{href:!0});var Vt=i(oe);ht=a(Vt,"LukeTokenizer"),Vt.forEach(n),ft=a(M,`. Based on `),X=s(M,"A",{href:!0,rel:!0});var Jt=i(X);_t=a(Jt,"SentencePiece"),Jt.forEach(n),gt=a(M,"."),M.forEach(n),kt=u(v),C=s(v,"P",{});var Ue=i(C);yt=a(Ue,"This tokenizer inherits from "),se=s(Ue,"A",{href:!0});var Gt=i(se);vt=a(Gt,"PreTrainedTokenizer"),Gt.forEach(n),bt=a(Ue,` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),Ue.forEach(n),wt=u(v),z=s(v,"DIV",{class:!0});var We=i(z);$(Y.$$.fragment,We),Tt=u(We),fe=s(We,"P",{});var Qt=i(fe);Lt=a(Qt,`Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of sequences, depending on the task you want to prepare them for.`),Qt.forEach(n),We.forEach(n),qt=u(v),ie=s(v,"DIV",{class:!0});var Zt=i(ie);$(H.$$.fragment,Zt),Zt.forEach(n),v.forEach(n),this.h()},h(){l(g,"name","hf:doc:metadata"),l(g,"content",JSON.stringify(ln)),l(f,"id","mluke"),l(f,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(f,"href","#mluke"),l(h,"class","relative group"),l(T,"id","overview"),l(T,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(T,"href","#overview"),l(b,"class","relative group"),l(S,"href","https://arxiv.org/abs/2110.08151"),l(S,"rel","nofollow"),l(K,"href","https://arxiv.org/abs/2010.01057"),l(K,"rel","nofollow"),l(ee,"href","/docs/transformers/pr_16143/en/model_doc/mluke#transformers.MLukeTokenizer"),l(te,"href","luke"),l(F,"href","https://huggingface.co/ryo0634"),l(F,"rel","nofollow"),l(O,"href","https://github.com/studio-ousia/luke"),l(O,"rel","nofollow"),l(x,"id","transformers.MLukeTokenizer"),l(x,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(x,"href","#transformers.MLukeTokenizer"),l(w,"class","relative 
group"),l(ne,"href","/docs/transformers/pr_16143/en/model_doc/xlm-roberta#transformers.XLMRobertaTokenizer"),l(oe,"href","/docs/transformers/pr_16143/en/model_doc/luke#transformers.LukeTokenizer"),l(X,"href","https://github.com/google/sentencepiece"),l(X,"rel","nofollow"),l(se,"href","/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer"),l(z,"class","docstring"),l(ie,"class","docstring"),l(m,"class","docstring")},m(e,c){t(document.head,g),d(e,V,c),d(e,h,c),t(h,f),t(f,le),A(U,le,null),t(h,Ke),t(h,ce),t(ce,De),d(e,_e,c),d(e,b,c),t(b,T),t(T,de),A(W,de,null),t(b,Re),t(b,pe),t(pe,Fe),d(e,ge,c),d(e,k,c),t(k,Oe),t(k,S),t(S,Be),t(k,je),t(k,K),t(K,Xe),t(k,Ce),d(e,ke,c),d(e,J,c),t(J,Ye),d(e,ye,c),d(e,G,c),t(G,He),d(e,ve,c),d(e,Q,c),t(Q,ue),t(ue,Ve),d(e,be,c),d(e,Z,c),t(Z,Je),d(e,we,c),A(D,e,c),d(e,Te,c),d(e,L,c),t(L,Ge),t(L,ee),t(ee,Qe),t(L,Ze),d(e,Le,c),A(R,e,c),d(e,qe,c),d(e,q,c),t(q,et),t(q,te),t(te,tt),t(q,nt),d(e,xe,c),d(e,y,c),t(y,ot),t(y,F),t(F,st),t(y,it),t(y,O),t(O,rt),t(y,at),d(e,ze,c),d(e,w,c),t(w,x),t(x,me),A(B,me,null),t(w,lt),t(w,he),t(he,ct),d(e,Me,c),d(e,m,c),A(j,m,null),t(m,dt),t(m,_),t(_,pt),t(_,ne),t(ne,ut),t(_,mt),t(_,oe),t(oe,ht),t(_,ft),t(_,X),t(X,_t),t(_,gt),t(m,kt),t(m,C),t(C,yt),t(C,se),t(se,vt),t(C,bt),t(m,wt),t(m,z),A(Y,z,null),t(z,Tt),t(z,fe),t(fe,Lt),t(m,qt),t(m,ie),A(H,ie,null),Ee=!0},p:rn,i(e){Ee||(P(U.$$.fragment,e),P(W.$$.fragment,e),P(D.$$.fragment,e),P(R.$$.fragment,e),P(B.$$.fragment,e),P(j.$$.fragment,e),P(Y.$$.fragment,e),P(H.$$.fragment,e),Ee=!0)},o(e){N(U.$$.fragment,e),N(W.$$.fragment,e),N(D.$$.fragment,e),N(R.$$.fragment,e),N(B.$$.fragment,e),N(j.$$.fragment,e),N(Y.$$.fragment,e),N(H.$$.fragment,e),Ee=!1},d(e){n(g),e&&n(V),e&&n(h),I(U),e&&n(_e),e&&n(b),I(W),e&&n(ge),e&&n(k),e&&n(ke),e&&n(J),e&&n(ye),e&&n(G),e&&n(ve),e&&n(Q),e&&n(be),e&&n(Z),e&&n(we),I(D,e),e&&n(Te),e&&n(L),e&&n(Le),I(R,e),e&&n(qe),e&&n(q),e&&n(xe),e&&n(y),e&&n(ze),e&&n(w),I(B),e&&n(Me),e&&n(m),I(j),I(Y),I(H)}}}const ln={local:"mluke",sections:[{local:"overview",title:"Overview"},{local:"transformers.MLukeTokenizer",title:"MLukeTokenizer"}],title:"mLUKE"};function cn(Se,g,V){let{fw:h}=g;return Se.$$set=f=>{"fw"in f&&V(0,h=f.fw)},[h]}class fn extends tn{constructor(g){super();nn(this,g,cn,an,on,{fw:0})}}export{fn as default,ln as metadata};
281
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages/model_doc/rag.mdx-2450ac40.js
import{S as oT,i as nT,s as rT,e as r,k as c,w as v,t,M as sT,c as s,d as n,m as l,a,x as b,h as o,b as i,F as e,g as _,y as T,q as w,o as k,B as q}from"../../chunks/vendor-4833417e.js";import{T as ge}from"../../chunks/Tip-fffd6df1.js";import{D as M}from"../../chunks/Docstring-4f315ed9.js";import{C as sn}from"../../chunks/CodeBlock-6a3d1b46.js";import{I as _e}from"../../chunks/IconCopyLink-4b81c553.js";import"../../chunks/CopyButton-dacfbfaf.js";function aT($){let p,y,u,f,x;return{c(){p=r("p"),y=t("Although the recipe for forward pass needs to be defined within this function, one should call the "),u=r("code"),f=t("Module"),x=t(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){p=s(g,"P",{});var h=a(p);y=o(h,"Although the recipe for forward pass needs to be defined within this function, one should call the "),u=s(h,"CODE",{});var R=a(u);f=o(R,"Module"),R.forEach(n),x=o(h,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),h.forEach(n)},m(g,h){_(g,p,h),e(p,y),e(p,u),e(u,f),e(p,x)},d(g){g&&n(p)}}}function dT($){let p,y,u,f,x;return{c(){p=r("p"),y=t("Although the recipe for forward pass needs to be defined within this function, one should call the "),u=r("code"),f=t("Module"),x=t(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){p=s(g,"P",{});var h=a(p);y=o(h,"Although the recipe for forward pass needs to be defined within this function, one should call the "),u=s(h,"CODE",{});var R=a(u);f=o(R,"Module"),R.forEach(n),x=o(h,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),h.forEach(n)},m(g,h){_(g,p,h),e(p,y),e(p,u),e(u,f),e(p,x)},d(g){g&&n(p)}}}function iT($){let p,y,u,f,x;return{c(){p=r("p"),y=t("Although the recipe for forward pass needs to be defined within this function, one should call the "),u=r("code"),f=t("Module"),x=t(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){p=s(g,"P",{});var h=a(p);y=o(h,"Although the recipe for forward pass needs to be defined within this function, one should call the "),u=s(h,"CODE",{});var R=a(u);f=o(R,"Module"),R.forEach(n),x=o(h,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),h.forEach(n)},m(g,h){_(g,p,h),e(p,y),e(p,u),e(u,f),e(p,x)},d(g){g&&n(p)}}}function cT($){let p,y,u,f,x;return{c(){p=r("p"),y=t("Although the recipe for forward pass needs to be defined within this function, one should call the "),u=r("code"),f=t("Module"),x=t(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){p=s(g,"P",{});var h=a(p);y=o(h,"Although the recipe for forward pass needs to be defined within this function, one should call the "),u=s(h,"CODE",{});var R=a(u);f=o(R,"Module"),R.forEach(n),x=o(h,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),h.forEach(n)},m(g,h){_(g,p,h),e(p,y),e(p,u),e(u,f),e(p,x)},d(g){g&&n(p)}}}function lT($){let 
p,y,u,f,x;return{c(){p=r("p"),y=t("Although the recipe for forward pass needs to be defined within this function, one should call the "),u=r("code"),f=t("Module"),x=t(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){p=s(g,"P",{});var h=a(p);y=o(h,"Although the recipe for forward pass needs to be defined within this function, one should call the "),u=s(h,"CODE",{});var R=a(u);f=o(R,"Module"),R.forEach(n),x=o(h,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),h.forEach(n)},m(g,h){_(g,p,h),e(p,y),e(p,u),e(u,f),e(p,x)},d(g){g&&n(p)}}}function pT($){let p,y,u,f,x;return{c(){p=r("p"),y=t("Although the recipe for forward pass needs to be defined within this function, one should call the "),u=r("code"),f=t("Module"),x=t(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){p=s(g,"P",{});var h=a(p);y=o(h,"Although the recipe for forward pass needs to be defined within this function, one should call the "),u=s(h,"CODE",{});var R=a(u);f=o(R,"Module"),R.forEach(n),x=o(h,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),h.forEach(n)},m(g,h){_(g,p,h),e(p,y),e(p,u),e(u,f),e(p,x)},d(g){g&&n(p)}}}function hT($){let p,y,u,f,x;return{c(){p=r("p"),y=t("Although the recipe for forward pass needs to be defined within this function, one should call the "),u=r("code"),f=t("Module"),x=t(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){p=s(g,"P",{});var h=a(p);y=o(h,"Although the recipe for forward pass needs to be defined within this function, one should call the "),u=s(h,"CODE",{});var R=a(u);f=o(R,"Module"),R.forEach(n),x=o(h,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),h.forEach(n)},m(g,h){_(g,p,h),e(p,y),e(p,u),e(u,f),e(p,x)},d(g){g&&n(p)}}}function uT($){let p,y,u,f,x;return{c(){p=r("p"),y=t("Although the recipe for forward pass needs to be defined within this function, one should call the "),u=r("code"),f=t("Module"),x=t(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){p=s(g,"P",{});var h=a(p);y=o(h,"Although the recipe for forward pass needs to be defined within this function, one should call the "),u=s(h,"CODE",{});var R=a(u);f=o(R,"Module"),R.forEach(n),x=o(h,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),h.forEach(n)},m(g,h){_(g,p,h),e(p,y),e(p,u),e(u,f),e(p,x)},d(g){g&&n(p)}}}function mT($){let p,y,u,f,x;return{c(){p=r("p"),y=t("Although the recipe for forward pass needs to be defined within this function, one should call the "),u=r("code"),f=t("Module"),x=t(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){p=s(g,"P",{});var h=a(p);y=o(h,"Although the recipe for forward pass needs to be defined within this function, one should call the 
"),u=s(h,"CODE",{});var R=a(u);f=o(R,"Module"),R.forEach(n),x=o(h,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),h.forEach(n)},m(g,h){_(g,p,h),e(p,y),e(p,u),e(u,f),e(p,x)},d(g){g&&n(p)}}}function gT($){let p,y,u,f,x;return{c(){p=r("p"),y=t("Although the recipe for forward pass needs to be defined within this function, one should call the "),u=r("code"),f=t("Module"),x=t(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){p=s(g,"P",{});var h=a(p);y=o(h,"Although the recipe for forward pass needs to be defined within this function, one should call the "),u=s(h,"CODE",{});var R=a(u);f=o(R,"Module"),R.forEach(n),x=o(h,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),h.forEach(n)},m(g,h){_(g,p,h),e(p,y),e(p,u),e(u,f),e(p,x)},d(g){g&&n(p)}}}function _T($){let p,y,u,f,x;return{c(){p=r("p"),y=t("Although the recipe for forward pass needs to be defined within this function, one should call the "),u=r("code"),f=t("Module"),x=t(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){p=s(g,"P",{});var h=a(p);y=o(h,"Although the recipe for forward pass needs to be defined within this function, one should call the "),u=s(h,"CODE",{});var R=a(u);f=o(R,"Module"),R.forEach(n),x=o(h,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),h.forEach(n)},m(g,h){_(g,p,h),e(p,y),e(p,u),e(u,f),e(p,x)},d(g){g&&n(p)}}}function fT($){let p,y,u,f,x;return{c(){p=r("p"),y=t("Although the recipe for forward pass needs to be defined within this function, one should call the "),u=r("code"),f=t("Module"),x=t(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){p=s(g,"P",{});var h=a(p);y=o(h,"Although the recipe for forward pass needs to be defined within this function, one should call the "),u=s(h,"CODE",{});var R=a(u);f=o(R,"Module"),R.forEach(n),x=o(h,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),h.forEach(n)},m(g,h){_(g,p,h),e(p,y),e(p,u),e(u,f),e(p,x)},d(g){g&&n(p)}}}function vT($){let 
p,y,u,f,x,g,h,R,ti,Ia,$e,ot,Fr,St,oi,zr,ni,Oa,an,ri,Wa,nt,si,Dt,ai,di,Ha,dn,ii,Ba,cn,$r,ci,Qa,rt,li,Lt,pi,hi,Ua,Ee,st,Er,It,ui,jr,mi,Va,de,Ot,gi,me,ln,_i,fi,Mr,vi,bi,pn,Ti,wi,hn,ki,qi,yi,at,Wt,xi,Ht,Ri,un,Fi,zi,$i,dt,Bt,Ei,Qt,ji,mn,Mi,Gi,Ka,je,it,Gr,Ut,Ai,Ar,Ni,Ya,Me,Vt,Pi,ct,Kt,Ci,Nr,Si,Ja,Ge,lt,Pr,Yt,Di,Cr,Li,Xa,Ae,Jt,Ii,Sr,Oi,Za,Xt,Zt,ed,Ne,pt,Dr,eo,Wi,Lr,Hi,td,X,to,Bi,Ir,Qi,Ui,Or,Vi,Ki,oo,Yi,ht,no,Ji,Wr,Xi,Zi,ut,ro,ec,Pe,tc,Hr,oc,nc,Br,rc,sc,ac,mt,so,dc,ao,ic,Qr,cc,lc,od,Ce,gt,Ur,io,pc,Vr,hc,nd,D,co,uc,Se,mc,gn,gc,_c,Kr,fc,vc,bc,_t,Tc,Yr,wc,kc,ie,qc,Jr,yc,xc,_n,Rc,Fc,Xr,zc,$c,fn,Ec,jc,Mc,G,Gc,vn,Ac,Nc,Zr,Pc,Cc,es,Sc,Dc,ts,Lc,Ic,os,Oc,Wc,bn,Hc,Bc,ns,Qc,Uc,Tn,Vc,Kc,wn,Yc,Jc,rs,Xc,Zc,el,lo,tl,kn,ol,nl,rl,po,sl,ho,al,dl,il,ee,uo,cl,De,ll,qn,pl,hl,ss,ul,ml,gl,ft,_l,as,fl,vl,mo,rd,Le,vt,ds,go,bl,is,Tl,sd,E,_o,wl,Ie,kl,yn,ql,yl,cs,xl,Rl,Fl,bt,zl,ls,$l,El,ps,jl,Ml,ce,Gl,hs,Al,Nl,xn,Pl,Cl,us,Sl,Dl,Rn,Ll,Il,Ol,A,Wl,Fn,Hl,Bl,ms,Ql,Ul,gs,Vl,Kl,_s,Yl,Jl,fs,Xl,Zl,zn,ep,tp,vs,op,np,$n,rp,sp,En,ap,dp,bs,ip,cp,lp,fo,pp,jn,hp,up,mp,vo,gp,bo,_p,fp,vp,te,To,bp,Oe,Tp,Mn,wp,kp,Ts,qp,yp,xp,Tt,Rp,ws,Fp,zp,wo,$p,wt,ko,Ep,qo,jp,Gn,Mp,Gp,ad,We,kt,ks,yo,Ap,qs,Np,dd,j,xo,Pp,He,Cp,An,Sp,Dp,ys,Lp,Ip,Op,qt,Wp,xs,Hp,Bp,Rs,Qp,Up,le,Vp,Fs,Kp,Yp,Nn,Jp,Xp,zs,Zp,eh,Pn,th,oh,nh,N,rh,Cn,sh,ah,$s,dh,ih,Es,ch,lh,js,ph,hh,Ms,uh,mh,Sn,gh,_h,Gs,fh,vh,Dn,bh,Th,Ln,wh,kh,As,qh,yh,xh,Ro,Rh,In,Fh,zh,$h,Fo,Eh,zo,jh,Mh,Gh,oe,$o,Ah,Be,Nh,On,Ph,Ch,Ns,Sh,Dh,Lh,yt,Ih,Ps,Oh,Wh,Eo,Hh,xt,jo,Bh,Cs,Qh,id,Qe,Rt,Ss,Mo,Uh,Ds,Vh,cd,P,Go,Kh,Ue,Yh,Wn,Jh,Xh,Ls,Zh,eu,tu,Ft,ou,Is,nu,ru,pe,su,Os,au,du,Hn,iu,cu,Ws,lu,pu,Bn,hu,uu,mu,L,gu,Qn,_u,fu,Hs,vu,bu,Bs,Tu,wu,Qs,ku,qu,Us,yu,xu,Un,Ru,Fu,Vs,zu,$u,Vn,Eu,ju,Ks,Mu,Gu,Au,Ao,Nu,Kn,Pu,Cu,Su,No,Du,Po,Lu,Iu,Ou,Ys,Wu,Hu,ne,Co,Bu,Ve,Qu,Yn,Uu,Vu,Js,Ku,Yu,Ju,zt,Xu,Xs,Zu,em,So,ld,Ke,$t,Zs,Do,tm,ea,om,pd,F,Lo,nm,Ye,rm,Jn,sm,am,ta,dm,im,cm,Et,lm,oa,pm,hm,na,um,mm,he,gm,ra,_m,fm,Xn,vm,bm,sa,Tm,wm,Zn,km,qm,ym,I,xm,er,Rm,Fm,aa,zm,$m,da,Em,jm,ia,Mm,Gm,ca,Am,Nm,tr,Pm,Cm,la,Sm,Dm,or,Lm,Im,pa,Om,Wm,Hm,Io,Bm,nr,Qm,Um,Vm,Oo,Km,Wo,Ym,Jm,Xm,ha,Zm,eg,re,Ho,tg,Je,og,rr,ng,rg,ua,sg,ag,dg,jt,ig,ma,cg,lg,Bo,pg,Mt,Qo,hg,Uo,ug,sr,mg,gg,hd,Xe,Gt,ga,Vo,_g,_a,fg,ud,z,Ko,vg,Ze,bg,ar,Tg,wg,fa,kg,qg,yg,At,xg,va,Rg,Fg,ba,zg,$g,ue,Eg,Ta,jg,Mg,dr,Gg,Ag,wa,Ng,Pg,ir,Cg,Sg,Dg,O,Lg,cr,Ig,Og,ka,Wg,Hg,qa,Bg,Qg,ya,Ug,Vg,xa,Kg,Yg,lr,Jg,Xg,Ra,Zg,e_,pr,t_,o_,Fa,n_,r_,s_,Yo,a_,hr,d_,i_,c_,Jo,l_,Xo,p_,h_,u_,za,m_,g_,se,Zo,__,et,f_,ur,v_,b_,$a,T_,w_,k_,Nt,q_,Ea,y_,x_,en,R_,Pt,tn,F_,ja,z_,md;return g=new _e({}),St=new _e({}),It=new _e({}),Ot=new M({props:{name:"class transformers.RagConfig",anchor:"transformers.RagConfig",parameters:[{name:"vocab_size",val:" = None"},{name:"is_encoder_decoder",val:" = True"},{name:"prefix",val:" = None"},{name:"bos_token_id",val:" = None"},{name:"pad_token_id",val:" = None"},{name:"eos_token_id",val:" = None"},{name:"decoder_start_token_id",val:" = None"},{name:"title_sep",val:" = ' / '"},{name:"doc_sep",val:" = ' // '"},{name:"n_docs",val:" = 5"},{name:"max_combined_length",val:" = 300"},{name:"retrieval_vector_size",val:" = 768"},{name:"retrieval_batch_size",val:" = 8"},{name:"dataset",val:" = 'wiki_dpr'"},{name:"dataset_split",val:" = 'train'"},{name:"index_name",val:" = 'compressed'"},{name:"index_path",val:" = None"},{name:"passages_path",val:" = None"},{name:"use_dummy_dataset",val:" = False"},{name:"reduce_loss",val:" = False"},{name:"label_smoothing",val:" = 0.0"},{name:"do_deduplication",val:" = True"},{name:"exclude_bos_score",val:" = False"},{name:"do_marginalize",val:" = False"},{name:"output_retrieved",val:" = 
False"},{name:"use_cache",val:" = True"},{name:"forced_eos_token_id",val:" = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/rag/configuration_rag.py#L82",parametersDescription:[{anchor:"transformers.RagConfig.title_sep",description:`<strong>title_sep</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot; / &quot;</code>) &#x2014; Separator inserted between the title and the text of the retrieved document when calling <a href="/docs/transformers/pr_16143/en/model_doc/rag#transformers.RagRetriever">RagRetriever</a>.`,name:"title_sep"},{anchor:"transformers.RagConfig.doc_sep",description:`<strong>doc_sep</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot; // &quot;</code>) &#x2014; Separator inserted between the the text of the retrieved document and the original input when calling <a href="/docs/transformers/pr_16143/en/model_doc/rag#transformers.RagRetriever">RagRetriever</a>.`,name:"doc_sep"},{anchor:"transformers.RagConfig.n_docs",description:`<strong>n_docs</strong> (<code>int</code>, <em>optional</em>, defaults to 5) &#x2014; Number of documents to retrieve.`,name:"n_docs"},{anchor:"transformers.RagConfig.max_combined_length",description:`<strong>max_combined_length</strong> (<code>int</code>, <em>optional</em>, defaults to 300) &#x2014; Max length of contextualized input returned by <code>__call__()</code>`,name:"max_combined_length"},{anchor:"transformers.RagConfig.retrieval_vector_size",description:`<strong>retrieval_vector_size</strong> (<code>int</code>, <em>optional</em>, defaults to 768) &#x2014; Dimensionality of the document embeddings indexed by <a href="/docs/transformers/pr_16143/en/model_doc/rag#transformers.RagRetriever">RagRetriever</a>.`,name:"retrieval_vector_size"},{anchor:"transformers.RagConfig.retrieval_batch_size",description:`<strong>retrieval_batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated <a href="/docs/transformers/pr_16143/en/model_doc/rag#transformers.RagRetriever">RagRetriever</a>.`,name:"retrieval_batch_size"},{anchor:"transformers.RagConfig.dataset",description:`<strong>dataset</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;wiki_dpr&quot;</code>) &#x2014; A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids using <code>datasets.list_datasets()</code>).`,name:"dataset"},{anchor:"transformers.RagConfig.dataset_split",description:`<strong>dataset_split</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;train&quot;</code>) &#x2014; Which split of the <code>dataset</code> to load.`,name:"dataset_split"},{anchor:"transformers.RagConfig.index_name",description:`<strong>index_name</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;compressed&quot;</code>) &#x2014; The index name of the index associated with the <code>dataset</code>. One can choose between <code>&quot;legacy&quot;</code>, <code>&quot;exact&quot;</code> and <code>&quot;compressed&quot;</code>.`,name:"index_name"},{anchor:"transformers.RagConfig.index_path",description:`<strong>index_path</strong> (<code>str</code>, <em>optional</em>) &#x2014; The path to the serialized faiss index on disk. passages_path &#x2014; (<code>str</code>, <em>optional</em>): A path to text passages compatible with the faiss index. 
Required if using <code>LegacyIndex</code>- <strong>use_dummy_dataset</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to load a &#x201C;dummy&#x201D; variant of the dataset specified by <code>dataset</code>.`,name:"index_path"},{anchor:"transformers.RagConfig.label_smoothing",description:`<strong>label_smoothing</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; Only relevant if <code>return_loss</code> is set to <code>True</code>. Controls the <code>epsilon</code> parameter value for label smoothing in the loss calculation. If set to 0, no label smoothing is performed.`,name:"label_smoothing"},{anchor:"transformers.RagConfig.do_marginalize",description:`<strong>do_marginalize</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; If <code>True</code>, the logits are marginalized over all documents by making use of <code>torch.nn.functional.log_softmax</code>.`,name:"do_marginalize"},{anchor:"transformers.RagConfig.reduce_loss",description:`<strong>reduce_loss</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to reduce the NLL loss using the <code>torch.Tensor.sum</code> operation.`,name:"reduce_loss"},{anchor:"transformers.RagConfig.do_deduplication",description:`<strong>do_deduplication</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to deduplicate the generations from different context documents for a given input. Has to be set to <code>False</code> if used while training with distributed backend.`,name:"do_deduplication"},{anchor:"transformers.RagConfig.exclude_bos_score",description:`<strong>exclude_bos_score</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to disregard the BOS token when computing the loss.`,name:"exclude_bos_score"},{anchor:"transformers.RagConfig.output_retrieved(bool,",description:`<strong>output_retrieved(<code>bool</code>,</strong> <em>optional</em>, defaults to <code>False</code>) &#x2014; If set to <code>True</code>, <code>retrieved_doc_embeds</code>, <code>retrieved_doc_ids</code>, <code>context_input_ids</code> and <code>context_attention_mask</code> are returned. See returned tensors for more detail.`,name:"output_retrieved(bool,"},{anchor:"transformers.RagConfig.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not the model should return the last key/values attentions (not used by all models).`,name:"use_cache"},{anchor:"transformers.RagConfig.forced_eos_token_id",description:`<strong>forced_eos_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the token to force as the last generated token when <code>max_length</code> is reached. 
Usually set to <code>eos_token_id</code>.`,name:"forced_eos_token_id"}]}}),Wt=new M({props:{name:"from_question_encoder_generator_configs",anchor:"transformers.RagConfig.from_question_encoder_generator_configs",parameters:[{name:"question_encoder_config",val:": PretrainedConfig"},{name:"generator_config",val:": PretrainedConfig"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/rag/configuration_rag.py#L170",returnDescription:` <p>An instance of a configuration object</p> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/model_doc/encoder-decoder#transformers.EncoderDecoderConfig" >EncoderDecoderConfig</a></p> `}}),Bt=new M({props:{name:"to_dict",anchor:"transformers.RagConfig.to_dict",parameters:[],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/rag/configuration_rag.py#L183",returnDescription:` <p>Dictionary of all the attributes that make up this configuration instance,</p> `,returnType:` <p><code>Dict[str, any]</code></p> `}}),Ut=new _e({}),Vt=new M({props:{name:"class transformers.RagTokenizer",anchor:"transformers.RagTokenizer",parameters:[{name:"question_encoder",val:""},{name:"generator",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/rag/tokenization_rag.py#L29"}}),Kt=new M({props:{name:"as_target_tokenizer",anchor:"transformers.RagTokenizer.as_target_tokenizer",parameters:[],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/rag/tokenization_rag.py#L71"}}),Yt=new _e({}),Jt=new M({props:{name:"class transformers.models.rag.modeling_rag.RetrievAugLMMarginOutput",anchor:"transformers.models.rag.modeling_rag.RetrievAugLMMarginOutput",parameters:[{name:"loss",val:": typing.Optional[torch.FloatTensor] = None"},{name:"logits",val:": FloatTensor = None"},{name:"doc_scores",val:": FloatTensor = None"},{name:"past_key_values",val:": typing.Optional[typing.List[torch.FloatTensor]] = None"},{name:"retrieved_doc_embeds",val:": typing.Optional[torch.FloatTensor] = None"},{name:"retrieved_doc_ids",val:": typing.Optional[torch.LongTensor] = None"},{name:"context_input_ids",val:": typing.Optional[torch.LongTensor] = None"},{name:"context_attention_mask",val:": typing.Optional[torch.LongTensor] = None"},{name:"question_encoder_last_hidden_state",val:": typing.Optional[torch.FloatTensor] = None"},{name:"question_enc_hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"question_enc_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"generator_enc_last_hidden_state",val:": typing.Optional[torch.FloatTensor] = None"},{name:"generator_enc_hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"generator_enc_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"generator_dec_hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"generator_dec_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"generator_cross_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/rag/modeling_rag.py#L41",parametersDescription:[{anchor:"transformers.models.rag.modeling_rag.RetrievAugLMMarginOutput.loss",description:`<strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, 
returned when <code>labels</code> is provided) &#x2014; Language modeling loss.`,name:"loss"},{anchor:"transformers.models.rag.modeling_rag.RetrievAugLMMarginOutput.logits",description:`<strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) &#x2014; Prediction scores of the language modeling head. The score is possibly marginalized over all documents for each vocabulary token.`,name:"logits"},{anchor:"transformers.models.rag.modeling_rag.RetrievAugLMMarginOutput.doc_scores",description:`<strong>doc_scores</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.n_docs)</code>) &#x2014; Score between each retrieved document embeddings (see <code>retrieved_doc_embeds</code>) and <code>question_encoder_last_hidden_state</code>.`,name:"doc_scores"},{anchor:"transformers.models.rag.modeling_rag.RetrievAugLMMarginOutput.past_key_values",description:`<strong>past_key_values</strong> (<code>List[torch.FloatTensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; List of <code>torch.FloatTensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains precomputed hidden-states (key and values in the attention blocks) of the decoder that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.`,name:"past_key_values"},{anchor:"transformers.models.rag.modeling_rag.RetrievAugLMMarginOutput.retrieved_doc_embeds",description:`<strong>retrieved_doc_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.n_docs, hidden_size)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>) &#x2014; Embedded documents retrieved by the retriever. 
Is used with <code>question_encoder_last_hidden_state</code> to compute the <code>doc_scores</code>.`,name:"retrieved_doc_embeds"},{anchor:"transformers.models.rag.modeling_rag.RetrievAugLMMarginOutput.retrieved_doc_ids",description:`<strong>retrieved_doc_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, config.n_docs)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>) &#x2014; The indexes of the embedded documents retrieved by the retriever.`,name:"retrieved_doc_ids"},{anchor:"transformers.models.rag.modeling_rag.RetrievAugLMMarginOutput.context_input_ids",description:`<strong>context_input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size * config.n_docs, config.max_combined_length)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>) &#x2014; Input ids post-processed from the retrieved documents and the question encoder input_ids by the retriever.`,name:"context_input_ids"},{anchor:"transformers.models.rag.modeling_rag.RetrievAugLMMarginOutput.context_attention_mask",description:`<strong>context_attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size * config.n_docs, config.max_combined_length)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>) &#x2014; Attention mask post-processed from the retrieved documents and the question encoder <code>input_ids</code> by the retriever.`,name:"context_attention_mask"},{anchor:"transformers.models.rag.modeling_rag.RetrievAugLMMarginOutput.question_encoder_last_hidden_state",description:`<strong>question_encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden states at the output of the last layer of the question encoder pooled output of the model.`,name:"question_encoder_last_hidden_state"},{anchor:"transformers.models.rag.modeling_rag.RetrievAugLMMarginOutput.question_enc_hidden_states",description:`<strong>question_enc_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings and one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden states of the question encoder at the output of each layer plus the initial embedding outputs.`,name:"question_enc_hidden_states"},{anchor:"transformers.models.rag.modeling_rag.RetrievAugLMMarginOutput.question_enc_attentions",description:`<strong>question_enc_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the question encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"question_enc_attentions"},{anchor:"transformers.models.rag.modeling_rag.RetrievAugLMMarginOutput.generator_enc_last_hidden_state",description:`<strong>generator_enc_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states 
at the output of the last layer of the generator encoder of the model.`,name:"generator_enc_last_hidden_state"},{anchor:"transformers.models.rag.modeling_rag.RetrievAugLMMarginOutput.generator_enc_hidden_states",description:`<strong>generator_enc_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings and one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden states of the generator encoder at the output of each layer plus the initial embedding outputs.`,name:"generator_enc_hidden_states"},{anchor:"transformers.models.rag.modeling_rag.RetrievAugLMMarginOutput.generator_enc_attentions",description:`<strong>generator_enc_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the generator encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"generator_enc_attentions"},{anchor:"transformers.models.rag.modeling_rag.RetrievAugLMMarginOutput.generator_dec_hidden_states",description:`<strong>generator_dec_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings and one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden states of the generator decoder at the output of each layer plus the initial embedding outputs.`,name:"generator_dec_hidden_states"},{anchor:"transformers.models.rag.modeling_rag.RetrievAugLMMarginOutput.generator_dec_attentions",description:`<strong>generator_dec_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the generator decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"generator_dec_attentions"},{anchor:"transformers.models.rag.modeling_rag.RetrievAugLMMarginOutput.generator_cross_attentions",description:`<strong>generator_cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Cross-attentions weights of the generator decoder, after the attention softmax, used to compute the weighted average in the cross-attention heads.`,name:"generator_cross_attentions"}]}}),Zt=new M({props:{name:"class 
transformers.models.rag.modeling_rag.RetrievAugLMOutput",anchor:"transformers.models.rag.modeling_rag.RetrievAugLMOutput",parameters:[{name:"logits",val:": FloatTensor = None"},{name:"doc_scores",val:": FloatTensor = None"},{name:"past_key_values",val:": typing.Optional[typing.List[torch.FloatTensor]] = None"},{name:"retrieved_doc_embeds",val:": typing.Optional[torch.FloatTensor] = None"},{name:"retrieved_doc_ids",val:": typing.Optional[torch.LongTensor] = None"},{name:"context_input_ids",val:": typing.Optional[torch.LongTensor] = None"},{name:"context_attention_mask",val:": typing.Optional[torch.LongTensor] = None"},{name:"question_encoder_last_hidden_state",val:": typing.Optional[torch.FloatTensor] = None"},{name:"question_enc_hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"question_enc_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"generator_enc_last_hidden_state",val:": typing.Optional[torch.FloatTensor] = None"},{name:"generator_enc_hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"generator_enc_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"generator_dec_hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"generator_dec_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"generator_cross_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/rag/modeling_rag.py#L136",parametersDescription:[{anchor:"transformers.models.rag.modeling_rag.RetrievAugLMOutput.logits",description:`<strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) &#x2014; Prediction scores of the language modeling head. The score is possibly marginalized over all documents for each vocabulary token.`,name:"logits"},{anchor:"transformers.models.rag.modeling_rag.RetrievAugLMOutput.doc_scores",description:`<strong>doc_scores</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.n_docs)</code>) &#x2014; Score between each retrieved document embeddings (see <code>retrieved_doc_embeds</code>) and <code>question_encoder_last_hidden_state</code>.`,name:"doc_scores"},{anchor:"transformers.models.rag.modeling_rag.RetrievAugLMOutput.past_key_values",description:`<strong>past_key_values</strong> (<code>List[torch.FloatTensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; List of <code>torch.FloatTensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains precomputed hidden-states (key and values in the attention blocks) of the decoder that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.`,name:"past_key_values"},{anchor:"transformers.models.rag.modeling_rag.RetrievAugLMOutput.retrieved_doc_embeds",description:`<strong>retrieved_doc_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.n_docs, hidden_size)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>) &#x2014; Embedded documents retrieved by the retriever. 
Is used with <code>question_encoder_last_hidden_state</code> to compute the <code>doc_scores</code>.`,name:"retrieved_doc_embeds"},{anchor:"transformers.models.rag.modeling_rag.RetrievAugLMOutput.retrieved_doc_ids",description:`<strong>retrieved_doc_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, config.n_docs)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>) &#x2014; The indexes of the embedded documents retrieved by the retriever.`,name:"retrieved_doc_ids"},{anchor:"transformers.models.rag.modeling_rag.RetrievAugLMOutput.context_input_ids",description:`<strong>context_input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size * config.n_docs, config.max_combined_length)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>) &#x2014; Input ids post-processed from the retrieved documents and the question encoder input_ids by the retriever.`,name:"context_input_ids"},{anchor:"transformers.models.rag.modeling_rag.RetrievAugLMOutput.context_attention_mask",description:`<strong>context_attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size * config.n_docs, config.max_combined_length)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>) &#x2014; Attention mask post-processed from the retrieved documents and the question encoder <code>input_ids</code> by the retriever.`,name:"context_attention_mask"},{anchor:"transformers.models.rag.modeling_rag.RetrievAugLMOutput.question_encoder_last_hidden_state",description:`<strong>question_encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden states at the output of the last layer of the question encoder pooled output of the model.`,name:"question_encoder_last_hidden_state"},{anchor:"transformers.models.rag.modeling_rag.RetrievAugLMOutput.question_enc_hidden_states",description:`<strong>question_enc_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings and one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden states of the question encoder at the output of each layer plus the initial embedding outputs.`,name:"question_enc_hidden_states"},{anchor:"transformers.models.rag.modeling_rag.RetrievAugLMOutput.question_enc_attentions",description:`<strong>question_enc_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the question encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"question_enc_attentions"},{anchor:"transformers.models.rag.modeling_rag.RetrievAugLMOutput.generator_enc_last_hidden_state",description:`<strong>generator_enc_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the 
generator encoder of the model.`,name:"generator_enc_last_hidden_state"},{anchor:"transformers.models.rag.modeling_rag.RetrievAugLMOutput.generator_enc_hidden_states",description:`<strong>generator_enc_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings and one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden states of the generator encoder at the output of each layer plus the initial embedding outputs.`,name:"generator_enc_hidden_states"},{anchor:"transformers.models.rag.modeling_rag.RetrievAugLMOutput.generator_enc_attentions",description:`<strong>generator_enc_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the generator encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"generator_enc_attentions"},{anchor:"transformers.models.rag.modeling_rag.RetrievAugLMOutput.generator_dec_hidden_states",description:`<strong>generator_dec_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings and one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden states of the generator decoder at the output of each layer plus the initial embedding outputs.`,name:"generator_dec_hidden_states"},{anchor:"transformers.models.rag.modeling_rag.RetrievAugLMOutput.generator_dec_attentions",description:`<strong>generator_dec_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the generator decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"generator_dec_attentions"},{anchor:"transformers.models.rag.modeling_rag.RetrievAugLMOutput.generator_cross_attentions",description:`<strong>generator_cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Cross-attentions weights of the generator decoder, after the attention softmax, used to compute the weighted average in the cross-attention heads.`,name:"generator_cross_attentions"}]}}),eo=new _e({}),to=new M({props:{name:"class 
transformers.RagRetriever",anchor:"transformers.RagRetriever",parameters:[{name:"config",val:""},{name:"question_encoder_tokenizer",val:""},{name:"generator_tokenizer",val:""},{name:"index",val:" = None"},{name:"init_retrieval",val:" = True"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/rag/retrieval_rag.py#L325",parametersDescription:[{anchor:"transformers.RagRetriever.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/rag#transformers.RagConfig">RagConfig</a>) &#x2014; The configuration of the RAG model this Retriever is used with. Contains parameters indicating which <code>Index</code> to build. You can load your own custom dataset with <code>config.index_name=&quot;custom&quot;</code> or use a canonical one (default) from the datasets library with <code>config.index_name=&quot;wiki_dpr&quot;</code> for example.`,name:"config"},{anchor:"transformers.RagRetriever.question_encoder_tokenizer",description:`<strong>question_encoder_tokenizer</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer that was used to tokenize the question. It is used to decode the question and then use the generator_tokenizer.`,name:"question_encoder_tokenizer"},{anchor:"transformers.RagRetriever.generator_tokenizer",description:`<strong>generator_tokenizer</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer used for the generator part of the RagModel.`,name:"generator_tokenizer"},{anchor:"transformers.RagRetriever.index",description:`<strong>index</strong> (<code>Index</code> optional, defaults to the one defined by the configuration) &#x2014; If specified, use this index instead of the one built using the configuration`,name:"index"}]}}),oo=new sn({props:{code:`# To load the default "wiki_dpr" dataset with 21M passages from wikipedia (index name is 'compressed' or 'exact') from transformers import RagRetriever retriever = RagRetriever.from_pretrained( "facebook/dpr-ctx_encoder-single-nq-base", dataset="wiki_dpr", index_name="compressed" ) # To load your own indexed dataset built with the datasets library. More info on how to build the indexed dataset in examples/rag/use_own_knowledge_dataset.py from transformers import RagRetriever dataset = ( ... ) # dataset must be a datasets.Datasets object with columns "title", "text" and "embeddings", and it must have a faiss index retriever = RagRetriever.from_pretrained("facebook/dpr-ctx_encoder-single-nq-base", indexed_dataset=dataset) # To load your own indexed dataset built with the datasets library that was saved on disk. 
More info in examples/rag/use_own_knowledge_dataset.py from transformers import RagRetriever dataset_path = "path/to/my/dataset" # dataset saved via *dataset.save_to_disk(...)* index_path = "path/to/my/index.faiss" # faiss index saved via *dataset.get_index("embeddings").save(...)* retriever = RagRetriever.from_pretrained( "facebook/dpr-ctx_encoder-single-nq-base", index_name="custom", passages_path=dataset_path, index_path=index_path, ) # To load the legacy index built originally for Rag's paper from transformers import RagRetriever retriever = RagRetriever.from_pretrained("facebook/dpr-ctx_encoder-single-nq-base", index_name="legacy")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># To load the default &quot;wiki_dpr&quot; dataset with 21M passages from wikipedia (index name is &#x27;compressed&#x27; or &#x27;exact&#x27;)</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RagRetriever <span class="hljs-meta">&gt;&gt;&gt; </span>retriever = RagRetriever.from_pretrained( <span class="hljs-meta">... </span> <span class="hljs-string">&quot;facebook/dpr-ctx_encoder-single-nq-base&quot;</span>, dataset=<span class="hljs-string">&quot;wiki_dpr&quot;</span>, index_name=<span class="hljs-string">&quot;compressed&quot;</span> <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># To load your own indexed dataset built with the datasets library. More info on how to build the indexed dataset in examples/rag/use_own_knowledge_dataset.py</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RagRetriever <span class="hljs-meta">&gt;&gt;&gt; </span>dataset = ( <span class="hljs-meta">... </span> ... <span class="hljs-meta">&gt;&gt;&gt; </span>) <span class="hljs-comment"># dataset must be a datasets.Datasets object with columns &quot;title&quot;, &quot;text&quot; and &quot;embeddings&quot;, and it must have a faiss index</span> <span class="hljs-meta">&gt;&gt;&gt; </span>retriever = RagRetriever.from_pretrained(<span class="hljs-string">&quot;facebook/dpr-ctx_encoder-single-nq-base&quot;</span>, indexed_dataset=dataset) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># To load your own indexed dataset built with the datasets library that was saved on disk. More info in examples/rag/use_own_knowledge_dataset.py</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RagRetriever <span class="hljs-meta">&gt;&gt;&gt; </span>dataset_path = <span class="hljs-string">&quot;path/to/my/dataset&quot;</span> <span class="hljs-comment"># dataset saved via *dataset.save_to_disk(...)*</span> <span class="hljs-meta">&gt;&gt;&gt; </span>index_path = <span class="hljs-string">&quot;path/to/my/index.faiss&quot;</span> <span class="hljs-comment"># faiss index saved via *dataset.get_index(&quot;embeddings&quot;).save(...)*</span> <span class="hljs-meta">&gt;&gt;&gt; </span>retriever = RagRetriever.from_pretrained( <span class="hljs-meta">... </span> <span class="hljs-string">&quot;facebook/dpr-ctx_encoder-single-nq-base&quot;</span>, <span class="hljs-meta">... </span> index_name=<span class="hljs-string">&quot;custom&quot;</span>, <span class="hljs-meta">... </span> passages_path=dataset_path, <span class="hljs-meta">... 
</span> index_path=index_path, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># To load the legacy index built originally for Rag&#x27;s paper</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RagRetriever <span class="hljs-meta">&gt;&gt;&gt; </span>retriever = RagRetriever.from_pretrained(<span class="hljs-string">&quot;facebook/dpr-ctx_encoder-single-nq-base&quot;</span>, index_name=<span class="hljs-string">&quot;legacy&quot;</span>)`}}),no=new M({props:{name:"init_retrieval",anchor:"transformers.RagRetriever.init_retrieval",parameters:[],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/rag/retrieval_rag.py#L459"}}),ro=new M({props:{name:"postprocess_docs",anchor:"transformers.RagRetriever.postprocess_docs",parameters:[{name:"docs",val:""},{name:"input_strings",val:""},{name:"prefix",val:""},{name:"n_docs",val:""},{name:"return_tensors",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/rag/retrieval_rag.py#L467",parametersDescription:[{anchor:"transformers.RagRetriever.postprocess_docs.docs",description:`<strong>docs</strong> (<code>dict</code>) &#x2014; Retrieved documents.`,name:"docs"},{anchor:"transformers.RagRetriever.postprocess_docs.input_strings",description:`<strong>input_strings</strong> (<code>str</code>) &#x2014; Input strings decoded by <code>preprocess_query</code>.`,name:"input_strings"},{anchor:"transformers.RagRetriever.postprocess_docs.prefix",description:`<strong>prefix</strong> (<code>str</code>) &#x2014; Prefix added at the beginning of each input, typically used with T5-based models.`,name:"prefix"}],returnDescription:` <p>a tuple consisting of two elements: contextualized <code>input_ids</code> and a compatible <code>attention_mask</code>.</p> `,returnType:` <p><code>tuple(tensors)</code></p> `}}),so=new M({props:{name:"retrieve",anchor:"transformers.RagRetriever.retrieve",parameters:[{name:"question_hidden_states",val:": ndarray"},{name:"n_docs",val:": int"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/rag/retrieval_rag.py#L539",parametersDescription:[{anchor:"transformers.RagRetriever.retrieve.question_hidden_states",description:`<strong>question_hidden_states</strong> (<code>np.ndarray</code> of shape <code>(batch_size, vector_size)</code>) &#x2014; A batch of query vectors to retrieve with.`,name:"question_hidden_states"},{anchor:"transformers.RagRetriever.retrieve.n_docs",description:`<strong>n_docs</strong> (<code>int</code>) &#x2014; The number of docs retrieved per query.`,name:"n_docs"}],returnDescription:` <p>A tuple with the following objects:</p> <ul> <li><strong>retrieved_doc_embeds</strong> (<code>np.ndarray</code> of shape <code>(batch_size, n_docs, dim)</code>) \u2014 The retrieval embeddings of the retrieved docs per query.</li> <li><strong>doc_ids</strong> (<code>np.ndarray</code> of shape <code>(batch_size, n_docs)</code>) \u2014 The ids of the documents in the index</li> <li><strong>doc_dicts</strong> (<code>List[dict]</code>): The <code>retrieved_doc_embeds</code> examples per query.</li> </ul> `,returnType:` <p><code>Tuple[np.ndarray, np.ndarray, List[dict]]</code></p> `}}),io=new _e({}),co=new M({props:{name:"class transformers.RagModel",anchor:"transformers.RagModel",parameters:[{name:"config",val:": 
typing.Optional[transformers.configuration_utils.PretrainedConfig] = None"},{name:"question_encoder",val:": typing.Optional[transformers.modeling_utils.PreTrainedModel] = None"},{name:"generator",val:": typing.Optional[transformers.modeling_utils.PreTrainedModel] = None"},{name:"retriever",val:": typing.Optional = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/rag/modeling_rag.py#L494",parametersDescription:[{anchor:"transformers.RagModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/rag#transformers.RagConfig">RagConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"},{anchor:"transformers.RagModel.question_encoder",description:`<strong>question_encoder</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a>) &#x2014; An encoder model compatible with the faiss index encapsulated by the <code>retriever</code>.`,name:"question_encoder"},{anchor:"transformers.RagModel.generator",description:`<strong>generator</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a>) &#x2014; A seq2seq model used as the generator in the RAG architecture.`,name:"generator"},{anchor:"transformers.RagModel.retriever",description:`<strong>retriever</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/rag#transformers.RagRetriever">RagRetriever</a>) &#x2014; A retriever class encapsulating a faiss index queried to obtain context documents for current inputs.`,name:"retriever"}]}}),_t=new ge({props:{$$slots:{default:[aT]},$$scope:{ctx:$}}}),uo=new M({props:{name:"forward",anchor:"transformers.RagModel.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"encoder_outputs",val:" = None"},{name:"decoder_input_ids",val:" = None"},{name:"decoder_attention_mask",val:" = None"},{name:"past_key_values",val:" = None"},{name:"doc_scores",val:" = None"},{name:"context_input_ids",val:" = None"},{name:"context_attention_mask",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"output_retrieved",val:" = None"},{name:"n_docs",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/rag/modeling_rag.py#L537",parametersDescription:[{anchor:"transformers.RagModel.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. <a href="/docs/transformers/pr_16143/en/model_doc/rag#transformers.RagConfig">RagConfig</a>, used to initialize the model, specifies which generator to use, it also specifies a compatible generator tokenizer. 
Use that tokenizer class to obtain the indices.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.RagModel.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.RagModel.forward.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tuple(tuple(torch.FloatTensor)</code>, <em>optional</em>) &#x2014; Tuple consists of (<code>generator_enc_last_hidden_state</code>, <em>optional</em>: <code>generator_enc_hidden_states</code>, <em>optional</em>: <code>generator_enc_attentions</code>). <code>generator_enc_last_hidden_state</code> of shape <code>(batch_size, n_docs * sequence_length, hidden_size)</code> is a sequence of hidden-states at the output of the last layer of the generator&#x2019;s encoder.</p> <p>Used by the (<a href="/docs/transformers/pr_16143/en/model_doc/rag#transformers.RagModel">RagModel</a>) model during decoding.`,name:"encoder_outputs"},{anchor:"transformers.RagModel.forward.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Provide for generation tasks. <code>None</code> by default, construct as per instructions for the generator model you&#x2019;re using with your RAG instance.`,name:"decoder_input_ids"},{anchor:"transformers.RagModel.forward.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>torch.BoolTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.`,name:"decoder_attention_mask"},{anchor:"transformers.RagModel.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>) &#x2014; Tuple consists of two elements: <code>encoder_outputs</code> of the RAG model (see <code>encoder_outputs</code>) and <code>past_key_values</code> of the underlying generator. Can be used to speed up decoding. <code>past_key_values</code> are used in the (<a href="/docs/transformers/pr_16143/en/model_doc/rag#transformers.RagTokenForGeneration">RagTokenForGeneration</a>) model during decoding.`,name:"past_key_values"},{anchor:"transformers.RagModel.forward.doc_scores",description:`<strong>doc_scores</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.n_docs)</code>) &#x2014; Score between each retrieved document embeddings (see <code>retrieved_doc_embeds</code>) and <code>question_encoder_last_hidden_state</code>. If the model is not initialized with a <code>retriever</code>, <code>doc_scores</code> has to be provided to the forward pass.
<code>doc_scores</code> can be computed via <code>question_encoder_last_hidden_state</code> and <code>retrieved_doc_embeds</code>, see examples for more information.`,name:"doc_scores"},{anchor:"transformers.RagModel.forward.context_input_ids",description:`<strong>context_input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size * config.n_docs, config.max_combined_length)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>) &#x2014; Input IDs post-processed from the retrieved documents and the question encoder <code>input_ids</code> by the retriever.</p> <p>If the model is not initialized with a <code>retriever</code>, <code>context_input_ids</code> has to be provided to the forward pass. <code>context_input_ids</code> are returned by <code>__call__()</code> context_attention_mask (<code>torch.LongTensor</code> of shape <code>(batch_size * config.n_docs, config.max_combined_length)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>): Attention mask post-processed from the retrieved documents and the question encoder <code>input_ids</code> by the retriever.</p> <p>If the model is not initialized with a <code>retriever</code>, <code>context_attention_mask</code> has to be provided to the forward pass. <code>context_attention_mask</code> are returned by <code>__call__()</code>`,name:"context_input_ids"},{anchor:"transformers.RagModel.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"},{anchor:"transformers.RagModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.RagModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.RagModel.forward.output_retrieved(bool,",description:`<strong>output_retrieved(<code>bool</code>,</strong> <em>optional</em>) &#x2014; Whether or not to return the <code>retrieved_doc_embeds</code>, <code>retrieved_doc_ids</code>, <code>context_input_ids</code> and <code>context_attention_mask</code>.
See returned tensors for more detail.`,name:"output_retrieved(bool,"},{anchor:"transformers.RagModel.forward.n_docs",description:"<strong>n_docs</strong> (<code>int</code>, <em>optional</em>, defaults to <code>config.n_docs</code>) &#x2014;\nNumber of documents to retrieve and/or number of documents for which to generate an answer.",name:"n_docs"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/model_doc/rag#transformers.models.rag.modeling_rag.RetrievAugLMOutput" >transformers.models.rag.modeling_rag.RetrievAugLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/rag#transformers.RagConfig" >RagConfig</a>) and inputs.</p> <ul> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head. The score is possibly marginalized over all documents for each vocabulary token.</p> </li> <li> <p><strong>doc_scores</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.n_docs)</code>) \u2014 Score between each retrieved document embeddings (see <code>retrieved_doc_embeds</code>) and <code>question_encoder_last_hidden_state</code>.</p> </li> <li> <p><strong>past_key_values</strong> (<code>List[torch.FloatTensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 List of <code>torch.FloatTensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains precomputed hidden-states (key and values in the attention blocks) of the decoder that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>retrieved_doc_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.n_docs, hidden_size)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>) \u2014 Embedded documents retrieved by the retriever. 
Is used with <code>question_encoder_last_hidden_state</code> to compute the <code>doc_scores</code>.</p> </li> <li> <p><strong>retrieved_doc_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, config.n_docs)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>) \u2014 The indexes of the embedded documents retrieved by the retriever.</p> </li> <li> <p><strong>context_input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size * config.n_docs, config.max_combined_length)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>) \u2014 Input ids post-processed from the retrieved documents and the question encoder input_ids by the retriever.</p> </li> <li> <p><strong>context_attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size * config.n_docs, config.max_combined_length)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>) \u2014 Attention mask post-processed from the retrieved documents and the question encoder <code>input_ids</code> by the retriever.</p> </li> <li> <p><strong>question_encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden states at the output of the last layer of the question encoder pooled output of the model.</p> </li> <li> <p><strong>question_enc_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings and one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden states of the question encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>question_enc_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the question encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>generator_enc_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the generator encoder of the model.</p> </li> <li> <p><strong>generator_enc_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings and one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden states of the generator encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>generator_enc_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> 
(one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the generator encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>generator_dec_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings and one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden states of the generator decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>generator_dec_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the generator decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>generator_cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Cross-attentions weights of the generator decoder, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/model_doc/rag#transformers.models.rag.modeling_rag.RetrievAugLMOutput" >transformers.models.rag.modeling_rag.RetrievAugLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),ft=new ge({props:{$$slots:{default:[dT]},$$scope:{ctx:$}}}),mo=new sn({props:{code:`from transformers import RagTokenizer, RagRetriever, RagModel import torch tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-base") retriever = RagRetriever.from_pretrained( "facebook/rag-token-base", index_name="exact", use_dummy_dataset=True ) # initialize with RagRetriever to do everything in one forward call model = RagModel.from_pretrained("facebook/rag-token-base", retriever=retriever) inputs = tokenizer("How many people live in Paris?", return_tensors="pt") outputs = model(input_ids=inputs["input_ids"])`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RagTokenizer, RagRetriever, RagModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RagTokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/rag-token-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>retriever = RagRetriever.from_pretrained( <span class="hljs-meta">... </span> <span class="hljs-string">&quot;facebook/rag-token-base&quot;</span>, index_name=<span class="hljs-string">&quot;exact&quot;</span>, use_dummy_dataset=<span class="hljs-literal">True</span> <span class="hljs-meta">... 
</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># initialize with RagRetriever to do everything in one forward call</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = RagModel.from_pretrained(<span class="hljs-string">&quot;facebook/rag-token-base&quot;</span>, retriever=retriever) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;How many people live in Paris?&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_ids=inputs[<span class="hljs-string">&quot;input_ids&quot;</span>])`}}),go=new _e({}),_o=new M({props:{name:"class transformers.RagSequenceForGeneration",anchor:"transformers.RagSequenceForGeneration",parameters:[{name:"config",val:": typing.Optional[transformers.configuration_utils.PretrainedConfig] = None"},{name:"question_encoder",val:": typing.Optional[transformers.modeling_utils.PreTrainedModel] = None"},{name:"generator",val:": typing.Optional[transformers.modeling_utils.PreTrainedModel] = None"},{name:"retriever",val:": typing.Optional = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/rag/modeling_rag.py#L738",parametersDescription:[{anchor:"transformers.RagSequenceForGeneration.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/rag#transformers.RagConfig">RagConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"},{anchor:"transformers.RagSequenceForGeneration.question_encoder",description:`<strong>question_encoder</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a>) &#x2014; An encoder model compatible with the faiss index encapsulated by the <code>retriever</code>.`,name:"question_encoder"},{anchor:"transformers.RagSequenceForGeneration.generator",description:`<strong>generator</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a>) &#x2014; A seq2seq model used as the generator in the RAG architecture.`,name:"generator"},{anchor:"transformers.RagSequenceForGeneration.retriever",description:`<strong>retriever</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/rag#transformers.RagRetriever">RagRetriever</a>) &#x2014; A retriever class encapsulating a faiss index queried to obtain context documents for current inputs.`,name:"retriever"}]}}),bt=new ge({props:{$$slots:{default:[iT]},$$scope:{ctx:$}}}),To=new M({props:{name:"forward",anchor:"transformers.RagSequenceForGeneration.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"encoder_outputs",val:" = None"},{name:"decoder_input_ids",val:" = None"},{name:"decoder_attention_mask",val:" = None"},{name:"past_key_values",val:" = None"},{name:"context_input_ids",val:" = None"},{name:"context_attention_mask",val:" = None"},{name:"doc_scores",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"output_retrieved",val:" = 
None"},{name:"exclude_bos_score",val:" = None"},{name:"reduce_loss",val:" = None"},{name:"labels",val:" = None"},{name:"n_docs",val:" = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/rag/modeling_rag.py#L767",parametersDescription:[{anchor:"transformers.RagSequenceForGeneration.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. <a href="/docs/transformers/pr_16143/en/model_doc/rag#transformers.RagConfig">RagConfig</a>, used to initialize the model, specifies which generator to use, it also specifies a compatible generator tokenizer. Use that tokenizer class to obtain the indices.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.RagSequenceForGeneration.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.RagSequenceForGeneration.forward.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tuple(tuple(torch.FloatTensor)</code>, <em>optional</em>) &#x2014; Tuple consists of (<code>generator_enc_last_hidden_state</code>, <em>optional</em>: <code>generator_enc_hidden_states</code>, <em>optional</em>: <code>generator_enc_attentions</code>). <code>generator_enc_last_hidden_state</code> of shape <code>(batch_size, n_docs * sequence_length, hidden_size)</code> is a sequence of hidden-states at the output of the last layer of the generator&#x2019;s encoder.</p> <p>Used by the (<a href="/docs/transformers/pr_16143/en/model_doc/rag#transformers.RagModel">RagModel</a>) model during decoding.`,name:"encoder_outputs"},{anchor:"transformers.RagSequenceForGeneration.forward.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Provide for generation tasks. <code>None</code> by default, construct as per instructions for the generator model you&#x2019;re using with your RAG instance.`,name:"decoder_input_ids"},{anchor:"transformers.RagSequenceForGeneration.forward.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>torch.BoolTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.`,name:"decoder_attention_mask"},{anchor:"transformers.RagSequenceForGeneration.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>) &#x2014; Tuple consists of two elements: <code>encoder_outputs</code> of the RAG model (see <code>encoder_outputs</code>) and <code>past_key_values</code> of the underlying generator. Can be used to speed up decoding. 
<code>past_key_values</code> are used in the (<a href="/docs/transformers/pr_16143/en/model_doc/rag#transformers.RagTokenForGeneration">RagTokenForGeneration</a>) model during decoding.`,name:"past_key_values"},{anchor:"transformers.RagSequenceForGeneration.forward.doc_scores",description:`<strong>doc_scores</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.n_docs)</code>) &#x2014; Score between each retrieved document embeddings (see <code>retrieved_doc_embeds</code>) and <code>question_encoder_last_hidden_state</code>. If the model is not initialized with a <code>retriever</code>, <code>doc_scores</code> has to be provided to the forward pass. <code>doc_scores</code> can be computed via <code>question_encoder_last_hidden_state</code> and <code>retrieved_doc_embeds</code>, see examples for more information.`,name:"doc_scores"},{anchor:"transformers.RagSequenceForGeneration.forward.context_input_ids",description:`<strong>context_input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size * config.n_docs, config.max_combined_length)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>) &#x2014; Input IDs post-processed from the retrieved documents and the question encoder <code>input_ids</code> by the retriever.</p> <p>If the model is not initialized with a <code>retriever</code>, <code>context_input_ids</code> has to be provided to the forward pass. <code>context_input_ids</code> are returned by <code>__call__()</code> context_attention_mask (<code>torch.LongTensor</code> of shape <code>(batch_size * config.n_docs, config.max_combined_length)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>): Attention mask post-processed from the retrieved documents and the question encoder <code>input_ids</code> by the retriever.</p> <p>If the model is not initialized with a <code>retriever</code>, <code>context_attention_mask</code> has to be provided to the forward pass. <code>context_attention_mask</code> are returned by <code>__call__()</code>`,name:"context_input_ids"},{anchor:"transformers.RagSequenceForGeneration.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"},{anchor:"transformers.RagSequenceForGeneration.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.RagSequenceForGeneration.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.RagSequenceForGeneration.forward.output_retrieved(bool,",description:`<strong>output_retrieved(<code>bool</code>,</strong> <em>optional</em>) &#x2014; Whether or not to return the <code>retrieved_doc_embeds</code>, <code>retrieved_doc_ids</code>, <code>context_input_ids</code> and <code>context_attention_mask</code>.
See returned tensors for more detail.`,name:"output_retrieved(bool,"},{anchor:"transformers.RagSequenceForGeneration.forward.n_docs",description:"<strong>n_docs</strong> (<code>int</code>, <em>optional</em>, defaults to <code>config.n_docs</code>) &#x2014;\nNumber of documents to retrieve and/or number of documents for which to generate an answer.",name:"n_docs"},{anchor:"transformers.RagSequenceForGeneration.forward.exclude_bos_score",description:`<strong>exclude_bos_score</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Only relevant if <code>labels</code> is passed. If <code>True</code>, the score of the BOS token is disregarded when computing the loss.`,name:"exclude_bos_score"},{anchor:"transformers.RagSequenceForGeneration.forward.reduce_loss",description:`<strong>reduce_loss</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Only relevant if <code>labels</code> is passed. If <code>True</code>, the NLL loss is reduced using the <code>torch.Tensor.sum</code> operation.`,name:"reduce_loss"},{anchor:"transformers.RagSequenceForGeneration.forward.kwargs",description:`<strong>kwargs</strong> (<code>Dict[str, any]</code>, optional, defaults to <em>{}</em>) &#x2014; Legacy dictionary, which is required so that model can use <em>generate()</em> function.`,name:"kwargs"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/model_doc/rag#transformers.models.rag.modeling_rag.RetrievAugLMMarginOutput" >transformers.models.rag.modeling_rag.RetrievAugLMMarginOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/rag#transformers.RagConfig" >RagConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Language modeling loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head. The score is possibly marginalized over all documents for each vocabulary token.</p> </li> <li> <p><strong>doc_scores</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.n_docs)</code>) \u2014 Score between each retrieved document embeddings (see <code>retrieved_doc_embeds</code>) and <code>question_encoder_last_hidden_state</code>.</p> </li> <li> <p><strong>past_key_values</strong> (<code>List[torch.FloatTensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 List of <code>torch.FloatTensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains precomputed hidden-states (key and values in the attention blocks) of the decoder that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>retrieved_doc_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.n_docs, hidden_size)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>) \u2014 Embedded documents retrieved by the retriever. 
Is used with <code>question_encoder_last_hidden_state</code> to compute the <code>doc_scores</code>.</p> </li> <li> <p><strong>retrieved_doc_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, config.n_docs)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>) \u2014 The indexes of the embedded documents retrieved by the retriever.</p> </li> <li> <p><strong>context_input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size * config.n_docs, config.max_combined_length)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>) \u2014 Input ids post-processed from the retrieved documents and the question encoder input_ids by the retriever.</p> </li> <li> <p><strong>context_attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size * config.n_docs, config.max_combined_length)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>) \u2014 Attention mask post-processed from the retrieved documents and the question encoder <code>input_ids</code> by the retriever.</p> </li> <li> <p><strong>question_encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden states at the output of the last layer of the question encoder pooled output of the model.</p> </li> <li> <p><strong>question_enc_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings and one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden states of the question encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>question_enc_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the question encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>generator_enc_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the generator encoder of the model.</p> </li> <li> <p><strong>generator_enc_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings and one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden states of the generator encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>generator_enc_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> 
(one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the generator encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>generator_dec_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings and one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden states of the generator decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>generator_dec_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the generator decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>generator_cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Cross-attentions weights of the generator decoder, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/model_doc/rag#transformers.models.rag.modeling_rag.RetrievAugLMMarginOutput" >transformers.models.rag.modeling_rag.RetrievAugLMMarginOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Tt=new ge({props:{$$slots:{default:[cT]},$$scope:{ctx:$}}}),wo=new sn({props:{code:`from transformers import RagTokenizer, RagRetriever, RagSequenceForGeneration import torch tokenizer = RagTokenizer.from_pretrained("facebook/rag-sequence-nq") retriever = RagRetriever.from_pretrained( "facebook/rag-sequence-nq", index_name="exact", use_dummy_dataset=True ) # initialize with RagRetriever to do everything in one forward call model = RagSequenceForGeneration.from_pretrained("facebook/rag-token-nq", retriever=retriever) inputs = tokenizer("How many people live in Paris?", return_tensors="pt") with tokenizer.as_target_tokenizer(): targets = tokenizer("In Paris, there are 10 million people.", return_tensors="pt") input_ids = inputs["input_ids"] labels = targets["input_ids"] outputs = model(input_ids=input_ids, labels=labels) # or use retriever separately model = RagSequenceForGeneration.from_pretrained("facebook/rag-sequence-nq", use_dummy_dataset=True) # 1. Encode question_hidden_states = model.question_encoder(input_ids)[0] # 2. Retrieve docs_dict = retriever(input_ids.numpy(), question_hidden_states.detach().numpy(), return_tensors="pt") doc_scores = torch.bmm( question_hidden_states.unsqueeze(1), docs_dict["retrieved_doc_embeds"].float().transpose(1, 2) ).squeeze(1) # 3. 
Forward to generator outputs = model( context_input_ids=docs_dict["context_input_ids"], context_attention_mask=docs_dict["context_attention_mask"], doc_scores=doc_scores, decoder_input_ids=labels, )`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RagTokenizer, RagRetriever, RagSequenceForGeneration <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RagTokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/rag-sequence-nq&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>retriever = RagRetriever.from_pretrained( <span class="hljs-meta">... </span> <span class="hljs-string">&quot;facebook/rag-sequence-nq&quot;</span>, index_name=<span class="hljs-string">&quot;exact&quot;</span>, use_dummy_dataset=<span class="hljs-literal">True</span> <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># initialize with RagRetriever to do everything in one forward call</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = RagSequenceForGeneration.from_pretrained(<span class="hljs-string">&quot;facebook/rag-token-nq&quot;</span>, retriever=retriever) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;How many people live in Paris?&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">with</span> tokenizer.as_target_tokenizer(): <span class="hljs-meta">... </span> targets = tokenizer(<span class="hljs-string">&quot;In Paris, there are 10 million people.&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = inputs[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>labels = targets[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_ids=input_ids, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># or use retriever separately</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = RagSequenceForGeneration.from_pretrained(<span class="hljs-string">&quot;facebook/rag-sequence-nq&quot;</span>, use_dummy_dataset=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># 1. Encode</span> <span class="hljs-meta">&gt;&gt;&gt; </span>question_hidden_states = model.question_encoder(input_ids)[<span class="hljs-number">0</span>] <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># 2. Retrieve</span> <span class="hljs-meta">&gt;&gt;&gt; </span>docs_dict = retriever(input_ids.numpy(), question_hidden_states.detach().numpy(), return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>doc_scores = torch.bmm( <span class="hljs-meta">... 
</span> question_hidden_states.unsqueeze(<span class="hljs-number">1</span>), docs_dict[<span class="hljs-string">&quot;retrieved_doc_embeds&quot;</span>].<span class="hljs-built_in">float</span>().transpose(<span class="hljs-number">1</span>, <span class="hljs-number">2</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>).squeeze(<span class="hljs-number">1</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># 3. Forward to generator</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model( <span class="hljs-meta">... </span> context_input_ids=docs_dict[<span class="hljs-string">&quot;context_input_ids&quot;</span>], <span class="hljs-meta">... </span> context_attention_mask=docs_dict[<span class="hljs-string">&quot;context_attention_mask&quot;</span>], <span class="hljs-meta">... </span> doc_scores=doc_scores, <span class="hljs-meta">... </span> decoder_input_ids=labels, <span class="hljs-meta">... </span>)`}}),ko=new M({props:{name:"generate",anchor:"transformers.RagSequenceForGeneration.generate",parameters:[{name:"input_ids",val:": typing.Optional[torch.LongTensor] = None"},{name:"attention_mask",val:": typing.Optional[torch.LongTensor] = None"},{name:"context_input_ids",val:" = None"},{name:"context_attention_mask",val:" = None"},{name:"doc_scores",val:" = None"},{name:"do_deduplication",val:" = None"},{name:"num_return_sequences",val:" = None"},{name:"num_beams",val:" = None"},{name:"n_docs",val:" = None"},{name:"**model_kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/rag/modeling_rag.py#L909",parametersDescription:[{anchor:"transformers.RagSequenceForGeneration.generate.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; The sequence used as a prompt for the generation. If <code>input_ids</code> is not passed, then <code>context_input_ids</code> has to be provided.`,name:"input_ids"},{anchor:"transformers.RagSequenceForGeneration.generate.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.RagSequenceForGeneration.generate.context_input_ids",description:`<strong>context_input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size * config.n_docs, config.max_combined_length)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>) &#x2014; Input IDs post-processed from the retrieved documents and the question encoder input_ids by the retriever.`,name:"context_input_ids"},{anchor:"transformers.RagSequenceForGeneration.generate.context_attention_mask",description:`<strong>context_attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size * config.n_docs, config.max_combined_length)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>) &#x2014; Attention mask post-processed from the retrieved documents and the question encoder <code>input_ids</code> by the retriever.</p> <p>If the model is not initialized with a <code>retriever</code> or <code>input_ids</code> is not given, <code>context_input_ids</code> and <code>context_attention_mask</code> have to be provided to the forward pass. They are returned by <code>__call__()</code>`,name:"context_attention_mask"},{anchor:"transformers.RagSequenceForGeneration.generate.doc_scores",description:`<strong>doc_scores</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.n_docs)</code>) &#x2014; Score between each retrieved document embeddings (see <code>retrieved_doc_embeds</code>) and <code>question_encoder_last_hidden_state</code>.</p> <p>If the model is not initialized with a <code>retriever</code> or <code>input_ids</code> is not given, <code>doc_scores</code> has to be provided to the forward pass. <code>doc_scores</code> are returned by <code>__call__()</code>`,name:"doc_scores"},{anchor:"transformers.RagSequenceForGeneration.generate.do_deduplication",description:`<strong>do_deduplication</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to deduplicate the generations from different context documents for a given input. Has to be set to <code>False</code> if used while training with distributed backend.`,name:"do_deduplication"},{anchor:"transformers.RagSequenceForGeneration.generate.num_return_sequences(int,",description:`<strong>num_return_sequences(<code>int</code>,</strong> <em>optional</em>, defaults to 1) &#x2014; The number of independently computed returned sequences for each element in the batch. Note that this is not the value we pass to the <code>generator</code>&#x2019;s <code>[generate()](/docs/transformers/pr_16143/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.generate)</code> function, where we set <code>num_return_sequences</code> to <code>num_beams</code>.`,name:"num_return_sequences(int,"},{anchor:"transformers.RagSequenceForGeneration.generate.num_beams",description:`<strong>num_beams</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; Number of beams for beam search. 
1 means no beam search.`,name:"num_beams"},{anchor:"transformers.RagSequenceForGeneration.generate.n_docs",description:`<strong>n_docs</strong> (<code>int</code>, <em>optional</em>, defaults to <code>config.n_docs</code>) &#x2014; Number of documents to retrieve and/or number of documents for which to generate an answer. kwargs &#x2014; Additional kwargs will be passed to <a href="/docs/transformers/pr_16143/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.generate">generate()</a>.`,name:"n_docs"}],returnDescription:` <p>The generated sequences. The second dimension (sequence length) is either equal to <code>max_length</code> or shorter if all batches finished early due to the <code>eos_token_id</code>.</p> `,returnType:` <p><code>torch.LongTensor</code> of shape <code>(batch_size * num_return_sequences, sequence_length)</code></p> `}}),yo=new _e({}),xo=new M({props:{name:"class transformers.RagTokenForGeneration",anchor:"transformers.RagTokenForGeneration",parameters:[{name:"config",val:": typing.Optional[transformers.configuration_utils.PretrainedConfig] = None"},{name:"question_encoder",val:": typing.Optional[transformers.modeling_utils.PreTrainedModel] = None"},{name:"generator",val:": typing.Optional[transformers.modeling_utils.PreTrainedModel] = None"},{name:"retriever",val:": typing.Optional = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/rag/modeling_rag.py#L1135",parametersDescription:[{anchor:"transformers.RagTokenForGeneration.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/rag#transformers.RagConfig">RagConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"},{anchor:"transformers.RagTokenForGeneration.question_encoder",description:`<strong>question_encoder</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a>) &#x2014; An encoder model compatible with the faiss index encapsulated by the <code>retriever</code>.`,name:"question_encoder"},{anchor:"transformers.RagTokenForGeneration.generator",description:`<strong>generator</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a>) &#x2014; A seq2seq model used as the generator in the RAG architecture.`,name:"generator"},{anchor:"transformers.RagTokenForGeneration.retriever",description:`<strong>retriever</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/rag#transformers.RagRetriever">RagRetriever</a>) &#x2014; A retriever class encapsulating a faiss index queried to obtain context documents for current inputs.`,name:"retriever"}]}}),qt=new ge({props:{$$slots:{default:[lT]},$$scope:{ctx:$}}}),$o=new M({props:{name:"forward",anchor:"transformers.RagTokenForGeneration.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"encoder_outputs",val:" = None"},{name:"decoder_input_ids",val:" = None"},{name:"decoder_attention_mask",val:" = None"},{name:"past_key_values",val:" = None"},{name:"context_input_ids",val:" = None"},{name:"context_attention_mask",val:" = None"},{name:"doc_scores",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"output_retrieved",val:" = None"},{name:"do_marginalize",val:" = None"},{name:"reduce_loss",val:" = None"},{name:"labels",val:" = None"},{name:"n_docs",val:" = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/rag/modeling_rag.py#L1234",parametersDescription:[{anchor:"transformers.RagTokenForGeneration.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. <a href="/docs/transformers/pr_16143/en/model_doc/rag#transformers.RagConfig">RagConfig</a>, used to initialize the model, specifies which generator to use, it also specifies a compatible generator tokenizer. Use that tokenizer class to obtain the indices.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.RagTokenForGeneration.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.RagTokenForGeneration.forward.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tuple(tuple(torch.FloatTensor)</code>, <em>optional</em>) &#x2014; Tuple consists of (<code>generator_enc_last_hidden_state</code>, <em>optional</em>: <code>generator_enc_hidden_states</code>, <em>optional</em>: <code>generator_enc_attentions</code>). <code>generator_enc_last_hidden_state</code> of shape <code>(batch_size, n_docs * sequence_length, hidden_size)</code> is a sequence of hidden-states at the output of the last layer of the generator&#x2019;s encoder.</p> <p>Used by the (<a href="/docs/transformers/pr_16143/en/model_doc/rag#transformers.RagModel">RagModel</a>) model during decoding.`,name:"encoder_outputs"},{anchor:"transformers.RagTokenForGeneration.forward.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Provide for generation tasks. <code>None</code> by default, construct as per instructions for the generator model you&#x2019;re using with your RAG instance.`,name:"decoder_input_ids"},{anchor:"transformers.RagTokenForGeneration.forward.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>torch.BoolTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.`,name:"decoder_attention_mask"},{anchor:"transformers.RagTokenForGeneration.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>) &#x2014; Tuple consists of two elements: <code>encoder_outputs</code> of the RAG model (see <code>encoder_outputs</code>) and <code>past_key_values</code> of the underlying generator. Can be used to speed up decoding. <code>past_key_values</code> are used in the (<a href="/docs/transformers/pr_16143/en/model_doc/rag#transformers.RagTokenForGeneration">RagTokenForGeneration</a>) model during decoding.`,name:"past_key_values"},{anchor:"transformers.RagTokenForGeneration.forward.doc_scores",description:`<strong>doc_scores</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.n_docs)</code>) &#x2014; Score between each retrieved document embeddings (see <code>retrieved_doc_embeds</code>) and <code>question_encoder_last_hidden_state</code>. If the model is not initialized with a <code>retriever</code>, <code>doc_scores</code> has to be provided to the forward pass.
<code>doc_scores</code> can be computed via <code>question_encoder_last_hidden_state</code> and <code>retrieved_doc_embeds</code>, see examples for more information.`,name:"doc_scores"},{anchor:"transformers.RagTokenForGeneration.forward.context_input_ids",description:`<strong>context_input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size * config.n_docs, config.max_combined_length)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>) &#x2014; Input IDs post-processed from the retrieved documents and the question encoder <code>input_ids</code> by the retriever.</p> <p>If the model is not initialized with a <code>retriever</code>, <code>context_input_ids</code> has to be provided to the forward pass. <code>context_input_ids</code> are returned by <code>__call__()</code> context_attention_mask (<code>torch.LongTensor</code> of shape <code>(batch_size * config.n_docs, config.max_combined_length)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>): Attention mask post-processed from the retrieved documents and the question encoder <code>input_ids</code> by the retriever.</p> <p>If the model is not initialized with a <code>retriever</code>, <code>context_attention_mask</code> has to be provided to the forward pass. <code>context_attention_mask</code> are returned by <code>__call__()</code>`,name:"context_input_ids"},{anchor:"transformers.RagTokenForGeneration.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"},{anchor:"transformers.RagTokenForGeneration.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.RagTokenForGeneration.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.RagTokenForGeneration.forward.output_retrieved(bool,",description:`<strong>output_retrieved(<code>bool</code>,</strong> <em>optional</em>) &#x2014; Whether or not to return the <code>retrieved_doc_embeds</code>, <code>retrieved_doc_ids</code>, <code>context_input_ids</code> and <code>context_attention_mask</code>.
See returned tensors for more detail.`,name:"output_retrieved(bool,"},{anchor:"transformers.RagTokenForGeneration.forward.n_docs",description:"<strong>n_docs</strong> (<code>int</code>, <em>optional</em>, defaults to <code>config.n_docs</code>) &#x2014;\nNumber of documents to retrieve and/or number of documents for which to generate an answer.",name:"n_docs"},{anchor:"transformers.RagTokenForGeneration.forward.do_marginalize",description:`<strong>do_marginalize</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If <code>True</code>, the logits are marginalized over all documents by making use of <code>torch.nn.functional.log_softmax</code>.`,name:"do_marginalize"},{anchor:"transformers.RagTokenForGeneration.forward.reduce_loss",description:`<strong>reduce_loss</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Only relevant if <code>labels</code> is passed. If <code>True</code>, the NLL loss is reduced using the <code>torch.Tensor.sum</code> operation.`,name:"reduce_loss"},{anchor:"transformers.RagTokenForGeneration.forward.kwargs",description:`<strong>kwargs</strong> (<code>Dict[str, any]</code>, optional, defaults to <em>{}</em>) &#x2014; Legacy dictionary, which is required so that model can use <em>generate()</em> function.`,name:"kwargs"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/model_doc/rag#transformers.models.rag.modeling_rag.RetrievAugLMMarginOutput" >transformers.models.rag.modeling_rag.RetrievAugLMMarginOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/rag#transformers.RagConfig" >RagConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Language modeling loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head. The score is possibly marginalized over all documents for each vocabulary token.</p> </li> <li> <p><strong>doc_scores</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.n_docs)</code>) \u2014 Score between each retrieved document embeddings (see <code>retrieved_doc_embeds</code>) and <code>question_encoder_last_hidden_state</code>.</p> </li> <li> <p><strong>past_key_values</strong> (<code>List[torch.FloatTensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 List of <code>torch.FloatTensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains precomputed hidden-states (key and values in the attention blocks) of the decoder that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>retrieved_doc_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.n_docs, hidden_size)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>) \u2014 Embedded documents retrieved by the retriever. 
Is used with <code>question_encoder_last_hidden_state</code> to compute the <code>doc_scores</code>.</p> </li> <li> <p><strong>retrieved_doc_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, config.n_docs)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>) \u2014 The indexes of the embedded documents retrieved by the retriever.</p> </li> <li> <p><strong>context_input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size * config.n_docs, config.max_combined_length)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>) \u2014 Input ids post-processed from the retrieved documents and the question encoder input_ids by the retriever.</p> </li> <li> <p><strong>context_attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size * config.n_docs, config.max_combined_length)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>) \u2014 Attention mask post-processed from the retrieved documents and the question encoder <code>input_ids</code> by the retriever.</p> </li> <li> <p><strong>question_encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden states at the output of the last layer of the question encoder pooled output of the model.</p> </li> <li> <p><strong>question_enc_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings and one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden states of the question encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>question_enc_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the question encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>generator_enc_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the generator encoder of the model.</p> </li> <li> <p><strong>generator_enc_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings and one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden states of the generator encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>generator_enc_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> 
(one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the generator encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>generator_dec_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings and one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden states of the generator decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>generator_dec_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the generator decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>generator_cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Cross-attentions weights of the generator decoder, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/model_doc/rag#transformers.models.rag.modeling_rag.RetrievAugLMMarginOutput" >transformers.models.rag.modeling_rag.RetrievAugLMMarginOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),yt=new ge({props:{$$slots:{default:[pT]},$$scope:{ctx:$}}}),Eo=new sn({props:{code:`from transformers import RagTokenizer, RagRetriever, RagTokenForGeneration import torch tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq") retriever = RagRetriever.from_pretrained( "facebook/rag-token-nq", index_name="exact", use_dummy_dataset=True ) # initialize with RagRetriever to do everything in one forward call model = RagTokenForGeneration.from_pretrained("facebook/rag-token-nq", retriever=retriever) inputs = tokenizer("How many people live in Paris?", return_tensors="pt") with tokenizer.as_target_tokenizer(): targets = tokenizer("In Paris, there are 10 million people.", return_tensors="pt") input_ids = inputs["input_ids"] labels = targets["input_ids"] outputs = model(input_ids=input_ids, labels=labels) # or use retriever separately model = RagTokenForGeneration.from_pretrained("facebook/rag-token-nq", use_dummy_dataset=True) # 1. Encode question_hidden_states = model.question_encoder(input_ids)[0] # 2. Retrieve docs_dict = retriever(input_ids.numpy(), question_hidden_states.detach().numpy(), return_tensors="pt") doc_scores = torch.bmm( question_hidden_states.unsqueeze(1), docs_dict["retrieved_doc_embeds"].float().transpose(1, 2) ).squeeze(1) # 3. 
Forward to generator outputs = model( context_input_ids=docs_dict["context_input_ids"], context_attention_mask=docs_dict["context_attention_mask"], doc_scores=doc_scores, decoder_input_ids=labels, ) # or directly generate generated = model.generate( context_input_ids=docs_dict["context_input_ids"], context_attention_mask=docs_dict["context_attention_mask"], doc_scores=doc_scores, ) generated_string = tokenizer.batch_decode(generated, skip_special_tokens=True)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RagTokenizer, RagRetriever, RagTokenForGeneration <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RagTokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/rag-token-nq&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>retriever = RagRetriever.from_pretrained( <span class="hljs-meta">... </span> <span class="hljs-string">&quot;facebook/rag-token-nq&quot;</span>, index_name=<span class="hljs-string">&quot;exact&quot;</span>, use_dummy_dataset=<span class="hljs-literal">True</span> <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># initialize with RagRetriever to do everything in one forward call</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = RagTokenForGeneration.from_pretrained(<span class="hljs-string">&quot;facebook/rag-token-nq&quot;</span>, retriever=retriever) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;How many people live in Paris?&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">with</span> tokenizer.as_target_tokenizer(): <span class="hljs-meta">... </span> targets = tokenizer(<span class="hljs-string">&quot;In Paris, there are 10 million people.&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = inputs[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>labels = targets[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_ids=input_ids, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># or use retriever separately</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = RagTokenForGeneration.from_pretrained(<span class="hljs-string">&quot;facebook/rag-token-nq&quot;</span>, use_dummy_dataset=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># 1. Encode</span> <span class="hljs-meta">&gt;&gt;&gt; </span>question_hidden_states = model.question_encoder(input_ids)[<span class="hljs-number">0</span>] <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># 2. Retrieve</span> <span class="hljs-meta">&gt;&gt;&gt; </span>docs_dict = retriever(input_ids.numpy(), question_hidden_states.detach().numpy(), return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>doc_scores = torch.bmm( <span class="hljs-meta">... 
</span> question_hidden_states.unsqueeze(<span class="hljs-number">1</span>), docs_dict[<span class="hljs-string">&quot;retrieved_doc_embeds&quot;</span>].<span class="hljs-built_in">float</span>().transpose(<span class="hljs-number">1</span>, <span class="hljs-number">2</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>).squeeze(<span class="hljs-number">1</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># 3. Forward to generator</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model( <span class="hljs-meta">... </span> context_input_ids=docs_dict[<span class="hljs-string">&quot;context_input_ids&quot;</span>], <span class="hljs-meta">... </span> context_attention_mask=docs_dict[<span class="hljs-string">&quot;context_attention_mask&quot;</span>], <span class="hljs-meta">... </span> doc_scores=doc_scores, <span class="hljs-meta">... </span> decoder_input_ids=labels, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># or directly generate</span> <span class="hljs-meta">&gt;&gt;&gt; </span>generated = model.generate( <span class="hljs-meta">... </span> context_input_ids=docs_dict[<span class="hljs-string">&quot;context_input_ids&quot;</span>], <span class="hljs-meta">... </span> context_attention_mask=docs_dict[<span class="hljs-string">&quot;context_attention_mask&quot;</span>], <span class="hljs-meta">... </span> doc_scores=doc_scores, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>generated_string = tokenizer.batch_decode(generated, skip_special_tokens=<span class="hljs-literal">True</span>)`}}),jo=new M({props:{name:"generate",anchor:"transformers.RagTokenForGeneration.generate",parameters:[{name:"input_ids",val:": typing.Optional[torch.LongTensor] = None"},{name:"attention_mask",val:": typing.Optional[torch.LongTensor] = None"},{name:"context_input_ids",val:" = None"},{name:"context_attention_mask",val:" = None"},{name:"doc_scores",val:" = None"},{name:"max_length",val:" = None"},{name:"min_length",val:" = None"},{name:"early_stopping",val:" = None"},{name:"use_cache",val:" = None"},{name:"num_beams",val:" = None"},{name:"num_beam_groups",val:" = None"},{name:"diversity_penalty",val:" = None"},{name:"bos_token_id",val:" = None"},{name:"pad_token_id",val:" = None"},{name:"eos_token_id",val:" = None"},{name:"length_penalty",val:" = None"},{name:"no_repeat_ngram_size",val:" = None"},{name:"encoder_no_repeat_ngram_size",val:" = None"},{name:"repetition_penalty",val:" = None"},{name:"bad_words_ids",val:" = None"},{name:"num_return_sequences",val:" = None"},{name:"decoder_start_token_id",val:" = None"},{name:"n_docs",val:" = None"},{name:"prefix_allowed_tokens_fn",val:": typing.Callable[[int, torch.Tensor], typing.List[int]] = None"},{name:"logits_processor",val:": typing.Optional[transformers.generation_logits_process.LogitsProcessorList] = []"},{name:"stopping_criteria",val:": typing.Optional[transformers.generation_stopping_criteria.StoppingCriteriaList] = []"},{name:"forced_bos_token_id",val:": typing.Optional[int] = None"},{name:"forced_eos_token_id",val:": typing.Optional[int] = None"},{name:"remove_invalid_values",val:": typing.Optional[bool] = None"},{name:"exponential_decay_length_penalty",val:": typing.Union[typing.Tuple[typing.Union[int, float]], NoneType] = 
None"},{name:"**model_kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/rag/modeling_rag.py#L1376",parametersDescription:[{anchor:"transformers.RagTokenForGeneration.generate.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; The sequence used as a prompt for the generation. If <code>input_ids</code> is not passed, then <code>context_input_ids</code> has to be provided.`,name:"input_ids"},{anchor:"transformers.RagTokenForGeneration.generate.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.RagTokenForGeneration.generate.context_input_ids",description:`<strong>context_input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size * config.n_docs, config.max_combined_length)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>) &#x2014; Input IDs post-processed from the retrieved documents and the question encoder <code>input_ids</code> by the retriever.</p> <p>If the model has is not initialized with a <code>retriever</code>, <code>context_input_ids</code> has to be provided to the forward pass. <code>context_input_ids</code> are returned by <code>__call__()</code>`,name:"context_input_ids"},{anchor:"transformers.RagTokenForGeneration.generate.context_attention_mask",description:`<strong>context_attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size * config.n_docs, config.max_combined_length)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>) &#x2014; Attention mask post-processed from the retrieved documents and the question encoder <code>input_ids</code> by the retriever.</p> <p>If the model has is not initialized with a <code>retriever</code>, <code>context_input_ids</code> has to be provided to the forward pass. <code>context_input_ids</code> are returned by <code>__call__()</code>`,name:"context_attention_mask"},{anchor:"transformers.RagTokenForGeneration.generate.doc_scores",description:`<strong>doc_scores</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.n_docs)</code>) &#x2014; Score between each retrieved document embeddings (see <code>retrieved_doc_embeds</code>) and <code>question_encoder_last_hidden_state</code>.</p> <p>If the model has is not initialized with a <code>retriever</code>, <code>context_input_ids</code> has to be provided to the forward pass. 
<code>context_input_ids</code> are returned by <code>__call__()</code>`,name:"doc_scores"},{anchor:"transformers.RagTokenForGeneration.generate.max_length",description:`<strong>max_length</strong> (<code>int</code>, <em>optional</em>, defaults to 20) &#x2014; The maximum length of the sequence to be generated.`,name:"max_length"},{anchor:"transformers.RagTokenForGeneration.generate.min_length",description:`<strong>min_length</strong> (<code>int</code>, <em>optional</em>, defaults to 10) &#x2014; The minimum length of the sequence to be generated.`,name:"min_length"},{anchor:"transformers.RagTokenForGeneration.generate.early_stopping",description:`<strong>early_stopping</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to stop the beam search when at least <code>num_beams</code> sentences are finished per batch or not. use_cache &#x2014; (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>): Whether or not the model should use the past last key/values attentions (if applicable to the model) to speed up decoding.`,name:"early_stopping"},{anchor:"transformers.RagTokenForGeneration.generate.pad_token_id",description:`<strong>pad_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>padding</em> token.`,name:"pad_token_id"},{anchor:"transformers.RagTokenForGeneration.generate.bos_token_id",description:`<strong>bos_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>beginning-of-sequence</em> token.`,name:"bos_token_id"},{anchor:"transformers.RagTokenForGeneration.generate.eos_token_id",description:`<strong>eos_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>end-of-sequence</em> token.`,name:"eos_token_id"},{anchor:"transformers.RagTokenForGeneration.generate.length_penalty",description:`<strong>length_penalty</strong> (<code>float</code>, <em>optional</em>, defaults to 1.0) &#x2014; Exponential penalty to the length. 1.0 means no penalty.</p> <p>Set to values &lt; 1.0 in order to encourage the model to generate shorter sequences, to a value &gt; 1.0 in order to encourage the model to produce longer sequences.`,name:"length_penalty"},{anchor:"transformers.RagTokenForGeneration.generate.no_repeat_ngram_size",description:`<strong>no_repeat_ngram_size</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If set to int &gt; 0, all ngrams of that size can only occur once.`,name:"no_repeat_ngram_size"},{anchor:"transformers.RagTokenForGeneration.generate.encoder_no_repeat_ngram_size",description:`<strong>encoder_no_repeat_ngram_size</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If set to int &gt; 0, all ngrams of that size that occur in the <code>encoder_input_ids</code> cannot occur in the <code>decoder_input_ids</code>.`,name:"encoder_no_repeat_ngram_size"},{anchor:"transformers.RagTokenForGeneration.generate.bad_words_ids(List[int],",description:`<strong>bad_words_ids(<code>List[int]</code>,</strong> <em>optional</em>) &#x2014; List of token ids that are not allowed to be generated. In order to get the tokens of the words that should not appear in the generated text, use <code>tokenizer.encode(bad_word, add_prefix_space=True)</code>.`,name:"bad_words_ids(List[int],"},{anchor:"transformers.RagTokenForGeneration.generate.num_beams",description:`<strong>num_beams</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; Number of beams for beam search. 
1 means no beam search.`,name:"num_beams"},{anchor:"transformers.RagTokenForGeneration.generate.num_beam_groups",description:`<strong>num_beam_groups</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; Number of groups to divide <code>num_beams</code> into in order to ensure diversity among different groups of beams. <a href="https://arxiv.org/pdf/1610.02424.pdf" rel="nofollow">this paper</a> for more details.`,name:"num_beam_groups"},{anchor:"transformers.RagTokenForGeneration.generate.diversity_penalty",description:`<strong>diversity_penalty</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; This value is subtracted from a beam&#x2019;s score if it generates a token same as any beam from other group at a particular time. Note that <code>diversity_penalty</code> is only effective if <code>group beam search</code> is enabled.`,name:"diversity_penalty"},{anchor:"transformers.RagTokenForGeneration.generate.num_return_sequences(int,",description:`<strong>num_return_sequences(<code>int</code>,</strong> <em>optional</em>, defaults to 1) &#x2014; The number of independently computed returned sequences for each element in the batch. Note that this is not the value we pass to the <code>generator</code>&#x2019;s <code>[generate()](/docs/transformers/pr_16143/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.generate) function, where we set </code>num_return_sequences<code>to</code>num_beams<code>. decoder_start_token_id (</code>int\`, <em>optional</em>): If an encoder-decoder model starts decoding with a different token than <em>bos</em>, the id of that token.`,name:"num_return_sequences(int,"},{anchor:"transformers.RagTokenForGeneration.generate.n_docs",description:`<strong>n_docs</strong> (<code>int</code>, <em>optional</em>, defaults to <code>config.n_docs</code>) &#x2014; Number of documents to retrieve and/or number of documents for which to generate an answer. prefix_allowed_tokens_fn &#x2014; (<code>Callable[[int, torch.Tensor], List[int]]</code>, <em>optional</em>): If provided, this function constraints the beam search to allowed tokens only at each step. If not provided no constraint is applied. This function takes 2 arguments <code>inputs_ids</code> and the batch ID <code>batch_id</code>. It has to return a list with the allowed tokens for the next generation step conditioned on the previously generated tokens <code>inputs_ids</code> and the batch ID <code>batch_id</code>. This argument is useful for constrained generation conditioned on the prefix, as described in <a href="https://arxiv.org/abs/2010.00904" rel="nofollow">Autoregressive Entity Retrieval</a>.`,name:"n_docs"},{anchor:"transformers.RagTokenForGeneration.generate.logits_processor",description:`<strong>logits_processor</strong> (<code>LogitsProcessorList</code>, <em>optional</em>) &#x2014; Custom logits processors that complement the default logits processors built from arguments and a model&#x2019;s config. If a logit processor is passed that is already created with the arguments or a model&#x2019;s config an error is thrown.`,name:"logits_processor"},{anchor:"transformers.RagTokenForGeneration.generate.stopping_criteria",description:`<strong>stopping_criteria</strong> (<code>StoppingCriteriaList</code>, <em>optional</em>) &#x2014; Custom stopping criteria that complement the default stopping criteria built from arguments and a model&#x2019;s config. 
If a stopping criteria is passed that is already created with the arguments or a model&#x2019;s config an error is thrown.`,name:"stopping_criteria"},{anchor:"transformers.RagTokenForGeneration.generate.forced_bos_token_id",description:`<strong>forced_bos_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the token to force as the first generated token after the <code>decoder_start_token_id</code>. Useful for multilingual models like <a href="../model_doc/mbart">mBART</a> where the first generated token needs to be the target language token.`,name:"forced_bos_token_id"},{anchor:"transformers.RagTokenForGeneration.generate.forced_eos_token_id",description:`<strong>forced_eos_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the token to force as the last generated token when <code>max_length</code> is reached.`,name:"forced_eos_token_id"},{anchor:"transformers.RagTokenForGeneration.generate.remove_invalid_values",description:`<strong>remove_invalid_values</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to remove possible <em>nan</em> and <em>inf</em> outputs of the model to prevent the generation method to crash. Note that using <code>remove_invalid_values</code> can slow down generation.`,name:"remove_invalid_values"}],returnDescription:` <p>The generated sequences. The second dimension (sequence_length) is either equal to <code>max_length</code> or shorter if all batches finished early due to the <code>eos_token_id</code>.</p> `,returnType:` <p><code>torch.LongTensor</code> of shape <code>(batch_size * num_return_sequences, sequence_length)</code></p> `}}),Mo=new _e({}),Go=new M({props:{name:"class transformers.TFRagModel",anchor:"transformers.TFRagModel",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/rag/modeling_tf_rag.py#L484",parametersDescription:[{anchor:"transformers.TFRagModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/rag#transformers.RagConfig">RagConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"},{anchor:"transformers.TFRagModel.question_encoder",description:`<strong>question_encoder</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; An encoder model compatible with the faiss index encapsulated by the <code>retriever</code>.`,name:"question_encoder"},{anchor:"transformers.TFRagModel.generator",description:`<strong>generator</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; A seq2seq model used as the generator in the RAG architecture.`,name:"generator"},{anchor:"transformers.TFRagModel.retriever",description:`<strong>retriever</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/rag#transformers.RagRetriever">RagRetriever</a>) &#x2014; A retriever class encapsulating a faiss index queried to obtain context documents for current inputs.`,name:"retriever"}]}}),Ft=new ge({props:{$$slots:{default:[hT]},$$scope:{ctx:$}}}),Co=new M({props:{name:"call",anchor:"transformers.TFRagModel.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"encoder_outputs",val:" = None"},{name:"decoder_input_ids",val:" = None"},{name:"decoder_attention_mask",val:" = None"},{name:"past_key_values",val:" = None"},{name:"doc_scores",val:" = None"},{name:"context_input_ids",val:" = None"},{name:"context_attention_mask",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"output_retrieved",val:" = None"},{name:"n_docs",val:" = None"},{name:"return_dict",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/rag/modeling_tf_rag.py#L535",parametersDescription:[{anchor:"transformers.TFRagModel.call.input_ids",description:`<strong>input_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. <a href="/docs/transformers/pr_16143/en/model_doc/rag#transformers.RagConfig">RagConfig</a>, used to initialize the model, specifies which generator to use, it also specifies a compatible generator tokenizer. Use that tokenizer class to obtain the indices.`,name:"input_ids"},{anchor:"transformers.TFRagModel.call.attention_mask",description:`<strong>attention_mask</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFRagModel.call.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tuple(tuple(tf.Tensor)</code>, <em>optional</em>) &#x2014; Tuple consists of (<code>generator_enc_last_hidden_state</code>, <em>optional</em>: <code>generator_enc_hidden_states</code>, <em>optional</em>: <code>generator_enc_attentions</code>). 
<code>generator_enc_last_hidden_state</code> of shape <code>(batch_size, n_docs * sequence_length, hidden_size)</code> is a sequence of hidden-states at the output of the last layer of the generator&#x2019;s encoder.</p> <p>Used by the (<a href="/docs/transformers/pr_16143/en/model_doc/rag#transformers.TFRagModel">TFRagModel</a>) model during decoding.`,name:"encoder_outputs"},{anchor:"transformers.TFRagModel.call.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Provide for generation tasks. <code>None</code> by default, construct as per instructions for the generator model you&#x2019;re using with your RAG instance.`,name:"decoder_input_ids"},{anchor:"transformers.TFRagModel.call.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>torch.BoolTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.`,name:"decoder_attention_mask"},{anchor:"transformers.TFRagModel.call.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(tf.Tensor))</code>) &#x2014; Tuple consists of two elements: <code>encoder_outputs</code> of the RAG model (see <code>encoder_outputs</code>) and <code>past_key_values</code> of the underlying generator. Can be used to speed up decoding. <code>past_key_values</code> are used in the (<a href="/docs/transformers/pr_16143/en/model_doc/rag#transformers.RagTokenForGeneration">RagTokenForGeneration</a>) model during decoding.`,name:"past_key_values"},{anchor:"transformers.TFRagModel.call.doc_scores",description:`<strong>doc_scores</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, config.n_docs)</code>) &#x2014; Score between each retrieved document embeddings (see <code>retrieved_doc_embeds</code>) and <code>question_encoder_last_hidden_state</code>. If the model has is not initialized with a <code>retriever</code> <code>doc_scores</code> has to be provided to the forward pass. <code>doc_scores</code> can be computed via <code>question_encoder_last_hidden_state</code> and <code>retrieved_doc_embeds</code>, see examples for more information.`,name:"doc_scores"},{anchor:"transformers.TFRagModel.call.context_input_ids",description:`<strong>context_input_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size * config.n_docs, config.max_combined_length)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>) &#x2014; Input IDs post-processed from the retrieved documents and the question encoder <code>input_ids</code> by the retriever.</p> <p>If the model has is not initialized with a <code>retriever</code> \`<code>context_input_ids</code> has to be provided to the forward pass. <code>context_input_ids</code> are returned by <code>__call__()</code> context_attention_mask (<code>tf.Tensor</code> of shape <code>(batch_size * config.n_docs, config.max_combined_length)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>): Attention mask post-processed from the retrieved documents and the question encoder <code>input_ids</code> by the retriever.</p> <p>If the model has is not initialized with a <code>retriever</code> <code>context_attention_mask</code> has to be provided to the forward pass. 
<code>context_attention_mask</code> are returned by <code>__call__()</code>`,name:"context_input_ids"},{anchor:"transformers.TFRagModel.call.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"},{anchor:"transformers.TFRagModel.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.TFRagModel.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.TFRagModel.call.output_retrieved(bool,",description:`<strong>output_retrieved(<code>bool</code>,</strong> <em>optional</em>) &#x2014; Whether or not to return the <code>retrieved_doc_embeds</code>, <code>retrieved_doc_ids</code>, <code>context_input_ids</code> and <code>context_attention_mask</code>. See returned tensors for more detail.`,name:"output_retrieved(bool,"},{anchor:"transformers.TFRagModel.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <code>TFRetrievAugLMOutput</code> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.TFRagModel.call.n_docs",description:"<strong>n_docs</strong> (<code>int</code>, <em>optional</em>, defaults to `config.n_docs&#x201C;) &#x2014;\nNumber of documents to retrieve and/or number of documents for which to generate an answer.",name:"n_docs"}],returnDescription:` <p>A <code>transformers.models.rag.modeling_tf_rag.TFRetrievAugLMOutput</code>or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/rag#transformers.RagConfig" >RagConfig</a>) and inputs.</p> <ul> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head. 
The score is possibly marginalized over all documents for each vocabulary token.</p> </li> <li> <p><strong>past_key_values</strong> (<code>List[tf.Tensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 List of <code>tf.Tensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains precomputed hidden-states (key and values in the attention blocks) of the decoder that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>doc_scores</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, config.n_docs)</code>) \u2014 Score between each retrieved document embeddings (see <code>retrieved_doc_embeds</code>) and <code>question_encoder_last_hidden_state</code>.</p> </li> <li> <p><strong>retrieved_doc_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, config.n_docs, hidden_size)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>) \u2014 Embedded documents retrieved by the retriever. Is used with <code>question_encoder_last_hidden_state</code> to compute the <code>doc_scores</code>.</p> </li> <li> <p><strong>retrieved_doc_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, config.n_docs)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>) \u2014 The indexes of the embedded documents retrieved by the retriever.</p> </li> <li> <p><strong>context_input_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size * config.n_docs, config.max_combined_length)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>) \u2014 Input ids post-processed from the retrieved documents and the question encoder input_ids by the retriever.</p> </li> <li> <p><strong>context_attention_mask</strong> (<code>tf.Tensor</code> of shape <code>(batch_size * config.n_docs, config.max_combined_length)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>) \u2014 Attention mask post-processed from the retrieved documents and the question encoder <code>input_ids</code> by the retriever.</p> </li> <li> <p><strong>question_encoder_last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden states at the output of the last layer of the question encoder pooled output of the model.</p> </li> <li> <p><strong>question_enc_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings and one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden states of the question encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>question_enc_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the question encoder, after the attention softmax, used to compute the weighted average in the 
self-attention heads.</p> </li> <li> <p><strong>generator_enc_last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the generator encoder of the model.</p> </li> <li> <p><strong>generator_enc_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings and one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden states of the generator encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>generator_enc_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the generator encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>generator_dec_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings and one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden states of the generator decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>generator_dec_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the generator decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><code>transformers.models.rag.modeling_tf_rag.TFRetrievAugLMOutput</code>or <code>tuple(tf.Tensor)</code></p> `}}),zt=new ge({props:{$$slots:{default:[uT]},$$scope:{ctx:$}}}),So=new sn({props:{code:`from transformers import RagTokenizer, RagRetriever, TFRagModel import torch tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-base") retriever = RagRetriever.from_pretrained( "facebook/rag-token-base", index_name="exact", use_dummy_dataset=True ) # initialize with RagRetriever to do everything in one forward call model = TFRagModel.from_pretrained("facebook/rag-token-base", retriever=retriever, from_pt=True) input_dict = tokenizer.prepare_seq2seq_batch( "How many people live in Paris?", "In Paris, there are 10 million people.", return_tensors="tf" ) input_ids = input_dict["input_ids"] outputs = model(input_ids)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RagTokenizer, RagRetriever, TFRagModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; 
</span>tokenizer = RagTokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/rag-token-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>retriever = RagRetriever.from_pretrained( <span class="hljs-meta">... </span> <span class="hljs-string">&quot;facebook/rag-token-base&quot;</span>, index_name=<span class="hljs-string">&quot;exact&quot;</span>, use_dummy_dataset=<span class="hljs-literal">True</span> <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># initialize with RagRetriever to do everything in one forward call</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFRagModel.from_pretrained(<span class="hljs-string">&quot;facebook/rag-token-base&quot;</span>, retriever=retriever, from_pt=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_dict = tokenizer.prepare_seq2seq_batch( <span class="hljs-meta">... </span> <span class="hljs-string">&quot;How many people live in Paris?&quot;</span>, <span class="hljs-string">&quot;In Paris, there are 10 million people.&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span> <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = input_dict[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_ids)`}}),Do=new _e({}),Lo=new M({props:{name:"class transformers.TFRagSequenceForGeneration",anchor:"transformers.TFRagSequenceForGeneration",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/rag/modeling_tf_rag.py#L1416",parametersDescription:[{anchor:"transformers.TFRagSequenceForGeneration.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/rag#transformers.RagConfig">RagConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"},{anchor:"transformers.TFRagSequenceForGeneration.question_encoder",description:`<strong>question_encoder</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; An encoder model compatible with the faiss index encapsulated by the <code>retriever</code>.`,name:"question_encoder"},{anchor:"transformers.TFRagSequenceForGeneration.generator",description:`<strong>generator</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; A seq2seq model used as the generator in the RAG architecture.`,name:"generator"},{anchor:"transformers.TFRagSequenceForGeneration.retriever",description:`<strong>retriever</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/rag#transformers.RagRetriever">RagRetriever</a>) &#x2014; A retriever class encapsulating a faiss index queried to obtain context documents for current inputs.`,name:"retriever"}]}}),Et=new ge({props:{$$slots:{default:[mT]},$$scope:{ctx:$}}}),Ho=new M({props:{name:"call",anchor:"transformers.TFRagSequenceForGeneration.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"decoder_input_ids",val:" = None"},{name:"decoder_attention_mask",val:" = None"},{name:"encoder_outputs",val:" = None"},{name:"past_key_values",val:" = None"},{name:"doc_scores",val:" = None"},{name:"context_input_ids",val:" = None"},{name:"context_attention_mask",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"output_retrieved",val:" = None"},{name:"n_docs",val:" = None"},{name:"exclude_bos_score",val:" = None"},{name:"labels",val:" = None"},{name:"reduce_loss",val:" = None"},{name:"return_dict",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/rag/modeling_tf_rag.py#L1464",parametersDescription:[{anchor:"transformers.TFRagSequenceForGeneration.call.input_ids",description:`<strong>input_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. <a href="/docs/transformers/pr_16143/en/model_doc/rag#transformers.RagConfig">RagConfig</a>, used to initialize the model, specifies which generator to use, it also specifies a compatible generator tokenizer. Use that tokenizer class to obtain the indices.`,name:"input_ids"},{anchor:"transformers.TFRagSequenceForGeneration.call.attention_mask",description:`<strong>attention_mask</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFRagSequenceForGeneration.call.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tuple(tuple(tf.Tensor)</code>, <em>optional</em>) &#x2014; Tuple consists of (<code>generator_enc_last_hidden_state</code>, <em>optional</em>: <code>generator_enc_hidden_states</code>, <em>optional</em>: <code>generator_enc_attentions</code>). <code>generator_enc_last_hidden_state</code> of shape <code>(batch_size, n_docs * sequence_length, hidden_size)</code> is a sequence of hidden-states at the output of the last layer of the generator&#x2019;s encoder.</p> <p>Used by the (<a href="/docs/transformers/pr_16143/en/model_doc/rag#transformers.TFRagModel">TFRagModel</a>) model during decoding.`,name:"encoder_outputs"},{anchor:"transformers.TFRagSequenceForGeneration.call.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Provide for generation tasks. <code>None</code> by default, construct as per instructions for the generator model you&#x2019;re using with your RAG instance.`,name:"decoder_input_ids"},{anchor:"transformers.TFRagSequenceForGeneration.call.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>torch.BoolTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.`,name:"decoder_attention_mask"},{anchor:"transformers.TFRagSequenceForGeneration.call.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(tf.Tensor))</code>) &#x2014; Tuple consists of two elements: <code>encoder_outputs</code> of the RAG model (see <code>encoder_outputs</code>) and <code>past_key_values</code> of the underlying generator. Can be used to speed up decoding. <code>past_key_values</code> are used in the (<a href="/docs/transformers/pr_16143/en/model_doc/rag#transformers.RagTokenForGeneration">RagTokenForGeneration</a>) model during decoding.`,name:"past_key_values"},{anchor:"transformers.TFRagSequenceForGeneration.call.doc_scores",description:`<strong>doc_scores</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, config.n_docs)</code>) &#x2014; Score between each retrieved document embeddings (see <code>retrieved_doc_embeds</code>) and <code>question_encoder_last_hidden_state</code>. If the model has is not initialized with a <code>retriever</code> <code>doc_scores</code> has to be provided to the forward pass. 
<code>doc_scores</code> can be computed via <code>question_encoder_last_hidden_state</code> and <code>retrieved_doc_embeds</code>, see examples for more information.`,name:"doc_scores"},{anchor:"transformers.TFRagSequenceForGeneration.call.context_input_ids",description:`<strong>context_input_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size * config.n_docs, config.max_combined_length)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>) &#x2014; Input IDs post-processed from the retrieved documents and the question encoder <code>input_ids</code> by the retriever.</p> <p>If the model has is not initialized with a <code>retriever</code> \`<code>context_input_ids</code> has to be provided to the forward pass. <code>context_input_ids</code> are returned by <code>__call__()</code> context_attention_mask (<code>tf.Tensor</code> of shape <code>(batch_size * config.n_docs, config.max_combined_length)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>): Attention mask post-processed from the retrieved documents and the question encoder <code>input_ids</code> by the retriever.</p> <p>If the model has is not initialized with a <code>retriever</code> <code>context_attention_mask</code> has to be provided to the forward pass. <code>context_attention_mask</code> are returned by <code>__call__()</code>`,name:"context_input_ids"},{anchor:"transformers.TFRagSequenceForGeneration.call.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"},{anchor:"transformers.TFRagSequenceForGeneration.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.TFRagSequenceForGeneration.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.TFRagSequenceForGeneration.call.output_retrieved(bool,",description:`<strong>output_retrieved(<code>bool</code>,</strong> <em>optional</em>) &#x2014; Whether or not to return the <code>retrieved_doc_embeds</code>, <code>retrieved_doc_ids</code>, <code>context_input_ids</code> and <code>context_attention_mask</code>. 
See returned tensors for more detail.`,name:"output_retrieved(bool,"},{anchor:"transformers.TFRagSequenceForGeneration.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <code>TFRetrievAugLMOutput</code> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.TFRagSequenceForGeneration.call.n_docs",description:"<strong>n_docs</strong> (<code>int</code>, <em>optional</em>, defaults to `config.n_docs&#x201C;) &#x2014;\nNumber of documents to retrieve and/or number of documents for which to generate an answer.",name:"n_docs"},{anchor:"transformers.TFRagSequenceForGeneration.call.exclude_bos_score",description:`<strong>exclude_bos_score</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Only relevant if <code>labels</code> is passed. If <code>True</code>, the score of the BOS token is disregarded when computing the loss.`,name:"exclude_bos_score"},{anchor:"transformers.TFRagSequenceForGeneration.call.labels",description:`<strong>labels</strong> (<code>tf.Tensor</code> or <code>np.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the cross entropy classification loss according to Rag-Sequence model formulation See <a href="https://arxiv.org/pdf/2005.11401.pdf" rel="nofollow">https://arxiv.org/pdf/2005.11401.pdf</a> Section 2.1 for details about Rag-Sequence formulation. Indices should be in <code>[0, ..., config.vocab_size - 1]</code>.`,name:"labels"},{anchor:"transformers.TFRagSequenceForGeneration.call.reduce_loss",description:`<strong>reduce_loss</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Only relevant if <code>labels</code> is passed. If <code>True</code>, the NLL loss is reduced using the <code>tf.Tensor.sum</code> operation.`,name:"reduce_loss"},{anchor:"transformers.TFRagSequenceForGeneration.call.kwargs",description:`<strong>kwargs</strong> (<code>Dict[str, any]</code>, optional, defaults to <em>{}</em>) &#x2014; Legacy dictionary, which is required so that model can use <em>generate()</em> function.`,name:"kwargs"}],returnDescription:` <p>A <code>transformers.models.rag.modeling_tf_rag.TFRetrievAugLMMarginOutput</code>or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/rag#transformers.RagConfig" >RagConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Language modeling loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head. 
The score is possibly marginalized over all documents for each vocabulary token.</p> </li> <li> <p><strong>past_key_values</strong> (<code>List[tf.Tensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 List of <code>tf.Tensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains precomputed hidden-states (key and values in the attention blocks) of the decoder that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>doc_scores</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, config.n_docs)</code>) \u2014 Score between each retrieved document embeddings (see <code>retrieved_doc_embeds</code>) and <code>question_encoder_last_hidden_state</code>.</p> </li> <li> <p><strong>retrieved_doc_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, config.n_docs, hidden_size)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>) \u2014 Embedded documents retrieved by the retriever. Is used with <code>question_encoder_last_hidden_state</code> to compute the <code>doc_scores</code>.</p> </li> <li> <p><strong>retrieved_doc_ids</strong> (<code>tf.Tensor</code> (int32) of shape <code>(batch_size, config.n_docs)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>) \u2014 The indexes of the embedded documents retrieved by the retriever.</p> </li> <li> <p><strong>context_input_ids</strong> (<code>tf.Tensor</code>(int32) of shape <code>(batch_size * config.n_docs, config.max_combined_length)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>) \u2014 Input ids post-processed from the retrieved documents and the question encoder input_ids by the retriever.</p> </li> <li> <p><strong>context_attention_mask</strong> (<code>tf.Tensor</code> (int32) of shape <code>(batch_size * config.n_docs, config.max_combined_length)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>) \u2014 Attention mask post-processed from the retrieved documents and the question encoder <code>input_ids</code> by the retriever.</p> </li> <li> <p><strong>question_encoder_last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden states at the output of the last layer of the question encoder pooled output of the model.</p> </li> <li> <p><strong>question_enc_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings and one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden states of the question encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>question_enc_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the question encoder, after the attention softmax, used to compute the 
weighted average in the self-attention heads.</p> </li> <li> <p><strong>generator_enc_last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the generator encoder of the model.</p> </li> <li> <p><strong>generator_enc_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings and one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden states of the generator encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>generator_enc_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the generator encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>generator_dec_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings and one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden states of the generator decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>generator_dec_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the generator decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><code>transformers.models.rag.modeling_tf_rag.TFRetrievAugLMMarginOutput</code>or <code>tuple(tf.Tensor)</code></p> `}}),jt=new ge({props:{$$slots:{default:[gT]},$$scope:{ctx:$}}}),Bo=new sn({props:{code:`from transformers import RagTokenizer, RagRetriever, TFRagSequenceForGeneration tokenizer = RagTokenizer.from_pretrained("facebook/rag-sequence-nq") retriever = RagRetriever.from_pretrained( "facebook/rag-sequence-nq", index_name="exact", use_dummy_dataset=True ) # initialize with RagRetriever to do everything in one forward call model = TFRagSequenceForGeneration.from_pretrained( "facebook/rag-sequence-nq", retriever=retriever, from_pt=True ) input_dict = tokenizer.prepare_seq2seq_batch( "How many people live in Paris?", "In Paris, there are 10 million people.", return_tensors="tf" ) outputs = model(input_dict, output_retrieved=True) # or use retriever separately # 1. Encode input_ids = input_dict["input_ids"] question_hidden_states = model.question_encoder(input_ids)[0] # 2. 
Retrieve docs_dict = retriever(input_ids.numpy(), question_hidden_states.numpy(), return_tensors="tf") doc_scores = tf.squeeze( tf.matmul( tf.expand_dims(question_hidden_states, axis=1), docs_dict["retrieved_doc_embeds"], transpose_b=True ), axis=1, ) # 3. Forward to generator outputs = model( inputs=None, context_input_ids=docs_dict["context_input_ids"], context_attention_mask=docs_dict["context_attention_mask"], doc_scores=doc_scores, decoder_input_ids=input_dict["labels"], ) # or directly generate generated = model.generate( context_input_ids=docs_dict["context_input_ids"], context_attention_mask=docs_dict["context_attention_mask"], doc_scores=doc_scores, ) generated_string = tokenizer.batch_decode(generated, skip_special_tokens=True)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RagTokenizer, RagRetriever, TFRagSequenceForGeneration <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RagTokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/rag-sequence-nq&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>retriever = RagRetriever.from_pretrained( <span class="hljs-meta">... </span> <span class="hljs-string">&quot;facebook/rag-sequence-nq&quot;</span>, index_name=<span class="hljs-string">&quot;exact&quot;</span>, use_dummy_dataset=<span class="hljs-literal">True</span> <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># initialize with RagRetriever to do everything in one forward call</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFRagSequenceForGeneration.from_pretrained( <span class="hljs-meta">... </span> <span class="hljs-string">&quot;facebook/rag-sequence-nq&quot;</span>, retriever=retriever, from_pt=<span class="hljs-literal">True</span> <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_dict = tokenizer.prepare_seq2seq_batch( <span class="hljs-meta">... </span> <span class="hljs-string">&quot;How many people live in Paris?&quot;</span>, <span class="hljs-string">&quot;In Paris, there are 10 million people.&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span> <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_dict, output_retrieved=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># or use retriever separately</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># 1. Encode</span> <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = input_dict[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>question_hidden_states = model.question_encoder(input_ids)[<span class="hljs-number">0</span>] <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># 2. Retrieve</span> <span class="hljs-meta">&gt;&gt;&gt; </span>docs_dict = retriever(input_ids.numpy(), question_hidden_states.numpy(), return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>doc_scores = tf.squeeze( <span class="hljs-meta">... </span> tf.matmul( <span class="hljs-meta">... </span> tf.expand_dims(question_hidden_states, axis=<span class="hljs-number">1</span>), docs_dict[<span class="hljs-string">&quot;retrieved_doc_embeds&quot;</span>], transpose_b=<span class="hljs-literal">True</span> <span class="hljs-meta">... 
</span> ), <span class="hljs-meta">... </span> axis=<span class="hljs-number">1</span>, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># 3. Forward to generator</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model( <span class="hljs-meta">... </span> inputs=<span class="hljs-literal">None</span>, <span class="hljs-meta">... </span> context_input_ids=docs_dict[<span class="hljs-string">&quot;context_input_ids&quot;</span>], <span class="hljs-meta">... </span> context_attention_mask=docs_dict[<span class="hljs-string">&quot;context_attention_mask&quot;</span>], <span class="hljs-meta">... </span> doc_scores=doc_scores, <span class="hljs-meta">... </span> decoder_input_ids=input_dict[<span class="hljs-string">&quot;labels&quot;</span>], <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># or directly generate</span> <span class="hljs-meta">&gt;&gt;&gt; </span>generated = model.generate( <span class="hljs-meta">... </span> context_input_ids=docs_dict[<span class="hljs-string">&quot;context_input_ids&quot;</span>], <span class="hljs-meta">... </span> context_attention_mask=docs_dict[<span class="hljs-string">&quot;context_attention_mask&quot;</span>], <span class="hljs-meta">... </span> doc_scores=doc_scores, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>generated_string = tokenizer.batch_decode(generated, skip_special_tokens=<span class="hljs-literal">True</span>)`}}),Qo=new M({props:{name:"generate",anchor:"transformers.TFRagSequenceForGeneration.generate",parameters:[{name:"input_ids",val:": typing.Optional[tensorflow.python.framework.ops.Tensor] = None"},{name:"attention_mask",val:": typing.Optional[tensorflow.python.framework.ops.Tensor] = None"},{name:"context_input_ids",val:" = None"},{name:"context_attention_mask",val:" = None"},{name:"doc_scores",val:" = None"},{name:"do_deduplication",val:" = None"},{name:"num_return_sequences",val:" = None"},{name:"num_beams",val:" = None"},{name:"n_docs",val:" = None"},{name:"**model_kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/rag/modeling_tf_rag.py#L1722",parametersDescription:[{anchor:"transformers.TFRagSequenceForGeneration.generate.input_ids",description:`<strong>input_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; The sequence used as a prompt for the generation. If <code>input_ids</code> is not passed, then <code>context_input_ids</code> has to be provided.`,name:"input_ids"},{anchor:"transformers.TFRagSequenceForGeneration.generate.attention_mask",description:`<strong>attention_mask</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>: - 1 for tokens that are <strong>not masked</strong>, - 0 for tokens that are <strong>masked</strong>. 
<a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFRagSequenceForGeneration.generate.context_input_ids",description:`<strong>context_input_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size * config.n_docs, config.max_combined_length)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>) &#x2014; Input IDs post-processed from the retrieved documents and the question encoder input_ids by the retriever.`,name:"context_input_ids"},{anchor:"transformers.TFRagSequenceForGeneration.generate.context_attention_mask",description:`<strong>context_attention_mask</strong> (<code>tf.Tensor</code> of shape <code>(batch_size * config.n_docs, config.max_combined_length)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>) &#x2014; Attention mask post-processed from the retrieved documents and the question encoder <code>input_ids</code> by the retriever. If the model has is not initialized with a <code>retriever</code> or <code>input_ids</code> is not given, <code>context_input_ids</code> and <code>context_attention_mask</code> have to be provided to the forward pass. They are returned by <code>__call__()</code>`,name:"context_attention_mask"},{anchor:"transformers.TFRagSequenceForGeneration.generate.doc_scores",description:`<strong>doc_scores</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, config.n_docs)</code>) &#x2014; Score between each retrieved document embeddings (see <code>retrieved_doc_embeds</code>) and <code>question_encoder_last_hidden_state</code>. If the model has is not initialized with a <code>retriever</code> or <code>input_ids</code> is not given, <code>doc_scores</code> has to be provided to the forward pass. <code>doc_scores</code> are returned by <code>__call__()</code>`,name:"doc_scores"},{anchor:"transformers.TFRagSequenceForGeneration.generate.do_deduplication",description:`<strong>do_deduplication</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to deduplicate the generations from different context documents for a given input. Has to be set to <code>False</code> if used while training with distributed backend.`,name:"do_deduplication"},{anchor:"transformers.TFRagSequenceForGeneration.generate.num_return_sequences(int,",description:`<strong>num_return_sequences(<code>int</code>,</strong> <em>optional</em>, defaults to 1) &#x2014; The number of independently computed returned sequences for each element in the batch. Note that this is not the value we pass to the <code>generator</code>&#x2019;s <code>[generate()](/docs/transformers/pr_16143/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.generate)</code> function, where we set <code>num_return_sequences</code> to <code>num_beams</code>.`,name:"num_return_sequences(int,"},{anchor:"transformers.TFRagSequenceForGeneration.generate.num_beams",description:`<strong>num_beams</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; Number of beams for beam search. 1 means no beam search.`,name:"num_beams"},{anchor:"transformers.TFRagSequenceForGeneration.generate.n_docs",description:`<strong>n_docs</strong> (<code>int</code>, <em>optional</em>, defaults to <code>config.n_docs</code>) &#x2014; Number of documents to retrieve and/or number of documents for which to generate an answer. 
kwargs &#x2014; Additional kwargs will be passed to <a href="/docs/transformers/pr_16143/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.generate">generate()</a>`,name:"n_docs"}],returnDescription:` <p>The generated sequences. The second dimension (sequence length) is either equal to <code>max_length</code> or shorter if all batches finished early due to the <code>eos_token_id</code>.</p> `,returnType:` <p><code>tf.Tensor</code> of shape <code>(batch_size * num_return_sequences, sequence_length)</code></p> `}}),Vo=new _e({}),Ko=new M({props:{name:"class transformers.TFRagTokenForGeneration",anchor:"transformers.TFRagTokenForGeneration",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/rag/modeling_tf_rag.py#L752",parametersDescription:[{anchor:"transformers.TFRagTokenForGeneration.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/rag#transformers.RagConfig">RagConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"},{anchor:"transformers.TFRagTokenForGeneration.question_encoder",description:`<strong>question_encoder</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; An encoder model compatible with the faiss index encapsulated by the <code>retriever</code>.`,name:"question_encoder"},{anchor:"transformers.TFRagTokenForGeneration.generator",description:`<strong>generator</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; A seq2seq model used as the generator in the RAG architecture.`,name:"generator"},{anchor:"transformers.TFRagTokenForGeneration.retriever",description:`<strong>retriever</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/rag#transformers.RagRetriever">RagRetriever</a>) &#x2014; A retriever class encapsulating a faiss index queried to obtain context documents for current inputs.`,name:"retriever"}]}}),At=new ge({props:{$$slots:{default:[_T]},$$scope:{ctx:$}}}),Zo=new M({props:{name:"call",anchor:"transformers.TFRagTokenForGeneration.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"decoder_input_ids",val:" = None"},{name:"decoder_attention_mask",val:" = None"},{name:"encoder_outputs",val:" = None"},{name:"past_key_values",val:" = None"},{name:"doc_scores",val:" = None"},{name:"context_input_ids",val:" = None"},{name:"context_attention_mask",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"output_retrieved",val:" = None"},{name:"n_docs",val:" = None"},{name:"do_marginalize",val:" = None"},{name:"labels",val:" = None"},{name:"reduce_loss",val:" = None"},{name:"return_dict",val:" = None"},{name:"training",val:" = 
False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/rag/modeling_tf_rag.py#L858",parametersDescription:[{anchor:"transformers.TFRagTokenForGeneration.call.input_ids",description:`<strong>input_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. <a href="/docs/transformers/pr_16143/en/model_doc/rag#transformers.RagConfig">RagConfig</a>, used to initialize the model, specifies which generator to use, it also specifies a compatible generator tokenizer. Use that tokenizer class to obtain the indices.`,name:"input_ids"},{anchor:"transformers.TFRagTokenForGeneration.call.attention_mask",description:`<strong>attention_mask</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFRagTokenForGeneration.call.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tuple(tuple(tf.Tensor)</code>, <em>optional</em>) &#x2014; Tuple consists of (<code>generator_enc_last_hidden_state</code>, <em>optional</em>: <code>generator_enc_hidden_states</code>, <em>optional</em>: <code>generator_enc_attentions</code>). <code>generator_enc_last_hidden_state</code> of shape <code>(batch_size, n_docs * sequence_length, hidden_size)</code> is a sequence of hidden-states at the output of the last layer of the generator&#x2019;s encoder.</p> <p>Used by the (<a href="/docs/transformers/pr_16143/en/model_doc/rag#transformers.TFRagModel">TFRagModel</a>) model during decoding.`,name:"encoder_outputs"},{anchor:"transformers.TFRagTokenForGeneration.call.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Provide for generation tasks. <code>None</code> by default, construct as per instructions for the generator model you&#x2019;re using with your RAG instance.`,name:"decoder_input_ids"},{anchor:"transformers.TFRagTokenForGeneration.call.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>torch.BoolTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.`,name:"decoder_attention_mask"},{anchor:"transformers.TFRagTokenForGeneration.call.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(tf.Tensor))</code>) &#x2014; Tuple consists of two elements: <code>encoder_outputs</code> of the RAG model (see <code>encoder_outputs</code>) and <code>past_key_values</code> of the underlying generator. Can be used to speed up decoding. 
<code>past_key_values</code> are used in the (<a href="/docs/transformers/pr_16143/en/model_doc/rag#transformers.RagTokenForGeneration">RagTokenForGeneration</a>) model during decoding.`,name:"past_key_values"},{anchor:"transformers.TFRagTokenForGeneration.call.doc_scores",description:`<strong>doc_scores</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, config.n_docs)</code>) &#x2014; Score between each retrieved document embeddings (see <code>retrieved_doc_embeds</code>) and <code>question_encoder_last_hidden_state</code>. If the model is not initialized with a <code>retriever</code>, <code>doc_scores</code> has to be provided to the forward pass. <code>doc_scores</code> can be computed via <code>question_encoder_last_hidden_state</code> and <code>retrieved_doc_embeds</code>, see examples for more information.`,name:"doc_scores"},{anchor:"transformers.TFRagTokenForGeneration.call.context_input_ids",description:`<strong>context_input_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size * config.n_docs, config.max_combined_length)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>) &#x2014; Input IDs post-processed from the retrieved documents and the question encoder <code>input_ids</code> by the retriever.</p> <p>If the model is not initialized with a <code>retriever</code>, <code>context_input_ids</code> has to be provided to the forward pass. <code>context_input_ids</code> are returned by <code>__call__()</code> context_attention_mask (<code>tf.Tensor</code> of shape <code>(batch_size * config.n_docs, config.max_combined_length)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>): Attention mask post-processed from the retrieved documents and the question encoder <code>input_ids</code> by the retriever.</p> <p>If the model is not initialized with a <code>retriever</code>, <code>context_attention_mask</code> has to be provided to the forward pass. <code>context_attention_mask</code> are returned by <code>__call__()</code>`,name:"context_input_ids"},{anchor:"transformers.TFRagTokenForGeneration.call.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"},{anchor:"transformers.TFRagTokenForGeneration.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.TFRagTokenForGeneration.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.TFRagTokenForGeneration.call.output_retrieved(bool,",description:`<strong>output_retrieved(<code>bool</code>,</strong> <em>optional</em>) &#x2014; Whether or not to return the <code>retrieved_doc_embeds</code>, <code>retrieved_doc_ids</code>, <code>context_input_ids</code> and <code>context_attention_mask</code>. 
See returned tensors for more detail.`,name:"output_retrieved(bool,"},{anchor:"transformers.TFRagTokenForGeneration.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <code>TFRetrievAugLMOutput</code> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.TFRagTokenForGeneration.call.n_docs",description:"<strong>n_docs</strong> (<code>int</code>, <em>optional</em>, defaults to <code>config.n_docs</code>) &#x2014;\nNumber of documents to retrieve and/or number of documents for which to generate an answer.",name:"n_docs"},{anchor:"transformers.TFRagTokenForGeneration.call.do_marginalize",description:`<strong>do_marginalize</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If <code>True</code>, the logits are marginalized over all documents by making use of <code>tf.nn.log_softmax</code>.`,name:"do_marginalize"},{anchor:"transformers.TFRagTokenForGeneration.call.labels",description:`<strong>labels</strong> (<code>tf.Tensor</code> or <code>np.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the cross entropy classification loss according to the Rag-Token model formulation. See <a href="https://arxiv.org/pdf/2005.11401.pdf" rel="nofollow">https://arxiv.org/pdf/2005.11401.pdf</a> Section 2.1 for details about the Rag-Token formulation. Indices should be in <code>[0, ..., config.vocab_size - 1]</code>.`,name:"labels"},{anchor:"transformers.TFRagTokenForGeneration.call.reduce_loss",description:`<strong>reduce_loss</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Only relevant if <code>labels</code> is passed. If <code>True</code>, the NLL loss is reduced using the <code>tf.Tensor.sum</code> operation.`,name:"reduce_loss"},{anchor:"transformers.TFRagTokenForGeneration.call.kwargs",description:`<strong>kwargs</strong> (<code>Dict[str, any]</code>, <em>optional</em>, defaults to <em>{}</em>) &#x2014; Legacy dictionary, which is required so that the model can use the <em>generate()</em> function.`,name:"kwargs"}],returnDescription:` <p>A <code>transformers.models.rag.modeling_tf_rag.TFRetrievAugLMMarginOutput</code> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/rag#transformers.RagConfig" >RagConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Language modeling loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head. 
The score is possibly marginalized over all documents for each vocabulary token.</p> </li> <li> <p><strong>past_key_values</strong> (<code>List[tf.Tensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 List of <code>tf.Tensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains precomputed hidden-states (key and values in the attention blocks) of the decoder that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>doc_scores</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, config.n_docs)</code>) \u2014 Score between each retrieved document embeddings (see <code>retrieved_doc_embeds</code>) and <code>question_encoder_last_hidden_state</code>.</p> </li> <li> <p><strong>retrieved_doc_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, config.n_docs, hidden_size)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>) \u2014 Embedded documents retrieved by the retriever. Is used with <code>question_encoder_last_hidden_state</code> to compute the <code>doc_scores</code>.</p> </li> <li> <p><strong>retrieved_doc_ids</strong> (<code>tf.Tensor</code> (int32) of shape <code>(batch_size, config.n_docs)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>) \u2014 The indexes of the embedded documents retrieved by the retriever.</p> </li> <li> <p><strong>context_input_ids</strong> (<code>tf.Tensor</code>(int32) of shape <code>(batch_size * config.n_docs, config.max_combined_length)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>) \u2014 Input ids post-processed from the retrieved documents and the question encoder input_ids by the retriever.</p> </li> <li> <p><strong>context_attention_mask</strong> (<code>tf.Tensor</code> (int32) of shape <code>(batch_size * config.n_docs, config.max_combined_length)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>) \u2014 Attention mask post-processed from the retrieved documents and the question encoder <code>input_ids</code> by the retriever.</p> </li> <li> <p><strong>question_encoder_last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden states at the output of the last layer of the question encoder pooled output of the model.</p> </li> <li> <p><strong>question_enc_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings and one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden states of the question encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>question_enc_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the question encoder, after the attention softmax, used to compute the 
weighted average in the self-attention heads.</p> </li> <li> <p><strong>generator_enc_last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the generator encoder of the model.</p> </li> <li> <p><strong>generator_enc_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings and one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden states of the generator encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>generator_enc_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the generator encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>generator_dec_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings and one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden states of the generator decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>generator_dec_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the generator decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><code>transformers.models.rag.modeling_tf_rag.TFRetrievAugLMMarginOutput</code>or <code>tuple(tf.Tensor)</code></p> `}}),Nt=new ge({props:{$$slots:{default:[fT]},$$scope:{ctx:$}}}),en=new sn({props:{code:`import tensorflow as tf from transformers import RagTokenizer, RagRetriever, TFRagTokenForGeneration tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq") retriever = RagRetriever.from_pretrained( "facebook/rag-token-nq", index_name="exact", use_dummy_dataset=True ) # initialize with RagRetriever to do everything in one forward call model = TFRagTokenForGeneration.from_pretrained("facebook/rag-token-nq", retriever=retriever, from_pt=True) input_dict = tokenizer.prepare_seq2seq_batch( "How many people live in Paris?", "In Paris, there are 10 million people.", return_tensors="tf" ) outputs = model(input_dict, output_retrieved=True) # or use retriever separately # 1. Encode input_ids = input_dict["input_ids"] question_hidden_states = model.question_encoder(input_ids)[0] # 2. 
Retrieve docs_dict = retriever(input_ids.numpy(), question_hidden_states.numpy(), return_tensors="tf") doc_scores = tf.squeeze( tf.matmul( tf.expand_dims(question_hidden_states, axis=1), docs_dict["retrieved_doc_embeds"], transpose_b=True ), axis=1, ) # 3. Forward to generator outputs = model( inputs=None, context_input_ids=docs_dict["context_input_ids"], context_attention_mask=docs_dict["context_attention_mask"], doc_scores=doc_scores, decoder_input_ids=input_dict["labels"], ) # or directly generate generated = model.generate( context_input_ids=docs_dict["context_input_ids"], context_attention_mask=docs_dict["context_attention_mask"], doc_scores=doc_scores, ) generated_string = tokenizer.batch_decode(generated, skip_special_tokens=True)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RagTokenizer, RagRetriever, TFRagTokenForGeneration <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RagTokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/rag-token-nq&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>retriever = RagRetriever.from_pretrained( <span class="hljs-meta">... </span> <span class="hljs-string">&quot;facebook/rag-token-nq&quot;</span>, index_name=<span class="hljs-string">&quot;exact&quot;</span>, use_dummy_dataset=<span class="hljs-literal">True</span> <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># initialize with RagRetriever to do everything in one forward call</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFRagTokenForGeneration.from_pretrained(<span class="hljs-string">&quot;facebook/rag-token-nq&quot;</span>, retriever=retriever, from_pt=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_dict = tokenizer.prepare_seq2seq_batch( <span class="hljs-meta">... </span> <span class="hljs-string">&quot;How many people live in Paris?&quot;</span>, <span class="hljs-string">&quot;In Paris, there are 10 million people.&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span> <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_dict, output_retrieved=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># or use retriever separately</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># 1. Encode</span> <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = input_dict[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>question_hidden_states = model.question_encoder(input_ids)[<span class="hljs-number">0</span>] <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># 2. Retrieve</span> <span class="hljs-meta">&gt;&gt;&gt; </span>docs_dict = retriever(input_ids.numpy(), question_hidden_states.numpy(), return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>doc_scores = tf.squeeze( <span class="hljs-meta">... </span> tf.matmul( <span class="hljs-meta">... 
</span> tf.expand_dims(question_hidden_states, axis=<span class="hljs-number">1</span>), docs_dict[<span class="hljs-string">&quot;retrieved_doc_embeds&quot;</span>], transpose_b=<span class="hljs-literal">True</span> <span class="hljs-meta">... </span> ), <span class="hljs-meta">... </span> axis=<span class="hljs-number">1</span>, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># 3. Forward to generator</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model( <span class="hljs-meta">... </span> inputs=<span class="hljs-literal">None</span>, <span class="hljs-meta">... </span> context_input_ids=docs_dict[<span class="hljs-string">&quot;context_input_ids&quot;</span>], <span class="hljs-meta">... </span> context_attention_mask=docs_dict[<span class="hljs-string">&quot;context_attention_mask&quot;</span>], <span class="hljs-meta">... </span> doc_scores=doc_scores, <span class="hljs-meta">... </span> decoder_input_ids=input_dict[<span class="hljs-string">&quot;labels&quot;</span>], <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># or directly generate</span> <span class="hljs-meta">&gt;&gt;&gt; </span>generated = model.generate( <span class="hljs-meta">... </span> context_input_ids=docs_dict[<span class="hljs-string">&quot;context_input_ids&quot;</span>], <span class="hljs-meta">... </span> context_attention_mask=docs_dict[<span class="hljs-string">&quot;context_attention_mask&quot;</span>], <span class="hljs-meta">... </span> doc_scores=doc_scores, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>generated_string = tokenizer.batch_decode(generated, skip_special_tokens=<span class="hljs-literal">True</span>)`}}),tn=new M({props:{name:"generate",anchor:"transformers.TFRagTokenForGeneration.generate",parameters:[{name:"input_ids",val:": typing.Optional[tensorflow.python.framework.ops.Tensor] = None"},{name:"attention_mask",val:": typing.Optional[tensorflow.python.framework.ops.Tensor] = None"},{name:"context_input_ids",val:" = None"},{name:"context_attention_mask",val:" = None"},{name:"doc_scores",val:" = None"},{name:"max_length",val:" = None"},{name:"min_length",val:" = None"},{name:"early_stopping",val:" = None"},{name:"use_cache",val:" = None"},{name:"num_beams",val:" = None"},{name:"bos_token_id",val:" = None"},{name:"pad_token_id",val:" = None"},{name:"eos_token_id",val:" = None"},{name:"length_penalty",val:" = None"},{name:"no_repeat_ngram_size",val:" = None"},{name:"bad_words_ids",val:" = None"},{name:"num_return_sequences",val:" = None"},{name:"decoder_start_token_id",val:" = None"},{name:"n_docs",val:" = None"},{name:"output_scores",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict_in_generate",val:" = None"},{name:"**model_kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/rag/modeling_tf_rag.py#L1037",parametersDescription:[{anchor:"transformers.TFRagTokenForGeneration.generate.input_ids",description:`<strong>input_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; The sequence used as a prompt for the generation. 
If <code>input_ids</code> is not passed, then <code>context_input_ids</code> has to be provided.`,name:"input_ids"},{anchor:"transformers.TFRagTokenForGeneration.generate.attention_mask",description:`<strong>attention_mask</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFRagTokenForGeneration.generate.context_input_ids",description:`<strong>context_input_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size * config.n_docs, config.max_combined_length)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>) &#x2014; Input IDs post-processed from the retrieved documents and the question encoder <code>input_ids</code> by the retriever.</p> <p>If the model is not initialized with a <code>retriever</code>, <code>context_input_ids</code> has to be provided to the forward pass. <code>context_input_ids</code> are returned by <code>__call__()</code>`,name:"context_input_ids"},{anchor:"transformers.TFRagTokenForGeneration.generate.context_attention_mask",description:`<strong>context_attention_mask</strong> (<code>tf.Tensor</code> of shape <code>(batch_size * config.n_docs, config.max_combined_length)</code>, <em>optional</em>, returned when <em>output_retrieved=True</em>) &#x2014; Attention mask post-processed from the retrieved documents and the question encoder <code>input_ids</code> by the retriever.</p> <p>If the model is not initialized with a <code>retriever</code>, <code>context_attention_mask</code> has to be provided to the forward pass. <code>context_attention_mask</code> is returned by <code>__call__()</code>`,name:"context_attention_mask"},{anchor:"transformers.TFRagTokenForGeneration.generate.doc_scores",description:`<strong>doc_scores</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, config.n_docs)</code>) &#x2014; Score between each retrieved document embeddings (see <code>retrieved_doc_embeds</code>) and <code>question_encoder_last_hidden_state</code>.</p> <p>If the model is not initialized with a <code>retriever</code>, <code>doc_scores</code> has to be provided to the forward pass. <code>doc_scores</code> are returned by <code>__call__()</code>`,name:"doc_scores"},{anchor:"transformers.TFRagTokenForGeneration.generate.max_length",description:`<strong>max_length</strong> (<code>int</code>, <em>optional</em>, defaults to 20) &#x2014; The maximum length of the sequence to be generated.`,name:"max_length"},{anchor:"transformers.TFRagTokenForGeneration.generate.min_length",description:`<strong>min_length</strong> (<code>int</code>, <em>optional</em>, defaults to 10) &#x2014; The minimum length of the sequence to be generated.`,name:"min_length"},{anchor:"transformers.TFRagTokenForGeneration.generate.early_stopping",description:`<strong>early_stopping</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to stop the beam search when at least <code>num_beams</code> sentences per batch are finished. 
use_cache &#x2014; (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>): Whether or not the model should use the past last key/values attentions (if applicable to the model) to speed up decoding.`,name:"early_stopping"},{anchor:"transformers.TFRagTokenForGeneration.generate.pad_token_id",description:`<strong>pad_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>padding</em> token.`,name:"pad_token_id"},{anchor:"transformers.TFRagTokenForGeneration.generate.bos_token_id",description:`<strong>bos_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>beginning-of-sequence</em> token.`,name:"bos_token_id"},{anchor:"transformers.TFRagTokenForGeneration.generate.eos_token_id",description:`<strong>eos_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>end-of-sequence</em> token.`,name:"eos_token_id"},{anchor:"transformers.TFRagTokenForGeneration.generate.length_penalty",description:`<strong>length_penalty</strong> (<code>float</code>, <em>optional</em>, defaults to 1.0) &#x2014; Exponential penalty to the length. 1.0 means no penalty.</p> <p>Set to values &lt; 1.0 in order to encourage the model to generate shorter sequences, to a value &gt; 1.0 in order to encourage the model to produce longer sequences.`,name:"length_penalty"},{anchor:"transformers.TFRagTokenForGeneration.generate.no_repeat_ngram_size",description:`<strong>no_repeat_ngram_size</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If set to int &gt; 0, all ngrams of that size can only occur once.`,name:"no_repeat_ngram_size"},{anchor:"transformers.TFRagTokenForGeneration.generate.bad_words_ids(List[int],",description:`<strong>bad_words_ids(<code>List[int]</code>,</strong> <em>optional</em>) &#x2014; List of token ids that are not allowed to be generated. In order to get the tokens of the words that should not appear in the generated text, use <code>tokenizer.encode(bad_word, add_prefix_space=True)</code>.`,name:"bad_words_ids(List[int],"},{anchor:"transformers.TFRagTokenForGeneration.generate.num_beams",description:`<strong>num_beams</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; Number of beams for beam search. 1 means no beam search.`,name:"num_beams"},{anchor:"transformers.TFRagTokenForGeneration.generate.num_return_sequences(int,",description:`<strong>num_return_sequences(<code>int</code>,</strong> <em>optional</em>, defaults to 1) &#x2014; The number of independently computed returned sequences for each element in the batch. Note that this is not the value we pass to the <code>generator</code>&#x2019;s <code>[generate()](/docs/transformers/pr_16143/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.generate)</code> function, where we set <code>num_return_sequences</code> to <code>num_beams</code>. 
decoder_start_token_id (<code>int</code>, <em>optional</em>): If an encoder-decoder model starts decoding with a different token than <em>bos</em>, the id of that token.`,name:"num_return_sequences(int,"},{anchor:"transformers.TFRagTokenForGeneration.generate.n_docs",description:`<strong>n_docs</strong> (<code>int</code>, <em>optional</em>, defaults to <code>config.n_docs</code>) &#x2014; Number of documents to retrieve and/or number of documents for which to generate an answer.`,name:"n_docs"},{anchor:"transformers.TFRagTokenForGeneration.generate.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more details.`,name:"output_attentions"},{anchor:"transformers.TFRagTokenForGeneration.generate.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more details.`,name:"output_hidden_states"},{anchor:"transformers.TFRagTokenForGeneration.generate.output_scores",description:`<strong>output_scores</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the prediction scores. See <code>scores</code> under returned tensors for more details.`,name:"output_scores"},{anchor:"transformers.TFRagTokenForGeneration.generate.return_dict_in_generate",description:`<strong>return_dict_in_generate</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. model_specific_kwargs &#x2014; Additional model specific kwargs will be forwarded to the <code>forward</code> function of the model.`,name:"return_dict_in_generate"}],returnDescription:` <p>The generated sequences. The second dimension (sequence_length) is either equal to <code>max_length</code> or shorter if all batches finished early due to the <code>eos_token_id</code>.</p> `,returnType:` <p><code>tf.Tensor</code> of shape <code>(batch_size * num_return_sequences, sequence_length)</code></p> `}}),{c(){p=r("meta"),y=c(),u=r("h1"),f=r("a"),x=r("span"),v(g.$$.fragment),h=c(),R=r("span"),ti=t("RAG"),Ia=c(),$e=r("h2"),ot=r("a"),Fr=r("span"),v(St.$$.fragment),oi=c(),zr=r("span"),ni=t("Overview"),Oa=c(),an=r("p"),ri=t(`Retrieval-augmented generation (\u201CRAG\u201D) models combine the powers of pretrained dense retrieval (DPR) and sequence-to-sequence models. RAG models retrieve documents, pass them to a seq2seq model, then marginalize to generate outputs. 
The retriever and seq2seq modules are initialized from pretrained models, and fine-tuned jointly, allowing both retrieval and generation to adapt to downstream tasks.`),Wa=c(),nt=r("p"),si=t("It is based on the paper "),Dt=r("a"),ai=t("Retrieval-Augmented Generation for Knowledge-Intensive NLP Tasks"),di=t(` by Patrick Lewis, Ethan Perez, Aleksandara Piktus, Fabio Petroni, Vladimir Karpukhin, Naman Goyal, Heinrich K\xFCttler, Mike Lewis, Wen-tau Yih, Tim Rockt\xE4schel, Sebastian Riedel, Douwe Kiela.`),Ha=c(),dn=r("p"),ii=t("The abstract from the paper is the following:"),Ba=c(),cn=r("p"),$r=r("em"),ci=t(`Large pre-trained language models have been shown to store factual knowledge in their parameters, and achieve state-of-the-art results when fine-tuned on downstream NLP tasks. However, their ability to access and precisely manipulate knowledge is still limited, and hence on knowledge-intensive tasks, their performance lags behind task-specific architectures. Additionally, providing provenance for their decisions and updating their world knowledge remain open research problems. Pre-trained models with a differentiable access mechanism to explicit nonparametric memory can overcome this issue, but have so far been only investigated for extractive downstream tasks. We explore a general-purpose fine-tuning recipe for retrieval-augmented generation (RAG) \u2014 models which combine pre-trained parametric and non-parametric memory for language generation. We introduce RAG models where the parametric memory is a pre-trained seq2seq model and the non-parametric memory is a dense vector index of Wikipedia, accessed with a pre-trained neural retriever. We compare two RAG formulations, one which conditions on the same retrieved passages across the whole generated sequence, the other can use different passages per token. We fine-tune and evaluate our models on a wide range of knowledge-intensive NLP tasks and set the state-of-the-art on three open domain QA tasks, outperforming parametric seq2seq models and task-specific retrieve-and-extract architectures. For language generation tasks, we find that RAG models generate more specific, diverse and factual language than a state-of-the-art parametric-only seq2seq baseline.`),Qa=c(),rt=r("p"),li=t("This model was contributed by "),Lt=r("a"),pi=t("ola13"),hi=t("."),Ua=c(),Ee=r("h2"),st=r("a"),Er=r("span"),v(It.$$.fragment),ui=c(),jr=r("span"),mi=t("RagConfig"),Va=c(),de=r("div"),v(Ot.$$.fragment),gi=c(),me=r("p"),ln=r("a"),_i=t("RagConfig"),fi=t(" stores the configuration of a "),Mr=r("em"),vi=t("RagModel"),bi=t(". Configuration objects inherit from "),pn=r("a"),Ti=t("PretrainedConfig"),wi=t(` and can be used to control the model outputs. Read the documentation from `),hn=r("a"),ki=t("PretrainedConfig"),qi=t(" for more information."),yi=c(),at=r("div"),v(Wt.$$.fragment),xi=c(),Ht=r("p"),Ri=t("Instantiate a "),un=r("a"),Fi=t("EncoderDecoderConfig"),zi=t(` (or a derived class) from a pre-trained encoder model configuration and decoder model configuration.`),$i=c(),dt=r("div"),v(Bt.$$.fragment),Ei=c(),Qt=r("p"),ji=t("Serializes this instance to a Python dictionary. Override the default "),mn=r("a"),Mi=t("to_dict()"),Gi=t("."),Ka=c(),je=r("h2"),it=r("a"),Gr=r("span"),v(Ut.$$.fragment),Ai=c(),Ar=r("span"),Ni=t("RagTokenizer"),Ya=c(),Me=r("div"),v(Vt.$$.fragment),Pi=c(),ct=r("div"),v(Kt.$$.fragment),Ci=c(),Nr=r("p"),Si=t(`Temporarily sets the tokenizer for encoding the targets. 
Useful for tokenizer associated to sequence-to-sequence models that need a slightly different processing for the labels.`),Ja=c(),Ge=r("h2"),lt=r("a"),Pr=r("span"),v(Yt.$$.fragment),Di=c(),Cr=r("span"),Li=t("Rag specific outputs"),Xa=c(),Ae=r("div"),v(Jt.$$.fragment),Ii=c(),Sr=r("p"),Oi=t("Base class for retriever augmented marginalized models outputs."),Za=c(),Xt=r("div"),v(Zt.$$.fragment),ed=c(),Ne=r("h2"),pt=r("a"),Dr=r("span"),v(eo.$$.fragment),Wi=c(),Lr=r("span"),Hi=t("RagRetriever"),td=c(),X=r("div"),v(to.$$.fragment),Bi=c(),Ir=r("p"),Qi=t(`Retriever used to get documents from vector queries. It retrieves the documents embeddings as well as the documents contents, and it formats them to be used with a RagModel.`),Ui=c(),Or=r("p"),Vi=t("Examples:"),Ki=c(),v(oo.$$.fragment),Yi=c(),ht=r("div"),v(no.$$.fragment),Ji=c(),Wr=r("p"),Xi=t("Retriever initialization function. It loads the index into memory."),Zi=c(),ut=r("div"),v(ro.$$.fragment),ec=c(),Pe=r("p"),tc=t("Postprocessing retrieved "),Hr=r("code"),oc=t("docs"),nc=t(" and combining them with "),Br=r("code"),rc=t("input_strings"),sc=t("."),ac=c(),mt=r("div"),v(so.$$.fragment),dc=c(),ao=r("p"),ic=t("Retrieves documents for specified "),Qr=r("code"),cc=t("question_hidden_states"),lc=t("."),od=c(),Ce=r("h2"),gt=r("a"),Ur=r("span"),v(io.$$.fragment),pc=c(),Vr=r("span"),hc=t("RagModel"),nd=c(),D=r("div"),v(co.$$.fragment),uc=c(),Se=r("p"),mc=t("The "),gn=r("a"),gc=t("RagModel"),_c=t(" forward method, overrides the "),Kr=r("code"),fc=t("__call__"),vc=t(" special method."),bc=c(),v(_t.$$.fragment),Tc=c(),Yr=r("p"),wc=t(`RAG is a seq2seq model which encapsulates two core components: a question encoder and a generator. During a forward pass, we encode the input with the question encoder and pass it to the retriever to extract relevant context documents. The documents are then prepended to the input. Such contextualized inputs is passed to the generator.`),kc=c(),ie=r("p"),qc=t("The question encoder can be any "),Jr=r("em"),yc=t("autoencoding"),xc=t(" model, preferably "),_n=r("a"),Rc=t("DPRQuestionEncoder"),Fc=t(`, and the generator can be any `),Xr=r("em"),zc=t("seq2seq"),$c=t(" model, preferably "),fn=r("a"),Ec=t("BartForConditionalGeneration"),jc=t("."),Mc=c(),G=r("p"),Gc=t("The model can be initialized with a "),vn=r("a"),Ac=t("RagRetriever"),Nc=t(` for end-to-end generation or used in combination with the outputs of a retriever in multiple steps---see examples for more details. The model is compatible any `),Zr=r("em"),Pc=t("autoencoding"),Cc=t(" model as the "),es=r("code"),Sc=t("question_encoder"),Dc=t(" and any "),ts=r("em"),Lc=t("seq2seq"),Ic=t(" model with language model head as the "),os=r("code"),Oc=t("generator"),Wc=t(`. It has been tested with `),bn=r("a"),Hc=t("DPRQuestionEncoder"),Bc=t(" as the "),ns=r("code"),Qc=t("question_encoder"),Uc=t(" and "),Tn=r("a"),Vc=t("BartForConditionalGeneration"),Kc=t(` or `),wn=r("a"),Yc=t("T5ForConditionalGeneration"),Jc=t(" as the "),rs=r("code"),Xc=t("generator"),Zc=t("."),el=c(),lo=r("p"),tl=t("This model inherits from "),kn=r("a"),ol=t("PreTrainedModel"),nl=t(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),rl=c(),po=r("p"),sl=t("This model is also a PyTorch "),ho=r("a"),al=t("torch.nn.Module"),dl=t(` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),il=c(),ee=r("div"),v(uo.$$.fragment),cl=c(),De=r("p"),ll=t("The "),qn=r("a"),pl=t("RagModel"),hl=t(" forward method, overrides the "),ss=r("code"),ul=t("__call__"),ml=t(" special method."),gl=c(),v(ft.$$.fragment),_l=c(),as=r("p"),fl=t("Example:"),vl=c(),v(mo.$$.fragment),rd=c(),Le=r("h2"),vt=r("a"),ds=r("span"),v(go.$$.fragment),bl=c(),is=r("span"),Tl=t("RagSequenceForGeneration"),sd=c(),E=r("div"),v(_o.$$.fragment),wl=c(),Ie=r("p"),kl=t("The "),yn=r("a"),ql=t("RagSequenceForGeneration"),yl=t(" forward method, overrides the "),cs=r("code"),xl=t("__call__"),Rl=t(" special method."),Fl=c(),v(bt.$$.fragment),zl=c(),ls=r("p"),$l=t("A RAG-sequence model implementation. It performs RAG-sequence specific marginalization in the forward pass."),El=c(),ps=r("p"),jl=t(`RAG is a seq2seq model which encapsulates two core components: a question encoder and a generator. During a forward pass, we encode the input with the question encoder and pass it to the retriever to extract relevant context documents. The documents are then prepended to the input. Such contextualized inputs is passed to the generator.`),Ml=c(),ce=r("p"),Gl=t("The question encoder can be any "),hs=r("em"),Al=t("autoencoding"),Nl=t(" model, preferably "),xn=r("a"),Pl=t("DPRQuestionEncoder"),Cl=t(`, and the generator can be any `),us=r("em"),Sl=t("seq2seq"),Dl=t(" model, preferably "),Rn=r("a"),Ll=t("BartForConditionalGeneration"),Il=t("."),Ol=c(),A=r("p"),Wl=t("The model can be initialized with a "),Fn=r("a"),Hl=t("RagRetriever"),Bl=t(` for end-to-end generation or used in combination with the outputs of a retriever in multiple steps---see examples for more details. The model is compatible any `),ms=r("em"),Ql=t("autoencoding"),Ul=t(" model as the "),gs=r("code"),Vl=t("question_encoder"),Kl=t(" and any "),_s=r("em"),Yl=t("seq2seq"),Jl=t(" model with language model head as the "),fs=r("code"),Xl=t("generator"),Zl=t(`. It has been tested with `),zn=r("a"),ep=t("DPRQuestionEncoder"),tp=t(" as the "),vs=r("code"),op=t("question_encoder"),np=t(" and "),$n=r("a"),rp=t("BartForConditionalGeneration"),sp=t(` or `),En=r("a"),ap=t("T5ForConditionalGeneration"),dp=t(" as the "),bs=r("code"),ip=t("generator"),cp=t("."),lp=c(),fo=r("p"),pp=t("This model inherits from "),jn=r("a"),hp=t("PreTrainedModel"),up=t(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),mp=c(),vo=r("p"),gp=t("This model is also a PyTorch "),bo=r("a"),_p=t("torch.nn.Module"),fp=t(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),vp=c(),te=r("div"),v(To.$$.fragment),bp=c(),Oe=r("p"),Tp=t("The "),Mn=r("a"),wp=t("RagSequenceForGeneration"),kp=t(" forward method, overrides the "),Ts=r("code"),qp=t("__call__"),yp=t(" special method."),xp=c(),v(Tt.$$.fragment),Rp=c(),ws=r("p"),Fp=t("Example:"),zp=c(),v(wo.$$.fragment),$p=c(),wt=r("div"),v(ko.$$.fragment),Ep=c(),qo=r("p"),jp=t("Implements RAG sequence \u201Cthorough\u201D decoding. 
Read the "),Gn=r("a"),Mp=t("generate()"),Gp=t("`\ndocumentation for more information on how to set other generate input parameters."),ad=c(),We=r("h2"),kt=r("a"),ks=r("span"),v(yo.$$.fragment),Ap=c(),qs=r("span"),Np=t("RagTokenForGeneration"),dd=c(),j=r("div"),v(xo.$$.fragment),Pp=c(),He=r("p"),Cp=t("The "),An=r("a"),Sp=t("RagTokenForGeneration"),Dp=t(" forward method, overrides the "),ys=r("code"),Lp=t("__call__"),Ip=t(" special method."),Op=c(),v(qt.$$.fragment),Wp=c(),xs=r("p"),Hp=t("A RAG-token model implementation. It performs RAG-token specific marginalization in the forward pass."),Bp=c(),Rs=r("p"),Qp=t(`RAG is a seq2seq model which encapsulates two core components: a question encoder and a generator. During a forward pass, we encode the input with the question encoder and pass it to the retriever to extract relevant context documents. The documents are then prepended to the input. Such contextualized inputs is passed to the generator.`),Up=c(),le=r("p"),Vp=t("The question encoder can be any "),Fs=r("em"),Kp=t("autoencoding"),Yp=t(" model, preferably "),Nn=r("a"),Jp=t("DPRQuestionEncoder"),Xp=t(`, and the generator can be any `),zs=r("em"),Zp=t("seq2seq"),eh=t(" model, preferably "),Pn=r("a"),th=t("BartForConditionalGeneration"),oh=t("."),nh=c(),N=r("p"),rh=t("The model can be initialized with a "),Cn=r("a"),sh=t("RagRetriever"),ah=t(` for end-to-end generation or used in combination with the outputs of a retriever in multiple steps---see examples for more details. The model is compatible any `),$s=r("em"),dh=t("autoencoding"),ih=t(" model as the "),Es=r("code"),ch=t("question_encoder"),lh=t(" and any "),js=r("em"),ph=t("seq2seq"),hh=t(" model with language model head as the "),Ms=r("code"),uh=t("generator"),mh=t(`. It has been tested with `),Sn=r("a"),gh=t("DPRQuestionEncoder"),_h=t(" as the "),Gs=r("code"),fh=t("question_encoder"),vh=t(" and "),Dn=r("a"),bh=t("BartForConditionalGeneration"),Th=t(` or `),Ln=r("a"),wh=t("T5ForConditionalGeneration"),kh=t(" as the "),As=r("code"),qh=t("generator"),yh=t("."),xh=c(),Ro=r("p"),Rh=t("This model inherits from "),In=r("a"),Fh=t("PreTrainedModel"),zh=t(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),$h=c(),Fo=r("p"),Eh=t("This model is also a PyTorch "),zo=r("a"),jh=t("torch.nn.Module"),Mh=t(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Gh=c(),oe=r("div"),v($o.$$.fragment),Ah=c(),Be=r("p"),Nh=t("The "),On=r("a"),Ph=t("RagTokenForGeneration"),Ch=t(" forward method, overrides the "),Ns=r("code"),Sh=t("__call__"),Dh=t(" special method."),Lh=c(),v(yt.$$.fragment),Ih=c(),Ps=r("p"),Oh=t("Example:"),Wh=c(),v(Eo.$$.fragment),Hh=c(),xt=r("div"),v(jo.$$.fragment),Bh=c(),Cs=r("p"),Qh=t("Implements RAG token decoding."),id=c(),Qe=r("h2"),Rt=r("a"),Ss=r("span"),v(Mo.$$.fragment),Uh=c(),Ds=r("span"),Vh=t("TFRagModel"),cd=c(),P=r("div"),v(Go.$$.fragment),Kh=c(),Ue=r("p"),Yh=t("The "),Wn=r("a"),Jh=t("TFRagModel"),Xh=t(" forward method, overrides the "),Ls=r("code"),Zh=t("__call__"),eu=t(" special method."),tu=c(),v(Ft.$$.fragment),ou=c(),Is=r("p"),nu=t(`RAG is a sequence-to-sequence model which encapsulates two core components: a question encoder and a generator. During a forward pass, we encode the input with the question encoder and pass it to the retriever to extract relevant context documents. 
The documents are then prepended to the input. Such contextualized inputs is passed to the generator.`),ru=c(),pe=r("p"),su=t("The question encoder can be any "),Os=r("em"),au=t("autoencoding"),du=t(" model, preferably "),Hn=r("a"),iu=t("TFDPRQuestionEncoder"),cu=t(`, and the generator can be any `),Ws=r("em"),lu=t("seq2seq"),pu=t(" model, preferably "),Bn=r("a"),hu=t("TFBartForConditionalGeneration"),uu=t("."),mu=c(),L=r("p"),gu=t("The model can be initialized with a "),Qn=r("a"),_u=t("RagRetriever"),fu=t(` for end-to-end generation or used in combination with the outputs of a retriever in multiple steps---see examples for more details. The model is compatible any `),Hs=r("em"),vu=t("autoencoding"),bu=t(" model as the "),Bs=r("code"),Tu=t("question_encoder"),wu=t(" and any "),Qs=r("em"),ku=t("seq2seq"),qu=t(" model with language model head as the "),Us=r("code"),yu=t("generator"),xu=t(`. It has been tested with `),Un=r("a"),Ru=t("TFDPRQuestionEncoder"),Fu=t(" as the "),Vs=r("code"),zu=t("question_encoder"),$u=t(" and "),Vn=r("a"),Eu=t("TFBartForConditionalGeneration"),ju=t(` as the `),Ks=r("code"),Mu=t("generator"),Gu=t("."),Au=c(),Ao=r("p"),Nu=t("This model inherits from "),Kn=r("a"),Pu=t("TFPreTrainedModel"),Cu=t(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Su=c(),No=r("p"),Du=t("This model is also a Tensorflow "),Po=r("a"),Lu=t("tf.keras.Model"),Iu=t(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Ou=c(),Ys=r("p"),Wu=t(`The model is in a developing state as it is now fully supports in eager-mode only, and may not be exported in SavedModel format.`),Hu=c(),ne=r("div"),v(Co.$$.fragment),Bu=c(),Ve=r("p"),Qu=t("The "),Yn=r("a"),Uu=t("TFRagModel"),Vu=t(" forward method, overrides the "),Js=r("code"),Ku=t("__call__"),Yu=t(" special method."),Ju=c(),v(zt.$$.fragment),Xu=c(),Xs=r("p"),Zu=t("Example:"),em=c(),v(So.$$.fragment),ld=c(),Ke=r("h2"),$t=r("a"),Zs=r("span"),v(Do.$$.fragment),tm=c(),ea=r("span"),om=t("TFRagSequenceForGeneration"),pd=c(),F=r("div"),v(Lo.$$.fragment),nm=c(),Ye=r("p"),rm=t("The "),Jn=r("a"),sm=t("TFRagSequenceForGeneration"),am=t(" forward method, overrides the "),ta=r("code"),dm=t("__call__"),im=t(" special method."),cm=c(),v(Et.$$.fragment),lm=c(),oa=r("p"),pm=t("A TF RAG-sequence model implementation. It performs RAG-sequence specific marginalization in the forward pass."),hm=c(),na=r("p"),um=t(`RAG is a sequence-to-sequence model which encapsulates two core components: a question encoder and a generator. During a forward pass, we encode the input with the question encoder and pass it to the retriever to extract relevant context documents. The documents are then prepended to the input. Such contextualized inputs is passed to the generator.`),mm=c(),he=r("p"),gm=t("The question encoder can be any "),ra=r("em"),_m=t("autoencoding"),fm=t(" model, preferably "),Xn=r("a"),vm=t("TFDPRQuestionEncoder"),bm=t(`, and the generator can be any `),sa=r("em"),Tm=t("seq2seq"),wm=t(" model, preferably "),Zn=r("a"),km=t("TFBartForConditionalGeneration"),qm=t("."),ym=c(),I=r("p"),xm=t("The model can be initialized with a "),er=r("a"),Rm=t("RagRetriever"),Fm=t(` for end-to-end generation or used in combination with the outputs of a retriever in multiple steps---see examples for more details. 
The model is compatible any `),aa=r("em"),zm=t("autoencoding"),$m=t(" model as the "),da=r("code"),Em=t("question_encoder"),jm=t(" and any "),ia=r("em"),Mm=t("seq2seq"),Gm=t(" model with language model head as the "),ca=r("code"),Am=t("generator"),Nm=t(`. It has been tested with `),tr=r("a"),Pm=t("TFDPRQuestionEncoder"),Cm=t(" as the "),la=r("code"),Sm=t("question_encoder"),Dm=t(" and "),or=r("a"),Lm=t("TFBartForConditionalGeneration"),Im=t(` as the `),pa=r("code"),Om=t("generator"),Wm=t("."),Hm=c(),Io=r("p"),Bm=t("This model inherits from "),nr=r("a"),Qm=t("TFPreTrainedModel"),Um=t(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Vm=c(),Oo=r("p"),Km=t("This model is also a Tensorflow "),Wo=r("a"),Ym=t("tf.keras.Model"),Jm=t(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Xm=c(),ha=r("p"),Zm=t(`The model is in a developing state as it is now fully supports in eager-mode only, and may not be exported in SavedModel format.`),eg=c(),re=r("div"),v(Ho.$$.fragment),tg=c(),Je=r("p"),og=t("The "),rr=r("a"),ng=t("TFRagSequenceForGeneration"),rg=t(" forward method, overrides the "),ua=r("code"),sg=t("__call__"),ag=t(" special method."),dg=c(),v(jt.$$.fragment),ig=c(),ma=r("p"),cg=t("Example:"),lg=c(),v(Bo.$$.fragment),pg=c(),Mt=r("div"),v(Qo.$$.fragment),hg=c(),Uo=r("p"),ug=t("Implements RAG sequence \u201Cthorough\u201D decoding. Read the "),sr=r("a"),mg=t("generate()"),gg=t("`\ndocumentation for more information on how to set other generate input parameters"),hd=c(),Xe=r("h2"),Gt=r("a"),ga=r("span"),v(Vo.$$.fragment),_g=c(),_a=r("span"),fg=t("TFRagTokenForGeneration"),ud=c(),z=r("div"),v(Ko.$$.fragment),vg=c(),Ze=r("p"),bg=t("The "),ar=r("a"),Tg=t("TFRagTokenForGeneration"),wg=t(" forward method, overrides the "),fa=r("code"),kg=t("__call__"),qg=t(" special method."),yg=c(),v(At.$$.fragment),xg=c(),va=r("p"),Rg=t("A TF RAG-token model implementation. It performs RAG-token specific marginalization in the forward pass."),Fg=c(),ba=r("p"),zg=t(`RAG is a sequence-to-sequence model which encapsulates two core components: a question encoder and a generator. During a forward pass, we encode the input with the question encoder and pass it to the retriever to extract relevant context documents. The documents are then prepended to the input. Such contextualized inputs is passed to the generator.`),$g=c(),ue=r("p"),Eg=t("The question encoder can be any "),Ta=r("em"),jg=t("autoencoding"),Mg=t(" model, preferably "),dr=r("a"),Gg=t("TFDPRQuestionEncoder"),Ag=t(`, and the generator can be any `),wa=r("em"),Ng=t("seq2seq"),Pg=t(" model, preferably "),ir=r("a"),Cg=t("TFBartForConditionalGeneration"),Sg=t("."),Dg=c(),O=r("p"),Lg=t("The model can be initialized with a "),cr=r("a"),Ig=t("RagRetriever"),Og=t(` for end-to-end generation or used in combination with the outputs of a retriever in multiple steps---see examples for more details. The model is compatible any `),ka=r("em"),Wg=t("autoencoding"),Hg=t(" model as the "),qa=r("code"),Bg=t("question_encoder"),Qg=t(" and any "),ya=r("em"),Ug=t("seq2seq"),Vg=t(" model with language model head as the "),xa=r("code"),Kg=t("generator"),Yg=t(`. 
It has been tested with `),lr=r("a"),Jg=t("TFDPRQuestionEncoder"),Xg=t(" as the "),Ra=r("code"),Zg=t("question_encoder"),e_=t(" and "),pr=r("a"),t_=t("TFBartForConditionalGeneration"),o_=t(` as the `),Fa=r("code"),n_=t("generator"),r_=t("."),s_=c(),Yo=r("p"),a_=t("This model inherits from "),hr=r("a"),d_=t("TFPreTrainedModel"),i_=t(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),c_=c(),Jo=r("p"),l_=t("This model is also a Tensorflow "),Xo=r("a"),p_=t("tf.keras.Model"),h_=t(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),u_=c(),za=r("p"),m_=t(`The model is in a developing state as it is now fully supports in eager-mode only, and may not be exported in SavedModel format.`),g_=c(),se=r("div"),v(Zo.$$.fragment),__=c(),et=r("p"),f_=t("The "),ur=r("a"),v_=t("TFRagTokenForGeneration"),b_=t(" forward method, overrides the "),$a=r("code"),T_=t("__call__"),w_=t(" special method."),k_=c(),v(Nt.$$.fragment),q_=c(),Ea=r("p"),y_=t("Example:"),x_=c(),v(en.$$.fragment),R_=c(),Pt=r("div"),v(tn.$$.fragment),F_=c(),ja=r("p"),z_=t("Implements TFRAG token decoding."),this.h()},l(d){const m=sT('[data-svelte="svelte-1phssyn"]',document.head);p=s(m,"META",{name:!0,content:!0}),m.forEach(n),y=l(d),u=s(d,"H1",{class:!0});var on=a(u);f=s(on,"A",{id:!0,class:!0,href:!0});var Ma=a(f);x=s(Ma,"SPAN",{});var Ga=a(x);b(g.$$.fragment,Ga),Ga.forEach(n),Ma.forEach(n),h=l(on),R=s(on,"SPAN",{});var Aa=a(R);ti=o(Aa,"RAG"),Aa.forEach(n),on.forEach(n),Ia=l(d),$e=s(d,"H2",{class:!0});var nn=a($e);ot=s(nn,"A",{id:!0,class:!0,href:!0});var Na=a(ot);Fr=s(Na,"SPAN",{});var Pa=a(Fr);b(St.$$.fragment,Pa),Pa.forEach(n),Na.forEach(n),oi=l(nn),zr=s(nn,"SPAN",{});var Ca=a(zr);ni=o(Ca,"Overview"),Ca.forEach(n),nn.forEach(n),Oa=l(d),an=s(d,"P",{});var Sa=a(an);ri=o(Sa,`Retrieval-augmented generation (\u201CRAG\u201D) models combine the powers of pretrained dense retrieval (DPR) and sequence-to-sequence models. RAG models retrieve documents, pass them to a seq2seq model, then marginalize to generate outputs. The retriever and seq2seq modules are initialized from pretrained models, and fine-tuned jointly, allowing both retrieval and generation to adapt to downstream tasks.`),Sa.forEach(n),Wa=l(d),nt=s(d,"P",{});var rn=a(nt);si=o(rn,"It is based on the paper "),Dt=s(rn,"A",{href:!0,rel:!0});var Da=a(Dt);ai=o(Da,"Retrieval-Augmented Generation for Knowledge-Intensive NLP Tasks"),Da.forEach(n),di=o(rn,` by Patrick Lewis, Ethan Perez, Aleksandara Piktus, Fabio Petroni, Vladimir Karpukhin, Naman Goyal, Heinrich K\xFCttler, Mike Lewis, Wen-tau Yih, Tim Rockt\xE4schel, Sebastian Riedel, Douwe Kiela.`),rn.forEach(n),Ha=l(d),dn=s(d,"P",{});var La=a(dn);ii=o(La,"The abstract from the paper is the following:"),La.forEach(n),Ba=l(d),cn=s(d,"P",{});var $_=a(cn);$r=s($_,"EM",{});var E_=a($r);ci=o(E_,`Large pre-trained language models have been shown to store factual knowledge in their parameters, and achieve state-of-the-art results when fine-tuned on downstream NLP tasks. However, their ability to access and precisely manipulate knowledge is still limited, and hence on knowledge-intensive tasks, their performance lags behind task-specific architectures. Additionally, providing provenance for their decisions and updating their world knowledge remain open research problems. 
Pre-trained models with a differentiable access mechanism to explicit nonparametric memory can overcome this issue, but have so far been only investigated for extractive downstream tasks. We explore a general-purpose fine-tuning recipe for retrieval-augmented generation (RAG) \u2014 models which combine pre-trained parametric and non-parametric memory for language generation. We introduce RAG models where the parametric memory is a pre-trained seq2seq model and the non-parametric memory is a dense vector index of Wikipedia, accessed with a pre-trained neural retriever. We compare two RAG formulations, one which conditions on the same retrieved passages across the whole generated sequence, the other can use different passages per token. We fine-tune and evaluate our models on a wide range of knowledge-intensive NLP tasks and set the state-of-the-art on three open domain QA tasks, outperforming parametric seq2seq models and task-specific retrieve-and-extract architectures. For language generation tasks, we find that RAG models generate more specific, diverse and factual language than a state-of-the-art parametric-only seq2seq baseline.`),E_.forEach(n),$_.forEach(n),Qa=l(d),rt=s(d,"P",{});var gd=a(rt);li=o(gd,"This model was contributed by "),Lt=s(gd,"A",{href:!0,rel:!0});var j_=a(Lt);pi=o(j_,"ola13"),j_.forEach(n),hi=o(gd,"."),gd.forEach(n),Ua=l(d),Ee=s(d,"H2",{class:!0});var _d=a(Ee);st=s(_d,"A",{id:!0,class:!0,href:!0});var M_=a(st);Er=s(M_,"SPAN",{});var G_=a(Er);b(It.$$.fragment,G_),G_.forEach(n),M_.forEach(n),ui=l(_d),jr=s(_d,"SPAN",{});var A_=a(jr);mi=o(A_,"RagConfig"),A_.forEach(n),_d.forEach(n),Va=l(d),de=s(d,"DIV",{class:!0});var Ct=a(de);b(Ot.$$.fragment,Ct),gi=l(Ct),me=s(Ct,"P",{});var tt=a(me);ln=s(tt,"A",{href:!0});var N_=a(ln);_i=o(N_,"RagConfig"),N_.forEach(n),fi=o(tt," stores the configuration of a "),Mr=s(tt,"EM",{});var P_=a(Mr);vi=o(P_,"RagModel"),P_.forEach(n),bi=o(tt,". Configuration objects inherit from "),pn=s(tt,"A",{href:!0});var C_=a(pn);Ti=o(C_,"PretrainedConfig"),C_.forEach(n),wi=o(tt,` and can be used to control the model outputs. Read the documentation from `),hn=s(tt,"A",{href:!0});var S_=a(hn);ki=o(S_,"PretrainedConfig"),S_.forEach(n),qi=o(tt," for more information."),tt.forEach(n),yi=l(Ct),at=s(Ct,"DIV",{class:!0});var fd=a(at);b(Wt.$$.fragment,fd),xi=l(fd),Ht=s(fd,"P",{});var vd=a(Ht);Ri=o(vd,"Instantiate a "),un=s(vd,"A",{href:!0});var D_=a(un);Fi=o(D_,"EncoderDecoderConfig"),D_.forEach(n),zi=o(vd,` (or a derived class) from a pre-trained encoder model configuration and decoder model configuration.`),vd.forEach(n),fd.forEach(n),$i=l(Ct),dt=s(Ct,"DIV",{class:!0});var bd=a(dt);b(Bt.$$.fragment,bd),Ei=l(bd),Qt=s(bd,"P",{});var Td=a(Qt);ji=o(Td,"Serializes this instance to a Python dictionary. Override the default "),mn=s(Td,"A",{href:!0});var L_=a(mn);Mi=o(L_,"to_dict()"),L_.forEach(n),Gi=o(Td,"."),Td.forEach(n),bd.forEach(n),Ct.forEach(n),Ka=l(d),je=s(d,"H2",{class:!0});var wd=a(je);it=s(wd,"A",{id:!0,class:!0,href:!0});var I_=a(it);Gr=s(I_,"SPAN",{});var O_=a(Gr);b(Ut.$$.fragment,O_),O_.forEach(n),I_.forEach(n),Ai=l(wd),Ar=s(wd,"SPAN",{});var W_=a(Ar);Ni=o(W_,"RagTokenizer"),W_.forEach(n),wd.forEach(n),Ya=l(d),Me=s(d,"DIV",{class:!0});var kd=a(Me);b(Vt.$$.fragment,kd),Pi=l(kd),ct=s(kd,"DIV",{class:!0});var qd=a(ct);b(Kt.$$.fragment,qd),Ci=l(qd),Nr=s(qd,"P",{});var H_=a(Nr);Si=o(H_,`Temporarily sets the tokenizer for encoding the targets. 
Useful for tokenizer associated to sequence-to-sequence models that need a slightly different processing for the labels.`),H_.forEach(n),qd.forEach(n),kd.forEach(n),Ja=l(d),Ge=s(d,"H2",{class:!0});var yd=a(Ge);lt=s(yd,"A",{id:!0,class:!0,href:!0});var B_=a(lt);Pr=s(B_,"SPAN",{});var Q_=a(Pr);b(Yt.$$.fragment,Q_),Q_.forEach(n),B_.forEach(n),Di=l(yd),Cr=s(yd,"SPAN",{});var U_=a(Cr);Li=o(U_,"Rag specific outputs"),U_.forEach(n),yd.forEach(n),Xa=l(d),Ae=s(d,"DIV",{class:!0});var xd=a(Ae);b(Jt.$$.fragment,xd),Ii=l(xd),Sr=s(xd,"P",{});var V_=a(Sr);Oi=o(V_,"Base class for retriever augmented marginalized models outputs."),V_.forEach(n),xd.forEach(n),Za=l(d),Xt=s(d,"DIV",{class:!0});var K_=a(Xt);b(Zt.$$.fragment,K_),K_.forEach(n),ed=l(d),Ne=s(d,"H2",{class:!0});var Rd=a(Ne);pt=s(Rd,"A",{id:!0,class:!0,href:!0});var Y_=a(pt);Dr=s(Y_,"SPAN",{});var J_=a(Dr);b(eo.$$.fragment,J_),J_.forEach(n),Y_.forEach(n),Wi=l(Rd),Lr=s(Rd,"SPAN",{});var X_=a(Lr);Hi=o(X_,"RagRetriever"),X_.forEach(n),Rd.forEach(n),td=l(d),X=s(d,"DIV",{class:!0});var ae=a(X);b(to.$$.fragment,ae),Bi=l(ae),Ir=s(ae,"P",{});var Z_=a(Ir);Qi=o(Z_,`Retriever used to get documents from vector queries. It retrieves the documents embeddings as well as the documents contents, and it formats them to be used with a RagModel.`),Z_.forEach(n),Ui=l(ae),Or=s(ae,"P",{});var ef=a(Or);Vi=o(ef,"Examples:"),ef.forEach(n),Ki=l(ae),b(oo.$$.fragment,ae),Yi=l(ae),ht=s(ae,"DIV",{class:!0});var Fd=a(ht);b(no.$$.fragment,Fd),Ji=l(Fd),Wr=s(Fd,"P",{});var tf=a(Wr);Xi=o(tf,"Retriever initialization function. It loads the index into memory."),tf.forEach(n),Fd.forEach(n),Zi=l(ae),ut=s(ae,"DIV",{class:!0});var zd=a(ut);b(ro.$$.fragment,zd),ec=l(zd),Pe=s(zd,"P",{});var mr=a(Pe);tc=o(mr,"Postprocessing retrieved "),Hr=s(mr,"CODE",{});var of=a(Hr);oc=o(of,"docs"),of.forEach(n),nc=o(mr," and combining them with "),Br=s(mr,"CODE",{});var nf=a(Br);rc=o(nf,"input_strings"),nf.forEach(n),sc=o(mr,"."),mr.forEach(n),zd.forEach(n),ac=l(ae),mt=s(ae,"DIV",{class:!0});var $d=a(mt);b(so.$$.fragment,$d),dc=l($d),ao=s($d,"P",{});var Ed=a(ao);ic=o(Ed,"Retrieves documents for specified "),Qr=s(Ed,"CODE",{});var rf=a(Qr);cc=o(rf,"question_hidden_states"),rf.forEach(n),lc=o(Ed,"."),Ed.forEach(n),$d.forEach(n),ae.forEach(n),od=l(d),Ce=s(d,"H2",{class:!0});var jd=a(Ce);gt=s(jd,"A",{id:!0,class:!0,href:!0});var sf=a(gt);Ur=s(sf,"SPAN",{});var af=a(Ur);b(io.$$.fragment,af),af.forEach(n),sf.forEach(n),pc=l(jd),Vr=s(jd,"SPAN",{});var df=a(Vr);hc=o(df,"RagModel"),df.forEach(n),jd.forEach(n),nd=l(d),D=s(d,"DIV",{class:!0});var Z=a(D);b(co.$$.fragment,Z),uc=l(Z),Se=s(Z,"P",{});var gr=a(Se);mc=o(gr,"The "),gn=s(gr,"A",{href:!0});var cf=a(gn);gc=o(cf,"RagModel"),cf.forEach(n),_c=o(gr," forward method, overrides the "),Kr=s(gr,"CODE",{});var lf=a(Kr);fc=o(lf,"__call__"),lf.forEach(n),vc=o(gr," special method."),gr.forEach(n),bc=l(Z),b(_t.$$.fragment,Z),Tc=l(Z),Yr=s(Z,"P",{});var pf=a(Yr);wc=o(pf,`RAG is a seq2seq model which encapsulates two core components: a question encoder and a generator. During a forward pass, we encode the input with the question encoder and pass it to the retriever to extract relevant context documents. The documents are then prepended to the input. 
Such contextualized inputs is passed to the generator.`),pf.forEach(n),kc=l(Z),ie=s(Z,"P",{});var fe=a(ie);qc=o(fe,"The question encoder can be any "),Jr=s(fe,"EM",{});var hf=a(Jr);yc=o(hf,"autoencoding"),hf.forEach(n),xc=o(fe," model, preferably "),_n=s(fe,"A",{href:!0});var uf=a(_n);Rc=o(uf,"DPRQuestionEncoder"),uf.forEach(n),Fc=o(fe,`, and the generator can be any `),Xr=s(fe,"EM",{});var mf=a(Xr);zc=o(mf,"seq2seq"),mf.forEach(n),$c=o(fe," model, preferably "),fn=s(fe,"A",{href:!0});var gf=a(fn);Ec=o(gf,"BartForConditionalGeneration"),gf.forEach(n),jc=o(fe,"."),fe.forEach(n),Mc=l(Z),G=s(Z,"P",{});var W=a(G);Gc=o(W,"The model can be initialized with a "),vn=s(W,"A",{href:!0});var _f=a(vn);Ac=o(_f,"RagRetriever"),_f.forEach(n),Nc=o(W,` for end-to-end generation or used in combination with the outputs of a retriever in multiple steps---see examples for more details. The model is compatible any `),Zr=s(W,"EM",{});var ff=a(Zr);Pc=o(ff,"autoencoding"),ff.forEach(n),Cc=o(W," model as the "),es=s(W,"CODE",{});var vf=a(es);Sc=o(vf,"question_encoder"),vf.forEach(n),Dc=o(W," and any "),ts=s(W,"EM",{});var bf=a(ts);Lc=o(bf,"seq2seq"),bf.forEach(n),Ic=o(W," model with language model head as the "),os=s(W,"CODE",{});var Tf=a(os);Oc=o(Tf,"generator"),Tf.forEach(n),Wc=o(W,`. It has been tested with `),bn=s(W,"A",{href:!0});var wf=a(bn);Hc=o(wf,"DPRQuestionEncoder"),wf.forEach(n),Bc=o(W," as the "),ns=s(W,"CODE",{});var kf=a(ns);Qc=o(kf,"question_encoder"),kf.forEach(n),Uc=o(W," and "),Tn=s(W,"A",{href:!0});var qf=a(Tn);Vc=o(qf,"BartForConditionalGeneration"),qf.forEach(n),Kc=o(W,` or `),wn=s(W,"A",{href:!0});var yf=a(wn);Yc=o(yf,"T5ForConditionalGeneration"),yf.forEach(n),Jc=o(W," as the "),rs=s(W,"CODE",{});var xf=a(rs);Xc=o(xf,"generator"),xf.forEach(n),Zc=o(W,"."),W.forEach(n),el=l(Z),lo=s(Z,"P",{});var Md=a(lo);tl=o(Md,"This model inherits from "),kn=s(Md,"A",{href:!0});var Rf=a(kn);ol=o(Rf,"PreTrainedModel"),Rf.forEach(n),nl=o(Md,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Md.forEach(n),rl=l(Z),po=s(Z,"P",{});var Gd=a(po);sl=o(Gd,"This model is also a PyTorch "),ho=s(Gd,"A",{href:!0,rel:!0});var Ff=a(ho);al=o(Ff,"torch.nn.Module"),Ff.forEach(n),dl=o(Gd,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Gd.forEach(n),il=l(Z),ee=s(Z,"DIV",{class:!0});var ve=a(ee);b(uo.$$.fragment,ve),cl=l(ve),De=s(ve,"P",{});var _r=a(De);ll=o(_r,"The "),qn=s(_r,"A",{href:!0});var zf=a(qn);pl=o(zf,"RagModel"),zf.forEach(n),hl=o(_r," forward method, overrides the "),ss=s(_r,"CODE",{});var $f=a(ss);ul=o($f,"__call__"),$f.forEach(n),ml=o(_r," special method."),_r.forEach(n),gl=l(ve),b(ft.$$.fragment,ve),_l=l(ve),as=s(ve,"P",{});var Ef=a(as);fl=o(Ef,"Example:"),Ef.forEach(n),vl=l(ve),b(mo.$$.fragment,ve),ve.forEach(n),Z.forEach(n),rd=l(d),Le=s(d,"H2",{class:!0});var Ad=a(Le);vt=s(Ad,"A",{id:!0,class:!0,href:!0});var jf=a(vt);ds=s(jf,"SPAN",{});var Mf=a(ds);b(go.$$.fragment,Mf),Mf.forEach(n),jf.forEach(n),bl=l(Ad),is=s(Ad,"SPAN",{});var Gf=a(is);Tl=o(Gf,"RagSequenceForGeneration"),Gf.forEach(n),Ad.forEach(n),sd=l(d),E=s(d,"DIV",{class:!0});var H=a(E);b(_o.$$.fragment,H),wl=l(H),Ie=s(H,"P",{});var fr=a(Ie);kl=o(fr,"The "),yn=s(fr,"A",{href:!0});var Af=a(yn);ql=o(Af,"RagSequenceForGeneration"),Af.forEach(n),yl=o(fr," forward method, overrides the "),cs=s(fr,"CODE",{});var Nf=a(cs);xl=o(Nf,"__call__"),Nf.forEach(n),Rl=o(fr," special method."),fr.forEach(n),Fl=l(H),b(bt.$$.fragment,H),zl=l(H),ls=s(H,"P",{});var Pf=a(ls);$l=o(Pf,"A RAG-sequence model implementation. It performs RAG-sequence specific marginalization in the forward pass."),Pf.forEach(n),El=l(H),ps=s(H,"P",{});var Cf=a(ps);jl=o(Cf,`RAG is a seq2seq model which encapsulates two core components: a question encoder and a generator. During a forward pass, we encode the input with the question encoder and pass it to the retriever to extract relevant context documents. The documents are then prepended to the input. Such contextualized inputs is passed to the generator.`),Cf.forEach(n),Ml=l(H),ce=s(H,"P",{});var be=a(ce);Gl=o(be,"The question encoder can be any "),hs=s(be,"EM",{});var Sf=a(hs);Al=o(Sf,"autoencoding"),Sf.forEach(n),Nl=o(be," model, preferably "),xn=s(be,"A",{href:!0});var Df=a(xn);Pl=o(Df,"DPRQuestionEncoder"),Df.forEach(n),Cl=o(be,`, and the generator can be any `),us=s(be,"EM",{});var Lf=a(us);Sl=o(Lf,"seq2seq"),Lf.forEach(n),Dl=o(be," model, preferably "),Rn=s(be,"A",{href:!0});var If=a(Rn);Ll=o(If,"BartForConditionalGeneration"),If.forEach(n),Il=o(be,"."),be.forEach(n),Ol=l(H),A=s(H,"P",{});var B=a(A);Wl=o(B,"The model can be initialized with a "),Fn=s(B,"A",{href:!0});var Of=a(Fn);Hl=o(Of,"RagRetriever"),Of.forEach(n),Bl=o(B,` for end-to-end generation or used in combination with the outputs of a retriever in multiple steps---see examples for more details. The model is compatible any `),ms=s(B,"EM",{});var Wf=a(ms);Ql=o(Wf,"autoencoding"),Wf.forEach(n),Ul=o(B," model as the "),gs=s(B,"CODE",{});var Hf=a(gs);Vl=o(Hf,"question_encoder"),Hf.forEach(n),Kl=o(B," and any "),_s=s(B,"EM",{});var Bf=a(_s);Yl=o(Bf,"seq2seq"),Bf.forEach(n),Jl=o(B," model with language model head as the "),fs=s(B,"CODE",{});var Qf=a(fs);Xl=o(Qf,"generator"),Qf.forEach(n),Zl=o(B,`. 
It has been tested with `),zn=s(B,"A",{href:!0});var Uf=a(zn);ep=o(Uf,"DPRQuestionEncoder"),Uf.forEach(n),tp=o(B," as the "),vs=s(B,"CODE",{});var Vf=a(vs);op=o(Vf,"question_encoder"),Vf.forEach(n),np=o(B," and "),$n=s(B,"A",{href:!0});var Kf=a($n);rp=o(Kf,"BartForConditionalGeneration"),Kf.forEach(n),sp=o(B,` or `),En=s(B,"A",{href:!0});var Yf=a(En);ap=o(Yf,"T5ForConditionalGeneration"),Yf.forEach(n),dp=o(B," as the "),bs=s(B,"CODE",{});var Jf=a(bs);ip=o(Jf,"generator"),Jf.forEach(n),cp=o(B,"."),B.forEach(n),lp=l(H),fo=s(H,"P",{});var Nd=a(fo);pp=o(Nd,"This model inherits from "),jn=s(Nd,"A",{href:!0});var Xf=a(jn);hp=o(Xf,"PreTrainedModel"),Xf.forEach(n),up=o(Nd,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Nd.forEach(n),mp=l(H),vo=s(H,"P",{});var Pd=a(vo);gp=o(Pd,"This model is also a PyTorch "),bo=s(Pd,"A",{href:!0,rel:!0});var Zf=a(bo);_p=o(Zf,"torch.nn.Module"),Zf.forEach(n),fp=o(Pd,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Pd.forEach(n),vp=l(H),te=s(H,"DIV",{class:!0});var Te=a(te);b(To.$$.fragment,Te),bp=l(Te),Oe=s(Te,"P",{});var vr=a(Oe);Tp=o(vr,"The "),Mn=s(vr,"A",{href:!0});var ev=a(Mn);wp=o(ev,"RagSequenceForGeneration"),ev.forEach(n),kp=o(vr," forward method, overrides the "),Ts=s(vr,"CODE",{});var tv=a(Ts);qp=o(tv,"__call__"),tv.forEach(n),yp=o(vr," special method."),vr.forEach(n),xp=l(Te),b(Tt.$$.fragment,Te),Rp=l(Te),ws=s(Te,"P",{});var ov=a(ws);Fp=o(ov,"Example:"),ov.forEach(n),zp=l(Te),b(wo.$$.fragment,Te),Te.forEach(n),$p=l(H),wt=s(H,"DIV",{class:!0});var Cd=a(wt);b(ko.$$.fragment,Cd),Ep=l(Cd),qo=s(Cd,"P",{});var Sd=a(qo);jp=o(Sd,"Implements RAG sequence \u201Cthorough\u201D decoding. Read the "),Gn=s(Sd,"A",{href:!0});var nv=a(Gn);Mp=o(nv,"generate()"),nv.forEach(n),Gp=o(Sd,"`\ndocumentation for more information on how to set other generate input parameters."),Sd.forEach(n),Cd.forEach(n),H.forEach(n),ad=l(d),We=s(d,"H2",{class:!0});var Dd=a(We);kt=s(Dd,"A",{id:!0,class:!0,href:!0});var rv=a(kt);ks=s(rv,"SPAN",{});var sv=a(ks);b(yo.$$.fragment,sv),sv.forEach(n),rv.forEach(n),Ap=l(Dd),qs=s(Dd,"SPAN",{});var av=a(qs);Np=o(av,"RagTokenForGeneration"),av.forEach(n),Dd.forEach(n),dd=l(d),j=s(d,"DIV",{class:!0});var Q=a(j);b(xo.$$.fragment,Q),Pp=l(Q),He=s(Q,"P",{});var br=a(He);Cp=o(br,"The "),An=s(br,"A",{href:!0});var dv=a(An);Sp=o(dv,"RagTokenForGeneration"),dv.forEach(n),Dp=o(br," forward method, overrides the "),ys=s(br,"CODE",{});var iv=a(ys);Lp=o(iv,"__call__"),iv.forEach(n),Ip=o(br," special method."),br.forEach(n),Op=l(Q),b(qt.$$.fragment,Q),Wp=l(Q),xs=s(Q,"P",{});var cv=a(xs);Hp=o(cv,"A RAG-token model implementation. It performs RAG-token specific marginalization in the forward pass."),cv.forEach(n),Bp=l(Q),Rs=s(Q,"P",{});var lv=a(Rs);Qp=o(lv,`RAG is a seq2seq model which encapsulates two core components: a question encoder and a generator. During a forward pass, we encode the input with the question encoder and pass it to the retriever to extract relevant context documents. The documents are then prepended to the input. 
Such contextualized inputs is passed to the generator.`),lv.forEach(n),Up=l(Q),le=s(Q,"P",{});var we=a(le);Vp=o(we,"The question encoder can be any "),Fs=s(we,"EM",{});var pv=a(Fs);Kp=o(pv,"autoencoding"),pv.forEach(n),Yp=o(we," model, preferably "),Nn=s(we,"A",{href:!0});var hv=a(Nn);Jp=o(hv,"DPRQuestionEncoder"),hv.forEach(n),Xp=o(we,`, and the generator can be any `),zs=s(we,"EM",{});var uv=a(zs);Zp=o(uv,"seq2seq"),uv.forEach(n),eh=o(we," model, preferably "),Pn=s(we,"A",{href:!0});var mv=a(Pn);th=o(mv,"BartForConditionalGeneration"),mv.forEach(n),oh=o(we,"."),we.forEach(n),nh=l(Q),N=s(Q,"P",{});var U=a(N);rh=o(U,"The model can be initialized with a "),Cn=s(U,"A",{href:!0});var gv=a(Cn);sh=o(gv,"RagRetriever"),gv.forEach(n),ah=o(U,` for end-to-end generation or used in combination with the outputs of a retriever in multiple steps---see examples for more details. The model is compatible any `),$s=s(U,"EM",{});var _v=a($s);dh=o(_v,"autoencoding"),_v.forEach(n),ih=o(U," model as the "),Es=s(U,"CODE",{});var fv=a(Es);ch=o(fv,"question_encoder"),fv.forEach(n),lh=o(U," and any "),js=s(U,"EM",{});var vv=a(js);ph=o(vv,"seq2seq"),vv.forEach(n),hh=o(U," model with language model head as the "),Ms=s(U,"CODE",{});var bv=a(Ms);uh=o(bv,"generator"),bv.forEach(n),mh=o(U,`. It has been tested with `),Sn=s(U,"A",{href:!0});var Tv=a(Sn);gh=o(Tv,"DPRQuestionEncoder"),Tv.forEach(n),_h=o(U," as the "),Gs=s(U,"CODE",{});var wv=a(Gs);fh=o(wv,"question_encoder"),wv.forEach(n),vh=o(U," and "),Dn=s(U,"A",{href:!0});var kv=a(Dn);bh=o(kv,"BartForConditionalGeneration"),kv.forEach(n),Th=o(U,` or `),Ln=s(U,"A",{href:!0});var qv=a(Ln);wh=o(qv,"T5ForConditionalGeneration"),qv.forEach(n),kh=o(U," as the "),As=s(U,"CODE",{});var yv=a(As);qh=o(yv,"generator"),yv.forEach(n),yh=o(U,"."),U.forEach(n),xh=l(Q),Ro=s(Q,"P",{});var Ld=a(Ro);Rh=o(Ld,"This model inherits from "),In=s(Ld,"A",{href:!0});var xv=a(In);Fh=o(xv,"PreTrainedModel"),xv.forEach(n),zh=o(Ld,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Ld.forEach(n),$h=l(Q),Fo=s(Q,"P",{});var Id=a(Fo);Eh=o(Id,"This model is also a PyTorch "),zo=s(Id,"A",{href:!0,rel:!0});var Rv=a(zo);jh=o(Rv,"torch.nn.Module"),Rv.forEach(n),Mh=o(Id,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Id.forEach(n),Gh=l(Q),oe=s(Q,"DIV",{class:!0});var ke=a(oe);b($o.$$.fragment,ke),Ah=l(ke),Be=s(ke,"P",{});var Tr=a(Be);Nh=o(Tr,"The "),On=s(Tr,"A",{href:!0});var Fv=a(On);Ph=o(Fv,"RagTokenForGeneration"),Fv.forEach(n),Ch=o(Tr," forward method, overrides the "),Ns=s(Tr,"CODE",{});var zv=a(Ns);Sh=o(zv,"__call__"),zv.forEach(n),Dh=o(Tr," special method."),Tr.forEach(n),Lh=l(ke),b(yt.$$.fragment,ke),Ih=l(ke),Ps=s(ke,"P",{});var $v=a(Ps);Oh=o($v,"Example:"),$v.forEach(n),Wh=l(ke),b(Eo.$$.fragment,ke),ke.forEach(n),Hh=l(Q),xt=s(Q,"DIV",{class:!0});var Od=a(xt);b(jo.$$.fragment,Od),Bh=l(Od),Cs=s(Od,"P",{});var Ev=a(Cs);Qh=o(Ev,"Implements RAG token decoding."),Ev.forEach(n),Od.forEach(n),Q.forEach(n),id=l(d),Qe=s(d,"H2",{class:!0});var Wd=a(Qe);Rt=s(Wd,"A",{id:!0,class:!0,href:!0});var jv=a(Rt);Ss=s(jv,"SPAN",{});var Mv=a(Ss);b(Mo.$$.fragment,Mv),Mv.forEach(n),jv.forEach(n),Uh=l(Wd),Ds=s(Wd,"SPAN",{});var Gv=a(Ds);Vh=o(Gv,"TFRagModel"),Gv.forEach(n),Wd.forEach(n),cd=l(d),P=s(d,"DIV",{class:!0});var V=a(P);b(Go.$$.fragment,V),Kh=l(V),Ue=s(V,"P",{});var wr=a(Ue);Yh=o(wr,"The "),Wn=s(wr,"A",{href:!0});var Av=a(Wn);Jh=o(Av,"TFRagModel"),Av.forEach(n),Xh=o(wr," forward method, overrides the "),Ls=s(wr,"CODE",{});var Nv=a(Ls);Zh=o(Nv,"__call__"),Nv.forEach(n),eu=o(wr," special method."),wr.forEach(n),tu=l(V),b(Ft.$$.fragment,V),ou=l(V),Is=s(V,"P",{});var Pv=a(Is);nu=o(Pv,`RAG is a sequence-to-sequence model which encapsulates two core components: a question encoder and a generator. During a forward pass, we encode the input with the question encoder and pass it to the retriever to extract relevant context documents. The documents are then prepended to the input. Such contextualized inputs is passed to the generator.`),Pv.forEach(n),ru=l(V),pe=s(V,"P",{});var qe=a(pe);su=o(qe,"The question encoder can be any "),Os=s(qe,"EM",{});var Cv=a(Os);au=o(Cv,"autoencoding"),Cv.forEach(n),du=o(qe," model, preferably "),Hn=s(qe,"A",{href:!0});var Sv=a(Hn);iu=o(Sv,"TFDPRQuestionEncoder"),Sv.forEach(n),cu=o(qe,`, and the generator can be any `),Ws=s(qe,"EM",{});var Dv=a(Ws);lu=o(Dv,"seq2seq"),Dv.forEach(n),pu=o(qe," model, preferably "),Bn=s(qe,"A",{href:!0});var Lv=a(Bn);hu=o(Lv,"TFBartForConditionalGeneration"),Lv.forEach(n),uu=o(qe,"."),qe.forEach(n),mu=l(V),L=s(V,"P",{});var K=a(L);gu=o(K,"The model can be initialized with a "),Qn=s(K,"A",{href:!0});var Iv=a(Qn);_u=o(Iv,"RagRetriever"),Iv.forEach(n),fu=o(K,` for end-to-end generation or used in combination with the outputs of a retriever in multiple steps---see examples for more details. The model is compatible any `),Hs=s(K,"EM",{});var Ov=a(Hs);vu=o(Ov,"autoencoding"),Ov.forEach(n),bu=o(K," model as the "),Bs=s(K,"CODE",{});var Wv=a(Bs);Tu=o(Wv,"question_encoder"),Wv.forEach(n),wu=o(K," and any "),Qs=s(K,"EM",{});var Hv=a(Qs);ku=o(Hv,"seq2seq"),Hv.forEach(n),qu=o(K," model with language model head as the "),Us=s(K,"CODE",{});var Bv=a(Us);yu=o(Bv,"generator"),Bv.forEach(n),xu=o(K,`. 
It has been tested with `),Un=s(K,"A",{href:!0});var Qv=a(Un);Ru=o(Qv,"TFDPRQuestionEncoder"),Qv.forEach(n),Fu=o(K," as the "),Vs=s(K,"CODE",{});var Uv=a(Vs);zu=o(Uv,"question_encoder"),Uv.forEach(n),$u=o(K," and "),Vn=s(K,"A",{href:!0});var Vv=a(Vn);Eu=o(Vv,"TFBartForConditionalGeneration"),Vv.forEach(n),ju=o(K,` as the `),Ks=s(K,"CODE",{});var Kv=a(Ks);Mu=o(Kv,"generator"),Kv.forEach(n),Gu=o(K,"."),K.forEach(n),Au=l(V),Ao=s(V,"P",{});var Hd=a(Ao);Nu=o(Hd,"This model inherits from "),Kn=s(Hd,"A",{href:!0});var Yv=a(Kn);Pu=o(Yv,"TFPreTrainedModel"),Yv.forEach(n),Cu=o(Hd,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Hd.forEach(n),Su=l(V),No=s(V,"P",{});var Bd=a(No);Du=o(Bd,"This model is also a Tensorflow "),Po=s(Bd,"A",{href:!0,rel:!0});var Jv=a(Po);Lu=o(Jv,"tf.keras.Model"),Jv.forEach(n),Iu=o(Bd,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Bd.forEach(n),Ou=l(V),Ys=s(V,"P",{});var Xv=a(Ys);Wu=o(Xv,`The model is in a developing state as it is now fully supports in eager-mode only, and may not be exported in SavedModel format.`),Xv.forEach(n),Hu=l(V),ne=s(V,"DIV",{class:!0});var ye=a(ne);b(Co.$$.fragment,ye),Bu=l(ye),Ve=s(ye,"P",{});var kr=a(Ve);Qu=o(kr,"The "),Yn=s(kr,"A",{href:!0});var Zv=a(Yn);Uu=o(Zv,"TFRagModel"),Zv.forEach(n),Vu=o(kr," forward method, overrides the "),Js=s(kr,"CODE",{});var eb=a(Js);Ku=o(eb,"__call__"),eb.forEach(n),Yu=o(kr," special method."),kr.forEach(n),Ju=l(ye),b(zt.$$.fragment,ye),Xu=l(ye),Xs=s(ye,"P",{});var tb=a(Xs);Zu=o(tb,"Example:"),tb.forEach(n),em=l(ye),b(So.$$.fragment,ye),ye.forEach(n),V.forEach(n),ld=l(d),Ke=s(d,"H2",{class:!0});var Qd=a(Ke);$t=s(Qd,"A",{id:!0,class:!0,href:!0});var ob=a($t);Zs=s(ob,"SPAN",{});var nb=a(Zs);b(Do.$$.fragment,nb),nb.forEach(n),ob.forEach(n),tm=l(Qd),ea=s(Qd,"SPAN",{});var rb=a(ea);om=o(rb,"TFRagSequenceForGeneration"),rb.forEach(n),Qd.forEach(n),pd=l(d),F=s(d,"DIV",{class:!0});var C=a(F);b(Lo.$$.fragment,C),nm=l(C),Ye=s(C,"P",{});var qr=a(Ye);rm=o(qr,"The "),Jn=s(qr,"A",{href:!0});var sb=a(Jn);sm=o(sb,"TFRagSequenceForGeneration"),sb.forEach(n),am=o(qr," forward method, overrides the "),ta=s(qr,"CODE",{});var ab=a(ta);dm=o(ab,"__call__"),ab.forEach(n),im=o(qr," special method."),qr.forEach(n),cm=l(C),b(Et.$$.fragment,C),lm=l(C),oa=s(C,"P",{});var db=a(oa);pm=o(db,"A TF RAG-sequence model implementation. It performs RAG-sequence specific marginalization in the forward pass."),db.forEach(n),hm=l(C),na=s(C,"P",{});var ib=a(na);um=o(ib,`RAG is a sequence-to-sequence model which encapsulates two core components: a question encoder and a generator. During a forward pass, we encode the input with the question encoder and pass it to the retriever to extract relevant context documents. The documents are then prepended to the input. 
Such contextualized inputs is passed to the generator.`),ib.forEach(n),mm=l(C),he=s(C,"P",{});var xe=a(he);gm=o(xe,"The question encoder can be any "),ra=s(xe,"EM",{});var cb=a(ra);_m=o(cb,"autoencoding"),cb.forEach(n),fm=o(xe," model, preferably "),Xn=s(xe,"A",{href:!0});var lb=a(Xn);vm=o(lb,"TFDPRQuestionEncoder"),lb.forEach(n),bm=o(xe,`, and the generator can be any `),sa=s(xe,"EM",{});var pb=a(sa);Tm=o(pb,"seq2seq"),pb.forEach(n),wm=o(xe," model, preferably "),Zn=s(xe,"A",{href:!0});var hb=a(Zn);km=o(hb,"TFBartForConditionalGeneration"),hb.forEach(n),qm=o(xe,"."),xe.forEach(n),ym=l(C),I=s(C,"P",{});var Y=a(I);xm=o(Y,"The model can be initialized with a "),er=s(Y,"A",{href:!0});var ub=a(er);Rm=o(ub,"RagRetriever"),ub.forEach(n),Fm=o(Y,` for end-to-end generation or used in combination with the outputs of a retriever in multiple steps---see examples for more details. The model is compatible any `),aa=s(Y,"EM",{});var mb=a(aa);zm=o(mb,"autoencoding"),mb.forEach(n),$m=o(Y," model as the "),da=s(Y,"CODE",{});var gb=a(da);Em=o(gb,"question_encoder"),gb.forEach(n),jm=o(Y," and any "),ia=s(Y,"EM",{});var _b=a(ia);Mm=o(_b,"seq2seq"),_b.forEach(n),Gm=o(Y," model with language model head as the "),ca=s(Y,"CODE",{});var fb=a(ca);Am=o(fb,"generator"),fb.forEach(n),Nm=o(Y,`. It has been tested with `),tr=s(Y,"A",{href:!0});var vb=a(tr);Pm=o(vb,"TFDPRQuestionEncoder"),vb.forEach(n),Cm=o(Y," as the "),la=s(Y,"CODE",{});var bb=a(la);Sm=o(bb,"question_encoder"),bb.forEach(n),Dm=o(Y," and "),or=s(Y,"A",{href:!0});var Tb=a(or);Lm=o(Tb,"TFBartForConditionalGeneration"),Tb.forEach(n),Im=o(Y,` as the `),pa=s(Y,"CODE",{});var wb=a(pa);Om=o(wb,"generator"),wb.forEach(n),Wm=o(Y,"."),Y.forEach(n),Hm=l(C),Io=s(C,"P",{});var Ud=a(Io);Bm=o(Ud,"This model inherits from "),nr=s(Ud,"A",{href:!0});var kb=a(nr);Qm=o(kb,"TFPreTrainedModel"),kb.forEach(n),Um=o(Ud,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Ud.forEach(n),Vm=l(C),Oo=s(C,"P",{});var Vd=a(Oo);Km=o(Vd,"This model is also a Tensorflow "),Wo=s(Vd,"A",{href:!0,rel:!0});var qb=a(Wo);Ym=o(qb,"tf.keras.Model"),qb.forEach(n),Jm=o(Vd,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Vd.forEach(n),Xm=l(C),ha=s(C,"P",{});var yb=a(ha);Zm=o(yb,`The model is in a developing state as it is now fully supports in eager-mode only, and may not be exported in SavedModel format.`),yb.forEach(n),eg=l(C),re=s(C,"DIV",{class:!0});var Re=a(re);b(Ho.$$.fragment,Re),tg=l(Re),Je=s(Re,"P",{});var yr=a(Je);og=o(yr,"The "),rr=s(yr,"A",{href:!0});var xb=a(rr);ng=o(xb,"TFRagSequenceForGeneration"),xb.forEach(n),rg=o(yr," forward method, overrides the "),ua=s(yr,"CODE",{});var Rb=a(ua);sg=o(Rb,"__call__"),Rb.forEach(n),ag=o(yr," special method."),yr.forEach(n),dg=l(Re),b(jt.$$.fragment,Re),ig=l(Re),ma=s(Re,"P",{});var Fb=a(ma);cg=o(Fb,"Example:"),Fb.forEach(n),lg=l(Re),b(Bo.$$.fragment,Re),Re.forEach(n),pg=l(C),Mt=s(C,"DIV",{class:!0});var Kd=a(Mt);b(Qo.$$.fragment,Kd),hg=l(Kd),Uo=s(Kd,"P",{});var Yd=a(Uo);ug=o(Yd,"Implements RAG sequence \u201Cthorough\u201D decoding. 
Read the "),sr=s(Yd,"A",{href:!0});var zb=a(sr);mg=o(zb,"generate()"),zb.forEach(n),gg=o(Yd,"`\ndocumentation for more information on how to set other generate input parameters"),Yd.forEach(n),Kd.forEach(n),C.forEach(n),hd=l(d),Xe=s(d,"H2",{class:!0});var Jd=a(Xe);Gt=s(Jd,"A",{id:!0,class:!0,href:!0});var $b=a(Gt);ga=s($b,"SPAN",{});var Eb=a(ga);b(Vo.$$.fragment,Eb),Eb.forEach(n),$b.forEach(n),_g=l(Jd),_a=s(Jd,"SPAN",{});var jb=a(_a);fg=o(jb,"TFRagTokenForGeneration"),jb.forEach(n),Jd.forEach(n),ud=l(d),z=s(d,"DIV",{class:!0});var S=a(z);b(Ko.$$.fragment,S),vg=l(S),Ze=s(S,"P",{});var xr=a(Ze);bg=o(xr,"The "),ar=s(xr,"A",{href:!0});var Mb=a(ar);Tg=o(Mb,"TFRagTokenForGeneration"),Mb.forEach(n),wg=o(xr," forward method, overrides the "),fa=s(xr,"CODE",{});var Gb=a(fa);kg=o(Gb,"__call__"),Gb.forEach(n),qg=o(xr," special method."),xr.forEach(n),yg=l(S),b(At.$$.fragment,S),xg=l(S),va=s(S,"P",{});var Ab=a(va);Rg=o(Ab,"A TF RAG-token model implementation. It performs RAG-token specific marginalization in the forward pass."),Ab.forEach(n),Fg=l(S),ba=s(S,"P",{});var Nb=a(ba);zg=o(Nb,`RAG is a sequence-to-sequence model which encapsulates two core components: a question encoder and a generator. During a forward pass, we encode the input with the question encoder and pass it to the retriever to extract relevant context documents. The documents are then prepended to the input. Such contextualized inputs is passed to the generator.`),Nb.forEach(n),$g=l(S),ue=s(S,"P",{});var Fe=a(ue);Eg=o(Fe,"The question encoder can be any "),Ta=s(Fe,"EM",{});var Pb=a(Ta);jg=o(Pb,"autoencoding"),Pb.forEach(n),Mg=o(Fe," model, preferably "),dr=s(Fe,"A",{href:!0});var Cb=a(dr);Gg=o(Cb,"TFDPRQuestionEncoder"),Cb.forEach(n),Ag=o(Fe,`, and the generator can be any `),wa=s(Fe,"EM",{});var Sb=a(wa);Ng=o(Sb,"seq2seq"),Sb.forEach(n),Pg=o(Fe," model, preferably "),ir=s(Fe,"A",{href:!0});var Db=a(ir);Cg=o(Db,"TFBartForConditionalGeneration"),Db.forEach(n),Sg=o(Fe,"."),Fe.forEach(n),Dg=l(S),O=s(S,"P",{});var J=a(O);Lg=o(J,"The model can be initialized with a "),cr=s(J,"A",{href:!0});var Lb=a(cr);Ig=o(Lb,"RagRetriever"),Lb.forEach(n),Og=o(J,` for end-to-end generation or used in combination with the outputs of a retriever in multiple steps---see examples for more details. The model is compatible any `),ka=s(J,"EM",{});var Ib=a(ka);Wg=o(Ib,"autoencoding"),Ib.forEach(n),Hg=o(J," model as the "),qa=s(J,"CODE",{});var Ob=a(qa);Bg=o(Ob,"question_encoder"),Ob.forEach(n),Qg=o(J," and any "),ya=s(J,"EM",{});var Wb=a(ya);Ug=o(Wb,"seq2seq"),Wb.forEach(n),Vg=o(J," model with language model head as the "),xa=s(J,"CODE",{});var Hb=a(xa);Kg=o(Hb,"generator"),Hb.forEach(n),Yg=o(J,`. It has been tested with `),lr=s(J,"A",{href:!0});var Bb=a(lr);Jg=o(Bb,"TFDPRQuestionEncoder"),Bb.forEach(n),Xg=o(J," as the "),Ra=s(J,"CODE",{});var Qb=a(Ra);Zg=o(Qb,"question_encoder"),Qb.forEach(n),e_=o(J," and "),pr=s(J,"A",{href:!0});var Ub=a(pr);t_=o(Ub,"TFBartForConditionalGeneration"),Ub.forEach(n),o_=o(J,` as the `),Fa=s(J,"CODE",{});var Vb=a(Fa);n_=o(Vb,"generator"),Vb.forEach(n),r_=o(J,"."),J.forEach(n),s_=l(S),Yo=s(S,"P",{});var Xd=a(Yo);a_=o(Xd,"This model inherits from "),hr=s(Xd,"A",{href:!0});var Kb=a(hr);d_=o(Kb,"TFPreTrainedModel"),Kb.forEach(n),i_=o(Xd,`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Xd.forEach(n),c_=l(S),Jo=s(S,"P",{});var Zd=a(Jo);l_=o(Zd,"This model is also a Tensorflow "),Xo=s(Zd,"A",{href:!0,rel:!0});var Yb=a(Xo);p_=o(Yb,"tf.keras.Model"),Yb.forEach(n),h_=o(Zd,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Zd.forEach(n),u_=l(S),za=s(S,"P",{});var Jb=a(za);m_=o(Jb,`The model is in a developing state as it is now fully supports in eager-mode only, and may not be exported in SavedModel format.`),Jb.forEach(n),g_=l(S),se=s(S,"DIV",{class:!0});var ze=a(se);b(Zo.$$.fragment,ze),__=l(ze),et=s(ze,"P",{});var Rr=a(et);f_=o(Rr,"The "),ur=s(Rr,"A",{href:!0});var Xb=a(ur);v_=o(Xb,"TFRagTokenForGeneration"),Xb.forEach(n),b_=o(Rr," forward method, overrides the "),$a=s(Rr,"CODE",{});var Zb=a($a);T_=o(Zb,"__call__"),Zb.forEach(n),w_=o(Rr," special method."),Rr.forEach(n),k_=l(ze),b(Nt.$$.fragment,ze),q_=l(ze),Ea=s(ze,"P",{});var eT=a(Ea);y_=o(eT,"Example:"),eT.forEach(n),x_=l(ze),b(en.$$.fragment,ze),ze.forEach(n),R_=l(S),Pt=s(S,"DIV",{class:!0});var ei=a(Pt);b(tn.$$.fragment,ei),F_=l(ei),ja=s(ei,"P",{});var tT=a(ja);z_=o(tT,"Implements TFRAG token decoding."),tT.forEach(n),ei.forEach(n),S.forEach(n),this.h()},h(){i(p,"name","hf:doc:metadata"),i(p,"content",JSON.stringify(bT)),i(f,"id","rag"),i(f,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),i(f,"href","#rag"),i(u,"class","relative group"),i(ot,"id","overview"),i(ot,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),i(ot,"href","#overview"),i($e,"class","relative group"),i(Dt,"href","https://arxiv.org/abs/2005.11401"),i(Dt,"rel","nofollow"),i(Lt,"href","https://huggingface.co/ola13"),i(Lt,"rel","nofollow"),i(st,"id","transformers.RagConfig"),i(st,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),i(st,"href","#transformers.RagConfig"),i(Ee,"class","relative group"),i(ln,"href","/docs/transformers/pr_16143/en/model_doc/rag#transformers.RagConfig"),i(pn,"href","/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig"),i(hn,"href","/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig"),i(un,"href","/docs/transformers/pr_16143/en/model_doc/encoder-decoder#transformers.EncoderDecoderConfig"),i(at,"class","docstring"),i(mn,"href","/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig.to_dict"),i(dt,"class","docstring"),i(de,"class","docstring"),i(it,"id","transformers.RagTokenizer"),i(it,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),i(it,"href","#transformers.RagTokenizer"),i(je,"class","relative group"),i(ct,"class","docstring"),i(Me,"class","docstring"),i(lt,"id","transformers.models.rag.modeling_rag.RetrievAugLMMarginOutput"),i(lt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 
with-hover:group-hover:opacity-100 with-hover:right-full"),i(lt,"href","#transformers.models.rag.modeling_rag.RetrievAugLMMarginOutput"),i(Ge,"class","relative group"),i(Ae,"class","docstring"),i(Xt,"class","docstring"),i(pt,"id","transformers.RagRetriever"),i(pt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),i(pt,"href","#transformers.RagRetriever"),i(Ne,"class","relative group"),i(ht,"class","docstring"),i(ut,"class","docstring"),i(mt,"class","docstring"),i(X,"class","docstring"),i(gt,"id","transformers.RagModel"),i(gt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),i(gt,"href","#transformers.RagModel"),i(Ce,"class","relative group"),i(gn,"href","/docs/transformers/pr_16143/en/model_doc/rag#transformers.RagModel"),i(_n,"href","/docs/transformers/pr_16143/en/model_doc/dpr#transformers.DPRQuestionEncoder"),i(fn,"href","/docs/transformers/pr_16143/en/model_doc/bart#transformers.BartForConditionalGeneration"),i(vn,"href","/docs/transformers/pr_16143/en/model_doc/rag#transformers.RagRetriever"),i(bn,"href","/docs/transformers/pr_16143/en/model_doc/dpr#transformers.DPRQuestionEncoder"),i(Tn,"href","/docs/transformers/pr_16143/en/model_doc/bart#transformers.BartForConditionalGeneration"),i(wn,"href","/docs/transformers/pr_16143/en/model_doc/t5#transformers.T5ForConditionalGeneration"),i(kn,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel"),i(ho,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),i(ho,"rel","nofollow"),i(qn,"href","/docs/transformers/pr_16143/en/model_doc/rag#transformers.RagModel"),i(ee,"class","docstring"),i(D,"class","docstring"),i(vt,"id","transformers.RagSequenceForGeneration"),i(vt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),i(vt,"href","#transformers.RagSequenceForGeneration"),i(Le,"class","relative group"),i(yn,"href","/docs/transformers/pr_16143/en/model_doc/rag#transformers.RagSequenceForGeneration"),i(xn,"href","/docs/transformers/pr_16143/en/model_doc/dpr#transformers.DPRQuestionEncoder"),i(Rn,"href","/docs/transformers/pr_16143/en/model_doc/bart#transformers.BartForConditionalGeneration"),i(Fn,"href","/docs/transformers/pr_16143/en/model_doc/rag#transformers.RagRetriever"),i(zn,"href","/docs/transformers/pr_16143/en/model_doc/dpr#transformers.DPRQuestionEncoder"),i($n,"href","/docs/transformers/pr_16143/en/model_doc/bart#transformers.BartForConditionalGeneration"),i(En,"href","/docs/transformers/pr_16143/en/model_doc/t5#transformers.T5ForConditionalGeneration"),i(jn,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel"),i(bo,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),i(bo,"rel","nofollow"),i(Mn,"href","/docs/transformers/pr_16143/en/model_doc/rag#transformers.RagSequenceForGeneration"),i(te,"class","docstring"),i(Gn,"href","/docs/transformers/pr_16143/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.generate"),i(wt,"class","docstring"),i(E,"class","docstring"),i(kt,"id","transformers.RagTokenForGeneration"),i(kt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 
with-hover:group-hover:opacity-100 with-hover:right-full"),i(kt,"href","#transformers.RagTokenForGeneration"),i(We,"class","relative group"),i(An,"href","/docs/transformers/pr_16143/en/model_doc/rag#transformers.RagTokenForGeneration"),i(Nn,"href","/docs/transformers/pr_16143/en/model_doc/dpr#transformers.DPRQuestionEncoder"),i(Pn,"href","/docs/transformers/pr_16143/en/model_doc/bart#transformers.BartForConditionalGeneration"),i(Cn,"href","/docs/transformers/pr_16143/en/model_doc/rag#transformers.RagRetriever"),i(Sn,"href","/docs/transformers/pr_16143/en/model_doc/dpr#transformers.DPRQuestionEncoder"),i(Dn,"href","/docs/transformers/pr_16143/en/model_doc/bart#transformers.BartForConditionalGeneration"),i(Ln,"href","/docs/transformers/pr_16143/en/model_doc/t5#transformers.T5ForConditionalGeneration"),i(In,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel"),i(zo,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),i(zo,"rel","nofollow"),i(On,"href","/docs/transformers/pr_16143/en/model_doc/rag#transformers.RagTokenForGeneration"),i(oe,"class","docstring"),i(xt,"class","docstring"),i(j,"class","docstring"),i(Rt,"id","transformers.TFRagModel"),i(Rt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),i(Rt,"href","#transformers.TFRagModel"),i(Qe,"class","relative group"),i(Wn,"href","/docs/transformers/pr_16143/en/model_doc/rag#transformers.TFRagModel"),i(Hn,"href","/docs/transformers/pr_16143/en/model_doc/dpr#transformers.TFDPRQuestionEncoder"),i(Bn,"href","/docs/transformers/pr_16143/en/model_doc/bart#transformers.TFBartForConditionalGeneration"),i(Qn,"href","/docs/transformers/pr_16143/en/model_doc/rag#transformers.RagRetriever"),i(Un,"href","/docs/transformers/pr_16143/en/model_doc/dpr#transformers.TFDPRQuestionEncoder"),i(Vn,"href","/docs/transformers/pr_16143/en/model_doc/bart#transformers.TFBartForConditionalGeneration"),i(Kn,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel"),i(Po,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),i(Po,"rel","nofollow"),i(Yn,"href","/docs/transformers/pr_16143/en/model_doc/rag#transformers.TFRagModel"),i(ne,"class","docstring"),i(P,"class","docstring"),i($t,"id","transformers.TFRagSequenceForGeneration"),i($t,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),i($t,"href","#transformers.TFRagSequenceForGeneration"),i(Ke,"class","relative 
group"),i(Jn,"href","/docs/transformers/pr_16143/en/model_doc/rag#transformers.TFRagSequenceForGeneration"),i(Xn,"href","/docs/transformers/pr_16143/en/model_doc/dpr#transformers.TFDPRQuestionEncoder"),i(Zn,"href","/docs/transformers/pr_16143/en/model_doc/bart#transformers.TFBartForConditionalGeneration"),i(er,"href","/docs/transformers/pr_16143/en/model_doc/rag#transformers.RagRetriever"),i(tr,"href","/docs/transformers/pr_16143/en/model_doc/dpr#transformers.TFDPRQuestionEncoder"),i(or,"href","/docs/transformers/pr_16143/en/model_doc/bart#transformers.TFBartForConditionalGeneration"),i(nr,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel"),i(Wo,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),i(Wo,"rel","nofollow"),i(rr,"href","/docs/transformers/pr_16143/en/model_doc/rag#transformers.TFRagSequenceForGeneration"),i(re,"class","docstring"),i(sr,"href","/docs/transformers/pr_16143/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.generate"),i(Mt,"class","docstring"),i(F,"class","docstring"),i(Gt,"id","transformers.TFRagTokenForGeneration"),i(Gt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),i(Gt,"href","#transformers.TFRagTokenForGeneration"),i(Xe,"class","relative group"),i(ar,"href","/docs/transformers/pr_16143/en/model_doc/rag#transformers.TFRagTokenForGeneration"),i(dr,"href","/docs/transformers/pr_16143/en/model_doc/dpr#transformers.TFDPRQuestionEncoder"),i(ir,"href","/docs/transformers/pr_16143/en/model_doc/bart#transformers.TFBartForConditionalGeneration"),i(cr,"href","/docs/transformers/pr_16143/en/model_doc/rag#transformers.RagRetriever"),i(lr,"href","/docs/transformers/pr_16143/en/model_doc/dpr#transformers.TFDPRQuestionEncoder"),i(pr,"href","/docs/transformers/pr_16143/en/model_doc/bart#transformers.TFBartForConditionalGeneration"),i(hr,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel"),i(Xo,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),i(Xo,"rel","nofollow"),i(ur,"href","/docs/transformers/pr_16143/en/model_doc/rag#transformers.TFRagTokenForGeneration"),i(se,"class","docstring"),i(Pt,"class","docstring"),i(z,"class","docstring")},m(d,m){e(document.head,p),_(d,y,m),_(d,u,m),e(u,f),e(f,x),T(g,x,null),e(u,h),e(u,R),e(R,ti),_(d,Ia,m),_(d,$e,m),e($e,ot),e(ot,Fr),T(St,Fr,null),e($e,oi),e($e,zr),e(zr,ni),_(d,Oa,m),_(d,an,m),e(an,ri),_(d,Wa,m),_(d,nt,m),e(nt,si),e(nt,Dt),e(Dt,ai),e(nt,di),_(d,Ha,m),_(d,dn,m),e(dn,ii),_(d,Ba,m),_(d,cn,m),e(cn,$r),e($r,ci),_(d,Qa,m),_(d,rt,m),e(rt,li),e(rt,Lt),e(Lt,pi),e(rt,hi),_(d,Ua,m),_(d,Ee,m),e(Ee,st),e(st,Er),T(It,Er,null),e(Ee,ui),e(Ee,jr),e(jr,mi),_(d,Va,m),_(d,de,m),T(Ot,de,null),e(de,gi),e(de,me),e(me,ln),e(ln,_i),e(me,fi),e(me,Mr),e(Mr,vi),e(me,bi),e(me,pn),e(pn,Ti),e(me,wi),e(me,hn),e(hn,ki),e(me,qi),e(de,yi),e(de,at),T(Wt,at,null),e(at,xi),e(at,Ht),e(Ht,Ri),e(Ht,un),e(un,Fi),e(Ht,zi),e(de,$i),e(de,dt),T(Bt,dt,null),e(dt,Ei),e(dt,Qt),e(Qt,ji),e(Qt,mn),e(mn,Mi),e(Qt,Gi),_(d,Ka,m),_(d,je,m),e(je,it),e(it,Gr),T(Ut,Gr,null),e(je,Ai),e(je,Ar),e(Ar,Ni),_(d,Ya,m),_(d,Me,m),T(Vt,Me,null),e(Me,Pi),e(Me,ct),T(Kt,ct,null),e(ct,Ci),e(ct,Nr),e(Nr,Si),_(d,Ja,m),_(d,Ge,m),e(Ge,lt),e(lt,Pr),T(Yt,Pr,null),e(Ge,Di),e(Ge,Cr),e(Cr,Li),_(d,Xa,m),_(d,Ae,m),T(Jt,Ae,null),e(Ae,Ii),e(Ae,Sr),e(Sr,Oi),_(d,Za,m),_(d,Xt,m),T(Zt,Xt,null),_(d,ed,m),_(d,Ne,m),e(Ne,pt),e(pt,Dr),T(eo,Dr,null),e(Ne,Wi),e(Ne,
Lr),e(Lr,Hi),_(d,td,m),_(d,X,m),T(to,X,null),e(X,Bi),e(X,Ir),e(Ir,Qi),e(X,Ui),e(X,Or),e(Or,Vi),e(X,Ki),T(oo,X,null),e(X,Yi),e(X,ht),T(no,ht,null),e(ht,Ji),e(ht,Wr),e(Wr,Xi),e(X,Zi),e(X,ut),T(ro,ut,null),e(ut,ec),e(ut,Pe),e(Pe,tc),e(Pe,Hr),e(Hr,oc),e(Pe,nc),e(Pe,Br),e(Br,rc),e(Pe,sc),e(X,ac),e(X,mt),T(so,mt,null),e(mt,dc),e(mt,ao),e(ao,ic),e(ao,Qr),e(Qr,cc),e(ao,lc),_(d,od,m),_(d,Ce,m),e(Ce,gt),e(gt,Ur),T(io,Ur,null),e(Ce,pc),e(Ce,Vr),e(Vr,hc),_(d,nd,m),_(d,D,m),T(co,D,null),e(D,uc),e(D,Se),e(Se,mc),e(Se,gn),e(gn,gc),e(Se,_c),e(Se,Kr),e(Kr,fc),e(Se,vc),e(D,bc),T(_t,D,null),e(D,Tc),e(D,Yr),e(Yr,wc),e(D,kc),e(D,ie),e(ie,qc),e(ie,Jr),e(Jr,yc),e(ie,xc),e(ie,_n),e(_n,Rc),e(ie,Fc),e(ie,Xr),e(Xr,zc),e(ie,$c),e(ie,fn),e(fn,Ec),e(ie,jc),e(D,Mc),e(D,G),e(G,Gc),e(G,vn),e(vn,Ac),e(G,Nc),e(G,Zr),e(Zr,Pc),e(G,Cc),e(G,es),e(es,Sc),e(G,Dc),e(G,ts),e(ts,Lc),e(G,Ic),e(G,os),e(os,Oc),e(G,Wc),e(G,bn),e(bn,Hc),e(G,Bc),e(G,ns),e(ns,Qc),e(G,Uc),e(G,Tn),e(Tn,Vc),e(G,Kc),e(G,wn),e(wn,Yc),e(G,Jc),e(G,rs),e(rs,Xc),e(G,Zc),e(D,el),e(D,lo),e(lo,tl),e(lo,kn),e(kn,ol),e(lo,nl),e(D,rl),e(D,po),e(po,sl),e(po,ho),e(ho,al),e(po,dl),e(D,il),e(D,ee),T(uo,ee,null),e(ee,cl),e(ee,De),e(De,ll),e(De,qn),e(qn,pl),e(De,hl),e(De,ss),e(ss,ul),e(De,ml),e(ee,gl),T(ft,ee,null),e(ee,_l),e(ee,as),e(as,fl),e(ee,vl),T(mo,ee,null),_(d,rd,m),_(d,Le,m),e(Le,vt),e(vt,ds),T(go,ds,null),e(Le,bl),e(Le,is),e(is,Tl),_(d,sd,m),_(d,E,m),T(_o,E,null),e(E,wl),e(E,Ie),e(Ie,kl),e(Ie,yn),e(yn,ql),e(Ie,yl),e(Ie,cs),e(cs,xl),e(Ie,Rl),e(E,Fl),T(bt,E,null),e(E,zl),e(E,ls),e(ls,$l),e(E,El),e(E,ps),e(ps,jl),e(E,Ml),e(E,ce),e(ce,Gl),e(ce,hs),e(hs,Al),e(ce,Nl),e(ce,xn),e(xn,Pl),e(ce,Cl),e(ce,us),e(us,Sl),e(ce,Dl),e(ce,Rn),e(Rn,Ll),e(ce,Il),e(E,Ol),e(E,A),e(A,Wl),e(A,Fn),e(Fn,Hl),e(A,Bl),e(A,ms),e(ms,Ql),e(A,Ul),e(A,gs),e(gs,Vl),e(A,Kl),e(A,_s),e(_s,Yl),e(A,Jl),e(A,fs),e(fs,Xl),e(A,Zl),e(A,zn),e(zn,ep),e(A,tp),e(A,vs),e(vs,op),e(A,np),e(A,$n),e($n,rp),e(A,sp),e(A,En),e(En,ap),e(A,dp),e(A,bs),e(bs,ip),e(A,cp),e(E,lp),e(E,fo),e(fo,pp),e(fo,jn),e(jn,hp),e(fo,up),e(E,mp),e(E,vo),e(vo,gp),e(vo,bo),e(bo,_p),e(vo,fp),e(E,vp),e(E,te),T(To,te,null),e(te,bp),e(te,Oe),e(Oe,Tp),e(Oe,Mn),e(Mn,wp),e(Oe,kp),e(Oe,Ts),e(Ts,qp),e(Oe,yp),e(te,xp),T(Tt,te,null),e(te,Rp),e(te,ws),e(ws,Fp),e(te,zp),T(wo,te,null),e(E,$p),e(E,wt),T(ko,wt,null),e(wt,Ep),e(wt,qo),e(qo,jp),e(qo,Gn),e(Gn,Mp),e(qo,Gp),_(d,ad,m),_(d,We,m),e(We,kt),e(kt,ks),T(yo,ks,null),e(We,Ap),e(We,qs),e(qs,Np),_(d,dd,m),_(d,j,m),T(xo,j,null),e(j,Pp),e(j,He),e(He,Cp),e(He,An),e(An,Sp),e(He,Dp),e(He,ys),e(ys,Lp),e(He,Ip),e(j,Op),T(qt,j,null),e(j,Wp),e(j,xs),e(xs,Hp),e(j,Bp),e(j,Rs),e(Rs,Qp),e(j,Up),e(j,le),e(le,Vp),e(le,Fs),e(Fs,Kp),e(le,Yp),e(le,Nn),e(Nn,Jp),e(le,Xp),e(le,zs),e(zs,Zp),e(le,eh),e(le,Pn),e(Pn,th),e(le,oh),e(j,nh),e(j,N),e(N,rh),e(N,Cn),e(Cn,sh),e(N,ah),e(N,$s),e($s,dh),e(N,ih),e(N,Es),e(Es,ch),e(N,lh),e(N,js),e(js,ph),e(N,hh),e(N,Ms),e(Ms,uh),e(N,mh),e(N,Sn),e(Sn,gh),e(N,_h),e(N,Gs),e(Gs,fh),e(N,vh),e(N,Dn),e(Dn,bh),e(N,Th),e(N,Ln),e(Ln,wh),e(N,kh),e(N,As),e(As,qh),e(N,yh),e(j,xh),e(j,Ro),e(Ro,Rh),e(Ro,In),e(In,Fh),e(Ro,zh),e(j,$h),e(j,Fo),e(Fo,Eh),e(Fo,zo),e(zo,jh),e(Fo,Mh),e(j,Gh),e(j,oe),T($o,oe,null),e(oe,Ah),e(oe,Be),e(Be,Nh),e(Be,On),e(On,Ph),e(Be,Ch),e(Be,Ns),e(Ns,Sh),e(Be,Dh),e(oe,Lh),T(yt,oe,null),e(oe,Ih),e(oe,Ps),e(Ps,Oh),e(oe,Wh),T(Eo,oe,null),e(j,Hh),e(j,xt),T(jo,xt,null),e(xt,Bh),e(xt,Cs),e(Cs,Qh),_(d,id,m),_(d,Qe,m),e(Qe,Rt),e(Rt,Ss),T(Mo,Ss,null),e(Qe,Uh),e(Qe,Ds),e(Ds,Vh),_(d,cd,m),_(d,P,m),T(Go,P,null),e(P,Kh),e(P,Ue),e(Ue,Yh),e(Ue,Wn),e(Wn,Jh),e(Ue,Xh),e(Ue,Ls),e(Ls,Zh),e(Ue,eu),e(P,tu),T(Ft,P,n
ull),e(P,ou),e(P,Is),e(Is,nu),e(P,ru),e(P,pe),e(pe,su),e(pe,Os),e(Os,au),e(pe,du),e(pe,Hn),e(Hn,iu),e(pe,cu),e(pe,Ws),e(Ws,lu),e(pe,pu),e(pe,Bn),e(Bn,hu),e(pe,uu),e(P,mu),e(P,L),e(L,gu),e(L,Qn),e(Qn,_u),e(L,fu),e(L,Hs),e(Hs,vu),e(L,bu),e(L,Bs),e(Bs,Tu),e(L,wu),e(L,Qs),e(Qs,ku),e(L,qu),e(L,Us),e(Us,yu),e(L,xu),e(L,Un),e(Un,Ru),e(L,Fu),e(L,Vs),e(Vs,zu),e(L,$u),e(L,Vn),e(Vn,Eu),e(L,ju),e(L,Ks),e(Ks,Mu),e(L,Gu),e(P,Au),e(P,Ao),e(Ao,Nu),e(Ao,Kn),e(Kn,Pu),e(Ao,Cu),e(P,Su),e(P,No),e(No,Du),e(No,Po),e(Po,Lu),e(No,Iu),e(P,Ou),e(P,Ys),e(Ys,Wu),e(P,Hu),e(P,ne),T(Co,ne,null),e(ne,Bu),e(ne,Ve),e(Ve,Qu),e(Ve,Yn),e(Yn,Uu),e(Ve,Vu),e(Ve,Js),e(Js,Ku),e(Ve,Yu),e(ne,Ju),T(zt,ne,null),e(ne,Xu),e(ne,Xs),e(Xs,Zu),e(ne,em),T(So,ne,null),_(d,ld,m),_(d,Ke,m),e(Ke,$t),e($t,Zs),T(Do,Zs,null),e(Ke,tm),e(Ke,ea),e(ea,om),_(d,pd,m),_(d,F,m),T(Lo,F,null),e(F,nm),e(F,Ye),e(Ye,rm),e(Ye,Jn),e(Jn,sm),e(Ye,am),e(Ye,ta),e(ta,dm),e(Ye,im),e(F,cm),T(Et,F,null),e(F,lm),e(F,oa),e(oa,pm),e(F,hm),e(F,na),e(na,um),e(F,mm),e(F,he),e(he,gm),e(he,ra),e(ra,_m),e(he,fm),e(he,Xn),e(Xn,vm),e(he,bm),e(he,sa),e(sa,Tm),e(he,wm),e(he,Zn),e(Zn,km),e(he,qm),e(F,ym),e(F,I),e(I,xm),e(I,er),e(er,Rm),e(I,Fm),e(I,aa),e(aa,zm),e(I,$m),e(I,da),e(da,Em),e(I,jm),e(I,ia),e(ia,Mm),e(I,Gm),e(I,ca),e(ca,Am),e(I,Nm),e(I,tr),e(tr,Pm),e(I,Cm),e(I,la),e(la,Sm),e(I,Dm),e(I,or),e(or,Lm),e(I,Im),e(I,pa),e(pa,Om),e(I,Wm),e(F,Hm),e(F,Io),e(Io,Bm),e(Io,nr),e(nr,Qm),e(Io,Um),e(F,Vm),e(F,Oo),e(Oo,Km),e(Oo,Wo),e(Wo,Ym),e(Oo,Jm),e(F,Xm),e(F,ha),e(ha,Zm),e(F,eg),e(F,re),T(Ho,re,null),e(re,tg),e(re,Je),e(Je,og),e(Je,rr),e(rr,ng),e(Je,rg),e(Je,ua),e(ua,sg),e(Je,ag),e(re,dg),T(jt,re,null),e(re,ig),e(re,ma),e(ma,cg),e(re,lg),T(Bo,re,null),e(F,pg),e(F,Mt),T(Qo,Mt,null),e(Mt,hg),e(Mt,Uo),e(Uo,ug),e(Uo,sr),e(sr,mg),e(Uo,gg),_(d,hd,m),_(d,Xe,m),e(Xe,Gt),e(Gt,ga),T(Vo,ga,null),e(Xe,_g),e(Xe,_a),e(_a,fg),_(d,ud,m),_(d,z,m),T(Ko,z,null),e(z,vg),e(z,Ze),e(Ze,bg),e(Ze,ar),e(ar,Tg),e(Ze,wg),e(Ze,fa),e(fa,kg),e(Ze,qg),e(z,yg),T(At,z,null),e(z,xg),e(z,va),e(va,Rg),e(z,Fg),e(z,ba),e(ba,zg),e(z,$g),e(z,ue),e(ue,Eg),e(ue,Ta),e(Ta,jg),e(ue,Mg),e(ue,dr),e(dr,Gg),e(ue,Ag),e(ue,wa),e(wa,Ng),e(ue,Pg),e(ue,ir),e(ir,Cg),e(ue,Sg),e(z,Dg),e(z,O),e(O,Lg),e(O,cr),e(cr,Ig),e(O,Og),e(O,ka),e(ka,Wg),e(O,Hg),e(O,qa),e(qa,Bg),e(O,Qg),e(O,ya),e(ya,Ug),e(O,Vg),e(O,xa),e(xa,Kg),e(O,Yg),e(O,lr),e(lr,Jg),e(O,Xg),e(O,Ra),e(Ra,Zg),e(O,e_),e(O,pr),e(pr,t_),e(O,o_),e(O,Fa),e(Fa,n_),e(O,r_),e(z,s_),e(z,Yo),e(Yo,a_),e(Yo,hr),e(hr,d_),e(Yo,i_),e(z,c_),e(z,Jo),e(Jo,l_),e(Jo,Xo),e(Xo,p_),e(Jo,h_),e(z,u_),e(z,za),e(za,m_),e(z,g_),e(z,se),T(Zo,se,null),e(se,__),e(se,et),e(et,f_),e(et,ur),e(ur,v_),e(et,b_),e(et,$a),e($a,T_),e(et,w_),e(se,k_),T(Nt,se,null),e(se,q_),e(se,Ea),e(Ea,y_),e(se,x_),T(en,se,null),e(z,R_),e(z,Pt),T(tn,Pt,null),e(Pt,F_),e(Pt,ja),e(ja,z_),md=!0},p(d,[m]){const on={};m&2&&(on.$$scope={dirty:m,ctx:d}),_t.$set(on);const Ma={};m&2&&(Ma.$$scope={dirty:m,ctx:d}),ft.$set(Ma);const Ga={};m&2&&(Ga.$$scope={dirty:m,ctx:d}),bt.$set(Ga);const Aa={};m&2&&(Aa.$$scope={dirty:m,ctx:d}),Tt.$set(Aa);const nn={};m&2&&(nn.$$scope={dirty:m,ctx:d}),qt.$set(nn);const Na={};m&2&&(Na.$$scope={dirty:m,ctx:d}),yt.$set(Na);const Pa={};m&2&&(Pa.$$scope={dirty:m,ctx:d}),Ft.$set(Pa);const Ca={};m&2&&(Ca.$$scope={dirty:m,ctx:d}),zt.$set(Ca);const Sa={};m&2&&(Sa.$$scope={dirty:m,ctx:d}),Et.$set(Sa);const rn={};m&2&&(rn.$$scope={dirty:m,ctx:d}),jt.$set(rn);const Da={};m&2&&(Da.$$scope={dirty:m,ctx:d}),At.$set(Da);const 
La={};m&2&&(La.$$scope={dirty:m,ctx:d}),Nt.$set(La)},i(d){md||(w(g.$$.fragment,d),w(St.$$.fragment,d),w(It.$$.fragment,d),w(Ot.$$.fragment,d),w(Wt.$$.fragment,d),w(Bt.$$.fragment,d),w(Ut.$$.fragment,d),w(Vt.$$.fragment,d),w(Kt.$$.fragment,d),w(Yt.$$.fragment,d),w(Jt.$$.fragment,d),w(Zt.$$.fragment,d),w(eo.$$.fragment,d),w(to.$$.fragment,d),w(oo.$$.fragment,d),w(no.$$.fragment,d),w(ro.$$.fragment,d),w(so.$$.fragment,d),w(io.$$.fragment,d),w(co.$$.fragment,d),w(_t.$$.fragment,d),w(uo.$$.fragment,d),w(ft.$$.fragment,d),w(mo.$$.fragment,d),w(go.$$.fragment,d),w(_o.$$.fragment,d),w(bt.$$.fragment,d),w(To.$$.fragment,d),w(Tt.$$.fragment,d),w(wo.$$.fragment,d),w(ko.$$.fragment,d),w(yo.$$.fragment,d),w(xo.$$.fragment,d),w(qt.$$.fragment,d),w($o.$$.fragment,d),w(yt.$$.fragment,d),w(Eo.$$.fragment,d),w(jo.$$.fragment,d),w(Mo.$$.fragment,d),w(Go.$$.fragment,d),w(Ft.$$.fragment,d),w(Co.$$.fragment,d),w(zt.$$.fragment,d),w(So.$$.fragment,d),w(Do.$$.fragment,d),w(Lo.$$.fragment,d),w(Et.$$.fragment,d),w(Ho.$$.fragment,d),w(jt.$$.fragment,d),w(Bo.$$.fragment,d),w(Qo.$$.fragment,d),w(Vo.$$.fragment,d),w(Ko.$$.fragment,d),w(At.$$.fragment,d),w(Zo.$$.fragment,d),w(Nt.$$.fragment,d),w(en.$$.fragment,d),w(tn.$$.fragment,d),md=!0)},o(d){k(g.$$.fragment,d),k(St.$$.fragment,d),k(It.$$.fragment,d),k(Ot.$$.fragment,d),k(Wt.$$.fragment,d),k(Bt.$$.fragment,d),k(Ut.$$.fragment,d),k(Vt.$$.fragment,d),k(Kt.$$.fragment,d),k(Yt.$$.fragment,d),k(Jt.$$.fragment,d),k(Zt.$$.fragment,d),k(eo.$$.fragment,d),k(to.$$.fragment,d),k(oo.$$.fragment,d),k(no.$$.fragment,d),k(ro.$$.fragment,d),k(so.$$.fragment,d),k(io.$$.fragment,d),k(co.$$.fragment,d),k(_t.$$.fragment,d),k(uo.$$.fragment,d),k(ft.$$.fragment,d),k(mo.$$.fragment,d),k(go.$$.fragment,d),k(_o.$$.fragment,d),k(bt.$$.fragment,d),k(To.$$.fragment,d),k(Tt.$$.fragment,d),k(wo.$$.fragment,d),k(ko.$$.fragment,d),k(yo.$$.fragment,d),k(xo.$$.fragment,d),k(qt.$$.fragment,d),k($o.$$.fragment,d),k(yt.$$.fragment,d),k(Eo.$$.fragment,d),k(jo.$$.fragment,d),k(Mo.$$.fragment,d),k(Go.$$.fragment,d),k(Ft.$$.fragment,d),k(Co.$$.fragment,d),k(zt.$$.fragment,d),k(So.$$.fragment,d),k(Do.$$.fragment,d),k(Lo.$$.fragment,d),k(Et.$$.fragment,d),k(Ho.$$.fragment,d),k(jt.$$.fragment,d),k(Bo.$$.fragment,d),k(Qo.$$.fragment,d),k(Vo.$$.fragment,d),k(Ko.$$.fragment,d),k(At.$$.fragment,d),k(Zo.$$.fragment,d),k(Nt.$$.fragment,d),k(en.$$.fragment,d),k(tn.$$.fragment,d),md=!1},d(d){n(p),d&&n(y),d&&n(u),q(g),d&&n(Ia),d&&n($e),q(St),d&&n(Oa),d&&n(an),d&&n(Wa),d&&n(nt),d&&n(Ha),d&&n(dn),d&&n(Ba),d&&n(cn),d&&n(Qa),d&&n(rt),d&&n(Ua),d&&n(Ee),q(It),d&&n(Va),d&&n(de),q(Ot),q(Wt),q(Bt),d&&n(Ka),d&&n(je),q(Ut),d&&n(Ya),d&&n(Me),q(Vt),q(Kt),d&&n(Ja),d&&n(Ge),q(Yt),d&&n(Xa),d&&n(Ae),q(Jt),d&&n(Za),d&&n(Xt),q(Zt),d&&n(ed),d&&n(Ne),q(eo),d&&n(td),d&&n(X),q(to),q(oo),q(no),q(ro),q(so),d&&n(od),d&&n(Ce),q(io),d&&n(nd),d&&n(D),q(co),q(_t),q(uo),q(ft),q(mo),d&&n(rd),d&&n(Le),q(go),d&&n(sd),d&&n(E),q(_o),q(bt),q(To),q(Tt),q(wo),q(ko),d&&n(ad),d&&n(We),q(yo),d&&n(dd),d&&n(j),q(xo),q(qt),q($o),q(yt),q(Eo),q(jo),d&&n(id),d&&n(Qe),q(Mo),d&&n(cd),d&&n(P),q(Go),q(Ft),q(Co),q(zt),q(So),d&&n(ld),d&&n(Ke),q(Do),d&&n(pd),d&&n(F),q(Lo),q(Et),q(Ho),q(jt),q(Bo),q(Qo),d&&n(hd),d&&n(Xe),q(Vo),d&&n(ud),d&&n(z),q(Ko),q(At),q(Zo),q(Nt),q(en),q(tn)}}}const bT={local:"rag",sections:[{local:"overview",title:"Overview"},{local:"transformers.RagConfig",title:"RagConfig"},{local:"transformers.RagTokenizer",title:"RagTokenizer"},{local:"transformers.models.rag.modeling_rag.RetrievAugLMMarginOutput",title:"Rag specific 
outputs"},{local:"transformers.RagRetriever",title:"RagRetriever"},{local:"transformers.RagModel",title:"RagModel"},{local:"transformers.RagSequenceForGeneration",title:"RagSequenceForGeneration"},{local:"transformers.RagTokenForGeneration",title:"RagTokenForGeneration"},{local:"transformers.TFRagModel",title:"TFRagModel"},{local:"transformers.TFRagSequenceForGeneration",title:"TFRagSequenceForGeneration"},{local:"transformers.TFRagTokenForGeneration",title:"TFRagTokenForGeneration"}],title:"RAG"};function TT($,p,y){let{fw:u}=p;return $.$$set=f=>{"fw"in f&&y(0,u=f.fw)},[u]}class FT extends oT{constructor(p){super();nT(this,p,TT,vT,rT,{fw:0})}}export{FT as default,bT as metadata};
282
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages/model_doc/fnet.mdx-45669f92.js
import{S as Wh,i as Uh,s as Hh,e as n,k as c,w as g,t as a,M as Qh,c as s,d as o,m as d,a as r,x as _,h as i,b as l,F as e,g as f,y as k,q as b,o as F,B as v}from"../../chunks/vendor-4833417e.js";import{T as bt}from"../../chunks/Tip-fffd6df1.js";import{D as z}from"../../chunks/Docstring-4f315ed9.js";import{C as Y}from"../../chunks/CodeBlock-6a3d1b46.js";import{I as G}from"../../chunks/IconCopyLink-4b81c553.js";import"../../chunks/CopyButton-dacfbfaf.js";function Vh(q){let p,y,m,T,N;return{c(){p=n("p"),y=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=n("code"),T=a("Module"),N=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(w){p=s(w,"P",{});var u=r(p);y=i(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=s(u,"CODE",{});var $=r(m);T=i($,"Module"),$.forEach(o),N=i(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(o)},m(w,u){f(w,p,u),e(p,y),e(p,m),e(m,T),e(p,N)},d(w){w&&o(p)}}}function Rh(q){let p,y,m,T,N;return{c(){p=n("p"),y=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=n("code"),T=a("Module"),N=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(w){p=s(w,"P",{});var u=r(p);y=i(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=s(u,"CODE",{});var $=r(m);T=i($,"Module"),$.forEach(o),N=i(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(o)},m(w,u){f(w,p,u),e(p,y),e(p,m),e(m,T),e(p,N)},d(w){w&&o(p)}}}function Gh(q){let p,y,m,T,N;return{c(){p=n("p"),y=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=n("code"),T=a("Module"),N=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(w){p=s(w,"P",{});var u=r(p);y=i(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=s(u,"CODE",{});var $=r(m);T=i($,"Module"),$.forEach(o),N=i(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(o)},m(w,u){f(w,p,u),e(p,y),e(p,m),e(m,T),e(p,N)},d(w){w&&o(p)}}}function Jh(q){let p,y,m,T,N;return{c(){p=n("p"),y=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=n("code"),T=a("Module"),N=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(w){p=s(w,"P",{});var u=r(p);y=i(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=s(u,"CODE",{});var $=r(m);T=i($,"Module"),$.forEach(o),N=i(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(o)},m(w,u){f(w,p,u),e(p,y),e(p,m),e(m,T),e(p,N)},d(w){w&&o(p)}}}function Kh(q){let 
p,y,m,T,N;return{c(){p=n("p"),y=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=n("code"),T=a("Module"),N=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(w){p=s(w,"P",{});var u=r(p);y=i(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=s(u,"CODE",{});var $=r(m);T=i($,"Module"),$.forEach(o),N=i(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(o)},m(w,u){f(w,p,u),e(p,y),e(p,m),e(m,T),e(p,N)},d(w){w&&o(p)}}}function Xh(q){let p,y,m,T,N;return{c(){p=n("p"),y=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=n("code"),T=a("Module"),N=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(w){p=s(w,"P",{});var u=r(p);y=i(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=s(u,"CODE",{});var $=r(m);T=i($,"Module"),$.forEach(o),N=i(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(o)},m(w,u){f(w,p,u),e(p,y),e(p,m),e(m,T),e(p,N)},d(w){w&&o(p)}}}function Yh(q){let p,y,m,T,N;return{c(){p=n("p"),y=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=n("code"),T=a("Module"),N=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(w){p=s(w,"P",{});var u=r(p);y=i(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=s(u,"CODE",{});var $=r(m);T=i($,"Module"),$.forEach(o),N=i(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(o)},m(w,u){f(w,p,u),e(p,y),e(p,m),e(m,T),e(p,N)},d(w){w&&o(p)}}}function Zh(q){let p,y,m,T,N;return{c(){p=n("p"),y=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=n("code"),T=a("Module"),N=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(w){p=s(w,"P",{});var u=r(p);y=i(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=s(u,"CODE",{});var $=r(m);T=i($,"Module"),$.forEach(o),N=i(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(o)},m(w,u){f(w,p,u),e(p,y),e(p,m),e(m,T),e(p,N)},d(w){w&&o(p)}}}function ef(q){let 
p,y,m,T,N,w,u,$,Jr,Xs,me,Be,jn,Ft,Kr,Cn,Xr,Ys,We,Yr,vt,Zr,ea,Zs,Uo,Sn,ta,er,Ho,oa,tr,Qo,An,na,or,oe,sa,wt,ra,aa,Tt,ia,la,nr,ue,Ue,Ln,yt,ca,In,da,sr,x,Nt,pa,ge,ha,Vo,fa,ma,$t,ua,ga,_a,_e,ka,Ro,ba,Fa,Go,va,wa,Ta,On,ya,Na,zt,rr,ke,He,Dn,qt,$a,Bn,za,ar,P,Et,qa,Z,Ea,Jo,Pa,xa,Pt,Ma,ja,Ko,Ca,Sa,Aa,ne,xt,La,Wn,Ia,Oa,Mt,Xo,Da,Un,Ba,Wa,Yo,Ua,Hn,Ha,Qa,Qe,jt,Va,Ct,Ra,Qn,Ga,Ja,Ka,J,St,Xa,Vn,Ya,Za,At,ei,be,ti,Rn,oi,ni,Gn,si,ri,ai,Zo,Lt,ir,Fe,Ve,Jn,It,ii,Kn,li,lr,D,Ot,ci,B,di,Xn,pi,hi,en,fi,mi,Dt,ui,gi,tn,_i,ki,bi,se,Bt,Fi,Yn,vi,wi,Wt,on,Ti,Zn,yi,Ni,nn,$i,es,zi,qi,K,Ut,Ei,ts,Pi,xi,Ht,Mi,os,ji,cr,ve,Re,ns,Qt,Ci,ss,Si,dr,W,Vt,Ai,Rt,Li,Gt,Ii,Oi,Di,Jt,Bi,Kt,Wi,Ui,Hi,M,Xt,Qi,we,Vi,sn,Ri,Gi,rs,Ji,Ki,Xi,Ge,Yi,as,Zi,el,Yt,pr,Te,Je,is,Zt,tl,ls,ol,hr,U,eo,nl,ye,sl,cs,rl,al,ds,il,ll,cl,to,dl,oo,pl,hl,fl,j,no,ml,Ne,ul,rn,gl,_l,ps,kl,bl,Fl,Ke,vl,hs,wl,Tl,so,fr,$e,Xe,fs,ro,yl,ms,Nl,mr,ee,ao,$l,ze,zl,us,ql,El,io,Pl,xl,Ml,C,lo,jl,qe,Cl,an,Sl,Al,gs,Ll,Il,Ol,Ye,Dl,_s,Bl,Wl,co,ur,Ee,Ze,ks,po,Ul,bs,Hl,gr,te,ho,Ql,Pe,Vl,Fs,Rl,Gl,fo,Jl,Kl,Xl,S,mo,Yl,xe,Zl,ln,ec,tc,vs,oc,nc,sc,et,rc,ws,ac,ic,uo,_r,Me,tt,Ts,go,lc,ys,cc,kr,H,_o,dc,Ns,pc,hc,ko,fc,bo,mc,uc,gc,E,Fo,_c,je,kc,cn,bc,Fc,$s,vc,wc,Tc,ot,yc,zs,Nc,$c,vo,zc,qs,qc,Ec,wo,br,Ce,nt,Es,To,Pc,Ps,xc,Fr,Q,yo,Mc,xs,jc,Cc,No,Sc,$o,Ac,Lc,Ic,A,zo,Oc,Se,Dc,dn,Bc,Wc,Ms,Uc,Hc,Qc,st,Vc,js,Rc,Gc,qo,vr,Ae,rt,Cs,Eo,Jc,Ss,Kc,wr,V,Po,Xc,As,Yc,Zc,xo,ed,Mo,td,od,nd,L,jo,sd,Le,rd,pn,ad,id,Ls,ld,cd,dd,at,pd,Is,hd,fd,Co,Tr,Ie,it,Os,So,md,Ds,ud,yr,R,Ao,gd,Oe,_d,Bs,kd,bd,Ws,Fd,vd,wd,Lo,Td,Io,yd,Nd,$d,I,Oo,zd,De,qd,hn,Ed,Pd,Us,xd,Md,jd,lt,Cd,Hs,Sd,Ad,Do,Nr;return w=new G({}),Ft=new G({}),yt=new G({}),Nt=new z({props:{name:"class transformers.FNetConfig",anchor:"transformers.FNetConfig",parameters:[{name:"vocab_size",val:" = 32000"},{name:"hidden_size",val:" = 768"},{name:"num_hidden_layers",val:" = 12"},{name:"intermediate_size",val:" = 3072"},{name:"hidden_act",val:" = 'gelu_new'"},{name:"hidden_dropout_prob",val:" = 0.1"},{name:"max_position_embeddings",val:" = 512"},{name:"type_vocab_size",val:" = 4"},{name:"initializer_range",val:" = 0.02"},{name:"layer_norm_eps",val:" = 1e-12"},{name:"use_tpu_fourier_optimizations",val:" = False"},{name:"tpu_short_seq_length",val:" = 512"},{name:"pad_token_id",val:" = 3"},{name:"bos_token_id",val:" = 1"},{name:"eos_token_id",val:" = 2"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/fnet/configuration_fnet.py#L30",parametersDescription:[{anchor:"transformers.FNetConfig.vocab_size",description:`<strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 32000) &#x2014; Vocabulary size of the FNet model. 
Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed when calling <a href="/docs/transformers/pr_16143/en/model_doc/fnet#transformers.FNetModel">FNetModel</a> or <code>TFFNetModel</code>.`,name:"vocab_size"},{anchor:"transformers.FNetConfig.hidden_size",description:`<strong>hidden_size</strong> (<code>int</code>, <em>optional</em>, defaults to 768) &#x2014; Dimension of the encoder layers and the pooler layer.`,name:"hidden_size"},{anchor:"transformers.FNetConfig.num_hidden_layers",description:`<strong>num_hidden_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of hidden layers in the Transformer encoder.`,name:"num_hidden_layers"},{anchor:"transformers.FNetConfig.intermediate_size",description:`<strong>intermediate_size</strong> (<code>int</code>, <em>optional</em>, defaults to 3072) &#x2014; Dimension of the &#x201C;intermediate&#x201D; (i.e., feed-forward) layer in the Transformer encoder.`,name:"intermediate_size"},{anchor:"transformers.FNetConfig.hidden_act",description:`<strong>hidden_act</strong> (<code>str</code> or <code>function</code>, <em>optional</em>, defaults to <code>&quot;gelu_new&quot;</code>) &#x2014; The non-linear activation function (function or string) in the encoder and pooler. If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;selu&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.`,name:"hidden_act"},{anchor:"transformers.FNetConfig.hidden_dropout_prob",description:`<strong>hidden_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler.`,name:"hidden_dropout_prob"},{anchor:"transformers.FNetConfig.max_position_embeddings",description:`<strong>max_position_embeddings</strong> (<code>int</code>, <em>optional</em>, defaults to 512) &#x2014; The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048).`,name:"max_position_embeddings"},{anchor:"transformers.FNetConfig.type_vocab_size",description:`<strong>type_vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 4) &#x2014; The vocabulary size of the <code>token_type_ids</code> passed when calling <a href="/docs/transformers/pr_16143/en/model_doc/fnet#transformers.FNetModel">FNetModel</a> or <code>TFFNetModel</code>.`,name:"type_vocab_size"},{anchor:"transformers.FNetConfig.initializer_range",description:`<strong>initializer_range</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices.`,name:"initializer_range"},{anchor:"transformers.FNetConfig.layer_norm_eps",description:`<strong>layer_norm_eps</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-12) &#x2014; The epsilon used by the layer normalization layers.`,name:"layer_norm_eps"},{anchor:"transformers.FNetConfig.use_tpu_fourier_optimizations",description:`<strong>use_tpu_fourier_optimizations</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Determines whether to use TPU optimized FFTs. If <code>True</code>, the model will favor axis-wise FFTs transforms. 
Set to <code>False</code> for GPU/CPU hardware, in which case n-dimensional FFTs are used.`,name:"use_tpu_fourier_optimizations"},{anchor:"transformers.FNetConfig.tpu_short_seq_length",description:`<strong>tpu_short_seq_length</strong> (<code>int</code>, <em>optional</em>, defaults to 512) &#x2014; The sequence length that is expected by the model when using TPUs. This will be used to initialize the DFT matrix only when <em>use_tpu_fourier_optimizations</em> is set to <code>True</code> and the input sequence is shorter than or equal to 4096 tokens.`,name:"tpu_short_seq_length"}]}}),zt=new Y({props:{code:`from transformers import FNetModel, FNetConfig # Initializing a FNet fnet-base style configuration configuration = FNetConfig() # Initializing a model from the fnet-base style configuration model = FNetModel(configuration) # Accessing the model configuration configuration = model.config`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> FNetModel, FNetConfig <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a FNet fnet-base style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = FNetConfig() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a model from the fnet-base style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = FNetModel(configuration) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = model.config`}}),qt=new G({}),Et=new z({props:{name:"class transformers.FNetTokenizer",anchor:"transformers.FNetTokenizer",parameters:[{name:"vocab_file",val:""},{name:"do_lower_case",val:" = False"},{name:"remove_space",val:" = True"},{name:"keep_accents",val:" = True"},{name:"unk_token",val:" = '<unk>'"},{name:"sep_token",val:" = '[SEP]'"},{name:"pad_token",val:" = '<pad>'"},{name:"cls_token",val:" = '[CLS]'"},{name:"mask_token",val:" = '[MASK]'"},{name:"sp_model_kwargs",val:": typing.Union[typing.Dict[str, typing.Any], NoneType] = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/fnet/tokenization_fnet.py#L46",parametersDescription:[{anchor:"transformers.FNetTokenizer.vocab_file",description:`<strong>vocab_file</strong> (<code>str</code>) &#x2014; <a href="https://github.com/google/sentencepiece" rel="nofollow">SentencePiece</a> file (generally has a <em>.spm</em> extension) that contains the vocabulary necessary to instantiate a tokenizer.`,name:"vocab_file"},{anchor:"transformers.FNetTokenizer.do_lower_case",description:`<strong>do_lower_case</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to lowercase the input when tokenizing.`,name:"do_lower_case"},{anchor:"transformers.FNetTokenizer.remove_space",description:`<strong>remove_space</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to strip the text when tokenizing (removing excess spaces before and after the string).`,name:"remove_space"},{anchor:"transformers.FNetTokenizer.keep_accents",description:`<strong>keep_accents</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to keep accents when 
tokenizing.`,name:"keep_accents"},{anchor:"transformers.FNetTokenizer.unk_token",description:`<strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;unk&gt;&quot;</code>) &#x2014; The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.`,name:"unk_token"},{anchor:"transformers.FNetTokenizer.sep_token",description:`<strong>sep_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[SEP]&quot;</code>) &#x2014; The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens.`,name:"sep_token"},{anchor:"transformers.FNetTokenizer.pad_token",description:`<strong>pad_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;pad&gt;&quot;</code>) &#x2014; The token used for padding, for example when batching sequences of different lengths.`,name:"pad_token"},{anchor:"transformers.FNetTokenizer.cls_token",description:`<strong>cls_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[CLS]&quot;</code>) &#x2014; The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens.`,name:"cls_token"},{anchor:"transformers.FNetTokenizer.mask_token",description:`<strong>mask_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[MASK]&quot;</code>) &#x2014; The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict.`,name:"mask_token"},{anchor:"transformers.FNetTokenizer.sp_model_kwargs",description:`<strong>sp_model_kwargs</strong> (<code>dict</code>, <em>optional</em>) &#x2014; Will be passed to the <code>SentencePieceProcessor.__init__()</code> method. The <a href="https://github.com/google/sentencepiece/tree/master/python" rel="nofollow">Python wrapper for SentencePiece</a> can be used, among other things, to set:</p> <ul> <li> <p><code>enable_sampling</code>: Enable subword regularization.</p> </li> <li> <p><code>nbest_size</code>: Sampling parameters for unigram. 
Invalid for BPE-Dropout.</p> <ul> <li><code>nbest_size = {0,1}</code>: No sampling is performed.</li> <li><code>nbest_size &gt; 1</code>: samples from the nbest_size results.</li> <li><code>nbest_size &lt; 0</code>: assuming that nbest_size is infinite and samples from the all hypothesis (lattice) using forward-filtering-and-backward-sampling algorithm.</li> </ul> </li> <li> <p><code>alpha</code>: Smoothing parameter for unigram sampling, and dropout probability of merge operations for BPE-dropout.</p> </li> </ul>`,name:"sp_model_kwargs"},{anchor:"transformers.FNetTokenizer.sp_model",description:`<strong>sp_model</strong> (<code>SentencePieceProcessor</code>) &#x2014; The <em>SentencePiece</em> processor that is used for every conversion (string, tokens and IDs).`,name:"sp_model"}]}}),xt=new z({props:{name:"build_inputs_with_special_tokens",anchor:"transformers.FNetTokenizer.build_inputs_with_special_tokens",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/fnet/tokenization_fnet.py#L218",parametersDescription:[{anchor:"transformers.FNetTokenizer.build_inputs_with_special_tokens.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs to which the special tokens will be added.`,name:"token_ids_0"},{anchor:"transformers.FNetTokenizer.build_inputs_with_special_tokens.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of <a href="../glossary#input-ids">input IDs</a> with the appropriate special tokens.</p> `,returnType:` <p><code>List[int]</code></p> `}}),jt=new z({props:{name:"get_special_tokens_mask",anchor:"transformers.FNetTokenizer.get_special_tokens_mask",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"},{name:"already_has_special_tokens",val:": bool = False"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/fnet/tokenization_fnet.py#L243",parametersDescription:[{anchor:"transformers.FNetTokenizer.get_special_tokens_mask.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.`,name:"token_ids_0"},{anchor:"transformers.FNetTokenizer.get_special_tokens_mask.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"},{anchor:"transformers.FNetTokenizer.get_special_tokens_mask.already_has_special_tokens",description:`<strong>already_has_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the token list is already formatted with special tokens for the model.`,name:"already_has_special_tokens"}],returnDescription:` <p>A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.</p> `,returnType:` <p><code>List[int]</code></p> `}}),St=new z({props:{name:"create_token_type_ids_from_sequences",anchor:"transformers.FNetTokenizer.create_token_type_ids_from_sequences",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = 
None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/fnet/tokenization_fnet.py#L271",parametersDescription:[{anchor:"transformers.FNetTokenizer.create_token_type_ids_from_sequences.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.`,name:"token_ids_0"},{anchor:"transformers.FNetTokenizer.create_token_type_ids_from_sequences.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of <a href="../glossary#token-type-ids">token type IDs</a> according to the given sequence(s).</p> `,returnType:` <p><code>List[int]</code></p> `}}),At=new Y({props:{code:"0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence |",highlighted:'<span class="hljs-attribute">0</span> <span class="hljs-number">0</span> <span class="hljs-number">0</span> <span class="hljs-number">0</span> <span class="hljs-number">0</span> <span class="hljs-number">0</span> <span class="hljs-number">0</span> <span class="hljs-number">0</span> <span class="hljs-number">0</span> <span class="hljs-number">0</span> <span class="hljs-number">0</span> <span class="hljs-number">1</span> <span class="hljs-number">1</span> <span class="hljs-number">1</span> <span class="hljs-number">1</span> <span class="hljs-number">1</span> <span class="hljs-number">1</span> <span class="hljs-number">1</span> <span class="hljs-number">1</span> <span class="hljs-number">1</span> | first sequence | second sequence |'}}),Lt=new z({props:{name:"save_vocabulary",anchor:"transformers.FNetTokenizer.save_vocabulary",parameters:[{name:"save_directory",val:": str"},{name:"filename_prefix",val:": typing.Optional[str] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/fnet/tokenization_fnet.py#L300"}}),It=new G({}),Ot=new z({props:{name:"class transformers.FNetTokenizerFast",anchor:"transformers.FNetTokenizerFast",parameters:[{name:"vocab_file",val:" = None"},{name:"tokenizer_file",val:" = None"},{name:"do_lower_case",val:" = False"},{name:"remove_space",val:" = True"},{name:"keep_accents",val:" = True"},{name:"unk_token",val:" = '<unk>'"},{name:"sep_token",val:" = '[SEP]'"},{name:"pad_token",val:" = '<pad>'"},{name:"cls_token",val:" = '[CLS]'"},{name:"mask_token",val:" = '[MASK]'"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/fnet/tokenization_fnet_fast.py#L55",parametersDescription:[{anchor:"transformers.FNetTokenizerFast.vocab_file",description:`<strong>vocab_file</strong> (<code>str</code>) &#x2014; <a href="https://github.com/google/sentencepiece" rel="nofollow">SentencePiece</a> file (generally has a <em>.spm</em> extension) that contains the vocabulary necessary to instantiate a tokenizer.`,name:"vocab_file"},{anchor:"transformers.FNetTokenizerFast.do_lower_case",description:`<strong>do_lower_case</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to lowercase the input when tokenizing.`,name:"do_lower_case"},{anchor:"transformers.FNetTokenizerFast.remove_space",description:`<strong>remove_space</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to strip the text when tokenizing (removing excess spaces before and after the 
string).`,name:"remove_space"},{anchor:"transformers.FNetTokenizerFast.keep_accents",description:`<strong>keep_accents</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to keep accents when tokenizing.`,name:"keep_accents"},{anchor:"transformers.FNetTokenizerFast.unk_token",description:`<strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;unk&gt;&quot;</code>) &#x2014; The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.`,name:"unk_token"},{anchor:"transformers.FNetTokenizerFast.sep_token",description:`<strong>sep_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[SEP]&quot;</code>) &#x2014; The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens.`,name:"sep_token"},{anchor:"transformers.FNetTokenizerFast.pad_token",description:`<strong>pad_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;pad&gt;&quot;</code>) &#x2014; The token used for padding, for example when batching sequences of different lengths.`,name:"pad_token"},{anchor:"transformers.FNetTokenizerFast.cls_token",description:`<strong>cls_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[CLS]&quot;</code>) &#x2014; The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens.`,name:"cls_token"},{anchor:"transformers.FNetTokenizerFast.mask_token",description:`<strong>mask_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[MASK]&quot;</code>) &#x2014; The token used for masking values. This is the token used when training this model with masked language modeling. 
This is the token which the model will try to predict.`,name:"mask_token"}]}}),Bt=new z({props:{name:"build_inputs_with_special_tokens",anchor:"transformers.FNetTokenizerFast.build_inputs_with_special_tokens",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/fnet/tokenization_fnet_fast.py#L138",parametersDescription:[{anchor:"transformers.FNetTokenizerFast.build_inputs_with_special_tokens.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs to which the special tokens will be added`,name:"token_ids_0"},{anchor:"transformers.FNetTokenizerFast.build_inputs_with_special_tokens.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>list of <a href="../glossary#input-ids">input IDs</a> with the appropriate special tokens.</p> `,returnType:` <p><code>List[int]</code></p> `}}),Ut=new z({props:{name:"create_token_type_ids_from_sequences",anchor:"transformers.FNetTokenizerFast.create_token_type_ids_from_sequences",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/fnet/tokenization_fnet_fast.py#L163",parametersDescription:[{anchor:"transformers.FNetTokenizerFast.create_token_type_ids_from_sequences.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of ids.`,name:"token_ids_0"},{anchor:"transformers.FNetTokenizerFast.create_token_type_ids_from_sequences.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of <a href="../glossary#token-type-ids">token type IDs</a> according to the given sequence(s).</p> `,returnType:` <p><code>List[int]</code></p> `}}),Ht=new Y({props:{code:`0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence |`,highlighted:`0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 1 </span>1<span class="hljs-number"> 1 </span>1<span class="hljs-number"> 1 </span>1<span class="hljs-number"> 1 </span>1 1 | first sequence | second sequence |`}}),Qt=new G({}),Vt=new z({props:{name:"class transformers.FNetModel",anchor:"transformers.FNetModel",parameters:[{name:"config",val:""},{name:"add_pooling_layer",val:" = True"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/fnet/modeling_fnet.py#L518",parametersDescription:[{anchor:"transformers.FNetModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/fnet#transformers.FNetConfig">FNetConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Xt=new z({props:{name:"forward",anchor:"transformers.FNetModel.forward",parameters:[{name:"input_ids",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/fnet/modeling_fnet.py#L544",parametersDescription:[{anchor:"transformers.FNetModel.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/fnet#transformers.FNetTokenizer">FNetTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FNetModel.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.FNetModel.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.FNetModel.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.FNetModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FNetModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.BaseModelOutput" >transformers.modeling_outputs.BaseModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/fnet#transformers.FNetConfig" >FNetConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.BaseModelOutput" >transformers.modeling_outputs.BaseModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Ge=new bt({props:{$$slots:{default:[Vh]},$$scope:{ctx:q}}}),Yt=new Y({props:{code:`from transformers import FNetTokenizer, FNetModel import torch tokenizer = FNetTokenizer.from_pretrained("google/fnet-base") model = FNetModel.from_pretrained("google/fnet-base") inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> FNetTokenizer, FNetModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = FNetTokenizer.from_pretrained(<span class="hljs-string">&quot;google/fnet-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FNetModel.from_pretrained(<span class="hljs-string">&quot;google/fnet-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span 
class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),Zt=new G({}),eo=new z({props:{name:"class transformers.FNetForPreTraining",anchor:"transformers.FNetForPreTraining",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/fnet/modeling_fnet.py#L627",parametersDescription:[{anchor:"transformers.FNetForPreTraining.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/fnet#transformers.FNetConfig">FNetConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),no=new z({props:{name:"forward",anchor:"transformers.FNetForPreTraining.forward",parameters:[{name:"input_ids",val:": typing.Optional[torch.Tensor] = None"},{name:"token_type_ids",val:": typing.Optional[torch.Tensor] = None"},{name:"position_ids",val:": typing.Optional[torch.Tensor] = None"},{name:"inputs_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"labels",val:": typing.Optional[torch.Tensor] = None"},{name:"next_sentence_label",val:": typing.Optional[torch.Tensor] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/fnet/modeling_fnet.py#L643",parametersDescription:[{anchor:"transformers.FNetForPreTraining.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/fnet#transformers.FNetTokenizer">FNetTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FNetForPreTraining.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.FNetForPreTraining.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.FNetForPreTraining.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.FNetForPreTraining.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FNetForPreTraining.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.FNetForPreTraining.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>`,name:"labels"},{anchor:"transformers.FNetForPreTraining.forward.next_sentence_label",description:`<strong>next_sentence_label</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the next sequence prediction (classification) loss. 
Input should be a sequence pair (see <code>input_ids</code> docstring) Indices should be in <code>[0, 1]</code>:</p> <ul> <li>0 indicates sequence B is a continuation of sequence A,</li> <li>1 indicates sequence B is a random sequence.</li> </ul>`,name:"next_sentence_label"},{anchor:"transformers.FNetForPreTraining.forward.kwargs",description:`<strong>kwargs</strong> (<code>Dict[str, any]</code>, optional, defaults to <em>{}</em>) &#x2014; Used to hide legacy arguments that have been deprecated.`,name:"kwargs"}],returnDescription:` <p>A <code>transformers.models.fnet.modeling_fnet.FNetForPreTrainingOutput</code>or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/fnet#transformers.FNetConfig" >FNetConfig</a>) and inputs.</p> <ul> <li><strong>loss</strong> (<em>optional</em>, returned when <code>labels</code> is provided, <code>torch.FloatTensor</code> of shape <code>(1,)</code>) \u2014 Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification) loss.</li> <li><strong>prediction_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</li> <li><strong>seq_relationship_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, 2)</code>) \u2014 Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax).</li> <li><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>. 
Hidden-states of the model at the output of each layer plus the initial embedding outputs.</li> </ul> `,returnType:` <p><code>transformers.models.fnet.modeling_fnet.FNetForPreTrainingOutput</code>or <code>tuple(torch.FloatTensor)</code></p> `}}),Ke=new bt({props:{$$slots:{default:[Rh]},$$scope:{ctx:q}}}),so=new Y({props:{code:`from transformers import FNetTokenizer, FNetForPreTraining import torch tokenizer = FNetTokenizer.from_pretrained("google/fnet-base") model = FNetForPreTraining.from_pretrained("google/fnet-base") inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") outputs = model(**inputs) prediction_logits = outputs.prediction_logits seq_relationship_logits = outputs.seq_relationship_logits`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> FNetTokenizer, FNetForPreTraining <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = FNetTokenizer.from_pretrained(<span class="hljs-string">&quot;google/fnet-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FNetForPreTraining.from_pretrained(<span class="hljs-string">&quot;google/fnet-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>prediction_logits = outputs.prediction_logits <span class="hljs-meta">&gt;&gt;&gt; </span>seq_relationship_logits = outputs.seq_relationship_logits`}}),ro=new G({}),ao=new z({props:{name:"class transformers.FNetForMaskedLM",anchor:"transformers.FNetForMaskedLM",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/fnet/modeling_fnet.py#L719",parametersDescription:[{anchor:"transformers.FNetForMaskedLM.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/fnet#transformers.FNetConfig">FNetConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),lo=new z({props:{name:"forward",anchor:"transformers.FNetForMaskedLM.forward",parameters:[{name:"input_ids",val:": typing.Optional[torch.Tensor] = None"},{name:"token_type_ids",val:": typing.Optional[torch.Tensor] = None"},{name:"position_ids",val:": typing.Optional[torch.Tensor] = None"},{name:"inputs_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"labels",val:": typing.Optional[torch.Tensor] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/fnet/modeling_fnet.py#L735",parametersDescription:[{anchor:"transformers.FNetForMaskedLM.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/fnet#transformers.FNetTokenizer">FNetTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FNetForMaskedLM.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.FNetForMaskedLM.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.FNetForMaskedLM.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.FNetForMaskedLM.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FNetForMaskedLM.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.FNetForMaskedLM.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.MaskedLMOutput" >transformers.modeling_outputs.MaskedLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/fnet#transformers.FNetConfig" >FNetConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Masked language modeling (MLM) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.MaskedLMOutput" >transformers.modeling_outputs.MaskedLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Ye=new bt({props:{$$slots:{default:[Gh]},$$scope:{ctx:q}}}),co=new Y({props:{code:`from transformers import FNetTokenizer, FNetForMaskedLM import torch tokenizer = FNetTokenizer.from_pretrained("google/fnet-base") model = FNetForMaskedLM.from_pretrained("google/fnet-base") inputs = tokenizer("The capital of France is [MASK].", return_tensors="pt") labels = tokenizer("The capital 
of France is Paris.", return_tensors="pt")["input_ids"] outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> FNetTokenizer, FNetForMaskedLM <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = FNetTokenizer.from_pretrained(<span class="hljs-string">&quot;google/fnet-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FNetForMaskedLM.from_pretrained(<span class="hljs-string">&quot;google/fnet-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;The capital of France is [MASK].&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = tokenizer(<span class="hljs-string">&quot;The capital of France is Paris.&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>)[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),po=new G({}),ho=new z({props:{name:"class transformers.FNetForNextSentencePrediction",anchor:"transformers.FNetForNextSentencePrediction",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/fnet/modeling_fnet.py#L788",parametersDescription:[{anchor:"transformers.FNetForNextSentencePrediction.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/fnet#transformers.FNetConfig">FNetConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),mo=new z({props:{name:"forward",anchor:"transformers.FNetForNextSentencePrediction.forward",parameters:[{name:"input_ids",val:": typing.Optional[torch.Tensor] = None"},{name:"token_type_ids",val:": typing.Optional[torch.Tensor] = None"},{name:"position_ids",val:": typing.Optional[torch.Tensor] = None"},{name:"inputs_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"labels",val:": typing.Optional[torch.Tensor] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/fnet/modeling_fnet.py#L798",parametersDescription:[{anchor:"transformers.FNetForNextSentencePrediction.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/fnet#transformers.FNetTokenizer">FNetTokenizer</a>. 
See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FNetForNextSentencePrediction.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.FNetForNextSentencePrediction.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.FNetForNextSentencePrediction.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.FNetForNextSentencePrediction.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FNetForNextSentencePrediction.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.FNetForNextSentencePrediction.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair (see <code>input_ids</code> docstring). 
Indices should be in <code>[0, 1]</code>:</p> <ul> <li>0 indicates sequence B is a continuation of sequence A,</li> <li>1 indicates sequence B is a random sequence.</li> </ul>`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.NextSentencePredictorOutput" >transformers.modeling_outputs.NextSentencePredictorOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/fnet#transformers.FNetConfig" >FNetConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>next_sentence_label</code> is provided) \u2014 Next sequence prediction (classification) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, 2)</code>) \u2014 Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.NextSentencePredictorOutput" >transformers.modeling_outputs.NextSentencePredictorOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),et=new bt({props:{$$slots:{default:[Jh]},$$scope:{ctx:q}}}),uo=new Y({props:{code:`from transformers import FNetTokenizer, FNetForNextSentencePrediction import torch tokenizer = FNetTokenizer.from_pretrained("google/fnet-base") model = FNetForNextSentencePrediction.from_pretrained("google/fnet-base") prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." next_sentence = "The sky is blue due to the shorter wavelength of blue light." 
encoding = tokenizer(prompt, next_sentence, return_tensors="pt") outputs = model(**encoding, labels=torch.LongTensor([1])) logits = outputs.logits assert logits[0, 0] < logits[0, 1] # next sentence was random`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> FNetTokenizer, FNetForNextSentencePrediction <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = FNetTokenizer.from_pretrained(<span class="hljs-string">&quot;google/fnet-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FNetForNextSentencePrediction.from_pretrained(<span class="hljs-string">&quot;google/fnet-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>next_sentence = <span class="hljs-string">&quot;The sky is blue due to the shorter wavelength of blue light.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer(prompt, next_sentence, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**encoding, labels=torch.LongTensor([<span class="hljs-number">1</span>])) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">assert</span> logits[<span class="hljs-number">0</span>, <span class="hljs-number">0</span>] &lt; logits[<span class="hljs-number">0</span>, <span class="hljs-number">1</span>] <span class="hljs-comment"># next sentence was random</span>`}}),go=new G({}),_o=new z({props:{name:"class transformers.FNetForSequenceClassification",anchor:"transformers.FNetForSequenceClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/fnet/modeling_fnet.py#L882",parametersDescription:[{anchor:"transformers.FNetForSequenceClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/fnet#transformers.FNetConfig">FNetConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Fo=new z({props:{name:"forward",anchor:"transformers.FNetForSequenceClassification.forward",parameters:[{name:"input_ids",val:": typing.Optional[torch.Tensor] = None"},{name:"token_type_ids",val:": typing.Optional[torch.Tensor] = None"},{name:"position_ids",val:": typing.Optional[torch.Tensor] = None"},{name:"inputs_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"labels",val:": typing.Optional[torch.Tensor] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/fnet/modeling_fnet.py#L894",parametersDescription:[{anchor:"transformers.FNetForSequenceClassification.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/fnet#transformers.FNetTokenizer">FNetTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FNetForSequenceClassification.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.FNetForSequenceClassification.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.FNetForSequenceClassification.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.FNetForSequenceClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FNetForSequenceClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.FNetForSequenceClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/fnet#transformers.FNetConfig" >FNetConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),ot=new bt({props:{$$slots:{default:[Kh]},$$scope:{ctx:q}}}),vo=new Y({props:{code:`import torch from transformers import FNetTokenizer, FNetForSequenceClassification torch.manual_seed(0) tokenizer = FNetTokenizer.from_pretrained("google/fnet-base") model = FNetForSequenceClassification.from_pretrained("google/fnet-base", num_labels=2) inputs = 
tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([1]).unsqueeze(0) # Batch size 1 outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits list(logits.shape) `,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> FNetTokenizer, FNetForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span>torch.manual_seed(<span class="hljs-number">0</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = FNetTokenizer.from_pretrained(<span class="hljs-string">&quot;google/fnet-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FNetForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;google/fnet-base&quot;</span>, num_labels=<span class="hljs-number">2</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([<span class="hljs-number">1</span>]).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">list</span>(logits.shape) `}}),wo=new Y({props:{code:`import torch from transformers import FNetTokenizer, FNetForSequenceClassification torch.manual_seed(0) tokenizer = FNetTokenizer.from_pretrained("google/fnet-base") model = FNetForSequenceClassification.from_pretrained("google/fnet-base", problem_type="multi_label_classification", num_labels=2) inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([[1, 1]], dtype=torch.float) # need dtype=float for BCEWithLogitsLoss outputs = model(**inputs, labels=labels) loss = outputs.loss list(logits.shape) `,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> FNetTokenizer, FNetForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span>torch.manual_seed(<span class="hljs-number">0</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = FNetTokenizer.from_pretrained(<span class="hljs-string">&quot;google/fnet-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FNetForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;google/fnet-base&quot;</span>, problem_type=<span class="hljs-string">&quot;multi_label_classification&quot;</span>, num_labels=<span class="hljs-number">2</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([[<span class="hljs-number">1</span>, <span class="hljs-number">1</span>]], dtype=torch.<span class="hljs-built_in">float</span>) <span class="hljs-comment"># need dtype=float for BCEWithLogitsLoss</span> <span 
class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">list</span>(logits.shape) `}}),To=new G({}),yo=new z({props:{name:"class transformers.FNetForMultipleChoice",anchor:"transformers.FNetForMultipleChoice",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/fnet/modeling_fnet.py#L968",parametersDescription:[{anchor:"transformers.FNetForMultipleChoice.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/fnet#transformers.FNetConfig">FNetConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),zo=new z({props:{name:"forward",anchor:"transformers.FNetForMultipleChoice.forward",parameters:[{name:"input_ids",val:": typing.Optional[torch.Tensor] = None"},{name:"token_type_ids",val:": typing.Optional[torch.Tensor] = None"},{name:"position_ids",val:": typing.Optional[torch.Tensor] = None"},{name:"inputs_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"labels",val:": typing.Optional[torch.Tensor] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/fnet/modeling_fnet.py#L979",parametersDescription:[{anchor:"transformers.FNetForMultipleChoice.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/fnet#transformers.FNetTokenizer">FNetTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FNetForMultipleChoice.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.FNetForMultipleChoice.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.FNetForMultipleChoice.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.FNetForMultipleChoice.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FNetForMultipleChoice.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.FNetForMultipleChoice.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the multiple choice classification loss. Indices should be in <code>[0, ..., num_choices-1]</code> where <code>num_choices</code> is the size of the second dimension of the input tensors. (See <code>input_ids</code> above)`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.MultipleChoiceModelOutput" >transformers.modeling_outputs.MultipleChoiceModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/fnet#transformers.FNetConfig" >FNetConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <em>(1,)</em>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices)</code>) \u2014 <em>num_choices</em> is the second dimension of the input tensors. 
(see <em>input_ids</em> above).</p> <p>Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.MultipleChoiceModelOutput" >transformers.modeling_outputs.MultipleChoiceModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),st=new bt({props:{$$slots:{default:[Xh]},$$scope:{ctx:q}}}),qo=new Y({props:{code:`from transformers import FNetTokenizer, FNetForMultipleChoice import torch tokenizer = FNetTokenizer.from_pretrained("google/fnet-base") model = FNetForMultipleChoice.from_pretrained("google/fnet-base") prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." choice0 = "It is eaten with a fork and a knife." choice1 = "It is eaten while held in the hand." 
labels = torch.tensor(0).unsqueeze(0) # choice0 is correct (according to Wikipedia ;)), batch size 1 encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors="pt", padding=True) outputs = model(**{k: v.unsqueeze(0) for k, v in encoding.items()}, labels=labels) # batch size is 1 # the linear classifier still needs to be trained loss = outputs.loss logits = outputs.logits`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> FNetTokenizer, FNetForMultipleChoice <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = FNetTokenizer.from_pretrained(<span class="hljs-string">&quot;google/fnet-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FNetForMultipleChoice.from_pretrained(<span class="hljs-string">&quot;google/fnet-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice0 = <span class="hljs-string">&quot;It is eaten with a fork and a knife.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice1 = <span class="hljs-string">&quot;It is eaten while held in the hand.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor(<span class="hljs-number">0</span>).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># choice0 is correct (according to Wikipedia ;)), batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors=<span class="hljs-string">&quot;pt&quot;</span>, padding=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**{k: v.unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-keyword">for</span> k, v <span class="hljs-keyword">in</span> encoding.items()}, labels=labels) <span class="hljs-comment"># batch size is 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># the linear classifier still needs to be trained</span> <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Eo=new G({}),Po=new z({props:{name:"class transformers.FNetForTokenClassification",anchor:"transformers.FNetForTokenClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/fnet/modeling_fnet.py#L1048",parametersDescription:[{anchor:"transformers.FNetForTokenClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/fnet#transformers.FNetConfig">FNetConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),jo=new z({props:{name:"forward",anchor:"transformers.FNetForTokenClassification.forward",parameters:[{name:"input_ids",val:": typing.Optional[torch.Tensor] = None"},{name:"token_type_ids",val:": typing.Optional[torch.Tensor] = None"},{name:"position_ids",val:": typing.Optional[torch.Tensor] = None"},{name:"inputs_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"labels",val:": typing.Optional[torch.Tensor] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/fnet/modeling_fnet.py#L1061",parametersDescription:[{anchor:"transformers.FNetForTokenClassification.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/fnet#transformers.FNetTokenizer">FNetTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FNetForTokenClassification.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.FNetForTokenClassification.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.FNetForTokenClassification.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.FNetForTokenClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FNetForTokenClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.FNetForTokenClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the token classification loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/fnet#transformers.FNetConfig" >FNetConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.num_labels)</code>) \u2014 Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),at=new bt({props:{$$slots:{default:[Yh]},$$scope:{ctx:q}}}),Co=new Y({props:{code:`from transformers import FNetTokenizer, FNetForTokenClassification import torch tokenizer = FNetTokenizer.from_pretrained("google/fnet-base") model = FNetForTokenClassification.from_pretrained("google/fnet-base") inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([1] * inputs["input_ids"].size(1)).unsqueeze(0) # Batch size 1 outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span 
class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> FNetTokenizer, FNetForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = FNetTokenizer.from_pretrained(<span class="hljs-string">&quot;google/fnet-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FNetForTokenClassification.from_pretrained(<span class="hljs-string">&quot;google/fnet-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([<span class="hljs-number">1</span>] * inputs[<span class="hljs-string">&quot;input_ids&quot;</span>].size(<span class="hljs-number">1</span>)).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),So=new G({}),Ao=new z({props:{name:"class transformers.FNetForQuestionAnswering",anchor:"transformers.FNetForQuestionAnswering",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/fnet/modeling_fnet.py#L1118",parametersDescription:[{anchor:"transformers.FNetForQuestionAnswering.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/fnet#transformers.FNetConfig">FNetConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Oo=new z({props:{name:"forward",anchor:"transformers.FNetForQuestionAnswering.forward",parameters:[{name:"input_ids",val:": typing.Optional[torch.Tensor] = None"},{name:"token_type_ids",val:": typing.Optional[torch.Tensor] = None"},{name:"position_ids",val:": typing.Optional[torch.Tensor] = None"},{name:"inputs_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"start_positions",val:": typing.Optional[torch.Tensor] = None"},{name:"end_positions",val:": typing.Optional[torch.Tensor] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/fnet/modeling_fnet.py#L1130",parametersDescription:[{anchor:"transformers.FNetForQuestionAnswering.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/fnet#transformers.FNetTokenizer">FNetTokenizer</a>. 
See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FNetForQuestionAnswering.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.FNetForQuestionAnswering.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.FNetForQuestionAnswering.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.FNetForQuestionAnswering.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FNetForQuestionAnswering.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.FNetForQuestionAnswering.forward.start_positions",description:`<strong>start_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). Position outside of the sequence are not taken into account for computing the loss.`,name:"start_positions"},{anchor:"transformers.FNetForQuestionAnswering.forward.end_positions",description:`<strong>end_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the end of the labelled span for computing the token classification loss. 
Positions are clamped to the length of the sequence (<code>sequence_length</code>). Position outside of the sequence are not taken into account for computing the loss.`,name:"end_positions"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.QuestionAnsweringModelOutput" >transformers.modeling_outputs.QuestionAnsweringModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/fnet#transformers.FNetConfig" >FNetConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.</p> </li> <li> <p><strong>start_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-start scores (before SoftMax).</p> </li> <li> <p><strong>end_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-end scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.QuestionAnsweringModelOutput" >transformers.modeling_outputs.QuestionAnsweringModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),lt=new bt({props:{$$slots:{default:[Zh]},$$scope:{ctx:q}}}),Do=new Y({props:{code:`from transformers import FNetTokenizer, FNetForQuestionAnswering import torch torch.manual_seed(0) tokenizer = FNetTokenizer.from_pretrained("google/fnet-base") model = FNetForQuestionAnswering.from_pretrained("google/fnet-base") question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" inputs = tokenizer(question, text, return_tensors="pt") start_positions = torch.tensor([1]) end_positions = torch.tensor([3]) outputs = model(**inputs, start_positions=start_positions, end_positions=end_positions) loss = outputs.loss round(loss.item(), 2) start_scores = outputs.start_logits list(start_scores.shape) end_scores = outputs.end_logits list(end_scores.shape) `,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> FNetTokenizer, FNetForQuestionAnswering <span 
class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>torch.manual_seed(<span class="hljs-number">0</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = FNetTokenizer.from_pretrained(<span class="hljs-string">&quot;google/fnet-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FNetForQuestionAnswering.from_pretrained(<span class="hljs-string">&quot;google/fnet-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>question, text = <span class="hljs-string">&quot;Who was Jim Henson?&quot;</span>, <span class="hljs-string">&quot;Jim Henson was a nice puppet&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(question, text, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>start_positions = torch.tensor([<span class="hljs-number">1</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>end_positions = torch.tensor([<span class="hljs-number">3</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, start_positions=start_positions, end_positions=end_positions) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">round</span>(loss.item(), <span class="hljs-number">2</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>start_scores = outputs.start_logits <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">list</span>(start_scores.shape) <span class="hljs-meta">&gt;&gt;&gt; </span>end_scores = outputs.end_logits <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">list</span>(end_scores.shape) `}}),{c(){p=n("meta"),y=c(),m=n("h1"),T=n("a"),N=n("span"),g(w.$$.fragment),u=c(),$=n("span"),Jr=a("FNet"),Xs=c(),me=n("h2"),Be=n("a"),jn=n("span"),g(Ft.$$.fragment),Kr=c(),Cn=n("span"),Xr=a("Overview"),Ys=c(),We=n("p"),Yr=a("The FNet model was proposed in "),vt=n("a"),Zr=a("FNet: Mixing Tokens with Fourier Transforms"),ea=a(` by James Lee-Thorp, Joshua Ainslie, Ilya Eckstein, Santiago Ontanon. The model replaces the self-attention layer in a BERT model with a fourier transform which returns only the real parts of the transform. The model is significantly faster than the BERT model because it has fewer parameters and is more memory efficient. The model achieves about 92-97% accuracy of BERT counterparts on GLUE benchmark, and trains much faster than the BERT model. The abstract from the paper is the following:`),Zs=c(),Uo=n("p"),Sn=n("em"),ta=a(`We show that Transformer encoder architectures can be sped up, with limited accuracy costs, by replacing the self-attention sublayers with simple linear transformations that \u201Cmix\u201D input tokens. These linear mixers, along with standard nonlinearities in feed-forward layers, prove competent at modeling semantic relationships in several text classification tasks. Most surprisingly, we find that replacing the self-attention sublayer in a Transformer encoder with a standard, unparameterized Fourier Transform achieves 92-97% of the accuracy of BERT counterparts on the GLUE benchmark, but trains 80% faster on GPUs and 70% faster on TPUs at standard 512 input lengths. 
At longer input lengths, our FNet model is significantly faster: when compared to the \u201Cefficient\u201D Transformers on the Long Range Arena benchmark, FNet matches the accuracy of the most accurate models, while outpacing the fastest models across all sequence lengths on GPUs (and across relatively shorter lengths on TPUs). Finally, FNet has a light memory footprint and is particularly efficient at smaller model sizes; for a fixed speed and accuracy budget, small FNet models outperform Transformer counterparts.`),er=c(),Ho=n("p"),oa=a("Tips on usage:"),tr=c(),Qo=n("ul"),An=n("li"),na=a(`The model was trained without an attention mask as it is based on Fourier Transform. The model was trained with maximum sequence length 512 which includes pad tokens. Hence, it is highly recommended to use the same maximum sequence length for fine-tuning and inference.`),or=c(),oe=n("p"),sa=a("This model was contributed by "),wt=n("a"),ra=a("gchhablani"),aa=a(". The original code can be found "),Tt=n("a"),ia=a("here"),la=a("."),nr=c(),ue=n("h2"),Ue=n("a"),Ln=n("span"),g(yt.$$.fragment),ca=c(),In=n("span"),da=a("FNetConfig"),sr=c(),x=n("div"),g(Nt.$$.fragment),pa=c(),ge=n("p"),ha=a("This is the configuration class to store the configuration of a "),Vo=n("a"),fa=a("FNetModel"),ma=a(`. It is used to instantiate an FNet model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the FNet `),$t=n("a"),ua=a("fnet-base"),ga=a(" architecture."),_a=c(),_e=n("p"),ka=a("Configuration objects inherit from "),Ro=n("a"),ba=a("PretrainedConfig"),Fa=a(` and can be used to control the model outputs. Read the documentation from `),Go=n("a"),va=a("PretrainedConfig"),wa=a(" for more information."),Ta=c(),On=n("p"),ya=a("Example:"),Na=c(),g(zt.$$.fragment),rr=c(),ke=n("h2"),He=n("a"),Dn=n("span"),g(qt.$$.fragment),$a=c(),Bn=n("span"),za=a("FNetTokenizer"),ar=c(),P=n("div"),g(Et.$$.fragment),qa=c(),Z=n("p"),Ea=a("Construct an FNet tokenizer. Adapted from "),Jo=n("a"),Pa=a("AlbertTokenizer"),xa=a(`. Based on `),Pt=n("a"),Ma=a("SentencePiece"),ja=a(". This tokenizer inherits from "),Ko=n("a"),Ca=a("PreTrainedTokenizer"),Sa=a(` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),Aa=c(),ne=n("div"),g(xt.$$.fragment),La=c(),Wn=n("p"),Ia=a(`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. An FNet sequence has the following format:`),Oa=c(),Mt=n("ul"),Xo=n("li"),Da=a("single sequence: "),Un=n("code"),Ba=a("[CLS] X [SEP]"),Wa=c(),Yo=n("li"),Ua=a("pair of sequences: "),Hn=n("code"),Ha=a("[CLS] A [SEP] B [SEP]"),Qa=c(),Qe=n("div"),g(jt.$$.fragment),Va=c(),Ct=n("p"),Ra=a(`Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `),Qn=n("code"),Ga=a("prepare_for_model"),Ja=a(" method."),Ka=c(),J=n("div"),g(St.$$.fragment),Xa=c(),Vn=n("p"),Ya=a(`Create a mask from the two sequences passed to be used in a sequence-pair classification task. 
An FNet sequence pair mask has the following format: :`),Za=c(),g(At.$$.fragment),ei=c(),be=n("p"),ti=a("If "),Rn=n("code"),oi=a("token_ids_1"),ni=a(" is "),Gn=n("code"),si=a("None"),ri=a(", this method only returns the first portion of the mask (0s)."),ai=c(),Zo=n("div"),g(Lt.$$.fragment),ir=c(),Fe=n("h2"),Ve=n("a"),Jn=n("span"),g(It.$$.fragment),ii=c(),Kn=n("span"),li=a("FNetTokenizerFast"),lr=c(),D=n("div"),g(Ot.$$.fragment),ci=c(),B=n("p"),di=a("Construct a \u201Cfast\u201D FNetTokenizer (backed by HuggingFace\u2019s "),Xn=n("em"),pi=a("tokenizers"),hi=a(` library). Adapted from `),en=n("a"),fi=a("AlbertTokenizerFast"),mi=a(`. Based on `),Dt=n("a"),ui=a("Unigram"),gi=a(`. This tokenizer inherits from `),tn=n("a"),_i=a("PreTrainedTokenizerFast"),ki=a(` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods`),bi=c(),se=n("div"),g(Bt.$$.fragment),Fi=c(),Yn=n("p"),vi=a(`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. An FNet sequence has the following format:`),wi=c(),Wt=n("ul"),on=n("li"),Ti=a("single sequence: "),Zn=n("code"),yi=a("[CLS] X [SEP]"),Ni=c(),nn=n("li"),$i=a("pair of sequences: "),es=n("code"),zi=a("[CLS] A [SEP] B [SEP]"),qi=c(),K=n("div"),g(Ut.$$.fragment),Ei=c(),ts=n("p"),Pi=a(`Creates a mask from the two sequences passed to be used in a sequence-pair classification task. An FNet sequence pair mask has the following format:`),xi=c(),g(Ht.$$.fragment),Mi=c(),os=n("p"),ji=a("if token_ids_1 is None, only returns the first portion of the mask (0s)."),cr=c(),ve=n("h2"),Re=n("a"),ns=n("span"),g(Qt.$$.fragment),Ci=c(),ss=n("span"),Si=a("FNetModel"),dr=c(),W=n("div"),g(Vt.$$.fragment),Ai=c(),Rt=n("p"),Li=a(`The bare FNet Model transformer outputting raw hidden-states without any specific head on top. This model is a PyTorch `),Gt=n("a"),Ii=a("torch.nn.Module"),Oi=a(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Di=c(),Jt=n("p"),Bi=a("The model can behave as an encoder, following the architecture described in "),Kt=n("a"),Wi=a(`FNet: Mixing Tokens with Fourier Transforms`),Ui=a(" by James Lee-Thorp, Joshua Ainslie, Ilya Eckstein, Santiago Ontanon."),Hi=c(),M=n("div"),g(Xt.$$.fragment),Qi=c(),we=n("p"),Vi=a("The "),sn=n("a"),Ri=a("FNetModel"),Gi=a(" forward method, overrides the "),rs=n("code"),Ji=a("__call__"),Ki=a(" special method."),Xi=c(),g(Ge.$$.fragment),Yi=c(),as=n("p"),Zi=a("Example:"),el=c(),g(Yt.$$.fragment),pr=c(),Te=n("h2"),Je=n("a"),is=n("span"),g(Zt.$$.fragment),tl=c(),ls=n("span"),ol=a("FNetForPreTraining"),hr=c(),U=n("div"),g(eo.$$.fragment),nl=c(),ye=n("p"),sl=a("FNet Model with two heads on top as done during the pretraining: a "),cs=n("code"),rl=a("masked language modeling"),al=a(" head and a "),ds=n("code"),il=a("next sentence prediction (classification)"),ll=a(" head."),cl=c(),to=n("p"),dl=a("This model is a PyTorch "),oo=n("a"),pl=a("torch.nn.Module"),hl=a(` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),fl=c(),j=n("div"),g(no.$$.fragment),ml=c(),Ne=n("p"),ul=a("The "),rn=n("a"),gl=a("FNetForPreTraining"),_l=a(" forward method, overrides the "),ps=n("code"),kl=a("__call__"),bl=a(" special method."),Fl=c(),g(Ke.$$.fragment),vl=c(),hs=n("p"),wl=a("Example:"),Tl=c(),g(so.$$.fragment),fr=c(),$e=n("h2"),Xe=n("a"),fs=n("span"),g(ro.$$.fragment),yl=c(),ms=n("span"),Nl=a("FNetForMaskedLM"),mr=c(),ee=n("div"),g(ao.$$.fragment),$l=c(),ze=n("p"),zl=a("FNet Model with a "),us=n("code"),ql=a("language modeling"),El=a(` head on top. This model is a PyTorch `),io=n("a"),Pl=a("torch.nn.Module"),xl=a(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Ml=c(),C=n("div"),g(lo.$$.fragment),jl=c(),qe=n("p"),Cl=a("The "),an=n("a"),Sl=a("FNetForMaskedLM"),Al=a(" forward method, overrides the "),gs=n("code"),Ll=a("__call__"),Il=a(" special method."),Ol=c(),g(Ye.$$.fragment),Dl=c(),_s=n("p"),Bl=a("Example:"),Wl=c(),g(co.$$.fragment),ur=c(),Ee=n("h2"),Ze=n("a"),ks=n("span"),g(po.$$.fragment),Ul=c(),bs=n("span"),Hl=a("FNetForNextSentencePrediction"),gr=c(),te=n("div"),g(ho.$$.fragment),Ql=c(),Pe=n("p"),Vl=a("FNet Model with a "),Fs=n("code"),Rl=a("next sentence prediction (classification)"),Gl=a(` head on top. This model is a PyTorch `),fo=n("a"),Jl=a("torch.nn.Module"),Kl=a(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Xl=c(),S=n("div"),g(mo.$$.fragment),Yl=c(),xe=n("p"),Zl=a("The "),ln=n("a"),ec=a("FNetForNextSentencePrediction"),tc=a(" forward method, overrides the "),vs=n("code"),oc=a("__call__"),nc=a(" special method."),sc=c(),g(et.$$.fragment),rc=c(),ws=n("p"),ac=a("Example:"),ic=c(),g(uo.$$.fragment),_r=c(),Me=n("h2"),tt=n("a"),Ts=n("span"),g(go.$$.fragment),lc=c(),ys=n("span"),cc=a("FNetForSequenceClassification"),kr=c(),H=n("div"),g(_o.$$.fragment),dc=c(),Ns=n("p"),pc=a(`FNet Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),hc=c(),ko=n("p"),fc=a("This model is a PyTorch "),bo=n("a"),mc=a("torch.nn.Module"),uc=a(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),gc=c(),E=n("div"),g(Fo.$$.fragment),_c=c(),je=n("p"),kc=a("The "),cn=n("a"),bc=a("FNetForSequenceClassification"),Fc=a(" forward method, overrides the "),$s=n("code"),vc=a("__call__"),wc=a(" special method."),Tc=c(),g(ot.$$.fragment),yc=c(),zs=n("p"),Nc=a("Example of single-label classification:"),$c=c(),g(vo.$$.fragment),zc=c(),qs=n("p"),qc=a("Example of multi-label classification:"),Ec=c(),g(wo.$$.fragment),br=c(),Ce=n("h2"),nt=n("a"),Es=n("span"),g(To.$$.fragment),Pc=c(),Ps=n("span"),xc=a("FNetForMultipleChoice"),Fr=c(),Q=n("div"),g(yo.$$.fragment),Mc=c(),xs=n("p"),jc=a(`FNet Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.`),Cc=c(),No=n("p"),Sc=a("This model is a PyTorch "),$o=n("a"),Ac=a("torch.nn.Module"),Lc=a(` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Ic=c(),A=n("div"),g(zo.$$.fragment),Oc=c(),Se=n("p"),Dc=a("The "),dn=n("a"),Bc=a("FNetForMultipleChoice"),Wc=a(" forward method, overrides the "),Ms=n("code"),Uc=a("__call__"),Hc=a(" special method."),Qc=c(),g(st.$$.fragment),Vc=c(),js=n("p"),Rc=a("Example:"),Gc=c(),g(qo.$$.fragment),vr=c(),Ae=n("h2"),rt=n("a"),Cs=n("span"),g(Eo.$$.fragment),Jc=c(),Ss=n("span"),Kc=a("FNetForTokenClassification"),wr=c(),V=n("div"),g(Po.$$.fragment),Xc=c(),As=n("p"),Yc=a(`FNet Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.`),Zc=c(),xo=n("p"),ed=a("This model is a PyTorch "),Mo=n("a"),td=a("torch.nn.Module"),od=a(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),nd=c(),L=n("div"),g(jo.$$.fragment),sd=c(),Le=n("p"),rd=a("The "),pn=n("a"),ad=a("FNetForTokenClassification"),id=a(" forward method, overrides the "),Ls=n("code"),ld=a("__call__"),cd=a(" special method."),dd=c(),g(at.$$.fragment),pd=c(),Is=n("p"),hd=a("Example:"),fd=c(),g(Co.$$.fragment),Tr=c(),Ie=n("h2"),it=n("a"),Os=n("span"),g(So.$$.fragment),md=c(),Ds=n("span"),ud=a("FNetForQuestionAnswering"),yr=c(),R=n("div"),g(Ao.$$.fragment),gd=c(),Oe=n("p"),_d=a(`FNet Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),Bs=n("code"),kd=a("span start logits"),bd=a(" and "),Ws=n("code"),Fd=a("span end logits"),vd=a(")."),wd=c(),Lo=n("p"),Td=a("This model is a PyTorch "),Io=n("a"),yd=a("torch.nn.Module"),Nd=a(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),$d=c(),I=n("div"),g(Oo.$$.fragment),zd=c(),De=n("p"),qd=a("The "),hn=n("a"),Ed=a("FNetForQuestionAnswering"),Pd=a(" forward method, overrides the "),Us=n("code"),xd=a("__call__"),Md=a(" special method."),jd=c(),g(lt.$$.fragment),Cd=c(),Hs=n("p"),Sd=a("Example:"),Ad=c(),g(Do.$$.fragment),this.h()},l(t){const h=Qh('[data-svelte="svelte-1phssyn"]',document.head);p=s(h,"META",{name:!0,content:!0}),h.forEach(o),y=d(t),m=s(t,"H1",{class:!0});var Bo=r(m);T=s(Bo,"A",{id:!0,class:!0,href:!0});var Qs=r(T);N=s(Qs,"SPAN",{});var Vs=r(N);_(w.$$.fragment,Vs),Vs.forEach(o),Qs.forEach(o),u=d(Bo),$=s(Bo,"SPAN",{});var Rs=r($);Jr=i(Rs,"FNet"),Rs.forEach(o),Bo.forEach(o),Xs=d(t),me=s(t,"H2",{class:!0});var Wo=r(me);Be=s(Wo,"A",{id:!0,class:!0,href:!0});var Gs=r(Be);jn=s(Gs,"SPAN",{});var Js=r(jn);_(Ft.$$.fragment,Js),Js.forEach(o),Gs.forEach(o),Kr=d(Wo),Cn=s(Wo,"SPAN",{});var Ks=r(Cn);Xr=i(Ks,"Overview"),Ks.forEach(o),Wo.forEach(o),Ys=d(t),We=s(t,"P",{});var $r=r(We);Yr=i($r,"The FNet model was proposed in "),vt=s($r,"A",{href:!0,rel:!0});var Bd=r(vt);Zr=i(Bd,"FNet: Mixing Tokens with Fourier Transforms"),Bd.forEach(o),ea=i($r,` by James Lee-Thorp, Joshua Ainslie, Ilya Eckstein, Santiago Ontanon. The model replaces the self-attention layer in a BERT model with a fourier transform which returns only the real parts of the transform. The model is significantly faster than the BERT model because it has fewer parameters and is more memory efficient. The model achieves about 92-97% accuracy of BERT counterparts on GLUE benchmark, and trains much faster than the BERT model. 
The abstract from the paper is the following:`),$r.forEach(o),Zs=d(t),Uo=s(t,"P",{});var Wd=r(Uo);Sn=s(Wd,"EM",{});var Ud=r(Sn);ta=i(Ud,`We show that Transformer encoder architectures can be sped up, with limited accuracy costs, by replacing the self-attention sublayers with simple linear transformations that \u201Cmix\u201D input tokens. These linear mixers, along with standard nonlinearities in feed-forward layers, prove competent at modeling semantic relationships in several text classification tasks. Most surprisingly, we find that replacing the self-attention sublayer in a Transformer encoder with a standard, unparameterized Fourier Transform achieves 92-97% of the accuracy of BERT counterparts on the GLUE benchmark, but trains 80% faster on GPUs and 70% faster on TPUs at standard 512 input lengths. At longer input lengths, our FNet model is significantly faster: when compared to the \u201Cefficient\u201D Transformers on the Long Range Arena benchmark, FNet matches the accuracy of the most accurate models, while outpacing the fastest models across all sequence lengths on GPUs (and across relatively shorter lengths on TPUs). Finally, FNet has a light memory footprint and is particularly efficient at smaller model sizes; for a fixed speed and accuracy budget, small FNet models outperform Transformer counterparts.`),Ud.forEach(o),Wd.forEach(o),er=d(t),Ho=s(t,"P",{});var Hd=r(Ho);oa=i(Hd,"Tips on usage:"),Hd.forEach(o),tr=d(t),Qo=s(t,"UL",{});var Qd=r(Qo);An=s(Qd,"LI",{});var Vd=r(An);na=i(Vd,`The model was trained without an attention mask as it is based on Fourier Transform. The model was trained with maximum sequence length 512 which includes pad tokens. Hence, it is highly recommended to use the same maximum sequence length for fine-tuning and inference.`),Vd.forEach(o),Qd.forEach(o),or=d(t),oe=s(t,"P",{});var fn=r(oe);sa=i(fn,"This model was contributed by "),wt=s(fn,"A",{href:!0,rel:!0});var Rd=r(wt);ra=i(Rd,"gchhablani"),Rd.forEach(o),aa=i(fn,". The original code can be found "),Tt=s(fn,"A",{href:!0,rel:!0});var Gd=r(Tt);ia=i(Gd,"here"),Gd.forEach(o),la=i(fn,"."),fn.forEach(o),nr=d(t),ue=s(t,"H2",{class:!0});var zr=r(ue);Ue=s(zr,"A",{id:!0,class:!0,href:!0});var Jd=r(Ue);Ln=s(Jd,"SPAN",{});var Kd=r(Ln);_(yt.$$.fragment,Kd),Kd.forEach(o),Jd.forEach(o),ca=d(zr),In=s(zr,"SPAN",{});var Xd=r(In);da=i(Xd,"FNetConfig"),Xd.forEach(o),zr.forEach(o),sr=d(t),x=s(t,"DIV",{class:!0});var re=r(x);_(Nt.$$.fragment,re),pa=d(re),ge=s(re,"P",{});var mn=r(ge);ha=i(mn,"This is the configuration class to store the configuration of a "),Vo=s(mn,"A",{href:!0});var Yd=r(Vo);fa=i(Yd,"FNetModel"),Yd.forEach(o),ma=i(mn,`. It is used to instantiate an FNet model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the FNet `),$t=s(mn,"A",{href:!0,rel:!0});var Zd=r($t);ua=i(Zd,"fnet-base"),Zd.forEach(o),ga=i(mn," architecture."),mn.forEach(o),_a=d(re),_e=s(re,"P",{});var un=r(_e);ka=i(un,"Configuration objects inherit from "),Ro=s(un,"A",{href:!0});var ep=r(Ro);ba=i(ep,"PretrainedConfig"),ep.forEach(o),Fa=i(un,` and can be used to control the model outputs. 
Read the documentation from `),Go=s(un,"A",{href:!0});var tp=r(Go);va=i(tp,"PretrainedConfig"),tp.forEach(o),wa=i(un," for more information."),un.forEach(o),Ta=d(re),On=s(re,"P",{});var op=r(On);ya=i(op,"Example:"),op.forEach(o),Na=d(re),_(zt.$$.fragment,re),re.forEach(o),rr=d(t),ke=s(t,"H2",{class:!0});var qr=r(ke);He=s(qr,"A",{id:!0,class:!0,href:!0});var np=r(He);Dn=s(np,"SPAN",{});var sp=r(Dn);_(qt.$$.fragment,sp),sp.forEach(o),np.forEach(o),$a=d(qr),Bn=s(qr,"SPAN",{});var rp=r(Bn);za=i(rp,"FNetTokenizer"),rp.forEach(o),qr.forEach(o),ar=d(t),P=s(t,"DIV",{class:!0});var X=r(P);_(Et.$$.fragment,X),qa=d(X),Z=s(X,"P",{});var ct=r(Z);Ea=i(ct,"Construct an FNet tokenizer. Adapted from "),Jo=s(ct,"A",{href:!0});var ap=r(Jo);Pa=i(ap,"AlbertTokenizer"),ap.forEach(o),xa=i(ct,`. Based on `),Pt=s(ct,"A",{href:!0,rel:!0});var ip=r(Pt);Ma=i(ip,"SentencePiece"),ip.forEach(o),ja=i(ct,". This tokenizer inherits from "),Ko=s(ct,"A",{href:!0});var lp=r(Ko);Ca=i(lp,"PreTrainedTokenizer"),lp.forEach(o),Sa=i(ct,` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),ct.forEach(o),Aa=d(X),ne=s(X,"DIV",{class:!0});var gn=r(ne);_(xt.$$.fragment,gn),La=d(gn),Wn=s(gn,"P",{});var cp=r(Wn);Ia=i(cp,`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. An FNet sequence has the following format:`),cp.forEach(o),Oa=d(gn),Mt=s(gn,"UL",{});var Er=r(Mt);Xo=s(Er,"LI",{});var Ld=r(Xo);Da=i(Ld,"single sequence: "),Un=s(Ld,"CODE",{});var dp=r(Un);Ba=i(dp,"[CLS] X [SEP]"),dp.forEach(o),Ld.forEach(o),Wa=d(Er),Yo=s(Er,"LI",{});var Id=r(Yo);Ua=i(Id,"pair of sequences: "),Hn=s(Id,"CODE",{});var pp=r(Hn);Ha=i(pp,"[CLS] A [SEP] B [SEP]"),pp.forEach(o),Id.forEach(o),Er.forEach(o),gn.forEach(o),Qa=d(X),Qe=s(X,"DIV",{class:!0});var Pr=r(Qe);_(jt.$$.fragment,Pr),Va=d(Pr),Ct=s(Pr,"P",{});var xr=r(Ct);Ra=i(xr,`Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `),Qn=s(xr,"CODE",{});var hp=r(Qn);Ga=i(hp,"prepare_for_model"),hp.forEach(o),Ja=i(xr," method."),xr.forEach(o),Pr.forEach(o),Ka=d(X),J=s(X,"DIV",{class:!0});var dt=r(J);_(St.$$.fragment,dt),Xa=d(dt),Vn=s(dt,"P",{});var fp=r(Vn);Ya=i(fp,`Create a mask from the two sequences passed to be used in a sequence-pair classification task. An FNet sequence pair mask has the following format: :`),fp.forEach(o),Za=d(dt),_(At.$$.fragment,dt),ei=d(dt),be=s(dt,"P",{});var _n=r(be);ti=i(_n,"If "),Rn=s(_n,"CODE",{});var mp=r(Rn);oi=i(mp,"token_ids_1"),mp.forEach(o),ni=i(_n," is "),Gn=s(_n,"CODE",{});var up=r(Gn);si=i(up,"None"),up.forEach(o),ri=i(_n,", this method only returns the first portion of the mask (0s)."),_n.forEach(o),dt.forEach(o),ai=d(X),Zo=s(X,"DIV",{class:!0});var gp=r(Zo);_(Lt.$$.fragment,gp),gp.forEach(o),X.forEach(o),ir=d(t),Fe=s(t,"H2",{class:!0});var Mr=r(Fe);Ve=s(Mr,"A",{id:!0,class:!0,href:!0});var _p=r(Ve);Jn=s(_p,"SPAN",{});var kp=r(Jn);_(It.$$.fragment,kp),kp.forEach(o),_p.forEach(o),ii=d(Mr),Kn=s(Mr,"SPAN",{});var bp=r(Kn);li=i(bp,"FNetTokenizerFast"),bp.forEach(o),Mr.forEach(o),lr=d(t),D=s(t,"DIV",{class:!0});var pt=r(D);_(Ot.$$.fragment,pt),ci=d(pt),B=s(pt,"P",{});var ae=r(B);di=i(ae,"Construct a \u201Cfast\u201D FNetTokenizer (backed by HuggingFace\u2019s "),Xn=s(ae,"EM",{});var Fp=r(Xn);pi=i(Fp,"tokenizers"),Fp.forEach(o),hi=i(ae,` library). 
Adapted from `),en=s(ae,"A",{href:!0});var vp=r(en);fi=i(vp,"AlbertTokenizerFast"),vp.forEach(o),mi=i(ae,`. Based on `),Dt=s(ae,"A",{href:!0,rel:!0});var wp=r(Dt);ui=i(wp,"Unigram"),wp.forEach(o),gi=i(ae,`. This tokenizer inherits from `),tn=s(ae,"A",{href:!0});var Tp=r(tn);_i=i(Tp,"PreTrainedTokenizerFast"),Tp.forEach(o),ki=i(ae,` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods`),ae.forEach(o),bi=d(pt),se=s(pt,"DIV",{class:!0});var kn=r(se);_(Bt.$$.fragment,kn),Fi=d(kn),Yn=s(kn,"P",{});var yp=r(Yn);vi=i(yp,`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. An FNet sequence has the following format:`),yp.forEach(o),wi=d(kn),Wt=s(kn,"UL",{});var jr=r(Wt);on=s(jr,"LI",{});var Od=r(on);Ti=i(Od,"single sequence: "),Zn=s(Od,"CODE",{});var Np=r(Zn);yi=i(Np,"[CLS] X [SEP]"),Np.forEach(o),Od.forEach(o),Ni=d(jr),nn=s(jr,"LI",{});var Dd=r(nn);$i=i(Dd,"pair of sequences: "),es=s(Dd,"CODE",{});var $p=r(es);zi=i($p,"[CLS] A [SEP] B [SEP]"),$p.forEach(o),Dd.forEach(o),jr.forEach(o),kn.forEach(o),qi=d(pt),K=s(pt,"DIV",{class:!0});var ht=r(K);_(Ut.$$.fragment,ht),Ei=d(ht),ts=s(ht,"P",{});var zp=r(ts);Pi=i(zp,`Creates a mask from the two sequences passed to be used in a sequence-pair classification task. An FNet sequence pair mask has the following format:`),zp.forEach(o),xi=d(ht),_(Ht.$$.fragment,ht),Mi=d(ht),os=s(ht,"P",{});var qp=r(os);ji=i(qp,"if token_ids_1 is None, only returns the first portion of the mask (0s)."),qp.forEach(o),ht.forEach(o),pt.forEach(o),cr=d(t),ve=s(t,"H2",{class:!0});var Cr=r(ve);Re=s(Cr,"A",{id:!0,class:!0,href:!0});var Ep=r(Re);ns=s(Ep,"SPAN",{});var Pp=r(ns);_(Qt.$$.fragment,Pp),Pp.forEach(o),Ep.forEach(o),Ci=d(Cr),ss=s(Cr,"SPAN",{});var xp=r(ss);Si=i(xp,"FNetModel"),xp.forEach(o),Cr.forEach(o),dr=d(t),W=s(t,"DIV",{class:!0});var ft=r(W);_(Vt.$$.fragment,ft),Ai=d(ft),Rt=s(ft,"P",{});var Sr=r(Rt);Li=i(Sr,`The bare FNet Model transformer outputting raw hidden-states without any specific head on top. This model is a PyTorch `),Gt=s(Sr,"A",{href:!0,rel:!0});var Mp=r(Gt);Ii=i(Mp,"torch.nn.Module"),Mp.forEach(o),Oi=i(Sr,` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Sr.forEach(o),Di=d(ft),Jt=s(ft,"P",{});var Ar=r(Jt);Bi=i(Ar,"The model can behave as an encoder, following the architecture described in "),Kt=s(Ar,"A",{href:!0,rel:!0});var jp=r(Kt);Wi=i(jp,`FNet: Mixing Tokens with Fourier Transforms`),jp.forEach(o),Ui=i(Ar," by James Lee-Thorp, Joshua Ainslie, Ilya Eckstein, Santiago Ontanon."),Ar.forEach(o),Hi=d(ft),M=s(ft,"DIV",{class:!0});var ie=r(M);_(Xt.$$.fragment,ie),Qi=d(ie),we=s(ie,"P",{});var bn=r(we);Vi=i(bn,"The "),sn=s(bn,"A",{href:!0});var Cp=r(sn);Ri=i(Cp,"FNetModel"),Cp.forEach(o),Gi=i(bn," forward method, overrides the "),rs=s(bn,"CODE",{});var Sp=r(rs);Ji=i(Sp,"__call__"),Sp.forEach(o),Ki=i(bn," special method."),bn.forEach(o),Xi=d(ie),_(Ge.$$.fragment,ie),Yi=d(ie),as=s(ie,"P",{});var Ap=r(as);Zi=i(Ap,"Example:"),Ap.forEach(o),el=d(ie),_(Yt.$$.fragment,ie),ie.forEach(o),ft.forEach(o),pr=d(t),Te=s(t,"H2",{class:!0});var Lr=r(Te);Je=s(Lr,"A",{id:!0,class:!0,href:!0});var Lp=r(Je);is=s(Lp,"SPAN",{});var Ip=r(is);_(Zt.$$.fragment,Ip),Ip.forEach(o),Lp.forEach(o),tl=d(Lr),ls=s(Lr,"SPAN",{});var Op=r(ls);ol=i(Op,"FNetForPreTraining"),Op.forEach(o),Lr.forEach(o),hr=d(t),U=s(t,"DIV",{class:!0});var mt=r(U);_(eo.$$.fragment,mt),nl=d(mt),ye=s(mt,"P",{});var Fn=r(ye);sl=i(Fn,"FNet Model with two heads on top as done during the pretraining: a "),cs=s(Fn,"CODE",{});var Dp=r(cs);rl=i(Dp,"masked language modeling"),Dp.forEach(o),al=i(Fn," head and a "),ds=s(Fn,"CODE",{});var Bp=r(ds);il=i(Bp,"next sentence prediction (classification)"),Bp.forEach(o),ll=i(Fn," head."),Fn.forEach(o),cl=d(mt),to=s(mt,"P",{});var Ir=r(to);dl=i(Ir,"This model is a PyTorch "),oo=s(Ir,"A",{href:!0,rel:!0});var Wp=r(oo);pl=i(Wp,"torch.nn.Module"),Wp.forEach(o),hl=i(Ir,` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Ir.forEach(o),fl=d(mt),j=s(mt,"DIV",{class:!0});var le=r(j);_(no.$$.fragment,le),ml=d(le),Ne=s(le,"P",{});var vn=r(Ne);ul=i(vn,"The "),rn=s(vn,"A",{href:!0});var Up=r(rn);gl=i(Up,"FNetForPreTraining"),Up.forEach(o),_l=i(vn," forward method, overrides the "),ps=s(vn,"CODE",{});var Hp=r(ps);kl=i(Hp,"__call__"),Hp.forEach(o),bl=i(vn," special method."),vn.forEach(o),Fl=d(le),_(Ke.$$.fragment,le),vl=d(le),hs=s(le,"P",{});var Qp=r(hs);wl=i(Qp,"Example:"),Qp.forEach(o),Tl=d(le),_(so.$$.fragment,le),le.forEach(o),mt.forEach(o),fr=d(t),$e=s(t,"H2",{class:!0});var Or=r($e);Xe=s(Or,"A",{id:!0,class:!0,href:!0});var Vp=r(Xe);fs=s(Vp,"SPAN",{});var Rp=r(fs);_(ro.$$.fragment,Rp),Rp.forEach(o),Vp.forEach(o),yl=d(Or),ms=s(Or,"SPAN",{});var Gp=r(ms);Nl=i(Gp,"FNetForMaskedLM"),Gp.forEach(o),Or.forEach(o),mr=d(t),ee=s(t,"DIV",{class:!0});var wn=r(ee);_(ao.$$.fragment,wn),$l=d(wn),ze=s(wn,"P",{});var Tn=r(ze);zl=i(Tn,"FNet Model with a "),us=s(Tn,"CODE",{});var Jp=r(us);ql=i(Jp,"language modeling"),Jp.forEach(o),El=i(Tn,` head on top. This model is a PyTorch `),io=s(Tn,"A",{href:!0,rel:!0});var Kp=r(io);Pl=i(Kp,"torch.nn.Module"),Kp.forEach(o),xl=i(Tn,` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Tn.forEach(o),Ml=d(wn),C=s(wn,"DIV",{class:!0});var ce=r(C);_(lo.$$.fragment,ce),jl=d(ce),qe=s(ce,"P",{});var yn=r(qe);Cl=i(yn,"The "),an=s(yn,"A",{href:!0});var Xp=r(an);Sl=i(Xp,"FNetForMaskedLM"),Xp.forEach(o),Al=i(yn," forward method, overrides the "),gs=s(yn,"CODE",{});var Yp=r(gs);Ll=i(Yp,"__call__"),Yp.forEach(o),Il=i(yn," special method."),yn.forEach(o),Ol=d(ce),_(Ye.$$.fragment,ce),Dl=d(ce),_s=s(ce,"P",{});var Zp=r(_s);Bl=i(Zp,"Example:"),Zp.forEach(o),Wl=d(ce),_(co.$$.fragment,ce),ce.forEach(o),wn.forEach(o),ur=d(t),Ee=s(t,"H2",{class:!0});var Dr=r(Ee);Ze=s(Dr,"A",{id:!0,class:!0,href:!0});var eh=r(Ze);ks=s(eh,"SPAN",{});var th=r(ks);_(po.$$.fragment,th),th.forEach(o),eh.forEach(o),Ul=d(Dr),bs=s(Dr,"SPAN",{});var oh=r(bs);Hl=i(oh,"FNetForNextSentencePrediction"),oh.forEach(o),Dr.forEach(o),gr=d(t),te=s(t,"DIV",{class:!0});var Nn=r(te);_(ho.$$.fragment,Nn),Ql=d(Nn),Pe=s(Nn,"P",{});var $n=r(Pe);Vl=i($n,"FNet Model with a "),Fs=s($n,"CODE",{});var nh=r(Fs);Rl=i(nh,"next sentence prediction (classification)"),nh.forEach(o),Gl=i($n,` head on top. This model is a PyTorch `),fo=s($n,"A",{href:!0,rel:!0});var sh=r(fo);Jl=i(sh,"torch.nn.Module"),sh.forEach(o),Kl=i($n,` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),$n.forEach(o),Xl=d(Nn),S=s(Nn,"DIV",{class:!0});var de=r(S);_(mo.$$.fragment,de),Yl=d(de),xe=s(de,"P",{});var zn=r(xe);Zl=i(zn,"The "),ln=s(zn,"A",{href:!0});var rh=r(ln);ec=i(rh,"FNetForNextSentencePrediction"),rh.forEach(o),tc=i(zn," forward method, overrides the "),vs=s(zn,"CODE",{});var ah=r(vs);oc=i(ah,"__call__"),ah.forEach(o),nc=i(zn," special method."),zn.forEach(o),sc=d(de),_(et.$$.fragment,de),rc=d(de),ws=s(de,"P",{});var ih=r(ws);ac=i(ih,"Example:"),ih.forEach(o),ic=d(de),_(uo.$$.fragment,de),de.forEach(o),Nn.forEach(o),_r=d(t),Me=s(t,"H2",{class:!0});var Br=r(Me);tt=s(Br,"A",{id:!0,class:!0,href:!0});var lh=r(tt);Ts=s(lh,"SPAN",{});var ch=r(Ts);_(go.$$.fragment,ch),ch.forEach(o),lh.forEach(o),lc=d(Br),ys=s(Br,"SPAN",{});var dh=r(ys);cc=i(dh,"FNetForSequenceClassification"),dh.forEach(o),Br.forEach(o),kr=d(t),H=s(t,"DIV",{class:!0});var ut=r(H);_(_o.$$.fragment,ut),dc=d(ut),Ns=s(ut,"P",{});var ph=r(Ns);pc=i(ph,`FNet Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),ph.forEach(o),hc=d(ut),ko=s(ut,"P",{});var Wr=r(ko);fc=i(Wr,"This model is a PyTorch "),bo=s(Wr,"A",{href:!0,rel:!0});var hh=r(bo);mc=i(hh,"torch.nn.Module"),hh.forEach(o),uc=i(Wr,` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Wr.forEach(o),gc=d(ut),E=s(ut,"DIV",{class:!0});var O=r(E);_(Fo.$$.fragment,O),_c=d(O),je=s(O,"P",{});var qn=r(je);kc=i(qn,"The "),cn=s(qn,"A",{href:!0});var fh=r(cn);bc=i(fh,"FNetForSequenceClassification"),fh.forEach(o),Fc=i(qn," forward method, overrides the "),$s=s(qn,"CODE",{});var mh=r($s);vc=i(mh,"__call__"),mh.forEach(o),wc=i(qn," special method."),qn.forEach(o),Tc=d(O),_(ot.$$.fragment,O),yc=d(O),zs=s(O,"P",{});var uh=r(zs);Nc=i(uh,"Example of single-label classification:"),uh.forEach(o),$c=d(O),_(vo.$$.fragment,O),zc=d(O),qs=s(O,"P",{});var gh=r(qs);qc=i(gh,"Example of multi-label classification:"),gh.forEach(o),Ec=d(O),_(wo.$$.fragment,O),O.forEach(o),ut.forEach(o),br=d(t),Ce=s(t,"H2",{class:!0});var Ur=r(Ce);nt=s(Ur,"A",{id:!0,class:!0,href:!0});var _h=r(nt);Es=s(_h,"SPAN",{});var kh=r(Es);_(To.$$.fragment,kh),kh.forEach(o),_h.forEach(o),Pc=d(Ur),Ps=s(Ur,"SPAN",{});var bh=r(Ps);xc=i(bh,"FNetForMultipleChoice"),bh.forEach(o),Ur.forEach(o),Fr=d(t),Q=s(t,"DIV",{class:!0});var gt=r(Q);_(yo.$$.fragment,gt),Mc=d(gt),xs=s(gt,"P",{});var Fh=r(xs);jc=i(Fh,`FNet Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.`),Fh.forEach(o),Cc=d(gt),No=s(gt,"P",{});var Hr=r(No);Sc=i(Hr,"This model is a PyTorch "),$o=s(Hr,"A",{href:!0,rel:!0});var vh=r($o);Ac=i(vh,"torch.nn.Module"),vh.forEach(o),Lc=i(Hr,` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Hr.forEach(o),Ic=d(gt),A=s(gt,"DIV",{class:!0});var pe=r(A);_(zo.$$.fragment,pe),Oc=d(pe),Se=s(pe,"P",{});var En=r(Se);Dc=i(En,"The "),dn=s(En,"A",{href:!0});var wh=r(dn);Bc=i(wh,"FNetForMultipleChoice"),wh.forEach(o),Wc=i(En," forward method, overrides the "),Ms=s(En,"CODE",{});var Th=r(Ms);Uc=i(Th,"__call__"),Th.forEach(o),Hc=i(En," special method."),En.forEach(o),Qc=d(pe),_(st.$$.fragment,pe),Vc=d(pe),js=s(pe,"P",{});var yh=r(js);Rc=i(yh,"Example:"),yh.forEach(o),Gc=d(pe),_(qo.$$.fragment,pe),pe.forEach(o),gt.forEach(o),vr=d(t),Ae=s(t,"H2",{class:!0});var Qr=r(Ae);rt=s(Qr,"A",{id:!0,class:!0,href:!0});var Nh=r(rt);Cs=s(Nh,"SPAN",{});var $h=r(Cs);_(Eo.$$.fragment,$h),$h.forEach(o),Nh.forEach(o),Jc=d(Qr),Ss=s(Qr,"SPAN",{});var zh=r(Ss);Kc=i(zh,"FNetForTokenClassification"),zh.forEach(o),Qr.forEach(o),wr=d(t),V=s(t,"DIV",{class:!0});var _t=r(V);_(Po.$$.fragment,_t),Xc=d(_t),As=s(_t,"P",{});var qh=r(As);Yc=i(qh,`FNet Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.`),qh.forEach(o),Zc=d(_t),xo=s(_t,"P",{});var Vr=r(xo);ed=i(Vr,"This model is a PyTorch "),Mo=s(Vr,"A",{href:!0,rel:!0});var Eh=r(Mo);td=i(Eh,"torch.nn.Module"),Eh.forEach(o),od=i(Vr,` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Vr.forEach(o),nd=d(_t),L=s(_t,"DIV",{class:!0});var he=r(L);_(jo.$$.fragment,he),sd=d(he),Le=s(he,"P",{});var Pn=r(Le);rd=i(Pn,"The "),pn=s(Pn,"A",{href:!0});var Ph=r(pn);ad=i(Ph,"FNetForTokenClassification"),Ph.forEach(o),id=i(Pn," forward method, overrides the "),Ls=s(Pn,"CODE",{});var xh=r(Ls);ld=i(xh,"__call__"),xh.forEach(o),cd=i(Pn," special method."),Pn.forEach(o),dd=d(he),_(at.$$.fragment,he),pd=d(he),Is=s(he,"P",{});var Mh=r(Is);hd=i(Mh,"Example:"),Mh.forEach(o),fd=d(he),_(Co.$$.fragment,he),he.forEach(o),_t.forEach(o),Tr=d(t),Ie=s(t,"H2",{class:!0});var Rr=r(Ie);it=s(Rr,"A",{id:!0,class:!0,href:!0});var jh=r(it);Os=s(jh,"SPAN",{});var Ch=r(Os);_(So.$$.fragment,Ch),Ch.forEach(o),jh.forEach(o),md=d(Rr),Ds=s(Rr,"SPAN",{});var Sh=r(Ds);ud=i(Sh,"FNetForQuestionAnswering"),Sh.forEach(o),Rr.forEach(o),yr=d(t),R=s(t,"DIV",{class:!0});var kt=r(R);_(Ao.$$.fragment,kt),gd=d(kt),Oe=s(kt,"P",{});var xn=r(Oe);_d=i(xn,`FNet Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),Bs=s(xn,"CODE",{});var Ah=r(Bs);kd=i(Ah,"span start logits"),Ah.forEach(o),bd=i(xn," and "),Ws=s(xn,"CODE",{});var Lh=r(Ws);Fd=i(Lh,"span end logits"),Lh.forEach(o),vd=i(xn,")."),xn.forEach(o),wd=d(kt),Lo=s(kt,"P",{});var Gr=r(Lo);Td=i(Gr,"This model is a PyTorch "),Io=s(Gr,"A",{href:!0,rel:!0});var Ih=r(Io);yd=i(Ih,"torch.nn.Module"),Ih.forEach(o),Nd=i(Gr,` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Gr.forEach(o),$d=d(kt),I=s(kt,"DIV",{class:!0});var fe=r(I);_(Oo.$$.fragment,fe),zd=d(fe),De=s(fe,"P",{});var Mn=r(De);qd=i(Mn,"The "),hn=s(Mn,"A",{href:!0});var Oh=r(hn);Ed=i(Oh,"FNetForQuestionAnswering"),Oh.forEach(o),Pd=i(Mn," forward method, overrides the "),Us=s(Mn,"CODE",{});var Dh=r(Us);xd=i(Dh,"__call__"),Dh.forEach(o),Md=i(Mn," special method."),Mn.forEach(o),jd=d(fe),_(lt.$$.fragment,fe),Cd=d(fe),Hs=s(fe,"P",{});var Bh=r(Hs);Sd=i(Bh,"Example:"),Bh.forEach(o),Ad=d(fe),_(Do.$$.fragment,fe),fe.forEach(o),kt.forEach(o),this.h()},h(){l(p,"name","hf:doc:metadata"),l(p,"content",JSON.stringify(tf)),l(T,"id","fnet"),l(T,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(T,"href","#fnet"),l(m,"class","relative group"),l(Be,"id","overview"),l(Be,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(Be,"href","#overview"),l(me,"class","relative group"),l(vt,"href","https://arxiv.org/abs/2105.03824"),l(vt,"rel","nofollow"),l(wt,"href","https://huggingface.co/gchhablani"),l(wt,"rel","nofollow"),l(Tt,"href","https://github.com/google-research/google-research/tree/master/f_net"),l(Tt,"rel","nofollow"),l(Ue,"id","transformers.FNetConfig"),l(Ue,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(Ue,"href","#transformers.FNetConfig"),l(ue,"class","relative 
group"),l(Vo,"href","/docs/transformers/pr_16143/en/model_doc/fnet#transformers.FNetModel"),l($t,"href","https://huggingface.co/google/fnet-base"),l($t,"rel","nofollow"),l(Ro,"href","/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig"),l(Go,"href","/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig"),l(x,"class","docstring"),l(He,"id","transformers.FNetTokenizer"),l(He,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(He,"href","#transformers.FNetTokenizer"),l(ke,"class","relative group"),l(Jo,"href","/docs/transformers/pr_16143/en/model_doc/albert#transformers.AlbertTokenizer"),l(Pt,"href","https://github.com/google/sentencepiece"),l(Pt,"rel","nofollow"),l(Ko,"href","/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer"),l(ne,"class","docstring"),l(Qe,"class","docstring"),l(J,"class","docstring"),l(Zo,"class","docstring"),l(P,"class","docstring"),l(Ve,"id","transformers.FNetTokenizerFast"),l(Ve,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(Ve,"href","#transformers.FNetTokenizerFast"),l(Fe,"class","relative group"),l(en,"href","/docs/transformers/pr_16143/en/model_doc/albert#transformers.AlbertTokenizerFast"),l(Dt,"href","https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=unigram#models"),l(Dt,"rel","nofollow"),l(tn,"href","/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast"),l(se,"class","docstring"),l(K,"class","docstring"),l(D,"class","docstring"),l(Re,"id","transformers.FNetModel"),l(Re,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(Re,"href","#transformers.FNetModel"),l(ve,"class","relative group"),l(Gt,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(Gt,"rel","nofollow"),l(Kt,"href","https://arxiv.org/abs/2105.03824"),l(Kt,"rel","nofollow"),l(sn,"href","/docs/transformers/pr_16143/en/model_doc/fnet#transformers.FNetModel"),l(M,"class","docstring"),l(W,"class","docstring"),l(Je,"id","transformers.FNetForPreTraining"),l(Je,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(Je,"href","#transformers.FNetForPreTraining"),l(Te,"class","relative group"),l(oo,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(oo,"rel","nofollow"),l(rn,"href","/docs/transformers/pr_16143/en/model_doc/fnet#transformers.FNetForPreTraining"),l(j,"class","docstring"),l(U,"class","docstring"),l(Xe,"id","transformers.FNetForMaskedLM"),l(Xe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(Xe,"href","#transformers.FNetForMaskedLM"),l($e,"class","relative group"),l(io,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(io,"rel","nofollow"),l(an,"href","/docs/transformers/pr_16143/en/model_doc/fnet#transformers.FNetForMaskedLM"),l(C,"class","docstring"),l(ee,"class","docstring"),l(Ze,"id","transformers.FNetForNextSentencePrediction"),l(Ze,"class","header-link block 
pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(Ze,"href","#transformers.FNetForNextSentencePrediction"),l(Ee,"class","relative group"),l(fo,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(fo,"rel","nofollow"),l(ln,"href","/docs/transformers/pr_16143/en/model_doc/fnet#transformers.FNetForNextSentencePrediction"),l(S,"class","docstring"),l(te,"class","docstring"),l(tt,"id","transformers.FNetForSequenceClassification"),l(tt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(tt,"href","#transformers.FNetForSequenceClassification"),l(Me,"class","relative group"),l(bo,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(bo,"rel","nofollow"),l(cn,"href","/docs/transformers/pr_16143/en/model_doc/fnet#transformers.FNetForSequenceClassification"),l(E,"class","docstring"),l(H,"class","docstring"),l(nt,"id","transformers.FNetForMultipleChoice"),l(nt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(nt,"href","#transformers.FNetForMultipleChoice"),l(Ce,"class","relative group"),l($o,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l($o,"rel","nofollow"),l(dn,"href","/docs/transformers/pr_16143/en/model_doc/fnet#transformers.FNetForMultipleChoice"),l(A,"class","docstring"),l(Q,"class","docstring"),l(rt,"id","transformers.FNetForTokenClassification"),l(rt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(rt,"href","#transformers.FNetForTokenClassification"),l(Ae,"class","relative group"),l(Mo,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(Mo,"rel","nofollow"),l(pn,"href","/docs/transformers/pr_16143/en/model_doc/fnet#transformers.FNetForTokenClassification"),l(L,"class","docstring"),l(V,"class","docstring"),l(it,"id","transformers.FNetForQuestionAnswering"),l(it,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(it,"href","#transformers.FNetForQuestionAnswering"),l(Ie,"class","relative 
group"),l(Io,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(Io,"rel","nofollow"),l(hn,"href","/docs/transformers/pr_16143/en/model_doc/fnet#transformers.FNetForQuestionAnswering"),l(I,"class","docstring"),l(R,"class","docstring")},m(t,h){e(document.head,p),f(t,y,h),f(t,m,h),e(m,T),e(T,N),k(w,N,null),e(m,u),e(m,$),e($,Jr),f(t,Xs,h),f(t,me,h),e(me,Be),e(Be,jn),k(Ft,jn,null),e(me,Kr),e(me,Cn),e(Cn,Xr),f(t,Ys,h),f(t,We,h),e(We,Yr),e(We,vt),e(vt,Zr),e(We,ea),f(t,Zs,h),f(t,Uo,h),e(Uo,Sn),e(Sn,ta),f(t,er,h),f(t,Ho,h),e(Ho,oa),f(t,tr,h),f(t,Qo,h),e(Qo,An),e(An,na),f(t,or,h),f(t,oe,h),e(oe,sa),e(oe,wt),e(wt,ra),e(oe,aa),e(oe,Tt),e(Tt,ia),e(oe,la),f(t,nr,h),f(t,ue,h),e(ue,Ue),e(Ue,Ln),k(yt,Ln,null),e(ue,ca),e(ue,In),e(In,da),f(t,sr,h),f(t,x,h),k(Nt,x,null),e(x,pa),e(x,ge),e(ge,ha),e(ge,Vo),e(Vo,fa),e(ge,ma),e(ge,$t),e($t,ua),e(ge,ga),e(x,_a),e(x,_e),e(_e,ka),e(_e,Ro),e(Ro,ba),e(_e,Fa),e(_e,Go),e(Go,va),e(_e,wa),e(x,Ta),e(x,On),e(On,ya),e(x,Na),k(zt,x,null),f(t,rr,h),f(t,ke,h),e(ke,He),e(He,Dn),k(qt,Dn,null),e(ke,$a),e(ke,Bn),e(Bn,za),f(t,ar,h),f(t,P,h),k(Et,P,null),e(P,qa),e(P,Z),e(Z,Ea),e(Z,Jo),e(Jo,Pa),e(Z,xa),e(Z,Pt),e(Pt,Ma),e(Z,ja),e(Z,Ko),e(Ko,Ca),e(Z,Sa),e(P,Aa),e(P,ne),k(xt,ne,null),e(ne,La),e(ne,Wn),e(Wn,Ia),e(ne,Oa),e(ne,Mt),e(Mt,Xo),e(Xo,Da),e(Xo,Un),e(Un,Ba),e(Mt,Wa),e(Mt,Yo),e(Yo,Ua),e(Yo,Hn),e(Hn,Ha),e(P,Qa),e(P,Qe),k(jt,Qe,null),e(Qe,Va),e(Qe,Ct),e(Ct,Ra),e(Ct,Qn),e(Qn,Ga),e(Ct,Ja),e(P,Ka),e(P,J),k(St,J,null),e(J,Xa),e(J,Vn),e(Vn,Ya),e(J,Za),k(At,J,null),e(J,ei),e(J,be),e(be,ti),e(be,Rn),e(Rn,oi),e(be,ni),e(be,Gn),e(Gn,si),e(be,ri),e(P,ai),e(P,Zo),k(Lt,Zo,null),f(t,ir,h),f(t,Fe,h),e(Fe,Ve),e(Ve,Jn),k(It,Jn,null),e(Fe,ii),e(Fe,Kn),e(Kn,li),f(t,lr,h),f(t,D,h),k(Ot,D,null),e(D,ci),e(D,B),e(B,di),e(B,Xn),e(Xn,pi),e(B,hi),e(B,en),e(en,fi),e(B,mi),e(B,Dt),e(Dt,ui),e(B,gi),e(B,tn),e(tn,_i),e(B,ki),e(D,bi),e(D,se),k(Bt,se,null),e(se,Fi),e(se,Yn),e(Yn,vi),e(se,wi),e(se,Wt),e(Wt,on),e(on,Ti),e(on,Zn),e(Zn,yi),e(Wt,Ni),e(Wt,nn),e(nn,$i),e(nn,es),e(es,zi),e(D,qi),e(D,K),k(Ut,K,null),e(K,Ei),e(K,ts),e(ts,Pi),e(K,xi),k(Ht,K,null),e(K,Mi),e(K,os),e(os,ji),f(t,cr,h),f(t,ve,h),e(ve,Re),e(Re,ns),k(Qt,ns,null),e(ve,Ci),e(ve,ss),e(ss,Si),f(t,dr,h),f(t,W,h),k(Vt,W,null),e(W,Ai),e(W,Rt),e(Rt,Li),e(Rt,Gt),e(Gt,Ii),e(Rt,Oi),e(W,Di),e(W,Jt),e(Jt,Bi),e(Jt,Kt),e(Kt,Wi),e(Jt,Ui),e(W,Hi),e(W,M),k(Xt,M,null),e(M,Qi),e(M,we),e(we,Vi),e(we,sn),e(sn,Ri),e(we,Gi),e(we,rs),e(rs,Ji),e(we,Ki),e(M,Xi),k(Ge,M,null),e(M,Yi),e(M,as),e(as,Zi),e(M,el),k(Yt,M,null),f(t,pr,h),f(t,Te,h),e(Te,Je),e(Je,is),k(Zt,is,null),e(Te,tl),e(Te,ls),e(ls,ol),f(t,hr,h),f(t,U,h),k(eo,U,null),e(U,nl),e(U,ye),e(ye,sl),e(ye,cs),e(cs,rl),e(ye,al),e(ye,ds),e(ds,il),e(ye,ll),e(U,cl),e(U,to),e(to,dl),e(to,oo),e(oo,pl),e(to,hl),e(U,fl),e(U,j),k(no,j,null),e(j,ml),e(j,Ne),e(Ne,ul),e(Ne,rn),e(rn,gl),e(Ne,_l),e(Ne,ps),e(ps,kl),e(Ne,bl),e(j,Fl),k(Ke,j,null),e(j,vl),e(j,hs),e(hs,wl),e(j,Tl),k(so,j,null),f(t,fr,h),f(t,$e,h),e($e,Xe),e(Xe,fs),k(ro,fs,null),e($e,yl),e($e,ms),e(ms,Nl),f(t,mr,h),f(t,ee,h),k(ao,ee,null),e(ee,$l),e(ee,ze),e(ze,zl),e(ze,us),e(us,ql),e(ze,El),e(ze,io),e(io,Pl),e(ze,xl),e(ee,Ml),e(ee,C),k(lo,C,null),e(C,jl),e(C,qe),e(qe,Cl),e(qe,an),e(an,Sl),e(qe,Al),e(qe,gs),e(gs,Ll),e(qe,Il),e(C,Ol),k(Ye,C,null),e(C,Dl),e(C,_s),e(_s,Bl),e(C,Wl),k(co,C,null),f(t,ur,h),f(t,Ee,h),e(Ee,Ze),e(Ze,ks),k(po,ks,null),e(Ee,Ul),e(Ee,bs),e(bs,Hl),f(t,gr,h),f(t,te,h),k(ho,te,null),e(te,Ql),e(te,Pe),e(Pe,Vl),e(Pe,Fs),e(Fs,Rl),e(Pe,Gl),e(Pe,fo),e(fo,Jl),e(Pe,Kl),e(te,Xl),e(te,S),k(mo,S,null),e(S,Yl),e(S,xe),e(xe,Zl),e(xe,ln),e(ln,ec),e(xe,tc),e
(xe,vs),e(vs,oc),e(xe,nc),e(S,sc),k(et,S,null),e(S,rc),e(S,ws),e(ws,ac),e(S,ic),k(uo,S,null),f(t,_r,h),f(t,Me,h),e(Me,tt),e(tt,Ts),k(go,Ts,null),e(Me,lc),e(Me,ys),e(ys,cc),f(t,kr,h),f(t,H,h),k(_o,H,null),e(H,dc),e(H,Ns),e(Ns,pc),e(H,hc),e(H,ko),e(ko,fc),e(ko,bo),e(bo,mc),e(ko,uc),e(H,gc),e(H,E),k(Fo,E,null),e(E,_c),e(E,je),e(je,kc),e(je,cn),e(cn,bc),e(je,Fc),e(je,$s),e($s,vc),e(je,wc),e(E,Tc),k(ot,E,null),e(E,yc),e(E,zs),e(zs,Nc),e(E,$c),k(vo,E,null),e(E,zc),e(E,qs),e(qs,qc),e(E,Ec),k(wo,E,null),f(t,br,h),f(t,Ce,h),e(Ce,nt),e(nt,Es),k(To,Es,null),e(Ce,Pc),e(Ce,Ps),e(Ps,xc),f(t,Fr,h),f(t,Q,h),k(yo,Q,null),e(Q,Mc),e(Q,xs),e(xs,jc),e(Q,Cc),e(Q,No),e(No,Sc),e(No,$o),e($o,Ac),e(No,Lc),e(Q,Ic),e(Q,A),k(zo,A,null),e(A,Oc),e(A,Se),e(Se,Dc),e(Se,dn),e(dn,Bc),e(Se,Wc),e(Se,Ms),e(Ms,Uc),e(Se,Hc),e(A,Qc),k(st,A,null),e(A,Vc),e(A,js),e(js,Rc),e(A,Gc),k(qo,A,null),f(t,vr,h),f(t,Ae,h),e(Ae,rt),e(rt,Cs),k(Eo,Cs,null),e(Ae,Jc),e(Ae,Ss),e(Ss,Kc),f(t,wr,h),f(t,V,h),k(Po,V,null),e(V,Xc),e(V,As),e(As,Yc),e(V,Zc),e(V,xo),e(xo,ed),e(xo,Mo),e(Mo,td),e(xo,od),e(V,nd),e(V,L),k(jo,L,null),e(L,sd),e(L,Le),e(Le,rd),e(Le,pn),e(pn,ad),e(Le,id),e(Le,Ls),e(Ls,ld),e(Le,cd),e(L,dd),k(at,L,null),e(L,pd),e(L,Is),e(Is,hd),e(L,fd),k(Co,L,null),f(t,Tr,h),f(t,Ie,h),e(Ie,it),e(it,Os),k(So,Os,null),e(Ie,md),e(Ie,Ds),e(Ds,ud),f(t,yr,h),f(t,R,h),k(Ao,R,null),e(R,gd),e(R,Oe),e(Oe,_d),e(Oe,Bs),e(Bs,kd),e(Oe,bd),e(Oe,Ws),e(Ws,Fd),e(Oe,vd),e(R,wd),e(R,Lo),e(Lo,Td),e(Lo,Io),e(Io,yd),e(Lo,Nd),e(R,$d),e(R,I),k(Oo,I,null),e(I,zd),e(I,De),e(De,qd),e(De,hn),e(hn,Ed),e(De,Pd),e(De,Us),e(Us,xd),e(De,Md),e(I,jd),k(lt,I,null),e(I,Cd),e(I,Hs),e(Hs,Sd),e(I,Ad),k(Do,I,null),Nr=!0},p(t,[h]){const Bo={};h&2&&(Bo.$$scope={dirty:h,ctx:t}),Ge.$set(Bo);const Qs={};h&2&&(Qs.$$scope={dirty:h,ctx:t}),Ke.$set(Qs);const Vs={};h&2&&(Vs.$$scope={dirty:h,ctx:t}),Ye.$set(Vs);const Rs={};h&2&&(Rs.$$scope={dirty:h,ctx:t}),et.$set(Rs);const Wo={};h&2&&(Wo.$$scope={dirty:h,ctx:t}),ot.$set(Wo);const Gs={};h&2&&(Gs.$$scope={dirty:h,ctx:t}),st.$set(Gs);const Js={};h&2&&(Js.$$scope={dirty:h,ctx:t}),at.$set(Js);const 
Ks={};h&2&&(Ks.$$scope={dirty:h,ctx:t}),lt.$set(Ks)},i(t){Nr||(b(w.$$.fragment,t),b(Ft.$$.fragment,t),b(yt.$$.fragment,t),b(Nt.$$.fragment,t),b(zt.$$.fragment,t),b(qt.$$.fragment,t),b(Et.$$.fragment,t),b(xt.$$.fragment,t),b(jt.$$.fragment,t),b(St.$$.fragment,t),b(At.$$.fragment,t),b(Lt.$$.fragment,t),b(It.$$.fragment,t),b(Ot.$$.fragment,t),b(Bt.$$.fragment,t),b(Ut.$$.fragment,t),b(Ht.$$.fragment,t),b(Qt.$$.fragment,t),b(Vt.$$.fragment,t),b(Xt.$$.fragment,t),b(Ge.$$.fragment,t),b(Yt.$$.fragment,t),b(Zt.$$.fragment,t),b(eo.$$.fragment,t),b(no.$$.fragment,t),b(Ke.$$.fragment,t),b(so.$$.fragment,t),b(ro.$$.fragment,t),b(ao.$$.fragment,t),b(lo.$$.fragment,t),b(Ye.$$.fragment,t),b(co.$$.fragment,t),b(po.$$.fragment,t),b(ho.$$.fragment,t),b(mo.$$.fragment,t),b(et.$$.fragment,t),b(uo.$$.fragment,t),b(go.$$.fragment,t),b(_o.$$.fragment,t),b(Fo.$$.fragment,t),b(ot.$$.fragment,t),b(vo.$$.fragment,t),b(wo.$$.fragment,t),b(To.$$.fragment,t),b(yo.$$.fragment,t),b(zo.$$.fragment,t),b(st.$$.fragment,t),b(qo.$$.fragment,t),b(Eo.$$.fragment,t),b(Po.$$.fragment,t),b(jo.$$.fragment,t),b(at.$$.fragment,t),b(Co.$$.fragment,t),b(So.$$.fragment,t),b(Ao.$$.fragment,t),b(Oo.$$.fragment,t),b(lt.$$.fragment,t),b(Do.$$.fragment,t),Nr=!0)},o(t){F(w.$$.fragment,t),F(Ft.$$.fragment,t),F(yt.$$.fragment,t),F(Nt.$$.fragment,t),F(zt.$$.fragment,t),F(qt.$$.fragment,t),F(Et.$$.fragment,t),F(xt.$$.fragment,t),F(jt.$$.fragment,t),F(St.$$.fragment,t),F(At.$$.fragment,t),F(Lt.$$.fragment,t),F(It.$$.fragment,t),F(Ot.$$.fragment,t),F(Bt.$$.fragment,t),F(Ut.$$.fragment,t),F(Ht.$$.fragment,t),F(Qt.$$.fragment,t),F(Vt.$$.fragment,t),F(Xt.$$.fragment,t),F(Ge.$$.fragment,t),F(Yt.$$.fragment,t),F(Zt.$$.fragment,t),F(eo.$$.fragment,t),F(no.$$.fragment,t),F(Ke.$$.fragment,t),F(so.$$.fragment,t),F(ro.$$.fragment,t),F(ao.$$.fragment,t),F(lo.$$.fragment,t),F(Ye.$$.fragment,t),F(co.$$.fragment,t),F(po.$$.fragment,t),F(ho.$$.fragment,t),F(mo.$$.fragment,t),F(et.$$.fragment,t),F(uo.$$.fragment,t),F(go.$$.fragment,t),F(_o.$$.fragment,t),F(Fo.$$.fragment,t),F(ot.$$.fragment,t),F(vo.$$.fragment,t),F(wo.$$.fragment,t),F(To.$$.fragment,t),F(yo.$$.fragment,t),F(zo.$$.fragment,t),F(st.$$.fragment,t),F(qo.$$.fragment,t),F(Eo.$$.fragment,t),F(Po.$$.fragment,t),F(jo.$$.fragment,t),F(at.$$.fragment,t),F(Co.$$.fragment,t),F(So.$$.fragment,t),F(Ao.$$.fragment,t),F(Oo.$$.fragment,t),F(lt.$$.fragment,t),F(Do.$$.fragment,t),Nr=!1},d(t){o(p),t&&o(y),t&&o(m),v(w),t&&o(Xs),t&&o(me),v(Ft),t&&o(Ys),t&&o(We),t&&o(Zs),t&&o(Uo),t&&o(er),t&&o(Ho),t&&o(tr),t&&o(Qo),t&&o(or),t&&o(oe),t&&o(nr),t&&o(ue),v(yt),t&&o(sr),t&&o(x),v(Nt),v(zt),t&&o(rr),t&&o(ke),v(qt),t&&o(ar),t&&o(P),v(Et),v(xt),v(jt),v(St),v(At),v(Lt),t&&o(ir),t&&o(Fe),v(It),t&&o(lr),t&&o(D),v(Ot),v(Bt),v(Ut),v(Ht),t&&o(cr),t&&o(ve),v(Qt),t&&o(dr),t&&o(W),v(Vt),v(Xt),v(Ge),v(Yt),t&&o(pr),t&&o(Te),v(Zt),t&&o(hr),t&&o(U),v(eo),v(no),v(Ke),v(so),t&&o(fr),t&&o($e),v(ro),t&&o(mr),t&&o(ee),v(ao),v(lo),v(Ye),v(co),t&&o(ur),t&&o(Ee),v(po),t&&o(gr),t&&o(te),v(ho),v(mo),v(et),v(uo),t&&o(_r),t&&o(Me),v(go),t&&o(kr),t&&o(H),v(_o),v(Fo),v(ot),v(vo),v(wo),t&&o(br),t&&o(Ce),v(To),t&&o(Fr),t&&o(Q),v(yo),v(zo),v(st),v(qo),t&&o(vr),t&&o(Ae),v(Eo),t&&o(wr),t&&o(V),v(Po),v(jo),v(at),v(Co),t&&o(Tr),t&&o(Ie),v(So),t&&o(yr),t&&o(R),v(Ao),v(Oo),v(lt),v(Do)}}}const 
tf={local:"fnet",sections:[{local:"overview",title:"Overview"},{local:"transformers.FNetConfig",title:"FNetConfig"},{local:"transformers.FNetTokenizer",title:"FNetTokenizer"},{local:"transformers.FNetTokenizerFast",title:"FNetTokenizerFast"},{local:"transformers.FNetModel",title:"FNetModel"},{local:"transformers.FNetForPreTraining",title:"FNetForPreTraining"},{local:"transformers.FNetForMaskedLM",title:"FNetForMaskedLM"},{local:"transformers.FNetForNextSentencePrediction",title:"FNetForNextSentencePrediction"},{local:"transformers.FNetForSequenceClassification",title:"FNetForSequenceClassification"},{local:"transformers.FNetForMultipleChoice",title:"FNetForMultipleChoice"},{local:"transformers.FNetForTokenClassification",title:"FNetForTokenClassification"},{local:"transformers.FNetForQuestionAnswering",title:"FNetForQuestionAnswering"}],title:"FNet"};function of(q,p,y){let{fw:m}=p;return q.$$set=T=>{"fw"in T&&y(0,m=T.fw)},[m]}class df extends Wh{constructor(p){super();Uh(this,p,of,ef,Hh,{fw:0})}}export{df as default,tf as metadata};
283
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages/model_doc/squeezebert.mdx-a1f791b4.js
import{S as Am,i as Lm,s as Nm,e as r,k as l,w as m,t as n,M as Im,c as a,d as s,m as d,a as i,x as f,h as o,b as c,F as e,g as u,y as g,q as _,o as z,B as b}from"../../chunks/vendor-4833417e.js";import{T as $n}from"../../chunks/Tip-fffd6df1.js";import{D as O}from"../../chunks/Docstring-4f315ed9.js";import{C as D}from"../../chunks/CodeBlock-6a3d1b46.js";import{I as te}from"../../chunks/IconCopyLink-4b81c553.js";import"../../chunks/CopyButton-dacfbfaf.js";function Dm(H){let h,w,q,y,T;return{c(){h=r("p"),w=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),q=r("code"),y=n("Module"),T=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(v){h=a(v,"P",{});var k=i(h);w=o(k,"Although the recipe for forward pass needs to be defined within this function, one should call the "),q=a(k,"CODE",{});var F=i(q);y=o(F,"Module"),F.forEach(s),T=o(k,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),k.forEach(s)},m(v,k){u(v,h,k),e(h,w),e(h,q),e(q,y),e(h,T)},d(v){v&&s(h)}}}function Om(H){let h,w,q,y,T;return{c(){h=r("p"),w=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),q=r("code"),y=n("Module"),T=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(v){h=a(v,"P",{});var k=i(h);w=o(k,"Although the recipe for forward pass needs to be defined within this function, one should call the "),q=a(k,"CODE",{});var F=i(q);y=o(F,"Module"),F.forEach(s),T=o(k,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),k.forEach(s)},m(v,k){u(v,h,k),e(h,w),e(h,q),e(q,y),e(h,T)},d(v){v&&s(h)}}}function Rm(H){let h,w,q,y,T;return{c(){h=r("p"),w=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),q=r("code"),y=n("Module"),T=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(v){h=a(v,"P",{});var k=i(h);w=o(k,"Although the recipe for forward pass needs to be defined within this function, one should call the "),q=a(k,"CODE",{});var F=i(q);y=o(F,"Module"),F.forEach(s),T=o(k,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),k.forEach(s)},m(v,k){u(v,h,k),e(h,w),e(h,q),e(q,y),e(h,T)},d(v){v&&s(h)}}}function Wm(H){let h,w,q,y,T;return{c(){h=r("p"),w=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),q=r("code"),y=n("Module"),T=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(v){h=a(v,"P",{});var k=i(h);w=o(k,"Although the recipe for forward pass needs to be defined within this function, one should call the "),q=a(k,"CODE",{});var F=i(q);y=o(F,"Module"),F.forEach(s),T=o(k,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),k.forEach(s)},m(v,k){u(v,h,k),e(h,w),e(h,q),e(q,y),e(h,T)},d(v){v&&s(h)}}}function Km(H){let 
h,w,q,y,T;return{c(){h=r("p"),w=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),q=r("code"),y=n("Module"),T=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(v){h=a(v,"P",{});var k=i(h);w=o(k,"Although the recipe for forward pass needs to be defined within this function, one should call the "),q=a(k,"CODE",{});var F=i(q);y=o(F,"Module"),F.forEach(s),T=o(k,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),k.forEach(s)},m(v,k){u(v,h,k),e(h,w),e(h,q),e(q,y),e(h,T)},d(v){v&&s(h)}}}function Hm(H){let h,w,q,y,T;return{c(){h=r("p"),w=n("Although the recipe for forward pass needs to be defined within this function, one should call the "),q=r("code"),y=n("Module"),T=n(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(v){h=a(v,"P",{});var k=i(h);w=o(k,"Although the recipe for forward pass needs to be defined within this function, one should call the "),q=a(k,"CODE",{});var F=i(q);y=o(F,"Module"),F.forEach(s),T=o(k,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),k.forEach(s)},m(v,k){u(v,h,k),e(h,w),e(h,q),e(q,y),e(h,T)},d(v){v&&s(h)}}}function Qm(H){let h,w,q,y,T,v,k,F,za,nr,ce,Ee,En,Xe,ba,jn,qa,or,se,ka,Ye,va,ya,Ze,wa,Ta,rr,Rs,Sa,ar,Ws,Mn,Ba,ir,Ks,$a,lr,ne,Fn,Ea,ja,Cn,Ma,Fa,et,Ca,Pn,Pa,xa,dr,je,Aa,tt,La,Na,cr,pe,Me,xn,st,Ia,An,Da,pr,K,nt,Oa,ot,Ra,Hs,Wa,Ka,Ha,he,Qa,Qs,Ua,Va,Us,Ga,Ja,Xa,Ln,Ya,Za,rt,ei,Nn,ti,hr,ue,Fe,In,at,si,Dn,ni,ur,C,it,oi,On,ri,ai,Ce,Vs,ii,li,Gs,di,ci,pi,Rn,Wn,hi,ui,lt,mi,Js,fi,gi,_i,oe,dt,zi,Kn,bi,qi,ct,Xs,ki,Hn,vi,yi,Ys,wi,Qn,Ti,Si,Pe,pt,Bi,ht,$i,Un,Ei,ji,Mi,Z,ut,Fi,Vn,Ci,Pi,mt,xi,me,Ai,Gn,Li,Ni,Jn,Ii,Di,Oi,Zs,ft,mr,fe,xe,Xn,gt,Ri,Yn,Wi,fr,Y,_t,Ki,zt,Hi,Zn,Qi,Ui,Vi,Ae,en,Gi,Ji,tn,Xi,Yi,Zi,bt,el,sn,tl,sl,gr,ge,Le,eo,qt,nl,to,ol,_r,S,kt,rl,so,al,il,vt,ll,yt,dl,cl,pl,wt,hl,nn,ul,ml,fl,Tt,gl,St,_l,zl,bl,Bt,ql,no,kl,vl,yl,oo,wl,Tl,$t,Sl,ro,Bl,$l,Et,El,Q,jt,jl,_e,Ml,on,Fl,Cl,ao,Pl,xl,Al,Ne,Ll,io,Nl,Il,Mt,zr,ze,Ie,lo,Ft,Dl,co,Ol,br,B,Ct,Rl,Pt,Wl,po,Kl,Hl,Ql,xt,Ul,At,Vl,Gl,Jl,Lt,Xl,rn,Yl,Zl,ed,Nt,td,It,sd,nd,od,Dt,rd,ho,ad,id,ld,uo,dd,cd,Ot,pd,mo,hd,ud,Rt,md,U,Wt,fd,be,gd,an,_d,zd,fo,bd,qd,kd,De,vd,go,yd,wd,Kt,qr,qe,Oe,_o,Ht,Td,zo,Sd,kr,$,Qt,Bd,bo,$d,Ed,Ut,jd,Vt,Md,Fd,Cd,Gt,Pd,ln,xd,Ad,Ld,Jt,Nd,Xt,Id,Dd,Od,Yt,Rd,qo,Wd,Kd,Hd,ko,Qd,Ud,Zt,Vd,vo,Gd,Jd,es,Xd,R,ts,Yd,ke,Zd,dn,ec,tc,yo,sc,nc,oc,Re,rc,wo,ac,ic,ss,lc,To,dc,cc,ns,vr,ve,We,So,os,pc,Bo,hc,yr,E,rs,uc,$o,mc,fc,as,gc,is,_c,zc,bc,ls,qc,cn,kc,vc,yc,ds,wc,cs,Tc,Sc,Bc,ps,$c,Eo,Ec,jc,Mc,jo,Fc,Cc,hs,Pc,Mo,xc,Ac,us,Lc,V,ms,Nc,ye,Ic,pn,Dc,Oc,Fo,Rc,Wc,Kc,Ke,Hc,Co,Qc,Uc,fs,wr,we,He,Po,gs,Vc,xo,Gc,Tr,j,_s,Jc,Ao,Xc,Yc,zs,Zc,bs,ep,tp,sp,qs,np,hn,op,rp,ap,ks,ip,vs,lp,dp,cp,ys,pp,Lo,hp,up,mp,No,fp,gp,ws,_p,Io,zp,bp,Ts,qp,G,Ss,kp,Te,vp,un,yp,wp,Do,Tp,Sp,Bp,Qe,$p,Oo,Ep,jp,Bs,Sr,Se,Ue,Ro,$s,Mp,Wo,Fp,Br,M,Es,Cp,Be,Pp,Ko,xp,Ap,Ho,Lp,Np,Ip,js,Dp,Ms,Op,Rp,Wp,Fs,Kp,mn,Hp,Qp,Up,Cs,Vp,Ps,Gp,Jp,Xp,xs,Yp,Qo,Zp,eh,th,Uo,sh,nh,As,oh,Vo,rh,ah,Ls,ih,J,Ns,lh,$e,dh,fn,ch,ph,Go,hh,uh,mh,Ve,fh,Jo,gh,_h,Is,$r;return v=new te({}),Xe=new te({}),st=new te({}),nt=new O({props:{name:"class transformers.SqueezeBertConfig",anchor:"transformers.SqueezeBertConfig",parameters:[{name:"vocab_size",val:" = 
30522"},{name:"hidden_size",val:" = 768"},{name:"num_hidden_layers",val:" = 12"},{name:"num_attention_heads",val:" = 12"},{name:"intermediate_size",val:" = 3072"},{name:"hidden_act",val:" = 'gelu'"},{name:"hidden_dropout_prob",val:" = 0.1"},{name:"attention_probs_dropout_prob",val:" = 0.1"},{name:"max_position_embeddings",val:" = 512"},{name:"type_vocab_size",val:" = 2"},{name:"initializer_range",val:" = 0.02"},{name:"layer_norm_eps",val:" = 1e-12"},{name:"pad_token_id",val:" = 0"},{name:"embedding_size",val:" = 768"},{name:"q_groups",val:" = 4"},{name:"k_groups",val:" = 4"},{name:"v_groups",val:" = 4"},{name:"post_attention_groups",val:" = 1"},{name:"intermediate_groups",val:" = 4"},{name:"output_groups",val:" = 4"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/squeezebert/configuration_squeezebert.py#L30",parametersDescription:[{anchor:"transformers.SqueezeBertConfig.vocab_size",description:`<strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 30522) &#x2014; Vocabulary size of the SqueezeBERT model. Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed when calling <a href="/docs/transformers/pr_16143/en/model_doc/squeezebert#transformers.SqueezeBertModel">SqueezeBertModel</a>.`,name:"vocab_size"},{anchor:"transformers.SqueezeBertConfig.hidden_size",description:`<strong>hidden_size</strong> (<code>int</code>, <em>optional</em>, defaults to 768) &#x2014; Dimensionality of the encoder layers and the pooler layer.`,name:"hidden_size"},{anchor:"transformers.SqueezeBertConfig.num_hidden_layers",description:`<strong>num_hidden_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of hidden layers in the Transformer encoder.`,name:"num_hidden_layers"},{anchor:"transformers.SqueezeBertConfig.num_attention_heads",description:`<strong>num_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.`,name:"num_attention_heads"},{anchor:"transformers.SqueezeBertConfig.intermediate_size",description:`<strong>intermediate_size</strong> (<code>int</code>, <em>optional</em>, defaults to 3072) &#x2014; Dimensionality of the &#x201C;intermediate&#x201D; (often named feed-forward) layer in the Transformer encoder.`,name:"intermediate_size"},{anchor:"transformers.SqueezeBertConfig.hidden_act",description:`<strong>hidden_act</strong> (<code>str</code> or <code>Callable</code>, <em>optional</em>, defaults to <code>&quot;gelu&quot;</code>) &#x2014; The non-linear activation function (function or string) in the encoder and pooler. 
If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;silu&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.`,name:"hidden_act"},{anchor:"transformers.SqueezeBertConfig.hidden_dropout_prob",description:`<strong>hidden_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.`,name:"hidden_dropout_prob"},{anchor:"transformers.SqueezeBertConfig.attention_probs_dropout_prob",description:`<strong>attention_probs_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout ratio for the attention probabilities.`,name:"attention_probs_dropout_prob"},{anchor:"transformers.SqueezeBertConfig.max_position_embeddings",description:`<strong>max_position_embeddings</strong> (<code>int</code>, <em>optional</em>, defaults to 512) &#x2014; The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048).`,name:"max_position_embeddings"},{anchor:"transformers.SqueezeBertConfig.type_vocab_size",description:`<strong>type_vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 2) &#x2014; The vocabulary size of the <code>token_type_ids</code> passed when calling <a href="/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertModel">BertModel</a> or <a href="/docs/transformers/pr_16143/en/model_doc/bert#transformers.TFBertModel">TFBertModel</a>.`,name:"type_vocab_size"},{anchor:"transformers.SqueezeBertConfig.initializer_range",description:`<strong>initializer_range</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices.`,name:"initializer_range"},{anchor:"transformers.SqueezeBertConfig.layer_norm_eps",description:"<strong>layer_norm_eps</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-12) &#x2014;",name:"layer_norm_eps"},{anchor:"transformers.SqueezeBertConfig.pad_token_id",description:`<strong>pad_token_id</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; The ID of the token in the word embedding to use as padding.`,name:"pad_token_id"},{anchor:"transformers.SqueezeBertConfig.embedding_size",description:`<strong>embedding_size</strong> (<code>int</code>, <em>optional</em>, defaults to 768) &#x2014; The dimension of the word embedding vectors.`,name:"embedding_size"},{anchor:"transformers.SqueezeBertConfig.q_groups",description:`<strong>q_groups</strong> (<code>int</code>, <em>optional</em>, defaults to 4) &#x2014; The number of groups in Q layer.`,name:"q_groups"},{anchor:"transformers.SqueezeBertConfig.k_groups",description:`<strong>k_groups</strong> (<code>int</code>, <em>optional</em>, defaults to 4) &#x2014; The number of groups in K layer.`,name:"k_groups"},{anchor:"transformers.SqueezeBertConfig.v_groups",description:`<strong>v_groups</strong> (<code>int</code>, <em>optional</em>, defaults to 4) &#x2014; The number of groups in V layer.`,name:"v_groups"},{anchor:"transformers.SqueezeBertConfig.post_attention_groups",description:`<strong>post_attention_groups</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; The number of groups in the first feed forward network layer.`,name:"post_attention_groups"},{anchor:"transformers.SqueezeBertConfig.intermediate_groups",description:`<strong>intermediate_groups</strong> 
(<code>int</code>, <em>optional</em>, defaults to 4) &#x2014; The number of groups in the second feed forward network layer.`,name:"intermediate_groups"},{anchor:"transformers.SqueezeBertConfig.output_groups",description:`<strong>output_groups</strong> (<code>int</code>, <em>optional</em>, defaults to 4) &#x2014; The number of groups in the third feed forward network layer.`,name:"output_groups"}]}}),rt=new D({props:{code:`from transformers import SqueezeBertModel, SqueezeBertConfig # Initializing a SqueezeBERT configuration configuration = SqueezeBertConfig() # Initializing a model from the configuration above model = SqueezeBertModel(configuration) # Accessing the model configuration configuration = model.config`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> SqueezeBertModel, SqueezeBertConfig <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a SqueezeBERT configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = SqueezeBertConfig() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a model from the configuration above</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = SqueezeBertModel(configuration) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = model.config`}}),at=new te({}),it=new O({props:{name:"class transformers.SqueezeBertTokenizer",anchor:"transformers.SqueezeBertTokenizer",parameters:[{name:"vocab_file",val:""},{name:"do_lower_case",val:" = True"},{name:"do_basic_tokenize",val:" = True"},{name:"never_split",val:" = None"},{name:"unk_token",val:" = '[UNK]'"},{name:"sep_token",val:" = '[SEP]'"},{name:"pad_token",val:" = '[PAD]'"},{name:"cls_token",val:" = '[CLS]'"},{name:"mask_token",val:" = '[MASK]'"},{name:"tokenize_chinese_chars",val:" = True"},{name:"strip_accents",val:" = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/squeezebert/tokenization_squeezebert.py#L47"}}),dt=new O({props:{name:"build_inputs_with_special_tokens",anchor:"transformers.BertTokenizer.build_inputs_with_special_tokens",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/bert/tokenization_bert.py#L248",parametersDescription:[{anchor:"transformers.BertTokenizer.build_inputs_with_special_tokens.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs to which the special tokens will be added.`,name:"token_ids_0"},{anchor:"transformers.BertTokenizer.build_inputs_with_special_tokens.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of <a href="../glossary#input-ids">input IDs</a> with the appropriate special tokens.</p> `,returnType:` <p><code>List[int]</code></p> `}}),pt=new O({props:{name:"get_special_tokens_mask",anchor:"transformers.BertTokenizer.get_special_tokens_mask",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"},{name:"already_has_special_tokens",val:": bool = 
False"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/bert/tokenization_bert.py#L273",parametersDescription:[{anchor:"transformers.BertTokenizer.get_special_tokens_mask.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.`,name:"token_ids_0"},{anchor:"transformers.BertTokenizer.get_special_tokens_mask.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"},{anchor:"transformers.BertTokenizer.get_special_tokens_mask.already_has_special_tokens",description:`<strong>already_has_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the token list is already formatted with special tokens for the model.`,name:"already_has_special_tokens"}],returnDescription:` <p>A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.</p> `,returnType:` <p><code>List[int]</code></p> `}}),ut=new O({props:{name:"create_token_type_ids_from_sequences",anchor:"transformers.BertTokenizer.create_token_type_ids_from_sequences",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/bert/tokenization_bert.py#L301",parametersDescription:[{anchor:"transformers.BertTokenizer.create_token_type_ids_from_sequences.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.`,name:"token_ids_0"},{anchor:"transformers.BertTokenizer.create_token_type_ids_from_sequences.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of <a href="../glossary#token-type-ids">token type IDs</a> according to the given sequence(s).</p> `,returnType:` <p><code>List[int]</code></p> `}}),mt=new D({props:{code:`0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence |`,highlighted:`0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 1 </span>1<span class="hljs-number"> 1 </span>1<span class="hljs-number"> 1 </span>1<span class="hljs-number"> 1 </span>1 1 | first sequence | second sequence |`}}),ft=new O({props:{name:"save_vocabulary",anchor:"transformers.BertTokenizer.save_vocabulary",parameters:[{name:"save_directory",val:": str"},{name:"filename_prefix",val:": typing.Optional[str] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/bert/tokenization_bert.py#L330"}}),gt=new te({}),_t=new O({props:{name:"class transformers.SqueezeBertTokenizerFast",anchor:"transformers.SqueezeBertTokenizerFast",parameters:[{name:"vocab_file",val:" = None"},{name:"tokenizer_file",val:" = None"},{name:"do_lower_case",val:" = True"},{name:"unk_token",val:" = '[UNK]'"},{name:"sep_token",val:" = '[SEP]'"},{name:"pad_token",val:" = '[PAD]'"},{name:"cls_token",val:" = '[CLS]'"},{name:"mask_token",val:" = '[MASK]'"},{name:"tokenize_chinese_chars",val:" = True"},{name:"strip_accents",val:" = 
None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/squeezebert/tokenization_squeezebert_fast.py#L53"}}),qt=new te({}),kt=new O({props:{name:"class transformers.SqueezeBertModel",anchor:"transformers.SqueezeBertModel",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/squeezebert/modeling_squeezebert.py#L550",parametersDescription:[{anchor:"transformers.SqueezeBertModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/squeezebert#transformers.SqueezeBertConfig">SqueezeBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),$t=new D({props:{code:`Internal class hierarchy: SqueezeBertModel SqueezeBertEncoder SqueezeBertModule SqueezeBertSelfAttention ConvActivation ConvDropoutLayerNorm`,highlighted:`Internal <span class="hljs-keyword">class</span> <span class="hljs-symbol">hierarchy: <span class="hljs-symbol">SqueezeBertModel</span></span> <span class="hljs-symbol">SqueezeBertEncoder</span> <span class="hljs-symbol">SqueezeBertModule</span> <span class="hljs-symbol">SqueezeBertSelfAttention</span> <span class="hljs-symbol">ConvActivation</span> <span class="hljs-symbol">ConvDropoutLayerNorm</span>`}}),Et=new D({props:{code:`Input data is in [batch, sequence_length, hidden_size] format. Data inside the encoder is in [batch, hidden_size, sequence_length] format. But, if \`output_hidden_states == True\`, the data from inside the encoder is returned in [batch, sequence_length, hidden_size] format. The final output of the encoder is in [batch, sequence_length, hidden_size] format.`,highlighted:`<span class="hljs-keyword">Input</span> data <span class="hljs-keyword">is</span> <span class="hljs-keyword">in</span> [batch, sequence_length, hidden_size] <span class="hljs-keyword">format</span>. Data inside the encoder <span class="hljs-keyword">is</span> <span class="hljs-keyword">in</span> [batch, hidden_size, sequence_length] <span class="hljs-keyword">format</span>. But, <span class="hljs-keyword">if</span> \`output_hidden_states == <span class="hljs-keyword">True</span>\`, the data <span class="hljs-keyword">from</span> inside the encoder <span class="hljs-keyword">is</span> returned <span class="hljs-keyword">in</span> [batch, sequence_length, hidden_size] <span class="hljs-keyword">format</span>. 
The final output <span class="hljs-keyword">of</span> the encoder <span class="hljs-keyword">is</span> <span class="hljs-keyword">in</span> [batch, sequence_length, hidden_size] <span class="hljs-keyword">format</span>.`}}),jt=new O({props:{name:"forward",anchor:"transformers.SqueezeBertModel.forward",parameters:[{name:"input_ids",val:": typing.Optional[torch.Tensor] = None"},{name:"attention_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"token_type_ids",val:": typing.Optional[torch.Tensor] = None"},{name:"position_ids",val:": typing.Optional[torch.Tensor] = None"},{name:"head_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"inputs_embeds",val:": typing.Optional[torch.FloatTensor] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/squeezebert/modeling_squeezebert.py#L575",parametersDescription:[{anchor:"transformers.SqueezeBertModel.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/squeezebert#transformers.SqueezeBertTokenizer">SqueezeBertTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.SqueezeBertModel.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.SqueezeBertModel.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.SqueezeBertModel.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.SqueezeBertModel.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.SqueezeBertModel.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.SqueezeBertModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.SqueezeBertModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.SqueezeBertModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPooling" >transformers.modeling_outputs.BaseModelOutputWithPooling</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/squeezebert#transformers.SqueezeBertConfig" >SqueezeBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>pooler_output</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, hidden_size)</code>) \u2014 Last layer hidden-state of the first token of the sequence (classification token) after further processing through the layers used for the auxiliary pretraining task. E.g. for BERT-family of models, this returns the classification token after processing through a linear layer and a tanh activation function. 
The linear layer weights are trained from the next sentence prediction (classification) objective during pretraining.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPooling" >transformers.modeling_outputs.BaseModelOutputWithPooling</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Ne=new $n({props:{$$slots:{default:[Dm]},$$scope:{ctx:H}}}),Mt=new D({props:{code:`from transformers import SqueezeBertTokenizer, SqueezeBertModel import torch tokenizer = SqueezeBertTokenizer.from_pretrained("squeezebert/squeezebert-uncased") model = SqueezeBertModel.from_pretrained("squeezebert/squeezebert-uncased") inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> SqueezeBertTokenizer, SqueezeBertModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = SqueezeBertTokenizer.from_pretrained(<span class="hljs-string">&quot;squeezebert/squeezebert-uncased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = SqueezeBertModel.from_pretrained(<span class="hljs-string">&quot;squeezebert/squeezebert-uncased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),Ft=new te({}),Ct=new O({props:{name:"class transformers.SqueezeBertForMaskedLM",anchor:"transformers.SqueezeBertForMaskedLM",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/squeezebert/modeling_squeezebert.py#L650",parametersDescription:[{anchor:"transformers.SqueezeBertForMaskedLM.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/squeezebert#transformers.SqueezeBertConfig">SqueezeBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Ot=new D({props:{code:`Internal class hierarchy: SqueezeBertModel SqueezeBertEncoder SqueezeBertModule SqueezeBertSelfAttention ConvActivation ConvDropoutLayerNorm`,highlighted:`Internal <span class="hljs-keyword">class</span> <span class="hljs-symbol">hierarchy: <span class="hljs-symbol">SqueezeBertModel</span></span> <span class="hljs-symbol">SqueezeBertEncoder</span> <span class="hljs-symbol">SqueezeBertModule</span> <span class="hljs-symbol">SqueezeBertSelfAttention</span> <span class="hljs-symbol">ConvActivation</span> <span class="hljs-symbol">ConvDropoutLayerNorm</span>`}}),Rt=new D({props:{code:`Input data is in [batch, sequence_length, hidden_size] format. Data inside the encoder is in [batch, hidden_size, sequence_length] format. But, if \`output_hidden_states == True\`, the data from inside the encoder is returned in [batch, sequence_length, hidden_size] format. The final output of the encoder is in [batch, sequence_length, hidden_size] format.`,highlighted:`<span class="hljs-keyword">Input</span> data <span class="hljs-keyword">is</span> <span class="hljs-keyword">in</span> [batch, sequence_length, hidden_size] <span class="hljs-keyword">format</span>. Data inside the encoder <span class="hljs-keyword">is</span> <span class="hljs-keyword">in</span> [batch, hidden_size, sequence_length] <span class="hljs-keyword">format</span>. But, <span class="hljs-keyword">if</span> \`output_hidden_states == <span class="hljs-keyword">True</span>\`, the data <span class="hljs-keyword">from</span> inside the encoder <span class="hljs-keyword">is</span> returned <span class="hljs-keyword">in</span> [batch, sequence_length, hidden_size] <span class="hljs-keyword">format</span>. The final output <span class="hljs-keyword">of</span> the encoder <span class="hljs-keyword">is</span> <span class="hljs-keyword">in</span> [batch, sequence_length, hidden_size] <span class="hljs-keyword">format</span>.`}}),Wt=new O({props:{name:"forward",anchor:"transformers.SqueezeBertForMaskedLM.forward",parameters:[{name:"input_ids",val:": typing.Optional[torch.Tensor] = None"},{name:"attention_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"token_type_ids",val:": typing.Optional[torch.Tensor] = None"},{name:"position_ids",val:": typing.Optional[torch.Tensor] = None"},{name:"head_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"inputs_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"labels",val:": typing.Optional[torch.Tensor] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/squeezebert/modeling_squeezebert.py#L669",parametersDescription:[{anchor:"transformers.SqueezeBertForMaskedLM.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/squeezebert#transformers.SqueezeBertTokenizer">SqueezeBertTokenizer</a>. 
See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.SqueezeBertForMaskedLM.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.SqueezeBertForMaskedLM.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.SqueezeBertForMaskedLM.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.SqueezeBertForMaskedLM.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.SqueezeBertForMaskedLM.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.SqueezeBertForMaskedLM.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.SqueezeBertForMaskedLM.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.SqueezeBertForMaskedLM.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.SqueezeBertForMaskedLM.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.MaskedLMOutput" >transformers.modeling_outputs.MaskedLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/squeezebert#transformers.SqueezeBertConfig" >SqueezeBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Masked language modeling (MLM) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.MaskedLMOutput" >transformers.modeling_outputs.MaskedLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),De=new 
$n({props:{$$slots:{default:[Om]},$$scope:{ctx:H}}}),Kt=new D({props:{code:`from transformers import SqueezeBertTokenizer, SqueezeBertForMaskedLM import torch tokenizer = SqueezeBertTokenizer.from_pretrained("squeezebert/squeezebert-uncased") model = SqueezeBertForMaskedLM.from_pretrained("squeezebert/squeezebert-uncased") inputs = tokenizer("The capital of France is [MASK].", return_tensors="pt") labels = tokenizer("The capital of France is Paris.", return_tensors="pt")["input_ids"] outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> SqueezeBertTokenizer, SqueezeBertForMaskedLM <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = SqueezeBertTokenizer.from_pretrained(<span class="hljs-string">&quot;squeezebert/squeezebert-uncased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = SqueezeBertForMaskedLM.from_pretrained(<span class="hljs-string">&quot;squeezebert/squeezebert-uncased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;The capital of France is [MASK].&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = tokenizer(<span class="hljs-string">&quot;The capital of France is Paris.&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>)[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Ht=new te({}),Qt=new O({props:{name:"class transformers.SqueezeBertForSequenceClassification",anchor:"transformers.SqueezeBertForSequenceClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/squeezebert/modeling_squeezebert.py#L736",parametersDescription:[{anchor:"transformers.SqueezeBertForSequenceClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/squeezebert#transformers.SqueezeBertConfig">SqueezeBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Zt=new D({props:{code:`Internal class hierarchy: SqueezeBertModel SqueezeBertEncoder SqueezeBertModule SqueezeBertSelfAttention ConvActivation ConvDropoutLayerNorm`,highlighted:`Internal <span class="hljs-keyword">class</span> <span class="hljs-symbol">hierarchy: <span class="hljs-symbol">SqueezeBertModel</span></span> <span class="hljs-symbol">SqueezeBertEncoder</span> <span class="hljs-symbol">SqueezeBertModule</span> <span class="hljs-symbol">SqueezeBertSelfAttention</span> <span class="hljs-symbol">ConvActivation</span> <span class="hljs-symbol">ConvDropoutLayerNorm</span>`}}),es=new D({props:{code:`Input data is in [batch, sequence_length, hidden_size] format. 
Data inside the encoder is in [batch, hidden_size, sequence_length] format. But, if \`output_hidden_states == True\`, the data from inside the encoder is returned in [batch, sequence_length, hidden_size] format. The final output of the encoder is in [batch, sequence_length, hidden_size] format.`,highlighted:`<span class="hljs-keyword">Input</span> data <span class="hljs-keyword">is</span> <span class="hljs-keyword">in</span> [batch, sequence_length, hidden_size] <span class="hljs-keyword">format</span>. Data inside the encoder <span class="hljs-keyword">is</span> <span class="hljs-keyword">in</span> [batch, hidden_size, sequence_length] <span class="hljs-keyword">format</span>. But, <span class="hljs-keyword">if</span> \`output_hidden_states == <span class="hljs-keyword">True</span>\`, the data <span class="hljs-keyword">from</span> inside the encoder <span class="hljs-keyword">is</span> returned <span class="hljs-keyword">in</span> [batch, sequence_length, hidden_size] <span class="hljs-keyword">format</span>. The final output <span class="hljs-keyword">of</span> the encoder <span class="hljs-keyword">is</span> <span class="hljs-keyword">in</span> [batch, sequence_length, hidden_size] <span class="hljs-keyword">format</span>.`}}),ts=new O({props:{name:"forward",anchor:"transformers.SqueezeBertForSequenceClassification.forward",parameters:[{name:"input_ids",val:": typing.Optional[torch.Tensor] = None"},{name:"attention_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"token_type_ids",val:": typing.Optional[torch.Tensor] = None"},{name:"position_ids",val:": typing.Optional[torch.Tensor] = None"},{name:"head_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"inputs_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"labels",val:": typing.Optional[torch.Tensor] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/squeezebert/modeling_squeezebert.py#L749",parametersDescription:[{anchor:"transformers.SqueezeBertForSequenceClassification.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/squeezebert#transformers.SqueezeBertTokenizer">SqueezeBertTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.SqueezeBertForSequenceClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.SqueezeBertForSequenceClassification.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.SqueezeBertForSequenceClassification.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.SqueezeBertForSequenceClassification.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.SqueezeBertForSequenceClassification.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.SqueezeBertForSequenceClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.SqueezeBertForSequenceClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.SqueezeBertForSequenceClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.SqueezeBertForSequenceClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/squeezebert#transformers.SqueezeBertConfig" >SqueezeBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Re=new $n({props:{$$slots:{default:[Rm]},$$scope:{ctx:H}}}),ss=new D({props:{code:`import torch from transformers import SqueezeBertTokenizer, SqueezeBertForSequenceClassification torch.manual_seed(0) tokenizer = SqueezeBertTokenizer.from_pretrained("squeezebert/squeezebert-uncased") model = 
SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-uncased", num_labels=2) inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([1]).unsqueeze(0) # Batch size 1 outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits list(logits.shape) `,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> SqueezeBertTokenizer, SqueezeBertForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span>torch.manual_seed(<span class="hljs-number">0</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = SqueezeBertTokenizer.from_pretrained(<span class="hljs-string">&quot;squeezebert/squeezebert-uncased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = SqueezeBertForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;squeezebert/squeezebert-uncased&quot;</span>, num_labels=<span class="hljs-number">2</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([<span class="hljs-number">1</span>]).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">list</span>(logits.shape) `}}),ns=new D({props:{code:`import torch from transformers import SqueezeBertTokenizer, SqueezeBertForSequenceClassification torch.manual_seed(0) tokenizer = SqueezeBertTokenizer.from_pretrained("squeezebert/squeezebert-uncased") model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-uncased", problem_type="multi_label_classification", num_labels=2) inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([[1, 1]], dtype=torch.float) # need dtype=float for BCEWithLogitsLoss outputs = model(**inputs, labels=labels) loss = outputs.loss list(logits.shape) `,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> SqueezeBertTokenizer, SqueezeBertForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span>torch.manual_seed(<span class="hljs-number">0</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = SqueezeBertTokenizer.from_pretrained(<span class="hljs-string">&quot;squeezebert/squeezebert-uncased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = SqueezeBertForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;squeezebert/squeezebert-uncased&quot;</span>, problem_type=<span class="hljs-string">&quot;multi_label_classification&quot;</span>, num_labels=<span class="hljs-number">2</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span 
class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([[<span class="hljs-number">1</span>, <span class="hljs-number">1</span>]], dtype=torch.<span class="hljs-built_in">float</span>) <span class="hljs-comment"># need dtype=float for BCEWithLogitsLoss</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">list</span>(logits.shape) `}}),os=new te({}),rs=new O({props:{name:"class transformers.SqueezeBertForMultipleChoice",anchor:"transformers.SqueezeBertForMultipleChoice",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/squeezebert/modeling_squeezebert.py#L836",parametersDescription:[{anchor:"transformers.SqueezeBertForMultipleChoice.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/squeezebert#transformers.SqueezeBertConfig">SqueezeBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),hs=new D({props:{code:`Internal class hierarchy: SqueezeBertModel SqueezeBertEncoder SqueezeBertModule SqueezeBertSelfAttention ConvActivation ConvDropoutLayerNorm`,highlighted:`Internal <span class="hljs-keyword">class</span> <span class="hljs-symbol">hierarchy: <span class="hljs-symbol">SqueezeBertModel</span></span> <span class="hljs-symbol">SqueezeBertEncoder</span> <span class="hljs-symbol">SqueezeBertModule</span> <span class="hljs-symbol">SqueezeBertSelfAttention</span> <span class="hljs-symbol">ConvActivation</span> <span class="hljs-symbol">ConvDropoutLayerNorm</span>`}}),us=new D({props:{code:`Input data is in [batch, sequence_length, hidden_size] format. Data inside the encoder is in [batch, hidden_size, sequence_length] format. But, if \`output_hidden_states == True\`, the data from inside the encoder is returned in [batch, sequence_length, hidden_size] format. The final output of the encoder is in [batch, sequence_length, hidden_size] format.`,highlighted:`<span class="hljs-keyword">Input</span> data <span class="hljs-keyword">is</span> <span class="hljs-keyword">in</span> [batch, sequence_length, hidden_size] <span class="hljs-keyword">format</span>. Data inside the encoder <span class="hljs-keyword">is</span> <span class="hljs-keyword">in</span> [batch, hidden_size, sequence_length] <span class="hljs-keyword">format</span>. But, <span class="hljs-keyword">if</span> \`output_hidden_states == <span class="hljs-keyword">True</span>\`, the data <span class="hljs-keyword">from</span> inside the encoder <span class="hljs-keyword">is</span> returned <span class="hljs-keyword">in</span> [batch, sequence_length, hidden_size] <span class="hljs-keyword">format</span>. 
The final output <span class="hljs-keyword">of</span> the encoder <span class="hljs-keyword">is</span> <span class="hljs-keyword">in</span> [batch, sequence_length, hidden_size] <span class="hljs-keyword">format</span>.`}}),ms=new O({props:{name:"forward",anchor:"transformers.SqueezeBertForMultipleChoice.forward",parameters:[{name:"input_ids",val:": typing.Optional[torch.Tensor] = None"},{name:"attention_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"token_type_ids",val:": typing.Optional[torch.Tensor] = None"},{name:"position_ids",val:": typing.Optional[torch.Tensor] = None"},{name:"head_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"inputs_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"labels",val:": typing.Optional[torch.Tensor] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/squeezebert/modeling_squeezebert.py#L847",parametersDescription:[{anchor:"transformers.SqueezeBertForMultipleChoice.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/squeezebert#transformers.SqueezeBertTokenizer">SqueezeBertTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.SqueezeBertForMultipleChoice.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.SqueezeBertForMultipleChoice.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.SqueezeBertForMultipleChoice.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.SqueezeBertForMultipleChoice.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.SqueezeBertForMultipleChoice.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.SqueezeBertForMultipleChoice.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.SqueezeBertForMultipleChoice.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.SqueezeBertForMultipleChoice.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.SqueezeBertForMultipleChoice.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the multiple choice classification loss. Indices should be in <code>[0, ..., num_choices-1]</code> where <em>num_choices</em> is the size of the second dimension of the input tensors. 
(see <em>input_ids</em> above)`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.MultipleChoiceModelOutput" >transformers.modeling_outputs.MultipleChoiceModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/squeezebert#transformers.SqueezeBertConfig" >SqueezeBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <em>(1,)</em>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices)</code>) \u2014 <em>num_choices</em> is the second dimension of the input tensors. (see <em>input_ids</em> above).</p> <p>Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.MultipleChoiceModelOutput" >transformers.modeling_outputs.MultipleChoiceModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Ke=new $n({props:{$$slots:{default:[Wm]},$$scope:{ctx:H}}}),fs=new D({props:{code:`from transformers import SqueezeBertTokenizer, SqueezeBertForMultipleChoice import torch tokenizer = SqueezeBertTokenizer.from_pretrained("squeezebert/squeezebert-uncased") model = SqueezeBertForMultipleChoice.from_pretrained("squeezebert/squeezebert-uncased") prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." choice0 = "It is eaten with a fork and a knife." choice1 = "It is eaten while held in the hand." 
labels = torch.tensor(0).unsqueeze(0) # choice0 is correct (according to Wikipedia ;)), batch size 1 encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors="pt", padding=True) outputs = model(**{k: v.unsqueeze(0) for k, v in encoding.items()}, labels=labels) # batch size is 1 # the linear classifier still needs to be trained loss = outputs.loss logits = outputs.logits`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> SqueezeBertTokenizer, SqueezeBertForMultipleChoice <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = SqueezeBertTokenizer.from_pretrained(<span class="hljs-string">&quot;squeezebert/squeezebert-uncased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = SqueezeBertForMultipleChoice.from_pretrained(<span class="hljs-string">&quot;squeezebert/squeezebert-uncased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice0 = <span class="hljs-string">&quot;It is eaten with a fork and a knife.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice1 = <span class="hljs-string">&quot;It is eaten while held in the hand.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor(<span class="hljs-number">0</span>).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># choice0 is correct (according to Wikipedia ;)), batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors=<span class="hljs-string">&quot;pt&quot;</span>, padding=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**{k: v.unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-keyword">for</span> k, v <span class="hljs-keyword">in</span> encoding.items()}, labels=labels) <span class="hljs-comment"># batch size is 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># the linear classifier still needs to be trained</span> <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),gs=new te({}),_s=new O({props:{name:"class transformers.SqueezeBertForTokenClassification",anchor:"transformers.SqueezeBertForTokenClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/squeezebert/modeling_squeezebert.py#L930",parametersDescription:[{anchor:"transformers.SqueezeBertForTokenClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/squeezebert#transformers.SqueezeBertConfig">SqueezeBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),ws=new D({props:{code:`Internal class hierarchy: SqueezeBertModel SqueezeBertEncoder SqueezeBertModule SqueezeBertSelfAttention ConvActivation ConvDropoutLayerNorm`,highlighted:`Internal <span class="hljs-keyword">class</span> <span class="hljs-symbol">hierarchy: <span class="hljs-symbol">SqueezeBertModel</span></span> <span class="hljs-symbol">SqueezeBertEncoder</span> <span class="hljs-symbol">SqueezeBertModule</span> <span class="hljs-symbol">SqueezeBertSelfAttention</span> <span class="hljs-symbol">ConvActivation</span> <span class="hljs-symbol">ConvDropoutLayerNorm</span>`}}),Ts=new D({props:{code:`Input data is in [batch, sequence_length, hidden_size] format. Data inside the encoder is in [batch, hidden_size, sequence_length] format. But, if \`output_hidden_states == True\`, the data from inside the encoder is returned in [batch, sequence_length, hidden_size] format. The final output of the encoder is in [batch, sequence_length, hidden_size] format.`,highlighted:`<span class="hljs-keyword">Input</span> data <span class="hljs-keyword">is</span> <span class="hljs-keyword">in</span> [batch, sequence_length, hidden_size] <span class="hljs-keyword">format</span>. Data inside the encoder <span class="hljs-keyword">is</span> <span class="hljs-keyword">in</span> [batch, hidden_size, sequence_length] <span class="hljs-keyword">format</span>. But, <span class="hljs-keyword">if</span> \`output_hidden_states == <span class="hljs-keyword">True</span>\`, the data <span class="hljs-keyword">from</span> inside the encoder <span class="hljs-keyword">is</span> returned <span class="hljs-keyword">in</span> [batch, sequence_length, hidden_size] <span class="hljs-keyword">format</span>. The final output <span class="hljs-keyword">of</span> the encoder <span class="hljs-keyword">is</span> <span class="hljs-keyword">in</span> [batch, sequence_length, hidden_size] <span class="hljs-keyword">format</span>.`}}),Ss=new O({props:{name:"forward",anchor:"transformers.SqueezeBertForTokenClassification.forward",parameters:[{name:"input_ids",val:": typing.Optional[torch.Tensor] = None"},{name:"attention_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"token_type_ids",val:": typing.Optional[torch.Tensor] = None"},{name:"position_ids",val:": typing.Optional[torch.Tensor] = None"},{name:"head_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"inputs_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"labels",val:": typing.Optional[torch.Tensor] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/squeezebert/modeling_squeezebert.py#L942",parametersDescription:[{anchor:"transformers.SqueezeBertForTokenClassification.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/squeezebert#transformers.SqueezeBertTokenizer">SqueezeBertTokenizer</a>. 
See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.SqueezeBertForTokenClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.SqueezeBertForTokenClassification.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.SqueezeBertForTokenClassification.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.SqueezeBertForTokenClassification.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.SqueezeBertForTokenClassification.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.SqueezeBertForTokenClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.SqueezeBertForTokenClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.SqueezeBertForTokenClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.SqueezeBertForTokenClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the token classification loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/squeezebert#transformers.SqueezeBertConfig" >SqueezeBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.num_labels)</code>) \u2014 Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Qe=new $n({props:{$$slots:{default:[Km]},$$scope:{ctx:H}}}),Bs=new D({props:{code:`from transformers import SqueezeBertTokenizer, SqueezeBertForTokenClassification import torch tokenizer = 
SqueezeBertTokenizer.from_pretrained("squeezebert/squeezebert-uncased") model = SqueezeBertForTokenClassification.from_pretrained("squeezebert/squeezebert-uncased") inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([1] * inputs["input_ids"].size(1)).unsqueeze(0) # Batch size 1 outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> SqueezeBertTokenizer, SqueezeBertForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = SqueezeBertTokenizer.from_pretrained(<span class="hljs-string">&quot;squeezebert/squeezebert-uncased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = SqueezeBertForTokenClassification.from_pretrained(<span class="hljs-string">&quot;squeezebert/squeezebert-uncased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([<span class="hljs-number">1</span>] * inputs[<span class="hljs-string">&quot;input_ids&quot;</span>].size(<span class="hljs-number">1</span>)).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),$s=new te({}),Es=new O({props:{name:"class transformers.SqueezeBertForQuestionAnswering",anchor:"transformers.SqueezeBertForQuestionAnswering",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/squeezebert/modeling_squeezebert.py#L1009",parametersDescription:[{anchor:"transformers.SqueezeBertForQuestionAnswering.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/squeezebert#transformers.SqueezeBertConfig">SqueezeBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),As=new D({props:{code:`Internal class hierarchy: SqueezeBertModel SqueezeBertEncoder SqueezeBertModule SqueezeBertSelfAttention ConvActivation ConvDropoutLayerNorm`,highlighted:`Internal <span class="hljs-keyword">class</span> <span class="hljs-symbol">hierarchy: <span class="hljs-symbol">SqueezeBertModel</span></span> <span class="hljs-symbol">SqueezeBertEncoder</span> <span class="hljs-symbol">SqueezeBertModule</span> <span class="hljs-symbol">SqueezeBertSelfAttention</span> <span class="hljs-symbol">ConvActivation</span> <span class="hljs-symbol">ConvDropoutLayerNorm</span>`}}),Ls=new D({props:{code:`Input data is in [batch, sequence_length, hidden_size] format. Data inside the encoder is in [batch, hidden_size, sequence_length] format. 
But, if \`output_hidden_states == True\`, the data from inside the encoder is returned in [batch, sequence_length, hidden_size] format. The final output of the encoder is in [batch, sequence_length, hidden_size] format.`,highlighted:`<span class="hljs-keyword">Input</span> data <span class="hljs-keyword">is</span> <span class="hljs-keyword">in</span> [batch, sequence_length, hidden_size] <span class="hljs-keyword">format</span>. Data inside the encoder <span class="hljs-keyword">is</span> <span class="hljs-keyword">in</span> [batch, hidden_size, sequence_length] <span class="hljs-keyword">format</span>. But, <span class="hljs-keyword">if</span> \`output_hidden_states == <span class="hljs-keyword">True</span>\`, the data <span class="hljs-keyword">from</span> inside the encoder <span class="hljs-keyword">is</span> returned <span class="hljs-keyword">in</span> [batch, sequence_length, hidden_size] <span class="hljs-keyword">format</span>. The final output <span class="hljs-keyword">of</span> the encoder <span class="hljs-keyword">is</span> <span class="hljs-keyword">in</span> [batch, sequence_length, hidden_size] <span class="hljs-keyword">format</span>.`}}),Ns=new O({props:{name:"forward",anchor:"transformers.SqueezeBertForQuestionAnswering.forward",parameters:[{name:"input_ids",val:": typing.Optional[torch.Tensor] = None"},{name:"attention_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"token_type_ids",val:": typing.Optional[torch.Tensor] = None"},{name:"position_ids",val:": typing.Optional[torch.Tensor] = None"},{name:"head_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"inputs_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"start_positions",val:": typing.Optional[torch.Tensor] = None"},{name:"end_positions",val:": typing.Optional[torch.Tensor] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/squeezebert/modeling_squeezebert.py#L1020",parametersDescription:[{anchor:"transformers.SqueezeBertForQuestionAnswering.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/squeezebert#transformers.SqueezeBertTokenizer">SqueezeBertTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.SqueezeBertForQuestionAnswering.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.SqueezeBertForQuestionAnswering.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.SqueezeBertForQuestionAnswering.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.SqueezeBertForQuestionAnswering.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.SqueezeBertForQuestionAnswering.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.SqueezeBertForQuestionAnswering.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.SqueezeBertForQuestionAnswering.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.SqueezeBertForQuestionAnswering.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.SqueezeBertForQuestionAnswering.forward.start_positions",description:`<strong>start_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<em>sequence_length</em>). Position outside of the sequence are not taken into account for computing the loss.`,name:"start_positions"},{anchor:"transformers.SqueezeBertForQuestionAnswering.forward.end_positions",description:`<strong>end_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<em>sequence_length</em>). Position outside of the sequence are not taken into account for computing the loss.`,name:"end_positions"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.QuestionAnsweringModelOutput" >transformers.modeling_outputs.QuestionAnsweringModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/squeezebert#transformers.SqueezeBertConfig" >SqueezeBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.</p> </li> <li> <p><strong>start_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-start scores (before SoftMax).</p> </li> <li> <p><strong>end_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-end scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention 
softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.QuestionAnsweringModelOutput" >transformers.modeling_outputs.QuestionAnsweringModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Ve=new $n({props:{$$slots:{default:[Hm]},$$scope:{ctx:H}}}),Is=new D({props:{code:`from transformers import SqueezeBertTokenizer, SqueezeBertForQuestionAnswering import torch torch.manual_seed(0) tokenizer = SqueezeBertTokenizer.from_pretrained("squeezebert/squeezebert-uncased") model = SqueezeBertForQuestionAnswering.from_pretrained("squeezebert/squeezebert-uncased") question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" inputs = tokenizer(question, text, return_tensors="pt") start_positions = torch.tensor([1]) end_positions = torch.tensor([3]) outputs = model(**inputs, start_positions=start_positions, end_positions=end_positions) loss = outputs.loss round(loss.item(), 2) start_scores = outputs.start_logits list(start_scores.shape) end_scores = outputs.end_logits list(end_scores.shape) `,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> SqueezeBertTokenizer, SqueezeBertForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>torch.manual_seed(<span class="hljs-number">0</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = SqueezeBertTokenizer.from_pretrained(<span class="hljs-string">&quot;squeezebert/squeezebert-uncased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = SqueezeBertForQuestionAnswering.from_pretrained(<span class="hljs-string">&quot;squeezebert/squeezebert-uncased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>question, text = <span class="hljs-string">&quot;Who was Jim Henson?&quot;</span>, <span class="hljs-string">&quot;Jim Henson was a nice puppet&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(question, text, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>start_positions = torch.tensor([<span class="hljs-number">1</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>end_positions = torch.tensor([<span class="hljs-number">3</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, start_positions=start_positions, end_positions=end_positions) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">round</span>(loss.item(), <span class="hljs-number">2</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>start_scores = outputs.start_logits <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">list</span>(start_scores.shape) <span class="hljs-meta">&gt;&gt;&gt; </span>end_scores = outputs.end_logits <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">list</span>(end_scores.shape) `}}),{c(){h=r("meta"),w=l(),q=r("h1"),y=r("a"),T=r("span"),m(v.$$.fragment),k=l(),F=r("span"),za=n("SqueezeBERT"),nr=l(),ce=r("h2"),Ee=r("a"),En=r("span"),m(Xe.$$.fragment),ba=l(),jn=r("span"),qa=n("Overview"),or=l(),se=r("p"),ka=n("The SqueezeBERT model was proposed in "),Ye=r("a"),va=n("SqueezeBERT: What can computer vision teach NLP about efficient neural networks?"),ya=n(` by Forrest N. 
Iandola, Albert E. Shaw, Ravi Krishna, Kurt W. Keutzer. It\u2019s a bidirectional transformer similar to the BERT model. The key difference between the BERT architecture and the SqueezeBERT architecture is that SqueezeBERT uses `),Ze=r("a"),wa=n("grouped convolutions"),Ta=n(` instead of fully-connected layers for the Q, K, V and FFN layers.`),rr=l(),Rs=r("p"),Sa=n("The abstract from the paper is the following:"),ar=l(),Ws=r("p"),Mn=r("em"),Ba=n(`Humans read and write hundreds of billions of messages every day. Further, due to the availability of large datasets, large computing systems, and better neural network models, natural language processing (NLP) technology has made significant strides in understanding, proofreading, and organizing these messages. Thus, there is a significant opportunity to deploy NLP in myriad applications to help web users, social networks, and businesses. In particular, we consider smartphones and other mobile devices as crucial platforms for deploying NLP models at scale. However, today\u2019s highly-accurate NLP neural network models such as BERT and RoBERTa are extremely computationally expensive, with BERT-base taking 1.7 seconds to classify a text snippet on a Pixel 3 smartphone. In this work, we observe that methods such as grouped convolutions have yielded significant speedups for computer vision networks, but many of these techniques have not been adopted by NLP neural network designers. We demonstrate how to replace several operations in self-attention layers with grouped convolutions, and we use this technique in a novel network architecture called SqueezeBERT, which runs 4.3x faster than BERT-base on the Pixel 3 while achieving competitive accuracy on the GLUE test set. The SqueezeBERT code will be released.`),ir=l(),Ks=r("p"),$a=n("Tips:"),lr=l(),ne=r("ul"),Fn=r("li"),Ea=n(`SqueezeBERT is a model with absolute position embeddings so it\u2019s usually advised to pad the inputs on the right rather than the left.`),ja=l(),Cn=r("li"),Ma=n(`SqueezeBERT is similar to BERT and therefore relies on the masked language modeling (MLM) objective. It is therefore efficient at predicting masked tokens and at NLU in general, but is not optimal for text generation. Models trained with a causal language modeling (CLM) objective are better in that regard.`),Fa=l(),et=r("li"),Ca=n(`For best results when finetuning on sequence classification tasks, it is recommended to start with the `),Pn=r("em"),Pa=n("squeezebert/squeezebert-mnli-headless"),xa=n(" checkpoint."),dr=l(),je=r("p"),Aa=n("This model was contributed by "),tt=r("a"),La=n("forresti"),Na=n("."),cr=l(),pe=r("h2"),Me=r("a"),xn=r("span"),m(st.$$.fragment),Ia=l(),An=r("span"),Da=n("SqueezeBertConfig"),pr=l(),K=r("div"),m(nt.$$.fragment),Oa=l(),ot=r("p"),Ra=n("This is the configuration class to store the configuration of a "),Hs=r("a"),Wa=n("SqueezeBertModel"),Ka=n(`. It is used to instantiate a SqueezeBERT model according to the specified arguments, defining the model architecture.`),Ha=l(),he=r("p"),Qa=n("Configuration objects inherit from "),Qs=r("a"),Ua=n("PretrainedConfig"),Va=n(` and can be used to control the model outputs. 
Read the documentation from `),Us=r("a"),Ga=n("PretrainedConfig"),Ja=n(" for more information."),Xa=l(),Ln=r("p"),Ya=n("Examples:"),Za=l(),m(rt.$$.fragment),ei=l(),Nn=r("p"),ti=n(`Attributes: pretrained_config_archive_map (Dict[str, str]): A dictionary containing all the available pre-trained checkpoints.`),hr=l(),ue=r("h2"),Fe=r("a"),In=r("span"),m(at.$$.fragment),si=l(),Dn=r("span"),ni=n("SqueezeBertTokenizer"),ur=l(),C=r("div"),m(it.$$.fragment),oi=l(),On=r("p"),ri=n("Constructs a SqueezeBert tokenizer."),ai=l(),Ce=r("p"),Vs=r("a"),ii=n("SqueezeBertTokenizer"),li=n(" is identical to "),Gs=r("a"),di=n("BertTokenizer"),ci=n(" and runs end-to-end tokenization: punctuation splitting"),pi=l(),Rn=r("ul"),Wn=r("li"),hi=n("wordpiece."),ui=l(),lt=r("p"),mi=n("Refer to superclass "),Js=r("a"),fi=n("BertTokenizer"),gi=n(" for usage examples and documentation concerning parameters."),_i=l(),oe=r("div"),m(dt.$$.fragment),zi=l(),Kn=r("p"),bi=n(`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A BERT sequence has the following format:`),qi=l(),ct=r("ul"),Xs=r("li"),ki=n("single sequence: "),Hn=r("code"),vi=n("[CLS] X [SEP]"),yi=l(),Ys=r("li"),wi=n("pair of sequences: "),Qn=r("code"),Ti=n("[CLS] A [SEP] B [SEP]"),Si=l(),Pe=r("div"),m(pt.$$.fragment),Bi=l(),ht=r("p"),$i=n(`Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `),Un=r("code"),Ei=n("prepare_for_model"),ji=n(" method."),Mi=l(),Z=r("div"),m(ut.$$.fragment),Fi=l(),Vn=r("p"),Ci=n(`Create a mask from the two sequences passed to be used in a sequence-pair classification task. A BERT sequence pair mask has the following format:`),Pi=l(),m(mt.$$.fragment),xi=l(),me=r("p"),Ai=n("If "),Gn=r("code"),Li=n("token_ids_1"),Ni=n(" is "),Jn=r("code"),Ii=n("None"),Di=n(", this method only returns the first portion of the mask (0s)."),Oi=l(),Zs=r("div"),m(ft.$$.fragment),mr=l(),fe=r("h2"),xe=r("a"),Xn=r("span"),m(gt.$$.fragment),Ri=l(),Yn=r("span"),Wi=n("SqueezeBertTokenizerFast"),fr=l(),Y=r("div"),m(_t.$$.fragment),Ki=l(),zt=r("p"),Hi=n("Constructs a \u201CFast\u201D SqueezeBert tokenizer (backed by HuggingFace\u2019s "),Zn=r("em"),Qi=n("tokenizers"),Ui=n(" library)."),Vi=l(),Ae=r("p"),en=r("a"),Gi=n("SqueezeBertTokenizerFast"),Ji=n(" is identical to "),tn=r("a"),Xi=n("BertTokenizerFast"),Yi=n(` and runs end-to-end tokenization: punctuation splitting + wordpiece.`),Zi=l(),bt=r("p"),el=n("Refer to superclass "),sn=r("a"),tl=n("BertTokenizerFast"),sl=n(" for usage examples and documentation concerning parameters."),gr=l(),ge=r("h2"),Le=r("a"),eo=r("span"),m(qt.$$.fragment),nl=l(),to=r("span"),ol=n("SqueezeBertModel"),_r=l(),S=r("div"),m(kt.$$.fragment),rl=l(),so=r("p"),al=n("The bare SqueezeBERT Model transformer outputting raw hidden-states without any specific head on top."),il=l(),vt=r("p"),ll=n("The SqueezeBERT model was proposed in "),yt=r("a"),dl=n(`SqueezeBERT: What can computer vision teach NLP about efficient neural networks?`),cl=n(` by Forrest N. Iandola, Albert E. Shaw, Ravi Krishna, and Kurt W. Keutzer`),pl=l(),wt=r("p"),hl=n("This model inherits from "),nn=r("a"),ul=n("PreTrainedModel"),ml=n(`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),fl=l(),Tt=r("p"),gl=n("This model is also a PyTorch "),St=r("a"),_l=n("torch.nn.Module"),zl=n(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),bl=l(),Bt=r("p"),ql=n(`For best results finetuning SqueezeBERT on text classification tasks, it is recommended to use the `),no=r("em"),kl=n("squeezebert/squeezebert-mnli-headless"),vl=n(" checkpoint as a starting point."),yl=l(),oo=r("p"),wl=n("Hierarchy:"),Tl=l(),m($t.$$.fragment),Sl=l(),ro=r("p"),Bl=n("Data layouts:"),$l=l(),m(Et.$$.fragment),El=l(),Q=r("div"),m(jt.$$.fragment),jl=l(),_e=r("p"),Ml=n("The "),on=r("a"),Fl=n("SqueezeBertModel"),Cl=n(" forward method, overrides the "),ao=r("code"),Pl=n("__call__"),xl=n(" special method."),Al=l(),m(Ne.$$.fragment),Ll=l(),io=r("p"),Nl=n("Example:"),Il=l(),m(Mt.$$.fragment),zr=l(),ze=r("h2"),Ie=r("a"),lo=r("span"),m(Ft.$$.fragment),Dl=l(),co=r("span"),Ol=n("SqueezeBertForMaskedLM"),br=l(),B=r("div"),m(Ct.$$.fragment),Rl=l(),Pt=r("p"),Wl=n("SqueezeBERT Model with a "),po=r("code"),Kl=n("language modeling"),Hl=n(" head on top."),Ql=l(),xt=r("p"),Ul=n("The SqueezeBERT model was proposed in "),At=r("a"),Vl=n(`SqueezeBERT: What can computer vision teach NLP about efficient neural networks?`),Gl=n(` by Forrest N. Iandola, Albert E. Shaw, Ravi Krishna, and Kurt W. Keutzer`),Jl=l(),Lt=r("p"),Xl=n("This model inherits from "),rn=r("a"),Yl=n("PreTrainedModel"),Zl=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),ed=l(),Nt=r("p"),td=n("This model is also a PyTorch "),It=r("a"),sd=n("torch.nn.Module"),nd=n(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),od=l(),Dt=r("p"),rd=n(`For best results finetuning SqueezeBERT on text classification tasks, it is recommended to use the `),ho=r("em"),ad=n("squeezebert/squeezebert-mnli-headless"),id=n(" checkpoint as a starting point."),ld=l(),uo=r("p"),dd=n("Hierarchy:"),cd=l(),m(Ot.$$.fragment),pd=l(),mo=r("p"),hd=n("Data layouts:"),ud=l(),m(Rt.$$.fragment),md=l(),U=r("div"),m(Wt.$$.fragment),fd=l(),be=r("p"),gd=n("The "),an=r("a"),_d=n("SqueezeBertForMaskedLM"),zd=n(" forward method, overrides the "),fo=r("code"),bd=n("__call__"),qd=n(" special method."),kd=l(),m(De.$$.fragment),vd=l(),go=r("p"),yd=n("Example:"),wd=l(),m(Kt.$$.fragment),qr=l(),qe=r("h2"),Oe=r("a"),_o=r("span"),m(Ht.$$.fragment),Td=l(),zo=r("span"),Sd=n("SqueezeBertForSequenceClassification"),kr=l(),$=r("div"),m(Qt.$$.fragment),Bd=l(),bo=r("p"),$d=n(`SqueezeBERT Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),Ed=l(),Ut=r("p"),jd=n("The SqueezeBERT model was proposed in "),Vt=r("a"),Md=n(`SqueezeBERT: What can computer vision teach NLP about efficient neural networks?`),Fd=n(` by Forrest N. Iandola, Albert E. Shaw, Ravi Krishna, and Kurt W. Keutzer`),Cd=l(),Gt=r("p"),Pd=n("This model inherits from "),ln=r("a"),xd=n("PreTrainedModel"),Ad=n(`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Ld=l(),Jt=r("p"),Nd=n("This model is also a PyTorch "),Xt=r("a"),Id=n("torch.nn.Module"),Dd=n(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Od=l(),Yt=r("p"),Rd=n(`For best results finetuning SqueezeBERT on text classification tasks, it is recommended to use the `),qo=r("em"),Wd=n("squeezebert/squeezebert-mnli-headless"),Kd=n(" checkpoint as a starting point."),Hd=l(),ko=r("p"),Qd=n("Hierarchy:"),Ud=l(),m(Zt.$$.fragment),Vd=l(),vo=r("p"),Gd=n("Data layouts:"),Jd=l(),m(es.$$.fragment),Xd=l(),R=r("div"),m(ts.$$.fragment),Yd=l(),ke=r("p"),Zd=n("The "),dn=r("a"),ec=n("SqueezeBertForSequenceClassification"),tc=n(" forward method, overrides the "),yo=r("code"),sc=n("__call__"),nc=n(" special method."),oc=l(),m(Re.$$.fragment),rc=l(),wo=r("p"),ac=n("Example of single-label classification:"),ic=l(),m(ss.$$.fragment),lc=l(),To=r("p"),dc=n("Example of multi-label classification:"),cc=l(),m(ns.$$.fragment),vr=l(),ve=r("h2"),We=r("a"),So=r("span"),m(os.$$.fragment),pc=l(),Bo=r("span"),hc=n("SqueezeBertForMultipleChoice"),yr=l(),E=r("div"),m(rs.$$.fragment),uc=l(),$o=r("p"),mc=n(`SqueezeBERT Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.`),fc=l(),as=r("p"),gc=n("The SqueezeBERT model was proposed in "),is=r("a"),_c=n(`SqueezeBERT: What can computer vision teach NLP about efficient neural networks?`),zc=n(` by Forrest N. Iandola, Albert E. Shaw, Ravi Krishna, and Kurt W. Keutzer`),bc=l(),ls=r("p"),qc=n("This model inherits from "),cn=r("a"),kc=n("PreTrainedModel"),vc=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),yc=l(),ds=r("p"),wc=n("This model is also a PyTorch "),cs=r("a"),Tc=n("torch.nn.Module"),Sc=n(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Bc=l(),ps=r("p"),$c=n(`For best results finetuning SqueezeBERT on text classification tasks, it is recommended to use the `),Eo=r("em"),Ec=n("squeezebert/squeezebert-mnli-headless"),jc=n(" checkpoint as a starting point."),Mc=l(),jo=r("p"),Fc=n("Hierarchy:"),Cc=l(),m(hs.$$.fragment),Pc=l(),Mo=r("p"),xc=n("Data layouts:"),Ac=l(),m(us.$$.fragment),Lc=l(),V=r("div"),m(ms.$$.fragment),Nc=l(),ye=r("p"),Ic=n("The "),pn=r("a"),Dc=n("SqueezeBertForMultipleChoice"),Oc=n(" forward method, overrides the "),Fo=r("code"),Rc=n("__call__"),Wc=n(" special method."),Kc=l(),m(Ke.$$.fragment),Hc=l(),Co=r("p"),Qc=n("Example:"),Uc=l(),m(fs.$$.fragment),wr=l(),we=r("h2"),He=r("a"),Po=r("span"),m(gs.$$.fragment),Vc=l(),xo=r("span"),Gc=n("SqueezeBertForTokenClassification"),Tr=l(),j=r("div"),m(_s.$$.fragment),Jc=l(),Ao=r("p"),Xc=n(`SqueezeBERT Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.`),Yc=l(),zs=r("p"),Zc=n("The SqueezeBERT model was proposed in "),bs=r("a"),ep=n(`SqueezeBERT: What can computer vision teach NLP about efficient neural networks?`),tp=n(` by Forrest N. Iandola, Albert E. Shaw, Ravi Krishna, and Kurt W. 
Keutzer`),sp=l(),qs=r("p"),np=n("This model inherits from "),hn=r("a"),op=n("PreTrainedModel"),rp=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),ap=l(),ks=r("p"),ip=n("This model is also a PyTorch "),vs=r("a"),lp=n("torch.nn.Module"),dp=n(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),cp=l(),ys=r("p"),pp=n(`For best results finetuning SqueezeBERT on text classification tasks, it is recommended to use the `),Lo=r("em"),hp=n("squeezebert/squeezebert-mnli-headless"),up=n(" checkpoint as a starting point."),mp=l(),No=r("p"),fp=n("Hierarchy:"),gp=l(),m(ws.$$.fragment),_p=l(),Io=r("p"),zp=n("Data layouts:"),bp=l(),m(Ts.$$.fragment),qp=l(),G=r("div"),m(Ss.$$.fragment),kp=l(),Te=r("p"),vp=n("The "),un=r("a"),yp=n("SqueezeBertForTokenClassification"),wp=n(" forward method, overrides the "),Do=r("code"),Tp=n("__call__"),Sp=n(" special method."),Bp=l(),m(Qe.$$.fragment),$p=l(),Oo=r("p"),Ep=n("Example:"),jp=l(),m(Bs.$$.fragment),Sr=l(),Se=r("h2"),Ue=r("a"),Ro=r("span"),m($s.$$.fragment),Mp=l(),Wo=r("span"),Fp=n("SqueezeBertForQuestionAnswering"),Br=l(),M=r("div"),m(Es.$$.fragment),Cp=l(),Be=r("p"),Pp=n(`SqueezeBERT Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),Ko=r("code"),xp=n("span start logits"),Ap=n(" and "),Ho=r("code"),Lp=n("span end logits"),Np=n(")."),Ip=l(),js=r("p"),Dp=n("The SqueezeBERT model was proposed in "),Ms=r("a"),Op=n(`SqueezeBERT: What can computer vision teach NLP about efficient neural networks?`),Rp=n(` by Forrest N. Iandola, Albert E. Shaw, Ravi Krishna, and Kurt W. Keutzer`),Wp=l(),Fs=r("p"),Kp=n("This model inherits from "),mn=r("a"),Hp=n("PreTrainedModel"),Qp=n(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Up=l(),Cs=r("p"),Vp=n("This model is also a PyTorch "),Ps=r("a"),Gp=n("torch.nn.Module"),Jp=n(` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Xp=l(),xs=r("p"),Yp=n(`For best results finetuning SqueezeBERT on text classification tasks, it is recommended to use the `),Qo=r("em"),Zp=n("squeezebert/squeezebert-mnli-headless"),eh=n(" checkpoint as a starting point."),th=l(),Uo=r("p"),sh=n("Hierarchy:"),nh=l(),m(As.$$.fragment),oh=l(),Vo=r("p"),rh=n("Data layouts:"),ah=l(),m(Ls.$$.fragment),ih=l(),J=r("div"),m(Ns.$$.fragment),lh=l(),$e=r("p"),dh=n("The "),fn=r("a"),ch=n("SqueezeBertForQuestionAnswering"),ph=n(" forward method, overrides the "),Go=r("code"),hh=n("__call__"),uh=n(" special method."),mh=l(),m(Ve.$$.fragment),fh=l(),Jo=r("p"),gh=n("Example:"),_h=l(),m(Is.$$.fragment),this.h()},l(t){const p=Im('[data-svelte="svelte-1phssyn"]',document.head);h=a(p,"META",{name:!0,content:!0}),p.forEach(s),w=d(t),q=a(t,"H1",{class:!0});var Ds=i(q);y=a(Ds,"A",{id:!0,class:!0,href:!0});var Xo=i(y);T=a(Xo,"SPAN",{});var Yo=i(T);f(v.$$.fragment,Yo),Yo.forEach(s),Xo.forEach(s),k=d(Ds),F=a(Ds,"SPAN",{});var Zo=i(F);za=o(Zo,"SqueezeBERT"),Zo.forEach(s),Ds.forEach(s),nr=d(t),ce=a(t,"H2",{class:!0});var Os=i(ce);Ee=a(Os,"A",{id:!0,class:!0,href:!0});var er=i(Ee);En=a(er,"SPAN",{});var qh=i(En);f(Xe.$$.fragment,qh),qh.forEach(s),er.forEach(s),ba=d(Os),jn=a(Os,"SPAN",{});var kh=i(jn);qa=o(kh,"Overview"),kh.forEach(s),Os.forEach(s),or=d(t),se=a(t,"P",{});var gn=i(se);ka=o(gn,"The SqueezeBERT model was proposed in "),Ye=a(gn,"A",{href:!0,rel:!0});var vh=i(Ye);va=o(vh,"SqueezeBERT: What can computer vision teach NLP about efficient neural networks?"),vh.forEach(s),ya=o(gn,` by Forrest N. Iandola, Albert E. Shaw, Ravi Krishna, Kurt W. Keutzer. It\u2019s a bidirectional transformer similar to the BERT model. The key difference between the BERT architecture and the SqueezeBERT architecture is that SqueezeBERT uses `),Ze=a(gn,"A",{href:!0,rel:!0});var yh=i(Ze);wa=o(yh,"grouped convolutions"),yh.forEach(s),Ta=o(gn,` instead of fully-connected layers for the Q, K, V and FFN layers.`),gn.forEach(s),rr=d(t),Rs=a(t,"P",{});var wh=i(Rs);Sa=o(wh,"The abstract from the paper is the following:"),wh.forEach(s),ar=d(t),Ws=a(t,"P",{});var Th=i(Ws);Mn=a(Th,"EM",{});var Sh=i(Mn);Ba=o(Sh,`Humans read and write hundreds of billions of messages every day. Further, due to the availability of large datasets, large computing systems, and better neural network models, natural language processing (NLP) technology has made significant strides in understanding, proofreading, and organizing these messages. Thus, there is a significant opportunity to deploy NLP in myriad applications to help web users, social networks, and businesses. In particular, we consider smartphones and other mobile devices as crucial platforms for deploying NLP models at scale. However, today\u2019s highly-accurate NLP neural network models such as BERT and RoBERTa are extremely computationally expensive, with BERT-base taking 1.7 seconds to classify a text snippet on a Pixel 3 smartphone. In this work, we observe that methods such as grouped convolutions have yielded significant speedups for computer vision networks, but many of these techniques have not been adopted by NLP neural network designers. 
We demonstrate how to replace several operations in self-attention layers with grouped convolutions, and we use this technique in a novel network architecture called SqueezeBERT, which runs 4.3x faster than BERT-base on the Pixel 3 while achieving competitive accuracy on the GLUE test set. The SqueezeBERT code will be released.`),Sh.forEach(s),Th.forEach(s),ir=d(t),Ks=a(t,"P",{});var Bh=i(Ks);$a=o(Bh,"Tips:"),Bh.forEach(s),lr=d(t),ne=a(t,"UL",{});var _n=i(ne);Fn=a(_n,"LI",{});var $h=i(Fn);Ea=o($h,`SqueezeBERT is a model with absolute position embeddings so it\u2019s usually advised to pad the inputs on the right rather than the left.`),$h.forEach(s),ja=d(_n),Cn=a(_n,"LI",{});var Eh=i(Cn);Ma=o(Eh,`SqueezeBERT is similar to BERT and therefore relies on the masked language modeling (MLM) objective. It is therefore efficient at predicting masked tokens and at NLU in general, but is not optimal for text generation. Models trained with a causal language modeling (CLM) objective are better in that regard.`),Eh.forEach(s),Fa=d(_n),et=a(_n,"LI",{});var Er=i(et);Ca=o(Er,`For best results when finetuning on sequence classification tasks, it is recommended to start with the `),Pn=a(Er,"EM",{});var jh=i(Pn);Pa=o(jh,"squeezebert/squeezebert-mnli-headless"),jh.forEach(s),xa=o(Er," checkpoint."),Er.forEach(s),_n.forEach(s),dr=d(t),je=a(t,"P",{});var jr=i(je);Aa=o(jr,"This model was contributed by "),tt=a(jr,"A",{href:!0,rel:!0});var Mh=i(tt);La=o(Mh,"forresti"),Mh.forEach(s),Na=o(jr,"."),jr.forEach(s),cr=d(t),pe=a(t,"H2",{class:!0});var Mr=i(pe);Me=a(Mr,"A",{id:!0,class:!0,href:!0});var Fh=i(Me);xn=a(Fh,"SPAN",{});var Ch=i(xn);f(st.$$.fragment,Ch),Ch.forEach(s),Fh.forEach(s),Ia=d(Mr),An=a(Mr,"SPAN",{});var Ph=i(An);Da=o(Ph,"SqueezeBertConfig"),Ph.forEach(s),Mr.forEach(s),pr=d(t),K=a(t,"DIV",{class:!0});var ee=i(K);f(nt.$$.fragment,ee),Oa=d(ee),ot=a(ee,"P",{});var Fr=i(ot);Ra=o(Fr,"This is the configuration class to store the configuration of a "),Hs=a(Fr,"A",{href:!0});var xh=i(Hs);Wa=o(xh,"SqueezeBertModel"),xh.forEach(s),Ka=o(Fr,`. It is used to instantiate a SqueezeBERT model according to the specified arguments, defining the model architecture.`),Fr.forEach(s),Ha=d(ee),he=a(ee,"P",{});var zn=i(he);Qa=o(zn,"Configuration objects inherit from "),Qs=a(zn,"A",{href:!0});var Ah=i(Qs);Ua=o(Ah,"PretrainedConfig"),Ah.forEach(s),Va=o(zn,` and can be used to control the model outputs. 
Read the documentation from `),Us=a(zn,"A",{href:!0});var Lh=i(Us);Ga=o(Lh,"PretrainedConfig"),Lh.forEach(s),Ja=o(zn," for more information."),zn.forEach(s),Xa=d(ee),Ln=a(ee,"P",{});var Nh=i(Ln);Ya=o(Nh,"Examples:"),Nh.forEach(s),Za=d(ee),f(rt.$$.fragment,ee),ei=d(ee),Nn=a(ee,"P",{});var Ih=i(Nn);ti=o(Ih,`Attributes: pretrained_config_archive_map (Dict[str, str]): A dictionary containing all the available pre-trained checkpoints.`),Ih.forEach(s),ee.forEach(s),hr=d(t),ue=a(t,"H2",{class:!0});var Cr=i(ue);Fe=a(Cr,"A",{id:!0,class:!0,href:!0});var Dh=i(Fe);In=a(Dh,"SPAN",{});var Oh=i(In);f(at.$$.fragment,Oh),Oh.forEach(s),Dh.forEach(s),si=d(Cr),Dn=a(Cr,"SPAN",{});var Rh=i(Dn);ni=o(Rh,"SqueezeBertTokenizer"),Rh.forEach(s),Cr.forEach(s),ur=d(t),C=a(t,"DIV",{class:!0});var W=i(C);f(it.$$.fragment,W),oi=d(W),On=a(W,"P",{});var Wh=i(On);ri=o(Wh,"Constructs a SqueezeBert tokenizer."),Wh.forEach(s),ai=d(W),Ce=a(W,"P",{});var tr=i(Ce);Vs=a(tr,"A",{href:!0});var Kh=i(Vs);ii=o(Kh,"SqueezeBertTokenizer"),Kh.forEach(s),li=o(tr," is identical to "),Gs=a(tr,"A",{href:!0});var Hh=i(Gs);di=o(Hh,"BertTokenizer"),Hh.forEach(s),ci=o(tr," and runs end-to-end tokenization: punctuation splitting"),tr.forEach(s),pi=d(W),Rn=a(W,"UL",{});var Qh=i(Rn);Wn=a(Qh,"LI",{});var Uh=i(Wn);hi=o(Uh,"wordpiece."),Uh.forEach(s),Qh.forEach(s),ui=d(W),lt=a(W,"P",{});var Pr=i(lt);mi=o(Pr,"Refer to superclass "),Js=a(Pr,"A",{href:!0});var Vh=i(Js);fi=o(Vh,"BertTokenizer"),Vh.forEach(s),gi=o(Pr," for usage examples and documentation concerning parameters."),Pr.forEach(s),_i=d(W),oe=a(W,"DIV",{class:!0});var bn=i(oe);f(dt.$$.fragment,bn),zi=d(bn),Kn=a(bn,"P",{});var Gh=i(Kn);bi=o(Gh,`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A BERT sequence has the following format:`),Gh.forEach(s),qi=d(bn),ct=a(bn,"UL",{});var xr=i(ct);Xs=a(xr,"LI",{});var zh=i(Xs);ki=o(zh,"single sequence: "),Hn=a(zh,"CODE",{});var Jh=i(Hn);vi=o(Jh,"[CLS] X [SEP]"),Jh.forEach(s),zh.forEach(s),yi=d(xr),Ys=a(xr,"LI",{});var bh=i(Ys);wi=o(bh,"pair of sequences: "),Qn=a(bh,"CODE",{});var Xh=i(Qn);Ti=o(Xh,"[CLS] A [SEP] B [SEP]"),Xh.forEach(s),bh.forEach(s),xr.forEach(s),bn.forEach(s),Si=d(W),Pe=a(W,"DIV",{class:!0});var Ar=i(Pe);f(pt.$$.fragment,Ar),Bi=d(Ar),ht=a(Ar,"P",{});var Lr=i(ht);$i=o(Lr,`Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `),Un=a(Lr,"CODE",{});var Yh=i(Un);Ei=o(Yh,"prepare_for_model"),Yh.forEach(s),ji=o(Lr," method."),Lr.forEach(s),Ar.forEach(s),Mi=d(W),Z=a(W,"DIV",{class:!0});var Ge=i(Z);f(ut.$$.fragment,Ge),Fi=d(Ge),Vn=a(Ge,"P",{});var Zh=i(Vn);Ci=o(Zh,`Create a mask from the two sequences passed to be used in a sequence-pair classification task. 
A BERT sequence pair mask has the following format:`),Zh.forEach(s),Pi=d(Ge),f(mt.$$.fragment,Ge),xi=d(Ge),me=a(Ge,"P",{});var qn=i(me);Ai=o(qn,"If "),Gn=a(qn,"CODE",{});var eu=i(Gn);Li=o(eu,"token_ids_1"),eu.forEach(s),Ni=o(qn," is "),Jn=a(qn,"CODE",{});var tu=i(Jn);Ii=o(tu,"None"),tu.forEach(s),Di=o(qn,", this method only returns the first portion of the mask (0s)."),qn.forEach(s),Ge.forEach(s),Oi=d(W),Zs=a(W,"DIV",{class:!0});var su=i(Zs);f(ft.$$.fragment,su),su.forEach(s),W.forEach(s),mr=d(t),fe=a(t,"H2",{class:!0});var Nr=i(fe);xe=a(Nr,"A",{id:!0,class:!0,href:!0});var nu=i(xe);Xn=a(nu,"SPAN",{});var ou=i(Xn);f(gt.$$.fragment,ou),ou.forEach(s),nu.forEach(s),Ri=d(Nr),Yn=a(Nr,"SPAN",{});var ru=i(Yn);Wi=o(ru,"SqueezeBertTokenizerFast"),ru.forEach(s),Nr.forEach(s),fr=d(t),Y=a(t,"DIV",{class:!0});var Je=i(Y);f(_t.$$.fragment,Je),Ki=d(Je),zt=a(Je,"P",{});var Ir=i(zt);Hi=o(Ir,"Constructs a \u201CFast\u201D SqueezeBert tokenizer (backed by HuggingFace\u2019s "),Zn=a(Ir,"EM",{});var au=i(Zn);Qi=o(au,"tokenizers"),au.forEach(s),Ui=o(Ir," library)."),Ir.forEach(s),Vi=d(Je),Ae=a(Je,"P",{});var sr=i(Ae);en=a(sr,"A",{href:!0});var iu=i(en);Gi=o(iu,"SqueezeBertTokenizerFast"),iu.forEach(s),Ji=o(sr," is identical to "),tn=a(sr,"A",{href:!0});var lu=i(tn);Xi=o(lu,"BertTokenizerFast"),lu.forEach(s),Yi=o(sr,` and runs end-to-end tokenization: punctuation splitting + wordpiece.`),sr.forEach(s),Zi=d(Je),bt=a(Je,"P",{});var Dr=i(bt);el=o(Dr,"Refer to superclass "),sn=a(Dr,"A",{href:!0});var du=i(sn);tl=o(du,"BertTokenizerFast"),du.forEach(s),sl=o(Dr," for usage examples and documentation concerning parameters."),Dr.forEach(s),Je.forEach(s),gr=d(t),ge=a(t,"H2",{class:!0});var Or=i(ge);Le=a(Or,"A",{id:!0,class:!0,href:!0});var cu=i(Le);eo=a(cu,"SPAN",{});var pu=i(eo);f(qt.$$.fragment,pu),pu.forEach(s),cu.forEach(s),nl=d(Or),to=a(Or,"SPAN",{});var hu=i(to);ol=o(hu,"SqueezeBertModel"),hu.forEach(s),Or.forEach(s),_r=d(t),S=a(t,"DIV",{class:!0});var P=i(S);f(kt.$$.fragment,P),rl=d(P),so=a(P,"P",{});var uu=i(so);al=o(uu,"The bare SqueezeBERT Model transformer outputting raw hidden-states without any specific head on top."),uu.forEach(s),il=d(P),vt=a(P,"P",{});var Rr=i(vt);ll=o(Rr,"The SqueezeBERT model was proposed in "),yt=a(Rr,"A",{href:!0,rel:!0});var mu=i(yt);dl=o(mu,`SqueezeBERT: What can computer vision teach NLP about efficient neural networks?`),mu.forEach(s),cl=o(Rr,` by Forrest N. Iandola, Albert E. Shaw, Ravi Krishna, and Kurt W. Keutzer`),Rr.forEach(s),pl=d(P),wt=a(P,"P",{});var Wr=i(wt);hl=o(Wr,"This model inherits from "),nn=a(Wr,"A",{href:!0});var fu=i(nn);ul=o(fu,"PreTrainedModel"),fu.forEach(s),ml=o(Wr,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Wr.forEach(s),fl=d(P),Tt=a(P,"P",{});var Kr=i(Tt);gl=o(Kr,"This model is also a PyTorch "),St=a(Kr,"A",{href:!0,rel:!0});var gu=i(St);_l=o(gu,"torch.nn.Module"),gu.forEach(s),zl=o(Kr,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Kr.forEach(s),bl=d(P),Bt=a(P,"P",{});var Hr=i(Bt);ql=o(Hr,`For best results finetuning SqueezeBERT on text classification tasks, it is recommended to use the `),no=a(Hr,"EM",{});var _u=i(no);kl=o(_u,"squeezebert/squeezebert-mnli-headless"),_u.forEach(s),vl=o(Hr," checkpoint as a starting point."),Hr.forEach(s),yl=d(P),oo=a(P,"P",{});var zu=i(oo);wl=o(zu,"Hierarchy:"),zu.forEach(s),Tl=d(P),f($t.$$.fragment,P),Sl=d(P),ro=a(P,"P",{});var bu=i(ro);Bl=o(bu,"Data layouts:"),bu.forEach(s),$l=d(P),f(Et.$$.fragment,P),El=d(P),Q=a(P,"DIV",{class:!0});var re=i(Q);f(jt.$$.fragment,re),jl=d(re),_e=a(re,"P",{});var kn=i(_e);Ml=o(kn,"The "),on=a(kn,"A",{href:!0});var qu=i(on);Fl=o(qu,"SqueezeBertModel"),qu.forEach(s),Cl=o(kn," forward method, overrides the "),ao=a(kn,"CODE",{});var ku=i(ao);Pl=o(ku,"__call__"),ku.forEach(s),xl=o(kn," special method."),kn.forEach(s),Al=d(re),f(Ne.$$.fragment,re),Ll=d(re),io=a(re,"P",{});var vu=i(io);Nl=o(vu,"Example:"),vu.forEach(s),Il=d(re),f(Mt.$$.fragment,re),re.forEach(s),P.forEach(s),zr=d(t),ze=a(t,"H2",{class:!0});var Qr=i(ze);Ie=a(Qr,"A",{id:!0,class:!0,href:!0});var yu=i(Ie);lo=a(yu,"SPAN",{});var wu=i(lo);f(Ft.$$.fragment,wu),wu.forEach(s),yu.forEach(s),Dl=d(Qr),co=a(Qr,"SPAN",{});var Tu=i(co);Ol=o(Tu,"SqueezeBertForMaskedLM"),Tu.forEach(s),Qr.forEach(s),br=d(t),B=a(t,"DIV",{class:!0});var x=i(B);f(Ct.$$.fragment,x),Rl=d(x),Pt=a(x,"P",{});var Ur=i(Pt);Wl=o(Ur,"SqueezeBERT Model with a "),po=a(Ur,"CODE",{});var Su=i(po);Kl=o(Su,"language modeling"),Su.forEach(s),Hl=o(Ur," head on top."),Ur.forEach(s),Ql=d(x),xt=a(x,"P",{});var Vr=i(xt);Ul=o(Vr,"The SqueezeBERT model was proposed in "),At=a(Vr,"A",{href:!0,rel:!0});var Bu=i(At);Vl=o(Bu,`SqueezeBERT: What can computer vision teach NLP about efficient neural networks?`),Bu.forEach(s),Gl=o(Vr,` by Forrest N. Iandola, Albert E. Shaw, Ravi Krishna, and Kurt W. Keutzer`),Vr.forEach(s),Jl=d(x),Lt=a(x,"P",{});var Gr=i(Lt);Xl=o(Gr,"This model inherits from "),rn=a(Gr,"A",{href:!0});var $u=i(rn);Yl=o($u,"PreTrainedModel"),$u.forEach(s),Zl=o(Gr,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Gr.forEach(s),ed=d(x),Nt=a(x,"P",{});var Jr=i(Nt);td=o(Jr,"This model is also a PyTorch "),It=a(Jr,"A",{href:!0,rel:!0});var Eu=i(It);sd=o(Eu,"torch.nn.Module"),Eu.forEach(s),nd=o(Jr,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Jr.forEach(s),od=d(x),Dt=a(x,"P",{});var Xr=i(Dt);rd=o(Xr,`For best results finetuning SqueezeBERT on text classification tasks, it is recommended to use the `),ho=a(Xr,"EM",{});var ju=i(ho);ad=o(ju,"squeezebert/squeezebert-mnli-headless"),ju.forEach(s),id=o(Xr," checkpoint as a starting point."),Xr.forEach(s),ld=d(x),uo=a(x,"P",{});var Mu=i(uo);dd=o(Mu,"Hierarchy:"),Mu.forEach(s),cd=d(x),f(Ot.$$.fragment,x),pd=d(x),mo=a(x,"P",{});var Fu=i(mo);hd=o(Fu,"Data layouts:"),Fu.forEach(s),ud=d(x),f(Rt.$$.fragment,x),md=d(x),U=a(x,"DIV",{class:!0});var ae=i(U);f(Wt.$$.fragment,ae),fd=d(ae),be=a(ae,"P",{});var vn=i(be);gd=o(vn,"The "),an=a(vn,"A",{href:!0});var Cu=i(an);_d=o(Cu,"SqueezeBertForMaskedLM"),Cu.forEach(s),zd=o(vn," forward method, overrides the "),fo=a(vn,"CODE",{});var Pu=i(fo);bd=o(Pu,"__call__"),Pu.forEach(s),qd=o(vn," special method."),vn.forEach(s),kd=d(ae),f(De.$$.fragment,ae),vd=d(ae),go=a(ae,"P",{});var xu=i(go);yd=o(xu,"Example:"),xu.forEach(s),wd=d(ae),f(Kt.$$.fragment,ae),ae.forEach(s),x.forEach(s),qr=d(t),qe=a(t,"H2",{class:!0});var Yr=i(qe);Oe=a(Yr,"A",{id:!0,class:!0,href:!0});var Au=i(Oe);_o=a(Au,"SPAN",{});var Lu=i(_o);f(Ht.$$.fragment,Lu),Lu.forEach(s),Au.forEach(s),Td=d(Yr),zo=a(Yr,"SPAN",{});var Nu=i(zo);Sd=o(Nu,"SqueezeBertForSequenceClassification"),Nu.forEach(s),Yr.forEach(s),kr=d(t),$=a(t,"DIV",{class:!0});var A=i($);f(Qt.$$.fragment,A),Bd=d(A),bo=a(A,"P",{});var Iu=i(bo);$d=o(Iu,`SqueezeBERT Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),Iu.forEach(s),Ed=d(A),Ut=a(A,"P",{});var Zr=i(Ut);jd=o(Zr,"The SqueezeBERT model was proposed in "),Vt=a(Zr,"A",{href:!0,rel:!0});var Du=i(Vt);Md=o(Du,`SqueezeBERT: What can computer vision teach NLP about efficient neural networks?`),Du.forEach(s),Fd=o(Zr,` by Forrest N. Iandola, Albert E. Shaw, Ravi Krishna, and Kurt W. Keutzer`),Zr.forEach(s),Cd=d(A),Gt=a(A,"P",{});var ea=i(Gt);Pd=o(ea,"This model inherits from "),ln=a(ea,"A",{href:!0});var Ou=i(ln);xd=o(Ou,"PreTrainedModel"),Ou.forEach(s),Ad=o(ea,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),ea.forEach(s),Ld=d(A),Jt=a(A,"P",{});var ta=i(Jt);Nd=o(ta,"This model is also a PyTorch "),Xt=a(ta,"A",{href:!0,rel:!0});var Ru=i(Xt);Id=o(Ru,"torch.nn.Module"),Ru.forEach(s),Dd=o(ta,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),ta.forEach(s),Od=d(A),Yt=a(A,"P",{});var sa=i(Yt);Rd=o(sa,`For best results finetuning SqueezeBERT on text classification tasks, it is recommended to use the `),qo=a(sa,"EM",{});var Wu=i(qo);Wd=o(Wu,"squeezebert/squeezebert-mnli-headless"),Wu.forEach(s),Kd=o(sa," checkpoint as a starting point."),sa.forEach(s),Hd=d(A),ko=a(A,"P",{});var Ku=i(ko);Qd=o(Ku,"Hierarchy:"),Ku.forEach(s),Ud=d(A),f(Zt.$$.fragment,A),Vd=d(A),vo=a(A,"P",{});var Hu=i(vo);Gd=o(Hu,"Data layouts:"),Hu.forEach(s),Jd=d(A),f(es.$$.fragment,A),Xd=d(A),R=a(A,"DIV",{class:!0});var X=i(R);f(ts.$$.fragment,X),Yd=d(X),ke=a(X,"P",{});var yn=i(ke);Zd=o(yn,"The "),dn=a(yn,"A",{href:!0});var Qu=i(dn);ec=o(Qu,"SqueezeBertForSequenceClassification"),Qu.forEach(s),tc=o(yn," forward method, overrides the "),yo=a(yn,"CODE",{});var Uu=i(yo);sc=o(Uu,"__call__"),Uu.forEach(s),nc=o(yn," special method."),yn.forEach(s),oc=d(X),f(Re.$$.fragment,X),rc=d(X),wo=a(X,"P",{});var Vu=i(wo);ac=o(Vu,"Example of single-label classification:"),Vu.forEach(s),ic=d(X),f(ss.$$.fragment,X),lc=d(X),To=a(X,"P",{});var Gu=i(To);dc=o(Gu,"Example of multi-label classification:"),Gu.forEach(s),cc=d(X),f(ns.$$.fragment,X),X.forEach(s),A.forEach(s),vr=d(t),ve=a(t,"H2",{class:!0});var na=i(ve);We=a(na,"A",{id:!0,class:!0,href:!0});var Ju=i(We);So=a(Ju,"SPAN",{});var Xu=i(So);f(os.$$.fragment,Xu),Xu.forEach(s),Ju.forEach(s),pc=d(na),Bo=a(na,"SPAN",{});var Yu=i(Bo);hc=o(Yu,"SqueezeBertForMultipleChoice"),Yu.forEach(s),na.forEach(s),yr=d(t),E=a(t,"DIV",{class:!0});var L=i(E);f(rs.$$.fragment,L),uc=d(L),$o=a(L,"P",{});var Zu=i($o);mc=o(Zu,`SqueezeBERT Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.`),Zu.forEach(s),fc=d(L),as=a(L,"P",{});var oa=i(as);gc=o(oa,"The SqueezeBERT model was proposed in "),is=a(oa,"A",{href:!0,rel:!0});var em=i(is);_c=o(em,`SqueezeBERT: What can computer vision teach NLP about efficient neural networks?`),em.forEach(s),zc=o(oa,` by Forrest N. Iandola, Albert E. Shaw, Ravi Krishna, and Kurt W. Keutzer`),oa.forEach(s),bc=d(L),ls=a(L,"P",{});var ra=i(ls);qc=o(ra,"This model inherits from "),cn=a(ra,"A",{href:!0});var tm=i(cn);kc=o(tm,"PreTrainedModel"),tm.forEach(s),vc=o(ra,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),ra.forEach(s),yc=d(L),ds=a(L,"P",{});var aa=i(ds);wc=o(aa,"This model is also a PyTorch "),cs=a(aa,"A",{href:!0,rel:!0});var sm=i(cs);Tc=o(sm,"torch.nn.Module"),sm.forEach(s),Sc=o(aa,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),aa.forEach(s),Bc=d(L),ps=a(L,"P",{});var ia=i(ps);$c=o(ia,`For best results finetuning SqueezeBERT on text classification tasks, it is recommended to use the `),Eo=a(ia,"EM",{});var nm=i(Eo);Ec=o(nm,"squeezebert/squeezebert-mnli-headless"),nm.forEach(s),jc=o(ia," checkpoint as a starting point."),ia.forEach(s),Mc=d(L),jo=a(L,"P",{});var om=i(jo);Fc=o(om,"Hierarchy:"),om.forEach(s),Cc=d(L),f(hs.$$.fragment,L),Pc=d(L),Mo=a(L,"P",{});var rm=i(Mo);xc=o(rm,"Data layouts:"),rm.forEach(s),Ac=d(L),f(us.$$.fragment,L),Lc=d(L),V=a(L,"DIV",{class:!0});var ie=i(V);f(ms.$$.fragment,ie),Nc=d(ie),ye=a(ie,"P",{});var wn=i(ye);Ic=o(wn,"The "),pn=a(wn,"A",{href:!0});var am=i(pn);Dc=o(am,"SqueezeBertForMultipleChoice"),am.forEach(s),Oc=o(wn," forward method, overrides the "),Fo=a(wn,"CODE",{});var im=i(Fo);Rc=o(im,"__call__"),im.forEach(s),Wc=o(wn," special method."),wn.forEach(s),Kc=d(ie),f(Ke.$$.fragment,ie),Hc=d(ie),Co=a(ie,"P",{});var lm=i(Co);Qc=o(lm,"Example:"),lm.forEach(s),Uc=d(ie),f(fs.$$.fragment,ie),ie.forEach(s),L.forEach(s),wr=d(t),we=a(t,"H2",{class:!0});var la=i(we);He=a(la,"A",{id:!0,class:!0,href:!0});var dm=i(He);Po=a(dm,"SPAN",{});var cm=i(Po);f(gs.$$.fragment,cm),cm.forEach(s),dm.forEach(s),Vc=d(la),xo=a(la,"SPAN",{});var pm=i(xo);Gc=o(pm,"SqueezeBertForTokenClassification"),pm.forEach(s),la.forEach(s),Tr=d(t),j=a(t,"DIV",{class:!0});var N=i(j);f(_s.$$.fragment,N),Jc=d(N),Ao=a(N,"P",{});var hm=i(Ao);Xc=o(hm,`SqueezeBERT Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.`),hm.forEach(s),Yc=d(N),zs=a(N,"P",{});var da=i(zs);Zc=o(da,"The SqueezeBERT model was proposed in "),bs=a(da,"A",{href:!0,rel:!0});var um=i(bs);ep=o(um,`SqueezeBERT: What can computer vision teach NLP about efficient neural networks?`),um.forEach(s),tp=o(da,` by Forrest N. Iandola, Albert E. Shaw, Ravi Krishna, and Kurt W. Keutzer`),da.forEach(s),sp=d(N),qs=a(N,"P",{});var ca=i(qs);np=o(ca,"This model inherits from "),hn=a(ca,"A",{href:!0});var mm=i(hn);op=o(mm,"PreTrainedModel"),mm.forEach(s),rp=o(ca,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),ca.forEach(s),ap=d(N),ks=a(N,"P",{});var pa=i(ks);ip=o(pa,"This model is also a PyTorch "),vs=a(pa,"A",{href:!0,rel:!0});var fm=i(vs);lp=o(fm,"torch.nn.Module"),fm.forEach(s),dp=o(pa,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),pa.forEach(s),cp=d(N),ys=a(N,"P",{});var ha=i(ys);pp=o(ha,`For best results finetuning SqueezeBERT on text classification tasks, it is recommended to use the `),Lo=a(ha,"EM",{});var gm=i(Lo);hp=o(gm,"squeezebert/squeezebert-mnli-headless"),gm.forEach(s),up=o(ha," checkpoint as a starting point."),ha.forEach(s),mp=d(N),No=a(N,"P",{});var _m=i(No);fp=o(_m,"Hierarchy:"),_m.forEach(s),gp=d(N),f(ws.$$.fragment,N),_p=d(N),Io=a(N,"P",{});var zm=i(Io);zp=o(zm,"Data layouts:"),zm.forEach(s),bp=d(N),f(Ts.$$.fragment,N),qp=d(N),G=a(N,"DIV",{class:!0});var le=i(G);f(Ss.$$.fragment,le),kp=d(le),Te=a(le,"P",{});var Tn=i(Te);vp=o(Tn,"The "),un=a(Tn,"A",{href:!0});var bm=i(un);yp=o(bm,"SqueezeBertForTokenClassification"),bm.forEach(s),wp=o(Tn," forward method, overrides the "),Do=a(Tn,"CODE",{});var qm=i(Do);Tp=o(qm,"__call__"),qm.forEach(s),Sp=o(Tn," special method."),Tn.forEach(s),Bp=d(le),f(Qe.$$.fragment,le),$p=d(le),Oo=a(le,"P",{});var km=i(Oo);Ep=o(km,"Example:"),km.forEach(s),jp=d(le),f(Bs.$$.fragment,le),le.forEach(s),N.forEach(s),Sr=d(t),Se=a(t,"H2",{class:!0});var ua=i(Se);Ue=a(ua,"A",{id:!0,class:!0,href:!0});var vm=i(Ue);Ro=a(vm,"SPAN",{});var ym=i(Ro);f($s.$$.fragment,ym),ym.forEach(s),vm.forEach(s),Mp=d(ua),Wo=a(ua,"SPAN",{});var wm=i(Wo);Fp=o(wm,"SqueezeBertForQuestionAnswering"),wm.forEach(s),ua.forEach(s),Br=d(t),M=a(t,"DIV",{class:!0});var I=i(M);f(Es.$$.fragment,I),Cp=d(I),Be=a(I,"P",{});var Sn=i(Be);Pp=o(Sn,`SqueezeBERT Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),Ko=a(Sn,"CODE",{});var Tm=i(Ko);xp=o(Tm,"span start logits"),Tm.forEach(s),Ap=o(Sn," and "),Ho=a(Sn,"CODE",{});var Sm=i(Ho);Lp=o(Sm,"span end logits"),Sm.forEach(s),Np=o(Sn,")."),Sn.forEach(s),Ip=d(I),js=a(I,"P",{});var ma=i(js);Dp=o(ma,"The SqueezeBERT model was proposed in "),Ms=a(ma,"A",{href:!0,rel:!0});var Bm=i(Ms);Op=o(Bm,`SqueezeBERT: What can computer vision teach NLP about efficient neural networks?`),Bm.forEach(s),Rp=o(ma,` by Forrest N. Iandola, Albert E. Shaw, Ravi Krishna, and Kurt W. Keutzer`),ma.forEach(s),Wp=d(I),Fs=a(I,"P",{});var fa=i(Fs);Kp=o(fa,"This model inherits from "),mn=a(fa,"A",{href:!0});var $m=i(mn);Hp=o($m,"PreTrainedModel"),$m.forEach(s),Qp=o(fa,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),fa.forEach(s),Up=d(I),Cs=a(I,"P",{});var ga=i(Cs);Vp=o(ga,"This model is also a PyTorch "),Ps=a(ga,"A",{href:!0,rel:!0});var Em=i(Ps);Gp=o(Em,"torch.nn.Module"),Em.forEach(s),Jp=o(ga,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),ga.forEach(s),Xp=d(I),xs=a(I,"P",{});var _a=i(xs);Yp=o(_a,`For best results finetuning SqueezeBERT on text classification tasks, it is recommended to use the `),Qo=a(_a,"EM",{});var jm=i(Qo);Zp=o(jm,"squeezebert/squeezebert-mnli-headless"),jm.forEach(s),eh=o(_a," checkpoint as a starting point."),_a.forEach(s),th=d(I),Uo=a(I,"P",{});var Mm=i(Uo);sh=o(Mm,"Hierarchy:"),Mm.forEach(s),nh=d(I),f(As.$$.fragment,I),oh=d(I),Vo=a(I,"P",{});var Fm=i(Vo);rh=o(Fm,"Data layouts:"),Fm.forEach(s),ah=d(I),f(Ls.$$.fragment,I),ih=d(I),J=a(I,"DIV",{class:!0});var de=i(J);f(Ns.$$.fragment,de),lh=d(de),$e=a(de,"P",{});var Bn=i($e);dh=o(Bn,"The "),fn=a(Bn,"A",{href:!0});var Cm=i(fn);ch=o(Cm,"SqueezeBertForQuestionAnswering"),Cm.forEach(s),ph=o(Bn," forward method, overrides the "),Go=a(Bn,"CODE",{});var Pm=i(Go);hh=o(Pm,"__call__"),Pm.forEach(s),uh=o(Bn," special method."),Bn.forEach(s),mh=d(de),f(Ve.$$.fragment,de),fh=d(de),Jo=a(de,"P",{});var xm=i(Jo);gh=o(xm,"Example:"),xm.forEach(s),_h=d(de),f(Is.$$.fragment,de),de.forEach(s),I.forEach(s),this.h()},h(){c(h,"name","hf:doc:metadata"),c(h,"content",JSON.stringify(Um)),c(y,"id","squeezebert"),c(y,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(y,"href","#squeezebert"),c(q,"class","relative group"),c(Ee,"id","overview"),c(Ee,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Ee,"href","#overview"),c(ce,"class","relative group"),c(Ye,"href","https://arxiv.org/abs/2006.11316"),c(Ye,"rel","nofollow"),c(Ze,"href","https://blog.yani.io/filter-group-tutorial"),c(Ze,"rel","nofollow"),c(tt,"href","https://huggingface.co/forresti"),c(tt,"rel","nofollow"),c(Me,"id","transformers.SqueezeBertConfig"),c(Me,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Me,"href","#transformers.SqueezeBertConfig"),c(pe,"class","relative group"),c(Hs,"href","/docs/transformers/pr_16143/en/model_doc/squeezebert#transformers.SqueezeBertModel"),c(Qs,"href","/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig"),c(Us,"href","/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig"),c(K,"class","docstring"),c(Fe,"id","transformers.SqueezeBertTokenizer"),c(Fe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Fe,"href","#transformers.SqueezeBertTokenizer"),c(ue,"class","relative group"),c(Vs,"href","/docs/transformers/pr_16143/en/model_doc/squeezebert#transformers.SqueezeBertTokenizer"),c(Gs,"href","/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertTokenizer"),c(Js,"href","/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertTokenizer"),c(oe,"class","docstring"),c(Pe,"class","docstring"),c(Z,"class","docstring"),c(Zs,"class","docstring"),c(C,"class","docstring"),c(xe,"id","transformers.SqueezeBertTokenizerFast"),c(xe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full"),c(xe,"href","#transformers.SqueezeBertTokenizerFast"),c(fe,"class","relative group"),c(en,"href","/docs/transformers/pr_16143/en/model_doc/squeezebert#transformers.SqueezeBertTokenizerFast"),c(tn,"href","/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertTokenizerFast"),c(sn,"href","/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertTokenizerFast"),c(Y,"class","docstring"),c(Le,"id","transformers.SqueezeBertModel"),c(Le,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Le,"href","#transformers.SqueezeBertModel"),c(ge,"class","relative group"),c(yt,"href","https://arxiv.org/abs/2006.11316"),c(yt,"rel","nofollow"),c(nn,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel"),c(St,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(St,"rel","nofollow"),c(on,"href","/docs/transformers/pr_16143/en/model_doc/squeezebert#transformers.SqueezeBertModel"),c(Q,"class","docstring"),c(S,"class","docstring"),c(Ie,"id","transformers.SqueezeBertForMaskedLM"),c(Ie,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Ie,"href","#transformers.SqueezeBertForMaskedLM"),c(ze,"class","relative group"),c(At,"href","https://arxiv.org/abs/2006.11316"),c(At,"rel","nofollow"),c(rn,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel"),c(It,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(It,"rel","nofollow"),c(an,"href","/docs/transformers/pr_16143/en/model_doc/squeezebert#transformers.SqueezeBertForMaskedLM"),c(U,"class","docstring"),c(B,"class","docstring"),c(Oe,"id","transformers.SqueezeBertForSequenceClassification"),c(Oe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Oe,"href","#transformers.SqueezeBertForSequenceClassification"),c(qe,"class","relative group"),c(Vt,"href","https://arxiv.org/abs/2006.11316"),c(Vt,"rel","nofollow"),c(ln,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel"),c(Xt,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(Xt,"rel","nofollow"),c(dn,"href","/docs/transformers/pr_16143/en/model_doc/squeezebert#transformers.SqueezeBertForSequenceClassification"),c(R,"class","docstring"),c($,"class","docstring"),c(We,"id","transformers.SqueezeBertForMultipleChoice"),c(We,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(We,"href","#transformers.SqueezeBertForMultipleChoice"),c(ve,"class","relative group"),c(is,"href","https://arxiv.org/abs/2006.11316"),c(is,"rel","nofollow"),c(cn,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel"),c(cs,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(cs,"rel","nofollow"),c(pn,"href","/docs/transformers/pr_16143/en/model_doc/squeezebert#transformers.SqueezeBertForMultipleChoice"),c(V,"class","docstring"),c(E,"class","docstring"),c(He,"id","transformers.SqueezeBertForTokenClassification"),c(He,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 
with-hover:group-hover:opacity-100 with-hover:right-full"),c(He,"href","#transformers.SqueezeBertForTokenClassification"),c(we,"class","relative group"),c(bs,"href","https://arxiv.org/abs/2006.11316"),c(bs,"rel","nofollow"),c(hn,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel"),c(vs,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(vs,"rel","nofollow"),c(un,"href","/docs/transformers/pr_16143/en/model_doc/squeezebert#transformers.SqueezeBertForTokenClassification"),c(G,"class","docstring"),c(j,"class","docstring"),c(Ue,"id","transformers.SqueezeBertForQuestionAnswering"),c(Ue,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Ue,"href","#transformers.SqueezeBertForQuestionAnswering"),c(Se,"class","relative group"),c(Ms,"href","https://arxiv.org/abs/2006.11316"),c(Ms,"rel","nofollow"),c(mn,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel"),c(Ps,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(Ps,"rel","nofollow"),c(fn,"href","/docs/transformers/pr_16143/en/model_doc/squeezebert#transformers.SqueezeBertForQuestionAnswering"),c(J,"class","docstring"),c(M,"class","docstring")},m(t,p){e(document.head,h),u(t,w,p),u(t,q,p),e(q,y),e(y,T),g(v,T,null),e(q,k),e(q,F),e(F,za),u(t,nr,p),u(t,ce,p),e(ce,Ee),e(Ee,En),g(Xe,En,null),e(ce,ba),e(ce,jn),e(jn,qa),u(t,or,p),u(t,se,p),e(se,ka),e(se,Ye),e(Ye,va),e(se,ya),e(se,Ze),e(Ze,wa),e(se,Ta),u(t,rr,p),u(t,Rs,p),e(Rs,Sa),u(t,ar,p),u(t,Ws,p),e(Ws,Mn),e(Mn,Ba),u(t,ir,p),u(t,Ks,p),e(Ks,$a),u(t,lr,p),u(t,ne,p),e(ne,Fn),e(Fn,Ea),e(ne,ja),e(ne,Cn),e(Cn,Ma),e(ne,Fa),e(ne,et),e(et,Ca),e(et,Pn),e(Pn,Pa),e(et,xa),u(t,dr,p),u(t,je,p),e(je,Aa),e(je,tt),e(tt,La),e(je,Na),u(t,cr,p),u(t,pe,p),e(pe,Me),e(Me,xn),g(st,xn,null),e(pe,Ia),e(pe,An),e(An,Da),u(t,pr,p),u(t,K,p),g(nt,K,null),e(K,Oa),e(K,ot),e(ot,Ra),e(ot,Hs),e(Hs,Wa),e(ot,Ka),e(K,Ha),e(K,he),e(he,Qa),e(he,Qs),e(Qs,Ua),e(he,Va),e(he,Us),e(Us,Ga),e(he,Ja),e(K,Xa),e(K,Ln),e(Ln,Ya),e(K,Za),g(rt,K,null),e(K,ei),e(K,Nn),e(Nn,ti),u(t,hr,p),u(t,ue,p),e(ue,Fe),e(Fe,In),g(at,In,null),e(ue,si),e(ue,Dn),e(Dn,ni),u(t,ur,p),u(t,C,p),g(it,C,null),e(C,oi),e(C,On),e(On,ri),e(C,ai),e(C,Ce),e(Ce,Vs),e(Vs,ii),e(Ce,li),e(Ce,Gs),e(Gs,di),e(Ce,ci),e(C,pi),e(C,Rn),e(Rn,Wn),e(Wn,hi),e(C,ui),e(C,lt),e(lt,mi),e(lt,Js),e(Js,fi),e(lt,gi),e(C,_i),e(C,oe),g(dt,oe,null),e(oe,zi),e(oe,Kn),e(Kn,bi),e(oe,qi),e(oe,ct),e(ct,Xs),e(Xs,ki),e(Xs,Hn),e(Hn,vi),e(ct,yi),e(ct,Ys),e(Ys,wi),e(Ys,Qn),e(Qn,Ti),e(C,Si),e(C,Pe),g(pt,Pe,null),e(Pe,Bi),e(Pe,ht),e(ht,$i),e(ht,Un),e(Un,Ei),e(ht,ji),e(C,Mi),e(C,Z),g(ut,Z,null),e(Z,Fi),e(Z,Vn),e(Vn,Ci),e(Z,Pi),g(mt,Z,null),e(Z,xi),e(Z,me),e(me,Ai),e(me,Gn),e(Gn,Li),e(me,Ni),e(me,Jn),e(Jn,Ii),e(me,Di),e(C,Oi),e(C,Zs),g(ft,Zs,null),u(t,mr,p),u(t,fe,p),e(fe,xe),e(xe,Xn),g(gt,Xn,null),e(fe,Ri),e(fe,Yn),e(Yn,Wi),u(t,fr,p),u(t,Y,p),g(_t,Y,null),e(Y,Ki),e(Y,zt),e(zt,Hi),e(zt,Zn),e(Zn,Qi),e(zt,Ui),e(Y,Vi),e(Y,Ae),e(Ae,en),e(en,Gi),e(Ae,Ji),e(Ae,tn),e(tn,Xi),e(Ae,Yi),e(Y,Zi),e(Y,bt),e(bt,el),e(bt,sn),e(sn,tl),e(bt,sl),u(t,gr,p),u(t,ge,p),e(ge,Le),e(Le,eo),g(qt,eo,null),e(ge,nl),e(ge,to),e(to,ol),u(t,_r,p),u(t,S,p),g(kt,S,null),e(S,rl),e(S,so),e(so,al),e(S,il),e(S,vt),e(vt,ll),e(vt,yt),e(yt,dl),e(vt,cl),e(S,pl),e(S,wt),e(wt,hl),e(wt,nn),e(nn,ul),e(wt,ml),e(S,fl),e(S,Tt),e(Tt,gl),e(Tt,St),e(St,_l),e(Tt,zl),e(S,bl),e(S,Bt),e(Bt,ql),e(Bt,no),e(no,kl),e(Bt,vl),e(S,yl),e(S,oo),e(oo,wl),e(S,Tl),g($t,S,nul
l),e(S,Sl),e(S,ro),e(ro,Bl),e(S,$l),g(Et,S,null),e(S,El),e(S,Q),g(jt,Q,null),e(Q,jl),e(Q,_e),e(_e,Ml),e(_e,on),e(on,Fl),e(_e,Cl),e(_e,ao),e(ao,Pl),e(_e,xl),e(Q,Al),g(Ne,Q,null),e(Q,Ll),e(Q,io),e(io,Nl),e(Q,Il),g(Mt,Q,null),u(t,zr,p),u(t,ze,p),e(ze,Ie),e(Ie,lo),g(Ft,lo,null),e(ze,Dl),e(ze,co),e(co,Ol),u(t,br,p),u(t,B,p),g(Ct,B,null),e(B,Rl),e(B,Pt),e(Pt,Wl),e(Pt,po),e(po,Kl),e(Pt,Hl),e(B,Ql),e(B,xt),e(xt,Ul),e(xt,At),e(At,Vl),e(xt,Gl),e(B,Jl),e(B,Lt),e(Lt,Xl),e(Lt,rn),e(rn,Yl),e(Lt,Zl),e(B,ed),e(B,Nt),e(Nt,td),e(Nt,It),e(It,sd),e(Nt,nd),e(B,od),e(B,Dt),e(Dt,rd),e(Dt,ho),e(ho,ad),e(Dt,id),e(B,ld),e(B,uo),e(uo,dd),e(B,cd),g(Ot,B,null),e(B,pd),e(B,mo),e(mo,hd),e(B,ud),g(Rt,B,null),e(B,md),e(B,U),g(Wt,U,null),e(U,fd),e(U,be),e(be,gd),e(be,an),e(an,_d),e(be,zd),e(be,fo),e(fo,bd),e(be,qd),e(U,kd),g(De,U,null),e(U,vd),e(U,go),e(go,yd),e(U,wd),g(Kt,U,null),u(t,qr,p),u(t,qe,p),e(qe,Oe),e(Oe,_o),g(Ht,_o,null),e(qe,Td),e(qe,zo),e(zo,Sd),u(t,kr,p),u(t,$,p),g(Qt,$,null),e($,Bd),e($,bo),e(bo,$d),e($,Ed),e($,Ut),e(Ut,jd),e(Ut,Vt),e(Vt,Md),e(Ut,Fd),e($,Cd),e($,Gt),e(Gt,Pd),e(Gt,ln),e(ln,xd),e(Gt,Ad),e($,Ld),e($,Jt),e(Jt,Nd),e(Jt,Xt),e(Xt,Id),e(Jt,Dd),e($,Od),e($,Yt),e(Yt,Rd),e(Yt,qo),e(qo,Wd),e(Yt,Kd),e($,Hd),e($,ko),e(ko,Qd),e($,Ud),g(Zt,$,null),e($,Vd),e($,vo),e(vo,Gd),e($,Jd),g(es,$,null),e($,Xd),e($,R),g(ts,R,null),e(R,Yd),e(R,ke),e(ke,Zd),e(ke,dn),e(dn,ec),e(ke,tc),e(ke,yo),e(yo,sc),e(ke,nc),e(R,oc),g(Re,R,null),e(R,rc),e(R,wo),e(wo,ac),e(R,ic),g(ss,R,null),e(R,lc),e(R,To),e(To,dc),e(R,cc),g(ns,R,null),u(t,vr,p),u(t,ve,p),e(ve,We),e(We,So),g(os,So,null),e(ve,pc),e(ve,Bo),e(Bo,hc),u(t,yr,p),u(t,E,p),g(rs,E,null),e(E,uc),e(E,$o),e($o,mc),e(E,fc),e(E,as),e(as,gc),e(as,is),e(is,_c),e(as,zc),e(E,bc),e(E,ls),e(ls,qc),e(ls,cn),e(cn,kc),e(ls,vc),e(E,yc),e(E,ds),e(ds,wc),e(ds,cs),e(cs,Tc),e(ds,Sc),e(E,Bc),e(E,ps),e(ps,$c),e(ps,Eo),e(Eo,Ec),e(ps,jc),e(E,Mc),e(E,jo),e(jo,Fc),e(E,Cc),g(hs,E,null),e(E,Pc),e(E,Mo),e(Mo,xc),e(E,Ac),g(us,E,null),e(E,Lc),e(E,V),g(ms,V,null),e(V,Nc),e(V,ye),e(ye,Ic),e(ye,pn),e(pn,Dc),e(ye,Oc),e(ye,Fo),e(Fo,Rc),e(ye,Wc),e(V,Kc),g(Ke,V,null),e(V,Hc),e(V,Co),e(Co,Qc),e(V,Uc),g(fs,V,null),u(t,wr,p),u(t,we,p),e(we,He),e(He,Po),g(gs,Po,null),e(we,Vc),e(we,xo),e(xo,Gc),u(t,Tr,p),u(t,j,p),g(_s,j,null),e(j,Jc),e(j,Ao),e(Ao,Xc),e(j,Yc),e(j,zs),e(zs,Zc),e(zs,bs),e(bs,ep),e(zs,tp),e(j,sp),e(j,qs),e(qs,np),e(qs,hn),e(hn,op),e(qs,rp),e(j,ap),e(j,ks),e(ks,ip),e(ks,vs),e(vs,lp),e(ks,dp),e(j,cp),e(j,ys),e(ys,pp),e(ys,Lo),e(Lo,hp),e(ys,up),e(j,mp),e(j,No),e(No,fp),e(j,gp),g(ws,j,null),e(j,_p),e(j,Io),e(Io,zp),e(j,bp),g(Ts,j,null),e(j,qp),e(j,G),g(Ss,G,null),e(G,kp),e(G,Te),e(Te,vp),e(Te,un),e(un,yp),e(Te,wp),e(Te,Do),e(Do,Tp),e(Te,Sp),e(G,Bp),g(Qe,G,null),e(G,$p),e(G,Oo),e(Oo,Ep),e(G,jp),g(Bs,G,null),u(t,Sr,p),u(t,Se,p),e(Se,Ue),e(Ue,Ro),g($s,Ro,null),e(Se,Mp),e(Se,Wo),e(Wo,Fp),u(t,Br,p),u(t,M,p),g(Es,M,null),e(M,Cp),e(M,Be),e(Be,Pp),e(Be,Ko),e(Ko,xp),e(Be,Ap),e(Be,Ho),e(Ho,Lp),e(Be,Np),e(M,Ip),e(M,js),e(js,Dp),e(js,Ms),e(Ms,Op),e(js,Rp),e(M,Wp),e(M,Fs),e(Fs,Kp),e(Fs,mn),e(mn,Hp),e(Fs,Qp),e(M,Up),e(M,Cs),e(Cs,Vp),e(Cs,Ps),e(Ps,Gp),e(Cs,Jp),e(M,Xp),e(M,xs),e(xs,Yp),e(xs,Qo),e(Qo,Zp),e(xs,eh),e(M,th),e(M,Uo),e(Uo,sh),e(M,nh),g(As,M,null),e(M,oh),e(M,Vo),e(Vo,rh),e(M,ah),g(Ls,M,null),e(M,ih),e(M,J),g(Ns,J,null),e(J,lh),e(J,$e),e($e,dh),e($e,fn),e(fn,ch),e($e,ph),e($e,Go),e(Go,hh),e($e,uh),e(J,mh),g(Ve,J,null),e(J,fh),e(J,Jo),e(Jo,gh),e(J,_h),g(Is,J,null),$r=!0},p(t,[p]){const Ds={};p&2&&(Ds.$$scope={dirty:p,ctx:t}),Ne.$set(Ds);const Xo={};p&2&&(Xo.$$scope={dirty:p,ctx:t}),De.$set(Xo);const 
Yo={};p&2&&(Yo.$$scope={dirty:p,ctx:t}),Re.$set(Yo);const Zo={};p&2&&(Zo.$$scope={dirty:p,ctx:t}),Ke.$set(Zo);const Os={};p&2&&(Os.$$scope={dirty:p,ctx:t}),Qe.$set(Os);const er={};p&2&&(er.$$scope={dirty:p,ctx:t}),Ve.$set(er)},i(t){$r||(_(v.$$.fragment,t),_(Xe.$$.fragment,t),_(st.$$.fragment,t),_(nt.$$.fragment,t),_(rt.$$.fragment,t),_(at.$$.fragment,t),_(it.$$.fragment,t),_(dt.$$.fragment,t),_(pt.$$.fragment,t),_(ut.$$.fragment,t),_(mt.$$.fragment,t),_(ft.$$.fragment,t),_(gt.$$.fragment,t),_(_t.$$.fragment,t),_(qt.$$.fragment,t),_(kt.$$.fragment,t),_($t.$$.fragment,t),_(Et.$$.fragment,t),_(jt.$$.fragment,t),_(Ne.$$.fragment,t),_(Mt.$$.fragment,t),_(Ft.$$.fragment,t),_(Ct.$$.fragment,t),_(Ot.$$.fragment,t),_(Rt.$$.fragment,t),_(Wt.$$.fragment,t),_(De.$$.fragment,t),_(Kt.$$.fragment,t),_(Ht.$$.fragment,t),_(Qt.$$.fragment,t),_(Zt.$$.fragment,t),_(es.$$.fragment,t),_(ts.$$.fragment,t),_(Re.$$.fragment,t),_(ss.$$.fragment,t),_(ns.$$.fragment,t),_(os.$$.fragment,t),_(rs.$$.fragment,t),_(hs.$$.fragment,t),_(us.$$.fragment,t),_(ms.$$.fragment,t),_(Ke.$$.fragment,t),_(fs.$$.fragment,t),_(gs.$$.fragment,t),_(_s.$$.fragment,t),_(ws.$$.fragment,t),_(Ts.$$.fragment,t),_(Ss.$$.fragment,t),_(Qe.$$.fragment,t),_(Bs.$$.fragment,t),_($s.$$.fragment,t),_(Es.$$.fragment,t),_(As.$$.fragment,t),_(Ls.$$.fragment,t),_(Ns.$$.fragment,t),_(Ve.$$.fragment,t),_(Is.$$.fragment,t),$r=!0)},o(t){z(v.$$.fragment,t),z(Xe.$$.fragment,t),z(st.$$.fragment,t),z(nt.$$.fragment,t),z(rt.$$.fragment,t),z(at.$$.fragment,t),z(it.$$.fragment,t),z(dt.$$.fragment,t),z(pt.$$.fragment,t),z(ut.$$.fragment,t),z(mt.$$.fragment,t),z(ft.$$.fragment,t),z(gt.$$.fragment,t),z(_t.$$.fragment,t),z(qt.$$.fragment,t),z(kt.$$.fragment,t),z($t.$$.fragment,t),z(Et.$$.fragment,t),z(jt.$$.fragment,t),z(Ne.$$.fragment,t),z(Mt.$$.fragment,t),z(Ft.$$.fragment,t),z(Ct.$$.fragment,t),z(Ot.$$.fragment,t),z(Rt.$$.fragment,t),z(Wt.$$.fragment,t),z(De.$$.fragment,t),z(Kt.$$.fragment,t),z(Ht.$$.fragment,t),z(Qt.$$.fragment,t),z(Zt.$$.fragment,t),z(es.$$.fragment,t),z(ts.$$.fragment,t),z(Re.$$.fragment,t),z(ss.$$.fragment,t),z(ns.$$.fragment,t),z(os.$$.fragment,t),z(rs.$$.fragment,t),z(hs.$$.fragment,t),z(us.$$.fragment,t),z(ms.$$.fragment,t),z(Ke.$$.fragment,t),z(fs.$$.fragment,t),z(gs.$$.fragment,t),z(_s.$$.fragment,t),z(ws.$$.fragment,t),z(Ts.$$.fragment,t),z(Ss.$$.fragment,t),z(Qe.$$.fragment,t),z(Bs.$$.fragment,t),z($s.$$.fragment,t),z(Es.$$.fragment,t),z(As.$$.fragment,t),z(Ls.$$.fragment,t),z(Ns.$$.fragment,t),z(Ve.$$.fragment,t),z(Is.$$.fragment,t),$r=!1},d(t){s(h),t&&s(w),t&&s(q),b(v),t&&s(nr),t&&s(ce),b(Xe),t&&s(or),t&&s(se),t&&s(rr),t&&s(Rs),t&&s(ar),t&&s(Ws),t&&s(ir),t&&s(Ks),t&&s(lr),t&&s(ne),t&&s(dr),t&&s(je),t&&s(cr),t&&s(pe),b(st),t&&s(pr),t&&s(K),b(nt),b(rt),t&&s(hr),t&&s(ue),b(at),t&&s(ur),t&&s(C),b(it),b(dt),b(pt),b(ut),b(mt),b(ft),t&&s(mr),t&&s(fe),b(gt),t&&s(fr),t&&s(Y),b(_t),t&&s(gr),t&&s(ge),b(qt),t&&s(_r),t&&s(S),b(kt),b($t),b(Et),b(jt),b(Ne),b(Mt),t&&s(zr),t&&s(ze),b(Ft),t&&s(br),t&&s(B),b(Ct),b(Ot),b(Rt),b(Wt),b(De),b(Kt),t&&s(qr),t&&s(qe),b(Ht),t&&s(kr),t&&s($),b(Qt),b(Zt),b(es),b(ts),b(Re),b(ss),b(ns),t&&s(vr),t&&s(ve),b(os),t&&s(yr),t&&s(E),b(rs),b(hs),b(us),b(ms),b(Ke),b(fs),t&&s(wr),t&&s(we),b(gs),t&&s(Tr),t&&s(j),b(_s),b(ws),b(Ts),b(Ss),b(Qe),b(Bs),t&&s(Sr),t&&s(Se),b($s),t&&s(Br),t&&s(M),b(Es),b(As),b(Ls),b(Ns),b(Ve),b(Is)}}}const 
Um={local:"squeezebert",sections:[{local:"overview",title:"Overview"},{local:"transformers.SqueezeBertConfig",title:"SqueezeBertConfig"},{local:"transformers.SqueezeBertTokenizer",title:"SqueezeBertTokenizer"},{local:"transformers.SqueezeBertTokenizerFast",title:"SqueezeBertTokenizerFast"},{local:"transformers.SqueezeBertModel",title:"SqueezeBertModel"},{local:"transformers.SqueezeBertForMaskedLM",title:"SqueezeBertForMaskedLM"},{local:"transformers.SqueezeBertForSequenceClassification",title:"SqueezeBertForSequenceClassification"},{local:"transformers.SqueezeBertForMultipleChoice",title:"SqueezeBertForMultipleChoice"},{local:"transformers.SqueezeBertForTokenClassification",title:"SqueezeBertForTokenClassification"},{local:"transformers.SqueezeBertForQuestionAnswering",title:"SqueezeBertForQuestionAnswering"}],title:"SqueezeBERT"};function Vm(H,h,w){let{fw:q}=h;return H.$$set=y=>{"fw"in y&&w(0,q=y.fw)},[q]}class tf extends Am{constructor(h){super();Lm(this,h,Vm,Qm,Nm,{fw:0})}}export{tf as default,Um as metadata};
284
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages/model_doc/vision-text-dual-encoder.mdx-bcc9f3e9.js
import{S as Nr,i as Br,s as Or,e as n,k as d,w as _,t as r,M as Wr,c as s,d as t,m as c,a,x as v,h as i,b as l,F as e,g as h,y as x,q as T,o as E,B as b}from"../../chunks/vendor-4833417e.js";import{T as Sr}from"../../chunks/Tip-fffd6df1.js";import{D as U}from"../../chunks/Docstring-4f315ed9.js";import{C as ya}from"../../chunks/CodeBlock-6a3d1b46.js";import{I as Vt}from"../../chunks/IconCopyLink-4b81c553.js";import"../../chunks/CopyButton-dacfbfaf.js";function Rr(he){let m,M,f,j,q;return{c(){m=n("p"),M=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=n("code"),j=r("Module"),q=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(w){m=s(w,"P",{});var $=a(m);M=i($,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s($,"CODE",{});var L=a(f);j=i(L,"Module"),L.forEach(t),q=i($,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),$.forEach(t)},m(w,$){h(w,m,$),e(m,M),e(m,f),e(f,j),e(m,q)},d(w){w&&t(m)}}}function Ur(he){let m,M,f,j,q;return{c(){m=n("p"),M=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=n("code"),j=r("Module"),q=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(w){m=s(w,"P",{});var $=a(m);M=i($,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s($,"CODE",{});var L=a(f);j=i(L,"Module"),L.forEach(t),q=i($,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),$.forEach(t)},m(w,$){h(w,m,$),e(m,M),e(m,f),e(f,j),e(m,q)},d(w){w&&t(m)}}}function Jr(he){let m,M,f,j,q,w,$,L,Lo,no,J,ee,Dt,fe,So,$t,No,so,g,Bo,tt,Oo,Wo,Pt,Ro,Uo,ot,Jo,Ho,nt,Zo,Go,st,Xo,Ko,Mt,Qo,Yo,at,en,tn,rt,on,nn,ao,te,sn,ue,an,rn,ro,H,oe,zt,ge,ln,Ct,dn,io,y,_e,cn,B,it,pn,mn,lt,hn,fn,dt,un,gn,_n,Z,vn,ct,xn,Tn,pt,En,bn,jn,Ft,wn,yn,ve,kn,ne,xe,Vn,Te,Dn,mt,$n,Pn,Mn,se,Ee,zn,be,Cn,ht,Fn,In,lo,G,ae,It,je,An,At,qn,co,P,we,Ln,qt,Sn,Nn,z,ft,Bn,On,ut,Wn,Rn,gt,Un,Jn,Lt,Hn,Zn,_t,Gn,Xn,Kn,re,ye,Qn,ke,Yn,vt,es,ts,os,ie,Ve,ns,De,ss,xt,as,rs,po,X,le,St,$e,is,Nt,ls,mo,k,Pe,ds,Me,cs,Bt,ps,ms,hs,ze,fs,Ce,us,gs,_s,Ot,vs,xs,Fe,Ts,Tt,Es,bs,js,Ie,ws,Ae,ys,ks,Vs,C,qe,Ds,K,$s,Et,Ps,Ms,Wt,zs,Cs,Fs,de,Is,Rt,As,qs,Le,ho,Q,ce,Ut,Se,Ls,Jt,Ss,fo,u,Ne,Ns,Be,Bs,Ht,Os,Ws,Rs,Oe,Us,We,Js,Hs,Zs,Zt,Gs,Xs,Re,Ks,bt,Qs,Ys,ea,Ue,ta,Je,oa,na,sa,Gt,aa,ra,S,Xt,He,ia,la,Kt,Ze,da,ca,Qt,Ge,pa,ma,Yt,Xe,ha,fa,F,Ke,ua,Y,ga,jt,_a,va,eo,xa,Ta,Ea,pe,ba,to,ja,wa,Qe,uo;return w=new Vt({}),fe=new Vt({}),ge=new Vt({}),_e=new U({props:{name:"class transformers.VisionTextDualEncoderConfig",anchor:"transformers.VisionTextDualEncoderConfig",parameters:[{name:"projection_dim",val:" = 512"},{name:"logit_scale_init_value",val:" = 2.6592"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/vision_text_dual_encoder/configuration_vision_text_dual_encoder.py#L28",parametersDescription:[{anchor:"transformers.VisionTextDualEncoderConfig.text_config_dict",description:`<strong>text_config_dict</strong> (<code>dict</code>) &#x2014; Dictionary of configuration options that defines text model 
config.`,name:"text_config_dict"},{anchor:"transformers.VisionTextDualEncoderConfig.vision_config_dict",description:`<strong>vision_config_dict</strong> (<code>dict</code>) &#x2014; Dictionary of configuration options that defines vison model config.`,name:"vision_config_dict"},{anchor:"transformers.VisionTextDualEncoderConfig.projection_dim",description:`<strong>projection_dim</strong> (<code>int</code>, <em>optional</em>, defaults to 512) &#x2014; Dimentionality of text and vision projection layers.`,name:"projection_dim"},{anchor:"transformers.VisionTextDualEncoderConfig.logit_scale_init_value",description:`<strong>logit_scale_init_value</strong> (<code>float</code>, <em>optional</em>, defaults to 2.6592) &#x2014; The inital value of the <em>logit_scale</em> paramter. Default is used as per the original CLIP implementation.`,name:"logit_scale_init_value"},{anchor:"transformers.VisionTextDualEncoderConfig.kwargs",description:`<strong>kwargs</strong> (<em>optional</em>) &#x2014; Dictionary of keyword arguments.`,name:"kwargs"}]}}),ve=new ya({props:{code:`from transformers import ViTConfig, BertConfig, VisionTextDualEncoderConfig, VisionTextDualEncoderModel # Initializing a BERT and ViT configuration config_vision = ViTConfig() config_text = BertConfig() config = VisionTextDualEncoderConfig.from_vision_text_configs(config_vision, config_text, projection_dim=512) # Initializing a BERT and ViT model model = VisionTextDualEncoderModel(config=config) # Accessing the model configuration config_vision = model.config.vision_config config_text = model.config.text_config # Saving the model, including its configuration model.save_pretrained("my-model") # loading model and config from pretrained folder vision_text_config = VisionTextDualEncoderConfig.from_pretrained("vit-bert") model = VisionTextDualEncoderModel.from_pretrained("vit-bert", config=vision_text_config)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ViTConfig, BertConfig, VisionTextDualEncoderConfig, VisionTextDualEncoderModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a BERT and ViT configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config_vision = ViTConfig() <span class="hljs-meta">&gt;&gt;&gt; </span>config_text = BertConfig() <span class="hljs-meta">&gt;&gt;&gt; </span>config = VisionTextDualEncoderConfig.from_vision_text_configs(config_vision, config_text, projection_dim=<span class="hljs-number">512</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a BERT and ViT model</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = VisionTextDualEncoderModel(config=config) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config_vision = model.config.vision_config <span class="hljs-meta">&gt;&gt;&gt; </span>config_text = model.config.text_config <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Saving the model, including its configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model.save_pretrained(<span class="hljs-string">&quot;my-model&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># loading model and config from pretrained folder</span> <span class="hljs-meta">&gt;&gt;&gt; </span>vision_text_config = VisionTextDualEncoderConfig.from_pretrained(<span 
class="hljs-string">&quot;vit-bert&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = VisionTextDualEncoderModel.from_pretrained(<span class="hljs-string">&quot;vit-bert&quot;</span>, config=vision_text_config)`}}),xe=new U({props:{name:"from_vision_text_configs",anchor:"transformers.VisionTextDualEncoderConfig.from_vision_text_configs",parameters:[{name:"vision_config",val:": PretrainedConfig"},{name:"text_config",val:": PretrainedConfig"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/vision_text_dual_encoder/configuration_vision_text_dual_encoder.py#L105",returnDescription:` <p>An instance of a configuration object</p> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/model_doc/vision-text-dual-encoder#transformers.VisionTextDualEncoderConfig" >VisionTextDualEncoderConfig</a></p> `}}),Ee=new U({props:{name:"to_dict",anchor:"transformers.VisionTextDualEncoderConfig.to_dict",parameters:[],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/vision_text_dual_encoder/configuration_vision_text_dual_encoder.py#L117",returnDescription:` <p>Dictionary of all the attributes that make up this configuration instance,</p> `,returnType:` <p><code>Dict[str, any]</code></p> `}}),je=new Vt({}),we=new U({props:{name:"class transformers.VisionTextDualEncoderProcessor",anchor:"transformers.VisionTextDualEncoderProcessor",parameters:[{name:"feature_extractor",val:""},{name:"tokenizer",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/vision_text_dual_encoder/processing_vision_text_dual_encoder.py#L23",parametersDescription:[{anchor:"transformers.VisionTextDualEncoderProcessor.feature_extractor",description:`<strong>feature_extractor</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/auto#transformers.AutoFeatureExtractor">AutoFeatureExtractor</a>) &#x2014; The feature extractor is a required input.`,name:"feature_extractor"},{anchor:"transformers.VisionTextDualEncoderProcessor.tokenizer",description:`<strong>tokenizer</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer is a required input.`,name:"tokenizer"}]}}),ye=new U({props:{name:"batch_decode",anchor:"transformers.VisionTextDualEncoderProcessor.batch_decode",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/vision_text_dual_encoder/processing_vision_text_dual_encoder.py#L98"}}),Ve=new U({props:{name:"decode",anchor:"transformers.VisionTextDualEncoderProcessor.decode",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/vision_text_dual_encoder/processing_vision_text_dual_encoder.py#L105"}}),$e=new Vt({}),Pe=new U({props:{name:"class transformers.VisionTextDualEncoderModel",anchor:"transformers.VisionTextDualEncoderModel",parameters:[{name:"config",val:": typing.Optional[transformers.models.vision_text_dual_encoder.configuration_vision_text_dual_encoder.VisionTextDualEncoderConfig] = None"},{name:"vision_model",val:": typing.Optional[transformers.modeling_utils.PreTrainedModel] = None"},{name:"text_model",val:": typing.Optional[transformers.modeling_utils.PreTrainedModel] = 
None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/vision_text_dual_encoder/modeling_vision_text_dual_encoder.py#L163",parametersDescription:[{anchor:"transformers.VisionTextDualEncoderModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/vision-encoder-decoder#transformers.VisionEncoderDecoderConfig">VisionEncoderDecoderConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),qe=new U({props:{name:"forward",anchor:"transformers.VisionTextDualEncoderModel.forward",parameters:[{name:"input_ids",val:" = None"},{name:"pixel_values",val:" = None"},{name:"attention_mask",val:" = None"},{name:"position_ids",val:" = None"},{name:"return_loss",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/vision_text_dual_encoder/modeling_vision_text_dual_encoder.py#L295",parametersDescription:[{anchor:"transformers.VisionTextDualEncoderModel.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/clip#transformers.CLIPTokenizer">CLIPTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.VisionTextDualEncoderModel.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.VisionTextDualEncoderModel.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.VisionTextDualEncoderModel.forward.pixel_values",description:`<strong>pixel_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_channels, height, width)</code>) &#x2014; Pixel values. 
Padding will be ignored by default should you provide it. Pixel values can be obtained using a feature extractor (e.g. if you use ViT as the encoder, you should use <a href="/docs/transformers/pr_16143/en/model_doc/vit#transformers.ViTFeatureExtractor">ViTFeatureExtractor</a>). See <a href="/docs/transformers/pr_16143/en/model_doc/vit#transformers.ViTFeatureExtractor.__call__">ViTFeatureExtractor.<strong>call</strong>()</a> for details.`,name:"pixel_values"},{anchor:"transformers.VisionTextDualEncoderModel.forward.return_loss",description:`<strong>return_loss</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the contrastive loss.`,name:"return_loss"},{anchor:"transformers.VisionTextDualEncoderModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.VisionTextDualEncoderModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.VisionTextDualEncoderModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <code>transformers.models.clip.modeling_clip.CLIPOutput</code>or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/vision-text-dual-encoder#transformers.VisionTextDualEncoderConfig" >VisionTextDualEncoderConfig</a>) and inputs.</p> <ul> <li><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>return_loss</code> is <code>True</code>) \u2014 Contrastive loss for image-text similarity.</li> <li><strong>logits_per_image:(<code>torch.FloatTensor</code></strong> of shape <code>(image_batch_size, text_batch_size)</code>) \u2014 The scaled dot product scores between <code>image_embeds</code> and <code>text_embeds</code>. This represents the image-text similarity scores.</li> <li><strong>logits_per_text:(<code>torch.FloatTensor</code></strong> of shape <code>(text_batch_size, image_batch_size)</code>) \u2014 The scaled dot product scores between <code>text_embeds</code> and <code>image_embeds</code>. 
This represents the text-image similarity scores.</li> <li><strong>text_embeds(<code>torch.FloatTensor</code></strong> of shape <code>(batch_size, output_dim</code>) \u2014 The text embeddings obtained by applying the projection layer to the pooled output of <a href="/docs/transformers/pr_16143/en/model_doc/clip#transformers.CLIPTextModel" >CLIPTextModel</a>.</li> <li><strong>image_embeds(<code>torch.FloatTensor</code></strong> of shape <code>(batch_size, output_dim</code>) \u2014 The image embeddings obtained by applying the projection layer to the pooled output of <a href="/docs/transformers/pr_16143/en/model_doc/clip#transformers.CLIPVisionModel" >CLIPVisionModel</a>.</li> <li><strong>text_model_output(<code>BaseModelOutputWithPooling</code>):</strong> The output of the <a href="/docs/transformers/pr_16143/en/model_doc/clip#transformers.CLIPTextModel" >CLIPTextModel</a>.</li> <li><strong>vision_model_output(<code>BaseModelOutputWithPooling</code>):</strong> The output of the <a href="/docs/transformers/pr_16143/en/model_doc/clip#transformers.CLIPVisionModel" >CLIPVisionModel</a>.</li> </ul> `,returnType:` <p><code>transformers.models.clip.modeling_clip.CLIPOutput</code>or <code>tuple(torch.FloatTensor)</code></p> `}}),de=new Sr({props:{$$slots:{default:[Rr]},$$scope:{ctx:he}}}),Le=new ya({props:{code:`from PIL import Image import requests from transformers import ( VisionTextDualEncoderModel, VisionTextDualEncoderProcessor, ViTFeatureExtractor, BertTokenizer, ) tokenizer = BertTokenizer.from_pretrained("bert-base-uncased") feature_extractor = ViTFeatureExtractor.from_pretrained("google/vit-base-patch16-224") processor = VisionTextDualEncoderProcessor(feature_extractor, tokenizer) model = VisionTextDualEncoderModel.from_vision_text_pretrained( "google/vit-base-patch16-224", "bert-base-uncased" ) # contrastive training urls = [ "http://images.cocodataset.org/val2017/000000039769.jpg", "https://farm3.staticflickr.com/2674/5850229113_4fe05d5265_z.jpg", ] images = [Image.open(requests.get(url, stream=True).raw) for url in urls] inputs = processor( text=["a photo of a cat", "a photo of a dog"], images=images, return_tensors="pt", padding=True ) outputs = model( input_ids=inputs.input_ids, attention_mask=inputs.attention_mask, pixel_values=inputs.pixel_values, return_loss=True, ) loss, logits_per_image = outputs.loss, outputs.logits_per_image # this is the image-text similarity score # save and load from pretrained model.save_pretrained("vit-bert") model = VisionTextDualEncoderModel.from_pretrained("vit-bert") # inference outputs = model(**inputs) logits_per_image = outputs.logits_per_image # this is the image-text similarity score probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> requests <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ( <span class="hljs-meta">... </span> VisionTextDualEncoderModel, <span class="hljs-meta">... </span> VisionTextDualEncoderProcessor, <span class="hljs-meta">... </span> ViTFeatureExtractor, <span class="hljs-meta">... </span> BertTokenizer, <span class="hljs-meta">... 
</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BertTokenizer.from_pretrained(<span class="hljs-string">&quot;bert-base-uncased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = ViTFeatureExtractor.from_pretrained(<span class="hljs-string">&quot;google/vit-base-patch16-224&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>processor = VisionTextDualEncoderProcessor(feature_extractor, tokenizer) <span class="hljs-meta">&gt;&gt;&gt; </span>model = VisionTextDualEncoderModel.from_vision_text_pretrained( <span class="hljs-meta">... </span> <span class="hljs-string">&quot;google/vit-base-patch16-224&quot;</span>, <span class="hljs-string">&quot;bert-base-uncased&quot;</span> <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># contrastive training</span> <span class="hljs-meta">&gt;&gt;&gt; </span>urls = [ <span class="hljs-meta">... </span> <span class="hljs-string">&quot;http://images.cocodataset.org/val2017/000000039769.jpg&quot;</span>, <span class="hljs-meta">... </span> <span class="hljs-string">&quot;https://farm3.staticflickr.com/2674/5850229113_4fe05d5265_z.jpg&quot;</span>, <span class="hljs-meta">... </span>] <span class="hljs-meta">&gt;&gt;&gt; </span>images = [Image.<span class="hljs-built_in">open</span>(requests.get(url, stream=<span class="hljs-literal">True</span>).raw) <span class="hljs-keyword">for</span> url <span class="hljs-keyword">in</span> urls] <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = processor( <span class="hljs-meta">... </span> text=[<span class="hljs-string">&quot;a photo of a cat&quot;</span>, <span class="hljs-string">&quot;a photo of a dog&quot;</span>], images=images, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>, padding=<span class="hljs-literal">True</span> <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model( <span class="hljs-meta">... </span> input_ids=inputs.input_ids, <span class="hljs-meta">... </span> attention_mask=inputs.attention_mask, <span class="hljs-meta">... </span> pixel_values=inputs.pixel_values, <span class="hljs-meta">... </span> return_loss=<span class="hljs-literal">True</span>, <span class="hljs-meta">... 
</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>loss, logits_per_image = outputs.loss, outputs.logits_per_image <span class="hljs-comment"># this is the image-text similarity score</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># save and load from pretrained</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model.save_pretrained(<span class="hljs-string">&quot;vit-bert&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = VisionTextDualEncoderModel.from_pretrained(<span class="hljs-string">&quot;vit-bert&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># inference</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits_per_image = outputs.logits_per_image <span class="hljs-comment"># this is the image-text similarity score</span> <span class="hljs-meta">&gt;&gt;&gt; </span>probs = logits_per_image.softmax(dim=<span class="hljs-number">1</span>) <span class="hljs-comment"># we can take the softmax to get the label probabilities</span>`}}),Se=new Vt({}),Ne=new U({props:{name:"class transformers.FlaxVisionTextDualEncoderModel",anchor:"transformers.FlaxVisionTextDualEncoderModel",parameters:[{name:"config",val:": VisionTextDualEncoderConfig"},{name:"input_shape",val:": typing.Optional[typing.Tuple] = None"},{name:"seed",val:": int = 0"},{name:"dtype",val:": dtype = <class 'jax._src.numpy.lax_numpy.float32'>"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/vision_text_dual_encoder/modeling_flax_vision_text_dual_encoder.py#L219",parametersDescription:[{anchor:"transformers.FlaxVisionTextDualEncoderModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/vision-text-dual-encoder#transformers.VisionTextDualEncoderConfig">VisionTextDualEncoderConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"},{anchor:"transformers.FlaxVisionTextDualEncoderModel.dtype",description:`<strong>dtype</strong> (<code>jax.numpy.dtype</code>, <em>optional</em>, defaults to <code>jax.numpy.float32</code>) &#x2014; The data type of the computation. Can be one of <code>jax.numpy.float32</code>, <code>jax.numpy.float16</code> (on GPUs) and <code>jax.numpy.bfloat16</code> (on TPUs).</p> <p>This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. 
If specified all the computation will be performed with the given <code>dtype</code>.</p> <p><strong>Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.</strong></p> <p>If you wish to change the dtype of the model parameters, see <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.to_fp16">to_fp16()</a> and <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.to_bf16">to_bf16()</a>.`,name:"dtype"}]}}),Ke=new U({props:{name:"__call__",anchor:"transformers.FlaxVisionTextDualEncoderModel.__call__",parameters:[{name:"input_ids",val:""},{name:"pixel_values",val:""},{name:"attention_mask",val:" = None"},{name:"position_ids",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"},{name:"train",val:": bool = False"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/vision_text_dual_encoder/modeling_flax_vision_text_dual_encoder.py#L251",parametersDescription:[{anchor:"transformers.FlaxVisionTextDualEncoderModel.__call__.input_ids",description:`<strong>input_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FlaxVisionTextDualEncoderModel.__call__.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FlaxVisionTextDualEncoderModel.__call__.position_ids",description:`<strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.FlaxVisionTextDualEncoderModel.__call__.pixel_values",description:`<strong>pixel_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_channels, height, width)</code>) &#x2014; Pixel values. Padding will be ignored by default should you provide it. 
Pixel values can be obtained using a feature extractor (e.g. if you use ViT as the encoder, you should use <a href="/docs/transformers/pr_16143/en/model_doc/vit#transformers.ViTFeatureExtractor">ViTFeatureExtractor</a>). See <a href="/docs/transformers/pr_16143/en/model_doc/vit#transformers.ViTFeatureExtractor.__call__">ViTFeatureExtractor.<strong>call</strong>()</a> for details.`,name:"pixel_values"},{anchor:"transformers.FlaxVisionTextDualEncoderModel.__call__.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.FlaxVisionTextDualEncoderModel.__call__.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FlaxVisionTextDualEncoderModel.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <code>transformers.models.clip.modeling_flax_clip.FlaxCLIPOutput</code>or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/vision-text-dual-encoder#transformers.VisionTextDualEncoderConfig" >VisionTextDualEncoderConfig</a>) and inputs.</p> <ul> <li><strong>logits_per_image:(<code>jnp.ndarray</code></strong> of shape <code>(image_batch_size, text_batch_size)</code>) \u2014 The scaled dot product scores between <code>image_embeds</code> and <code>text_embeds</code>. This represents the image-text similarity scores.</li> <li><strong>logits_per_text:(<code>jnp.ndarray</code></strong> of shape <code>(text_batch_size, image_batch_size)</code>) \u2014 The scaled dot product scores between <code>text_embeds</code> and <code>image_embeds</code>. 
This represents the text-image similarity scores.</li> <li><strong>text_embeds(<code>jnp.ndarray</code></strong> of shape <code>(batch_size, output_dim</code>) \u2014 The text embeddings obtained by applying the projection layer to the pooled output of <a href="/docs/transformers/pr_16143/en/model_doc/clip#transformers.FlaxCLIPTextModel" >FlaxCLIPTextModel</a>.</li> <li><strong>image_embeds(<code>jnp.ndarray</code></strong> of shape <code>(batch_size, output_dim</code>) \u2014 The image embeddings obtained by applying the projection layer to the pooled output of <a href="/docs/transformers/pr_16143/en/model_doc/clip#transformers.FlaxCLIPVisionModel" >FlaxCLIPVisionModel</a>.</li> <li><strong>text_model_output(<code>FlaxBaseModelOutputWithPooling</code>):</strong> The output of the <a href="/docs/transformers/pr_16143/en/model_doc/clip#transformers.FlaxCLIPTextModel" >FlaxCLIPTextModel</a>.</li> <li><strong>vision_model_output(<code>FlaxBaseModelOutputWithPooling</code>):</strong> The output of the <a href="/docs/transformers/pr_16143/en/model_doc/clip#transformers.FlaxCLIPVisionModel" >FlaxCLIPVisionModel</a>.</li> </ul> `,returnType:` <p><code>transformers.models.clip.modeling_flax_clip.FlaxCLIPOutput</code>or <code>tuple(torch.FloatTensor)</code></p> `}}),pe=new Sr({props:{$$slots:{default:[Ur]},$$scope:{ctx:he}}}),Qe=new ya({props:{code:`from PIL import Image import requests import jax from transformers import ( FlaxVisionTextDualEncoderModel, VisionTextDualEncoderProcessor, ViTFeatureExtractor, BertTokenizer, ) tokenizer = BertTokenizer.from_pretrained("bert-base-uncased") feature_extractor = ViTFeatureExtractor.from_pretrained("google/vit-base-patch16-224") processor = VisionTextDualEncoderProcessor(feature_extractor, tokenizer) model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( "google/vit-base-patch16-224", "bert-base-uncased" ) # contrastive training urls = [ "http://images.cocodataset.org/val2017/000000039769.jpg", "https://farm3.staticflickr.com/2674/5850229113_4fe05d5265_z.jpg", ] images = [Image.open(requests.get(url, stream=True).raw) for url in urls] inputs = processor( text=["a photo of a cat", "a photo of a dog"], images=images, return_tensors="np", padding=True ) outputs = model( input_ids=inputs.input_ids, attention_mask=inputs.attention_mask, pixel_values=inputs.pixel_values, ) logits_per_image = outputs.logits_per_image # this is the image-text similarity score # save and load from pretrained model.save_pretrained("vit-bert") model = FlaxVisionTextDualEncoderModel.from_pretrained("vit-bert") # inference outputs = model(**inputs) logits_per_image = outputs.logits_per_image # this is the image-text similarity score probs = jax.nn.softmax(logits_per_image, axis=1) # we can take the softmax to get the label probabilities`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> requests <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> jax <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ( <span class="hljs-meta">... </span> FlaxVisionTextDualEncoderModel, <span class="hljs-meta">... </span> VisionTextDualEncoderProcessor, <span class="hljs-meta">... </span> ViTFeatureExtractor, <span class="hljs-meta">... </span> BertTokenizer, <span class="hljs-meta">... 
</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BertTokenizer.from_pretrained(<span class="hljs-string">&quot;bert-base-uncased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = ViTFeatureExtractor.from_pretrained(<span class="hljs-string">&quot;google/vit-base-patch16-224&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>processor = VisionTextDualEncoderProcessor(feature_extractor, tokenizer) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( <span class="hljs-meta">... </span> <span class="hljs-string">&quot;google/vit-base-patch16-224&quot;</span>, <span class="hljs-string">&quot;bert-base-uncased&quot;</span> <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># contrastive training</span> <span class="hljs-meta">&gt;&gt;&gt; </span>urls = [ <span class="hljs-meta">... </span> <span class="hljs-string">&quot;http://images.cocodataset.org/val2017/000000039769.jpg&quot;</span>, <span class="hljs-meta">... </span> <span class="hljs-string">&quot;https://farm3.staticflickr.com/2674/5850229113_4fe05d5265_z.jpg&quot;</span>, <span class="hljs-meta">... </span>] <span class="hljs-meta">&gt;&gt;&gt; </span>images = [Image.<span class="hljs-built_in">open</span>(requests.get(url, stream=<span class="hljs-literal">True</span>).raw) <span class="hljs-keyword">for</span> url <span class="hljs-keyword">in</span> urls] <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = processor( <span class="hljs-meta">... </span> text=[<span class="hljs-string">&quot;a photo of a cat&quot;</span>, <span class="hljs-string">&quot;a photo of a dog&quot;</span>], images=images, return_tensors=<span class="hljs-string">&quot;np&quot;</span>, padding=<span class="hljs-literal">True</span> <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model( <span class="hljs-meta">... </span> input_ids=inputs.input_ids, <span class="hljs-meta">... </span> attention_mask=inputs.attention_mask, <span class="hljs-meta">... </span> pixel_values=inputs.pixel_values, <span class="hljs-meta">... 
</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>logits_per_image = outputs.logits_per_image <span class="hljs-comment"># this is the image-text similarity score</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># save and load from pretrained</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model.save_pretrained(<span class="hljs-string">&quot;vit-bert&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxVisionTextDualEncoderModel.from_pretrained(<span class="hljs-string">&quot;vit-bert&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># inference</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits_per_image = outputs.logits_per_image <span class="hljs-comment"># this is the image-text similarity score</span> <span class="hljs-meta">&gt;&gt;&gt; </span>probs = jax.nn.softmax(logits_per_image, axis=<span class="hljs-number">1</span>) <span class="hljs-comment"># we can take the softmax to get the label probabilities</span>`}}),{c(){m=n("meta"),M=d(),f=n("h1"),j=n("a"),q=n("span"),_(w.$$.fragment),$=d(),L=n("span"),Lo=r("VisionTextDualEncoder"),no=d(),J=n("h2"),ee=n("a"),Dt=n("span"),_(fe.$$.fragment),So=d(),$t=n("span"),No=r("Overview"),so=d(),g=n("p"),Bo=r("The "),tt=n("a"),Oo=r("VisionTextDualEncoderModel"),Wo=r(` can be used to initialize a vision-text dual encoder model with any pretrained vision autoencoding model as the vision encoder (`),Pt=n("em"),Ro=r("e.g."),Uo=d(),ot=n("a"),Jo=r("ViT"),Ho=r(", "),nt=n("a"),Zo=r("BEiT"),Go=r(", "),st=n("a"),Xo=r("DeiT"),Ko=r(") and any pretrained text autoencoding model as the text encoder ("),Mt=n("em"),Qo=r("e.g."),Yo=d(),at=n("a"),en=r("RoBERTa"),tn=r(", "),rt=n("a"),on=r("BERT"),nn=r(`). Two projection layers are added on top of both the vision and text encoder to project the output embeddings to a shared latent space. The projection layers are randomly initialized so the model should be fine-tuned on a downstream task. This model can be used to align the vision-text embeddings using CLIP like contrastive image-text training and then can be used for zero-shot vision tasks such image-classification or retrieval.`),ao=d(),te=n("p"),sn=r("In "),ue=n("a"),an=r("LiT: Zero-Shot Transfer with Locked-image Text Tuning"),rn=r(` it is shown how leveraging pre-trained (locked/frozen) image and text model for contrastive learning yields significant improvment on new zero-shot vision tasks such as image classification or retrieval.`),ro=d(),H=n("h2"),oe=n("a"),zt=n("span"),_(ge.$$.fragment),ln=d(),Ct=n("span"),dn=r("VisionTextDualEncoderConfig"),io=d(),y=n("div"),_(_e.$$.fragment),cn=d(),B=n("p"),it=n("a"),pn=r("VisionTextDualEncoderConfig"),mn=r(` is the configuration class to store the configuration of a `),lt=n("a"),hn=r("VisionTextDualEncoderModel"),fn=r(". It is used to instantiate "),dt=n("a"),un=r("VisionTextDualEncoderModel"),gn=r(` model according to the specified arguments, defining the text model and vision model configs.`),_n=d(),Z=n("p"),vn=r("Configuration objects inherit from "),ct=n("a"),xn=r("PretrainedConfig"),Tn=r(` and can be used to control the model outputs. 
Read the documentation from `),pt=n("a"),En=r("PretrainedConfig"),bn=r(" for more information."),jn=d(),Ft=n("p"),wn=r("Examples:"),yn=d(),_(ve.$$.fragment),kn=d(),ne=n("div"),_(xe.$$.fragment),Vn=d(),Te=n("p"),Dn=r("Instantiate a "),mt=n("a"),$n=r("VisionTextDualEncoderConfig"),Pn=r(` (or a derived class) from text model configuration and vision model configuration.`),Mn=d(),se=n("div"),_(Ee.$$.fragment),zn=d(),be=n("p"),Cn=r("Serializes this instance to a Python dictionary. Override the default "),ht=n("a"),Fn=r("to_dict()"),In=r("."),lo=d(),G=n("h2"),ae=n("a"),It=n("span"),_(je.$$.fragment),An=d(),At=n("span"),qn=r("VisionTextDualEncoderProcessor"),co=d(),P=n("div"),_(we.$$.fragment),Ln=d(),qt=n("p"),Sn=r(`Constructs a VisionTextDualEncoder processor which wraps a vision feature extractor and a tokenizer into a single processor.`),Nn=d(),z=n("p"),ft=n("a"),Bn=r("VisionTextDualEncoderProcessor"),On=r(" offers all the functionalities of "),ut=n("a"),Wn=r("AutoFeatureExtractor"),Rn=r(` and `),gt=n("a"),Un=r("AutoTokenizer"),Jn=r(". See the "),Lt=n("code"),Hn=r("__call__()"),Zn=r(`and `),_t=n("a"),Gn=r("decode()"),Xn=r(" for more information."),Kn=d(),re=n("div"),_(ye.$$.fragment),Qn=d(),ke=n("p"),Yn=r(`This method forwards all its arguments to VisionTextDualEncoderTokenizer\u2019s `),vt=n("a"),es=r("batch_decode()"),ts=r(". Please refer to the docstring of this method for more information."),os=d(),ie=n("div"),_(Ve.$$.fragment),ns=d(),De=n("p"),ss=r("This method forwards all its arguments to VisionTextDualEncoderTokenizer\u2019s "),xt=n("a"),as=r("decode()"),rs=r(`. Please refer to the docstring of this method for more information.`),po=d(),X=n("h2"),le=n("a"),St=n("span"),_($e.$$.fragment),is=d(),Nt=n("span"),ls=r("VisionTextDualEncoderModel"),mo=d(),k=n("div"),_(Pe.$$.fragment),ds=d(),Me=n("p"),cs=r(`This class can be used to initialize a vision-text dual encoder model with any pretrained vision autoencoding model as the vision encoder and any pretrained text model as the text encoder. The vision and text encoders are loaded via the `),Bt=n("code"),ps=r("from_pretrained()"),ms=r(`method. The projection layers are automatically added to the model and should be fine-tuned on a downstream task, like contrastive image-text modeling.`),hs=d(),ze=n("p"),fs=r("In "),Ce=n("a"),us=r("LiT: Zero-Shot Transfer with Locked-image Text Tuning"),gs=r(` it is shown how leveraging pre-trained (locked/frozen) image and text model for contrastive learning yields significant improvment on new zero-shot vision tasks such as image classification or retrieval.`),_s=d(),Ot=n("p"),vs=r(`After such a Vision-Text-Dual-Encoder model has been trained/fine-tuned, it can be saved/loaded just like any other models (see the examples for more information).`),xs=d(),Fe=n("p"),Ts=r("This model inherits from "),Tt=n("a"),Es=r("PreTrainedModel"),bs=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),js=d(),Ie=n("p"),ws=r("This model is also a PyTorch "),Ae=n("a"),ys=r("torch.nn.Module"),ks=r(` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Vs=d(),C=n("div"),_(qe.$$.fragment),Ds=d(),K=n("p"),$s=r("The "),Et=n("a"),Ps=r("VisionTextDualEncoderModel"),Ms=r(" forward method, overrides the "),Wt=n("code"),zs=r("__call__"),Cs=r(" special method."),Fs=d(),_(de.$$.fragment),Is=d(),Rt=n("p"),As=r("Examples:"),qs=d(),_(Le.$$.fragment),ho=d(),Q=n("h2"),ce=n("a"),Ut=n("span"),_(Se.$$.fragment),Ls=d(),Jt=n("span"),Ss=r("FlaxVisionTextDualEncoderModel"),fo=d(),u=n("div"),_(Ne.$$.fragment),Ns=d(),Be=n("p"),Bs=r(`This class can be used to initialize a vision-text dual encoder model with any pretrained vision autoencoding model as the vision encoder and any pretrained text model as the text encoder. The vision and text encoders are loaded via the `),Ht=n("code"),Os=r("from_pretrained()"),Ws=r(`method. The projection layers are automatically added to the model and should be fine-tuned on a downstream task, like contrastive image-text modeling.`),Rs=d(),Oe=n("p"),Us=r("In "),We=n("a"),Js=r("LiT: Zero-Shot Transfer with Locked-image Text Tuning"),Hs=r(` it is shown how leveraging pre-trained (locked/frozen) image and text model for contrastive learning yields significant improvment on new zero-shot vision tasks such as image classification or retrieval.`),Zs=d(),Zt=n("p"),Gs=r(`After such a Vision-Text-Dual-Encoder model has been trained/fine-tuned, it can be saved/loaded just like any other models (see the examples for more information).`),Xs=d(),Re=n("p"),Ks=r("This model inherits from "),bt=n("a"),Qs=r("PreTrainedModel"),Ys=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),ea=d(),Ue=n("p"),ta=r("This model is also a Flax Linen "),Je=n("a"),oa=r("flax.linen.Module"),na=r(` subclass. 
Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior.`),sa=d(),Gt=n("p"),aa=r("Finally, this model supports inherent JAX features such as:"),ra=d(),S=n("ul"),Xt=n("li"),He=n("a"),ia=r("Just-In-Time (JIT) compilation"),la=d(),Kt=n("li"),Ze=n("a"),da=r("Automatic Differentiation"),ca=d(),Qt=n("li"),Ge=n("a"),pa=r("Vectorization"),ma=d(),Yt=n("li"),Xe=n("a"),ha=r("Parallelization"),fa=d(),F=n("div"),_(Ke.$$.fragment),ua=d(),Y=n("p"),ga=r("The "),jt=n("a"),_a=r("FlaxVisionTextDualEncoderModel"),va=r(" forward method, overrides the "),eo=n("code"),xa=r("__call__"),Ta=r(" special method."),Ea=d(),_(pe.$$.fragment),ba=d(),to=n("p"),ja=r("Examples:"),wa=d(),_(Qe.$$.fragment),this.h()},l(o){const p=Wr('[data-svelte="svelte-1phssyn"]',document.head);m=s(p,"META",{name:!0,content:!0}),p.forEach(t),M=c(o),f=s(o,"H1",{class:!0});var Ye=a(f);j=s(Ye,"A",{id:!0,class:!0,href:!0});var oo=a(j);q=s(oo,"SPAN",{});var ka=a(q);v(w.$$.fragment,ka),ka.forEach(t),oo.forEach(t),$=c(Ye),L=s(Ye,"SPAN",{});var Va=a(L);Lo=i(Va,"VisionTextDualEncoder"),Va.forEach(t),Ye.forEach(t),no=c(o),J=s(o,"H2",{class:!0});var go=a(J);ee=s(go,"A",{id:!0,class:!0,href:!0});var Da=a(ee);Dt=s(Da,"SPAN",{});var $a=a(Dt);v(fe.$$.fragment,$a),$a.forEach(t),Da.forEach(t),So=c(go),$t=s(go,"SPAN",{});var Pa=a($t);No=i(Pa,"Overview"),Pa.forEach(t),go.forEach(t),so=c(o),g=s(o,"P",{});var V=a(g);Bo=i(V,"The "),tt=s(V,"A",{href:!0});var Ma=a(tt);Oo=i(Ma,"VisionTextDualEncoderModel"),Ma.forEach(t),Wo=i(V,` can be used to initialize a vision-text dual encoder model with any pretrained vision autoencoding model as the vision encoder (`),Pt=s(V,"EM",{});var za=a(Pt);Ro=i(za,"e.g."),za.forEach(t),Uo=c(V),ot=s(V,"A",{href:!0});var Ca=a(ot);Jo=i(Ca,"ViT"),Ca.forEach(t),Ho=i(V,", "),nt=s(V,"A",{href:!0});var Fa=a(nt);Zo=i(Fa,"BEiT"),Fa.forEach(t),Go=i(V,", "),st=s(V,"A",{href:!0});var Ia=a(st);Xo=i(Ia,"DeiT"),Ia.forEach(t),Ko=i(V,") and any pretrained text autoencoding model as the text encoder ("),Mt=s(V,"EM",{});var Aa=a(Mt);Qo=i(Aa,"e.g."),Aa.forEach(t),Yo=c(V),at=s(V,"A",{href:!0});var qa=a(at);en=i(qa,"RoBERTa"),qa.forEach(t),tn=i(V,", "),rt=s(V,"A",{href:!0});var La=a(rt);on=i(La,"BERT"),La.forEach(t),nn=i(V,`). Two projection layers are added on top of both the vision and text encoder to project the output embeddings to a shared latent space. The projection layers are randomly initialized so the model should be fine-tuned on a downstream task. 
This model can be used to align the vision-text embeddings using CLIP like contrastive image-text training and then can be used for zero-shot vision tasks such image-classification or retrieval.`),V.forEach(t),ao=c(o),te=s(o,"P",{});var _o=a(te);sn=i(_o,"In "),ue=s(_o,"A",{href:!0,rel:!0});var Sa=a(ue);an=i(Sa,"LiT: Zero-Shot Transfer with Locked-image Text Tuning"),Sa.forEach(t),rn=i(_o,` it is shown how leveraging pre-trained (locked/frozen) image and text model for contrastive learning yields significant improvment on new zero-shot vision tasks such as image classification or retrieval.`),_o.forEach(t),ro=c(o),H=s(o,"H2",{class:!0});var vo=a(H);oe=s(vo,"A",{id:!0,class:!0,href:!0});var Na=a(oe);zt=s(Na,"SPAN",{});var Ba=a(zt);v(ge.$$.fragment,Ba),Ba.forEach(t),Na.forEach(t),ln=c(vo),Ct=s(vo,"SPAN",{});var Oa=a(Ct);dn=i(Oa,"VisionTextDualEncoderConfig"),Oa.forEach(t),vo.forEach(t),io=c(o),y=s(o,"DIV",{class:!0});var I=a(y);v(_e.$$.fragment,I),cn=c(I),B=s(I,"P",{});var et=a(B);it=s(et,"A",{href:!0});var Wa=a(it);pn=i(Wa,"VisionTextDualEncoderConfig"),Wa.forEach(t),mn=i(et,` is the configuration class to store the configuration of a `),lt=s(et,"A",{href:!0});var Ra=a(lt);hn=i(Ra,"VisionTextDualEncoderModel"),Ra.forEach(t),fn=i(et,". It is used to instantiate "),dt=s(et,"A",{href:!0});var Ua=a(dt);un=i(Ua,"VisionTextDualEncoderModel"),Ua.forEach(t),gn=i(et,` model according to the specified arguments, defining the text model and vision model configs.`),et.forEach(t),_n=c(I),Z=s(I,"P",{});var wt=a(Z);vn=i(wt,"Configuration objects inherit from "),ct=s(wt,"A",{href:!0});var Ja=a(ct);xn=i(Ja,"PretrainedConfig"),Ja.forEach(t),Tn=i(wt,` and can be used to control the model outputs. Read the documentation from `),pt=s(wt,"A",{href:!0});var Ha=a(pt);En=i(Ha,"PretrainedConfig"),Ha.forEach(t),bn=i(wt," for more information."),wt.forEach(t),jn=c(I),Ft=s(I,"P",{});var Za=a(Ft);wn=i(Za,"Examples:"),Za.forEach(t),yn=c(I),v(ve.$$.fragment,I),kn=c(I),ne=s(I,"DIV",{class:!0});var xo=a(ne);v(xe.$$.fragment,xo),Vn=c(xo),Te=s(xo,"P",{});var To=a(Te);Dn=i(To,"Instantiate a "),mt=s(To,"A",{href:!0});var Ga=a(mt);$n=i(Ga,"VisionTextDualEncoderConfig"),Ga.forEach(t),Pn=i(To,` (or a derived class) from text model configuration and vision model configuration.`),To.forEach(t),xo.forEach(t),Mn=c(I),se=s(I,"DIV",{class:!0});var Eo=a(se);v(Ee.$$.fragment,Eo),zn=c(Eo),be=s(Eo,"P",{});var bo=a(be);Cn=i(bo,"Serializes this instance to a Python dictionary. Override the default "),ht=s(bo,"A",{href:!0});var Xa=a(ht);Fn=i(Xa,"to_dict()"),Xa.forEach(t),In=i(bo,"."),bo.forEach(t),Eo.forEach(t),I.forEach(t),lo=c(o),G=s(o,"H2",{class:!0});var jo=a(G);ae=s(jo,"A",{id:!0,class:!0,href:!0});var Ka=a(ae);It=s(Ka,"SPAN",{});var Qa=a(It);v(je.$$.fragment,Qa),Qa.forEach(t),Ka.forEach(t),An=c(jo),At=s(jo,"SPAN",{});var Ya=a(At);qn=i(Ya,"VisionTextDualEncoderProcessor"),Ya.forEach(t),jo.forEach(t),co=c(o),P=s(o,"DIV",{class:!0});var O=a(P);v(we.$$.fragment,O),Ln=c(O),qt=s(O,"P",{});var er=a(qt);Sn=i(er,`Constructs a VisionTextDualEncoder processor which wraps a vision feature extractor and a tokenizer into a single processor.`),er.forEach(t),Nn=c(O),z=s(O,"P",{});var N=a(z);ft=s(N,"A",{href:!0});var tr=a(ft);Bn=i(tr,"VisionTextDualEncoderProcessor"),tr.forEach(t),On=i(N," offers all the functionalities of "),ut=s(N,"A",{href:!0});var or=a(ut);Wn=i(or,"AutoFeatureExtractor"),or.forEach(t),Rn=i(N,` and `),gt=s(N,"A",{href:!0});var nr=a(gt);Un=i(nr,"AutoTokenizer"),nr.forEach(t),Jn=i(N,". 
See the "),Lt=s(N,"CODE",{});var sr=a(Lt);Hn=i(sr,"__call__()"),sr.forEach(t),Zn=i(N,`and `),_t=s(N,"A",{href:!0});var ar=a(_t);Gn=i(ar,"decode()"),ar.forEach(t),Xn=i(N," for more information."),N.forEach(t),Kn=c(O),re=s(O,"DIV",{class:!0});var wo=a(re);v(ye.$$.fragment,wo),Qn=c(wo),ke=s(wo,"P",{});var yo=a(ke);Yn=i(yo,`This method forwards all its arguments to VisionTextDualEncoderTokenizer\u2019s `),vt=s(yo,"A",{href:!0});var rr=a(vt);es=i(rr,"batch_decode()"),rr.forEach(t),ts=i(yo,". Please refer to the docstring of this method for more information."),yo.forEach(t),wo.forEach(t),os=c(O),ie=s(O,"DIV",{class:!0});var ko=a(ie);v(Ve.$$.fragment,ko),ns=c(ko),De=s(ko,"P",{});var Vo=a(De);ss=i(Vo,"This method forwards all its arguments to VisionTextDualEncoderTokenizer\u2019s "),xt=s(Vo,"A",{href:!0});var ir=a(xt);as=i(ir,"decode()"),ir.forEach(t),rs=i(Vo,`. Please refer to the docstring of this method for more information.`),Vo.forEach(t),ko.forEach(t),O.forEach(t),po=c(o),X=s(o,"H2",{class:!0});var Do=a(X);le=s(Do,"A",{id:!0,class:!0,href:!0});var lr=a(le);St=s(lr,"SPAN",{});var dr=a(St);v($e.$$.fragment,dr),dr.forEach(t),lr.forEach(t),is=c(Do),Nt=s(Do,"SPAN",{});var cr=a(Nt);ls=i(cr,"VisionTextDualEncoderModel"),cr.forEach(t),Do.forEach(t),mo=c(o),k=s(o,"DIV",{class:!0});var A=a(k);v(Pe.$$.fragment,A),ds=c(A),Me=s(A,"P",{});var $o=a(Me);cs=i($o,`This class can be used to initialize a vision-text dual encoder model with any pretrained vision autoencoding model as the vision encoder and any pretrained text model as the text encoder. The vision and text encoders are loaded via the `),Bt=s($o,"CODE",{});var pr=a(Bt);ps=i(pr,"from_pretrained()"),pr.forEach(t),ms=i($o,`method. The projection layers are automatically added to the model and should be fine-tuned on a downstream task, like contrastive image-text modeling.`),$o.forEach(t),hs=c(A),ze=s(A,"P",{});var Po=a(ze);fs=i(Po,"In "),Ce=s(Po,"A",{href:!0,rel:!0});var mr=a(Ce);us=i(mr,"LiT: Zero-Shot Transfer with Locked-image Text Tuning"),mr.forEach(t),gs=i(Po,` it is shown how leveraging pre-trained (locked/frozen) image and text model for contrastive learning yields significant improvment on new zero-shot vision tasks such as image classification or retrieval.`),Po.forEach(t),_s=c(A),Ot=s(A,"P",{});var hr=a(Ot);vs=i(hr,`After such a Vision-Text-Dual-Encoder model has been trained/fine-tuned, it can be saved/loaded just like any other models (see the examples for more information).`),hr.forEach(t),xs=c(A),Fe=s(A,"P",{});var Mo=a(Fe);Ts=i(Mo,"This model inherits from "),Tt=s(Mo,"A",{href:!0});var fr=a(Tt);Es=i(fr,"PreTrainedModel"),fr.forEach(t),bs=i(Mo,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Mo.forEach(t),js=c(A),Ie=s(A,"P",{});var zo=a(Ie);ws=i(zo,"This model is also a PyTorch "),Ae=s(zo,"A",{href:!0,rel:!0});var ur=a(Ae);ys=i(ur,"torch.nn.Module"),ur.forEach(t),ks=i(zo,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),zo.forEach(t),Vs=c(A),C=s(A,"DIV",{class:!0});var W=a(C);v(qe.$$.fragment,W),Ds=c(W),K=s(W,"P",{});var yt=a(K);$s=i(yt,"The "),Et=s(yt,"A",{href:!0});var gr=a(Et);Ps=i(gr,"VisionTextDualEncoderModel"),gr.forEach(t),Ms=i(yt," forward method, overrides the "),Wt=s(yt,"CODE",{});var _r=a(Wt);zs=i(_r,"__call__"),_r.forEach(t),Cs=i(yt," special method."),yt.forEach(t),Fs=c(W),v(de.$$.fragment,W),Is=c(W),Rt=s(W,"P",{});var vr=a(Rt);As=i(vr,"Examples:"),vr.forEach(t),qs=c(W),v(Le.$$.fragment,W),W.forEach(t),A.forEach(t),ho=c(o),Q=s(o,"H2",{class:!0});var Co=a(Q);ce=s(Co,"A",{id:!0,class:!0,href:!0});var xr=a(ce);Ut=s(xr,"SPAN",{});var Tr=a(Ut);v(Se.$$.fragment,Tr),Tr.forEach(t),xr.forEach(t),Ls=c(Co),Jt=s(Co,"SPAN",{});var Er=a(Jt);Ss=i(Er,"FlaxVisionTextDualEncoderModel"),Er.forEach(t),Co.forEach(t),fo=c(o),u=s(o,"DIV",{class:!0});var D=a(u);v(Ne.$$.fragment,D),Ns=c(D),Be=s(D,"P",{});var Fo=a(Be);Bs=i(Fo,`This class can be used to initialize a vision-text dual encoder model with any pretrained vision autoencoding model as the vision encoder and any pretrained text model as the text encoder. The vision and text encoders are loaded via the `),Ht=s(Fo,"CODE",{});var br=a(Ht);Os=i(br,"from_pretrained()"),br.forEach(t),Ws=i(Fo,`method. The projection layers are automatically added to the model and should be fine-tuned on a downstream task, like contrastive image-text modeling.`),Fo.forEach(t),Rs=c(D),Oe=s(D,"P",{});var Io=a(Oe);Us=i(Io,"In "),We=s(Io,"A",{href:!0,rel:!0});var jr=a(We);Js=i(jr,"LiT: Zero-Shot Transfer with Locked-image Text Tuning"),jr.forEach(t),Hs=i(Io,` it is shown how leveraging pre-trained (locked/frozen) image and text model for contrastive learning yields significant improvment on new zero-shot vision tasks such as image classification or retrieval.`),Io.forEach(t),Zs=c(D),Zt=s(D,"P",{});var wr=a(Zt);Gs=i(wr,`After such a Vision-Text-Dual-Encoder model has been trained/fine-tuned, it can be saved/loaded just like any other models (see the examples for more information).`),wr.forEach(t),Xs=c(D),Re=s(D,"P",{});var Ao=a(Re);Ks=i(Ao,"This model inherits from "),bt=s(Ao,"A",{href:!0});var yr=a(bt);Qs=i(yr,"PreTrainedModel"),yr.forEach(t),Ys=i(Ao,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Ao.forEach(t),ea=c(D),Ue=s(D,"P",{});var qo=a(Ue);ta=i(qo,"This model is also a Flax Linen "),Je=s(qo,"A",{href:!0,rel:!0});var kr=a(Je);oa=i(kr,"flax.linen.Module"),kr.forEach(t),na=i(qo,` subclass. 
Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior.`),qo.forEach(t),sa=c(D),Gt=s(D,"P",{});var Vr=a(Gt);aa=i(Vr,"Finally, this model supports inherent JAX features such as:"),Vr.forEach(t),ra=c(D),S=s(D,"UL",{});var me=a(S);Xt=s(me,"LI",{});var Dr=a(Xt);He=s(Dr,"A",{href:!0,rel:!0});var $r=a(He);ia=i($r,"Just-In-Time (JIT) compilation"),$r.forEach(t),Dr.forEach(t),la=c(me),Kt=s(me,"LI",{});var Pr=a(Kt);Ze=s(Pr,"A",{href:!0,rel:!0});var Mr=a(Ze);da=i(Mr,"Automatic Differentiation"),Mr.forEach(t),Pr.forEach(t),ca=c(me),Qt=s(me,"LI",{});var zr=a(Qt);Ge=s(zr,"A",{href:!0,rel:!0});var Cr=a(Ge);pa=i(Cr,"Vectorization"),Cr.forEach(t),zr.forEach(t),ma=c(me),Yt=s(me,"LI",{});var Fr=a(Yt);Xe=s(Fr,"A",{href:!0,rel:!0});var Ir=a(Xe);ha=i(Ir,"Parallelization"),Ir.forEach(t),Fr.forEach(t),me.forEach(t),fa=c(D),F=s(D,"DIV",{class:!0});var R=a(F);v(Ke.$$.fragment,R),ua=c(R),Y=s(R,"P",{});var kt=a(Y);ga=i(kt,"The "),jt=s(kt,"A",{href:!0});var Ar=a(jt);_a=i(Ar,"FlaxVisionTextDualEncoderModel"),Ar.forEach(t),va=i(kt," forward method, overrides the "),eo=s(kt,"CODE",{});var qr=a(eo);xa=i(qr,"__call__"),qr.forEach(t),Ta=i(kt," special method."),kt.forEach(t),Ea=c(R),v(pe.$$.fragment,R),ba=c(R),to=s(R,"P",{});var Lr=a(to);ja=i(Lr,"Examples:"),Lr.forEach(t),wa=c(R),v(Qe.$$.fragment,R),R.forEach(t),D.forEach(t),this.h()},h(){l(m,"name","hf:doc:metadata"),l(m,"content",JSON.stringify(Hr)),l(j,"id","visiontextdualencoder"),l(j,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(j,"href","#visiontextdualencoder"),l(f,"class","relative group"),l(ee,"id","overview"),l(ee,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(ee,"href","#overview"),l(J,"class","relative group"),l(tt,"href","/docs/transformers/pr_16143/en/model_doc/vision-text-dual-encoder#transformers.VisionTextDualEncoderModel"),l(ot,"href","vit"),l(nt,"href","beit"),l(st,"href","deit"),l(at,"href","roberta"),l(rt,"href","bert"),l(ue,"href","https://arxiv.org/abs/2111.07991"),l(ue,"rel","nofollow"),l(oe,"id","transformers.VisionTextDualEncoderConfig"),l(oe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(oe,"href","#transformers.VisionTextDualEncoderConfig"),l(H,"class","relative 
group"),l(it,"href","/docs/transformers/pr_16143/en/model_doc/vision-text-dual-encoder#transformers.VisionTextDualEncoderConfig"),l(lt,"href","/docs/transformers/pr_16143/en/model_doc/vision-text-dual-encoder#transformers.VisionTextDualEncoderModel"),l(dt,"href","/docs/transformers/pr_16143/en/model_doc/vision-text-dual-encoder#transformers.VisionTextDualEncoderModel"),l(ct,"href","/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig"),l(pt,"href","/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig"),l(mt,"href","/docs/transformers/pr_16143/en/model_doc/vision-text-dual-encoder#transformers.VisionTextDualEncoderConfig"),l(ne,"class","docstring"),l(ht,"href","/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig.to_dict"),l(se,"class","docstring"),l(y,"class","docstring"),l(ae,"id","transformers.VisionTextDualEncoderProcessor"),l(ae,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(ae,"href","#transformers.VisionTextDualEncoderProcessor"),l(G,"class","relative group"),l(ft,"href","/docs/transformers/pr_16143/en/model_doc/vision-text-dual-encoder#transformers.VisionTextDualEncoderProcessor"),l(ut,"href","/docs/transformers/pr_16143/en/model_doc/auto#transformers.AutoFeatureExtractor"),l(gt,"href","/docs/transformers/pr_16143/en/model_doc/auto#transformers.AutoTokenizer"),l(_t,"href","/docs/transformers/pr_16143/en/model_doc/vision-text-dual-encoder#transformers.VisionTextDualEncoderProcessor.decode"),l(vt,"href","/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.batch_decode"),l(re,"class","docstring"),l(xt,"href","/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.decode"),l(ie,"class","docstring"),l(P,"class","docstring"),l(le,"id","transformers.VisionTextDualEncoderModel"),l(le,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(le,"href","#transformers.VisionTextDualEncoderModel"),l(X,"class","relative group"),l(Ce,"href","https://arxiv.org/abs/2111.07991"),l(Ce,"rel","nofollow"),l(Tt,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel"),l(Ae,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(Ae,"rel","nofollow"),l(Et,"href","/docs/transformers/pr_16143/en/model_doc/vision-text-dual-encoder#transformers.VisionTextDualEncoderModel"),l(C,"class","docstring"),l(k,"class","docstring"),l(ce,"id","transformers.FlaxVisionTextDualEncoderModel"),l(ce,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(ce,"href","#transformers.FlaxVisionTextDualEncoderModel"),l(Q,"class","relative 
group"),l(We,"href","https://arxiv.org/abs/2111.07991"),l(We,"rel","nofollow"),l(bt,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel"),l(Je,"href","https://flax.readthedocs.io/en/latest/flax.linen.html#module"),l(Je,"rel","nofollow"),l(He,"href","https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit"),l(He,"rel","nofollow"),l(Ze,"href","https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation"),l(Ze,"rel","nofollow"),l(Ge,"href","https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap"),l(Ge,"rel","nofollow"),l(Xe,"href","https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap"),l(Xe,"rel","nofollow"),l(jt,"href","/docs/transformers/pr_16143/en/model_doc/vision-text-dual-encoder#transformers.FlaxVisionTextDualEncoderModel"),l(F,"class","docstring"),l(u,"class","docstring")},m(o,p){e(document.head,m),h(o,M,p),h(o,f,p),e(f,j),e(j,q),x(w,q,null),e(f,$),e(f,L),e(L,Lo),h(o,no,p),h(o,J,p),e(J,ee),e(ee,Dt),x(fe,Dt,null),e(J,So),e(J,$t),e($t,No),h(o,so,p),h(o,g,p),e(g,Bo),e(g,tt),e(tt,Oo),e(g,Wo),e(g,Pt),e(Pt,Ro),e(g,Uo),e(g,ot),e(ot,Jo),e(g,Ho),e(g,nt),e(nt,Zo),e(g,Go),e(g,st),e(st,Xo),e(g,Ko),e(g,Mt),e(Mt,Qo),e(g,Yo),e(g,at),e(at,en),e(g,tn),e(g,rt),e(rt,on),e(g,nn),h(o,ao,p),h(o,te,p),e(te,sn),e(te,ue),e(ue,an),e(te,rn),h(o,ro,p),h(o,H,p),e(H,oe),e(oe,zt),x(ge,zt,null),e(H,ln),e(H,Ct),e(Ct,dn),h(o,io,p),h(o,y,p),x(_e,y,null),e(y,cn),e(y,B),e(B,it),e(it,pn),e(B,mn),e(B,lt),e(lt,hn),e(B,fn),e(B,dt),e(dt,un),e(B,gn),e(y,_n),e(y,Z),e(Z,vn),e(Z,ct),e(ct,xn),e(Z,Tn),e(Z,pt),e(pt,En),e(Z,bn),e(y,jn),e(y,Ft),e(Ft,wn),e(y,yn),x(ve,y,null),e(y,kn),e(y,ne),x(xe,ne,null),e(ne,Vn),e(ne,Te),e(Te,Dn),e(Te,mt),e(mt,$n),e(Te,Pn),e(y,Mn),e(y,se),x(Ee,se,null),e(se,zn),e(se,be),e(be,Cn),e(be,ht),e(ht,Fn),e(be,In),h(o,lo,p),h(o,G,p),e(G,ae),e(ae,It),x(je,It,null),e(G,An),e(G,At),e(At,qn),h(o,co,p),h(o,P,p),x(we,P,null),e(P,Ln),e(P,qt),e(qt,Sn),e(P,Nn),e(P,z),e(z,ft),e(ft,Bn),e(z,On),e(z,ut),e(ut,Wn),e(z,Rn),e(z,gt),e(gt,Un),e(z,Jn),e(z,Lt),e(Lt,Hn),e(z,Zn),e(z,_t),e(_t,Gn),e(z,Xn),e(P,Kn),e(P,re),x(ye,re,null),e(re,Qn),e(re,ke),e(ke,Yn),e(ke,vt),e(vt,es),e(ke,ts),e(P,os),e(P,ie),x(Ve,ie,null),e(ie,ns),e(ie,De),e(De,ss),e(De,xt),e(xt,as),e(De,rs),h(o,po,p),h(o,X,p),e(X,le),e(le,St),x($e,St,null),e(X,is),e(X,Nt),e(Nt,ls),h(o,mo,p),h(o,k,p),x(Pe,k,null),e(k,ds),e(k,Me),e(Me,cs),e(Me,Bt),e(Bt,ps),e(Me,ms),e(k,hs),e(k,ze),e(ze,fs),e(ze,Ce),e(Ce,us),e(ze,gs),e(k,_s),e(k,Ot),e(Ot,vs),e(k,xs),e(k,Fe),e(Fe,Ts),e(Fe,Tt),e(Tt,Es),e(Fe,bs),e(k,js),e(k,Ie),e(Ie,ws),e(Ie,Ae),e(Ae,ys),e(Ie,ks),e(k,Vs),e(k,C),x(qe,C,null),e(C,Ds),e(C,K),e(K,$s),e(K,Et),e(Et,Ps),e(K,Ms),e(K,Wt),e(Wt,zs),e(K,Cs),e(C,Fs),x(de,C,null),e(C,Is),e(C,Rt),e(Rt,As),e(C,qs),x(Le,C,null),h(o,ho,p),h(o,Q,p),e(Q,ce),e(ce,Ut),x(Se,Ut,null),e(Q,Ls),e(Q,Jt),e(Jt,Ss),h(o,fo,p),h(o,u,p),x(Ne,u,null),e(u,Ns),e(u,Be),e(Be,Bs),e(Be,Ht),e(Ht,Os),e(Be,Ws),e(u,Rs),e(u,Oe),e(Oe,Us),e(Oe,We),e(We,Js),e(Oe,Hs),e(u,Zs),e(u,Zt),e(Zt,Gs),e(u,Xs),e(u,Re),e(Re,Ks),e(Re,bt),e(bt,Qs),e(Re,Ys),e(u,ea),e(u,Ue),e(Ue,ta),e(Ue,Je),e(Je,oa),e(Ue,na),e(u,sa),e(u,Gt),e(Gt,aa),e(u,ra),e(u,S),e(S,Xt),e(Xt,He),e(He,ia),e(S,la),e(S,Kt),e(Kt,Ze),e(Ze,da),e(S,ca),e(S,Qt),e(Qt,Ge),e(Ge,pa),e(S,ma),e(S,Yt),e(Yt,Xe),e(Xe,ha),e(u,fa),e(u,F),x(Ke,F,null),e(F,ua),e(F,Y),e(Y,ga),e(Y,jt),e(jt,_a),e(Y,va),e(Y,eo),e(eo,xa),e(Y,Ta),e(F,Ea),x(pe,F,null),e(F,ba),e(F,to),e(to,ja),e(F,wa),x(Qe,F,null),uo=!0},p(o,[p]){const Ye={};p&2&&(Ye.$$scope={dirty:p,ctx:o}),de.$set(Ye);const 
oo={};p&2&&(oo.$$scope={dirty:p,ctx:o}),pe.$set(oo)},i(o){uo||(T(w.$$.fragment,o),T(fe.$$.fragment,o),T(ge.$$.fragment,o),T(_e.$$.fragment,o),T(ve.$$.fragment,o),T(xe.$$.fragment,o),T(Ee.$$.fragment,o),T(je.$$.fragment,o),T(we.$$.fragment,o),T(ye.$$.fragment,o),T(Ve.$$.fragment,o),T($e.$$.fragment,o),T(Pe.$$.fragment,o),T(qe.$$.fragment,o),T(de.$$.fragment,o),T(Le.$$.fragment,o),T(Se.$$.fragment,o),T(Ne.$$.fragment,o),T(Ke.$$.fragment,o),T(pe.$$.fragment,o),T(Qe.$$.fragment,o),uo=!0)},o(o){E(w.$$.fragment,o),E(fe.$$.fragment,o),E(ge.$$.fragment,o),E(_e.$$.fragment,o),E(ve.$$.fragment,o),E(xe.$$.fragment,o),E(Ee.$$.fragment,o),E(je.$$.fragment,o),E(we.$$.fragment,o),E(ye.$$.fragment,o),E(Ve.$$.fragment,o),E($e.$$.fragment,o),E(Pe.$$.fragment,o),E(qe.$$.fragment,o),E(de.$$.fragment,o),E(Le.$$.fragment,o),E(Se.$$.fragment,o),E(Ne.$$.fragment,o),E(Ke.$$.fragment,o),E(pe.$$.fragment,o),E(Qe.$$.fragment,o),uo=!1},d(o){t(m),o&&t(M),o&&t(f),b(w),o&&t(no),o&&t(J),b(fe),o&&t(so),o&&t(g),o&&t(ao),o&&t(te),o&&t(ro),o&&t(H),b(ge),o&&t(io),o&&t(y),b(_e),b(ve),b(xe),b(Ee),o&&t(lo),o&&t(G),b(je),o&&t(co),o&&t(P),b(we),b(ye),b(Ve),o&&t(po),o&&t(X),b($e),o&&t(mo),o&&t(k),b(Pe),b(qe),b(de),b(Le),o&&t(ho),o&&t(Q),b(Se),o&&t(fo),o&&t(u),b(Ne),b(Ke),b(pe),b(Qe)}}}const Hr={local:"visiontextdualencoder",sections:[{local:"overview",title:"Overview"},{local:"transformers.VisionTextDualEncoderConfig",title:"VisionTextDualEncoderConfig"},{local:"transformers.VisionTextDualEncoderProcessor",title:"VisionTextDualEncoderProcessor"},{local:"transformers.VisionTextDualEncoderModel",title:"VisionTextDualEncoderModel"},{local:"transformers.FlaxVisionTextDualEncoderModel",title:"FlaxVisionTextDualEncoderModel"}],title:"VisionTextDualEncoder"};function Zr(he,m,M){let{fw:f}=m;return he.$$set=j=>{"fw"in j&&M(0,f=j.fw)},[f]}class ti extends Nr{constructor(m){super();Br(this,m,Zr,Jr,Or,{fw:0})}}export{ti as default,Hr as metadata};
285
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages/model_doc/retribert.mdx-3029d10f.js
import{S as mo,i as fo,s as ho,e as o,k as d,w as b,t as s,M as po,c as n,d as r,m as c,a,x as k,h as l,b as i,F as e,g as f,y,L as uo,q as B,o as T,B as R}from"../../chunks/vendor-4833417e.js";import{D as Ve}from"../../chunks/Docstring-4f315ed9.js";import{I as we}from"../../chunks/IconCopyLink-4b81c553.js";function _o(_t){let w,me,_,g,ze,V,gt,$e,vt,Ue,E,q,Ee,U,bt,xe,kt,Ke,F,yt,K,Bt,Tt,We,z,Rt,W,wt,zt,Q,$t,Et,Qe,x,N,Pe,G,xt,Ce,Pt,Ge,v,J,Ct,X,Mt,fe,At,qt,Ft,P,Nt,he,St,Dt,pe,It,jt,Je,C,S,Me,Y,Lt,Ae,Ht,Xe,p,Z,Ot,qe,Vt,Ut,D,Fe,Kt,Wt,ue,Qt,Gt,Jt,ee,Xt,_e,Yt,Zt,Ye,M,I,Ne,te,er,Se,tr,Ze,u,re,rr,oe,or,De,nr,ar,ir,j,ge,sr,lr,ve,dr,cr,mr,ne,fr,be,hr,pr,et,A,L,Ie,ae,ur,je,_r,tt,h,ie,gr,Le,vr,br,se,kr,ke,yr,Br,Tr,le,Rr,de,wr,zr,$r,ye,ce,rt;return V=new we({}),U=new we({}),G=new we({}),J=new Ve({props:{name:"class transformers.RetriBertConfig",anchor:"transformers.RetriBertConfig",parameters:[{name:"vocab_size",val:" = 30522"},{name:"hidden_size",val:" = 768"},{name:"num_hidden_layers",val:" = 8"},{name:"num_attention_heads",val:" = 12"},{name:"intermediate_size",val:" = 3072"},{name:"hidden_act",val:" = 'gelu'"},{name:"hidden_dropout_prob",val:" = 0.1"},{name:"attention_probs_dropout_prob",val:" = 0.1"},{name:"max_position_embeddings",val:" = 512"},{name:"type_vocab_size",val:" = 2"},{name:"initializer_range",val:" = 0.02"},{name:"layer_norm_eps",val:" = 1e-12"},{name:"share_encoders",val:" = True"},{name:"projection_dim",val:" = 128"},{name:"pad_token_id",val:" = 0"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/retribert/configuration_retribert.py#L29",parametersDescription:[{anchor:"transformers.RetriBertConfig.vocab_size",description:`<strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 30522) &#x2014; Vocabulary size of the RetriBERT model. Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed when calling <a href="/docs/transformers/pr_16143/en/model_doc/retribert#transformers.RetriBertModel">RetriBertModel</a>`,name:"vocab_size"},{anchor:"transformers.RetriBertConfig.hidden_size",description:`<strong>hidden_size</strong> (<code>int</code>, <em>optional</em>, defaults to 768) &#x2014; Dimensionality of the encoder layers and the pooler layer.`,name:"hidden_size"},{anchor:"transformers.RetriBertConfig.num_hidden_layers",description:`<strong>num_hidden_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of hidden layers in the Transformer encoder.`,name:"num_hidden_layers"},{anchor:"transformers.RetriBertConfig.num_attention_heads",description:`<strong>num_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.`,name:"num_attention_heads"},{anchor:"transformers.RetriBertConfig.intermediate_size",description:`<strong>intermediate_size</strong> (<code>int</code>, <em>optional</em>, defaults to 3072) &#x2014; Dimensionality of the &#x201C;intermediate&#x201D; (often named feed-forward) layer in the Transformer encoder.`,name:"intermediate_size"},{anchor:"transformers.RetriBertConfig.hidden_act",description:`<strong>hidden_act</strong> (<code>str</code> or <code>function</code>, <em>optional</em>, defaults to <code>&quot;gelu&quot;</code>) &#x2014; The non-linear activation function (function or string) in the encoder and pooler. 
If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;silu&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.`,name:"hidden_act"},{anchor:"transformers.RetriBertConfig.hidden_dropout_prob",description:`<strong>hidden_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.`,name:"hidden_dropout_prob"},{anchor:"transformers.RetriBertConfig.attention_probs_dropout_prob",description:`<strong>attention_probs_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout ratio for the attention probabilities.`,name:"attention_probs_dropout_prob"},{anchor:"transformers.RetriBertConfig.max_position_embeddings",description:`<strong>max_position_embeddings</strong> (<code>int</code>, <em>optional</em>, defaults to 512) &#x2014; The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048).`,name:"max_position_embeddings"},{anchor:"transformers.RetriBertConfig.type_vocab_size",description:`<strong>type_vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 2) &#x2014; The vocabulary size of the <em>token_type_ids</em> passed into <a href="/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertModel">BertModel</a>.`,name:"type_vocab_size"},{anchor:"transformers.RetriBertConfig.initializer_range",description:`<strong>initializer_range</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices.`,name:"initializer_range"},{anchor:"transformers.RetriBertConfig.layer_norm_eps",description:`<strong>layer_norm_eps</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-12) &#x2014; The epsilon used by the layer normalization layers.`,name:"layer_norm_eps"},{anchor:"transformers.RetriBertConfig.share_encoders",description:`<strong>share_encoders</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to use the same Bert-type encoder for the queries and document`,name:"share_encoders"},{anchor:"transformers.RetriBertConfig.projection_dim",description:`<strong>projection_dim</strong> (<code>int</code>, <em>optional</em>, defaults to 128) &#x2014; Final dimension of the query and document representation after projection`,name:"projection_dim"}]}}),Y=new we({}),Z=new Ve({props:{name:"class transformers.RetriBertTokenizer",anchor:"transformers.RetriBertTokenizer",parameters:[{name:"vocab_file",val:""},{name:"do_lower_case",val:" = True"},{name:"do_basic_tokenize",val:" = True"},{name:"never_split",val:" = None"},{name:"unk_token",val:" = '[UNK]'"},{name:"sep_token",val:" = '[SEP]'"},{name:"pad_token",val:" = '[PAD]'"},{name:"cls_token",val:" = '[CLS]'"},{name:"mask_token",val:" = '[MASK]'"},{name:"tokenize_chinese_chars",val:" = True"},{name:"strip_accents",val:" = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/retribert/tokenization_retribert.py#L41"}}),te=new we({}),re=new Ve({props:{name:"class transformers.RetriBertTokenizerFast",anchor:"transformers.RetriBertTokenizerFast",parameters:[{name:"vocab_file",val:" = None"},{name:"tokenizer_file",val:" = None"},{name:"do_lower_case",val:" = True"},{name:"unk_token",val:" = '[UNK]'"},{name:"sep_token",val:" = 
'[SEP]'"},{name:"pad_token",val:" = '[PAD]'"},{name:"cls_token",val:" = '[CLS]'"},{name:"mask_token",val:" = '[MASK]'"},{name:"tokenize_chinese_chars",val:" = True"},{name:"strip_accents",val:" = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/retribert/tokenization_retribert_fast.py#L45"}}),ae=new we({}),ie=new Ve({props:{name:"class transformers.RetriBertModel",anchor:"transformers.RetriBertModel",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/retribert/modeling_retribert.py#L88",parametersDescription:[{anchor:"transformers.RetriBertModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/retribert#transformers.RetriBertConfig">RetriBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),ce=new Ve({props:{name:"forward",anchor:"transformers.RetriBertModel.forward",parameters:[{name:"input_ids_query",val:""},{name:"attention_mask_query",val:""},{name:"input_ids_doc",val:""},{name:"attention_mask_doc",val:""},{name:"checkpoint_batch_size",val:" = -1"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/retribert/modeling_retribert.py#L176",parametersDescription:[{anchor:"transformers.RetriBertModel.forward.input_ids_query",description:`<strong>input_ids_query</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary for the queries in a batch.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/retribert#transformers.RetriBertTokenizer">RetriBertTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids_query"},{anchor:"transformers.RetriBertModel.forward.attention_mask_query",description:`<strong>attention_mask_query</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask_query"},{anchor:"transformers.RetriBertModel.forward.input_ids_doc",description:`<strong>input_ids_doc</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary for the documents in a batch.`,name:"input_ids_doc"},{anchor:"transformers.RetriBertModel.forward.attention_mask_doc",description:`<strong>attention_mask_doc</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on documents padding token indices.`,name:"attention_mask_doc"},{anchor:"transformers.RetriBertModel.forward.checkpoint_batch_size",description:`<strong>checkpoint_batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to &#x201C;<code>-1</code>) &#x2014; If greater than 0, uses gradient checkpointing to only compute sequence representation on <code>checkpoint_batch_size</code> examples at a time on the GPU. All query representations are still compared to all document representations in the batch.`,name:"checkpoint_batch_size"}],returnDescription:` <p>The bidirectional cross-entropy loss obtained while trying to match each query to its corresponding document and each document to its corresponding query in the batch</p> `,returnType:` <p>\`torch.FloatTensor\u201C</p> `}}),{c(){w=o("meta"),me=d(),_=o("h1"),g=o("a"),ze=o("span"),b(V.$$.fragment),gt=d(),$e=o("span"),vt=s("RetriBERT"),Ue=d(),E=o("h2"),q=o("a"),Ee=o("span"),b(U.$$.fragment),bt=d(),xe=o("span"),kt=s("Overview"),Ke=d(),F=o("p"),yt=s("The RetriBERT model was proposed in the blog post "),K=o("a"),Bt=s(`Explain Anything Like I\u2019m Five: A Model for Open Domain Long Form Question Answering`),Tt=s(`. RetriBERT is a small model that uses either a single or pair of BERT encoders with lower-dimension projection for dense semantic indexing of text.`),We=d(),z=o("p"),Rt=s("This model was contributed by "),W=o("a"),wt=s("yjernite"),zt=s(`. Code to train and use the model can be found `),Q=o("a"),$t=s("here"),Et=s("."),Qe=d(),x=o("h2"),N=o("a"),Pe=o("span"),b(G.$$.fragment),xt=d(),Ce=o("span"),Pt=s("RetriBertConfig"),Ge=d(),v=o("div"),b(J.$$.fragment),Ct=d(),X=o("p"),Mt=s("This is the configuration class to store the configuration of a "),fe=o("a"),At=s("RetriBertModel"),qt=s(`. It is used to instantiate a RetriBertModel model according to the specified arguments, defining the model architecture.`),Ft=d(),P=o("p"),Nt=s("Configuration objects inherit from "),he=o("a"),St=s("PretrainedConfig"),Dt=s(` and can be used to control the model outputs. 
Read the documentation from `),pe=o("a"),It=s("PretrainedConfig"),jt=s(" for more information."),Je=d(),C=o("h2"),S=o("a"),Me=o("span"),b(Y.$$.fragment),Lt=d(),Ae=o("span"),Ht=s("RetriBertTokenizer"),Xe=d(),p=o("div"),b(Z.$$.fragment),Ot=d(),qe=o("p"),Vt=s("Constructs a RetriBERT tokenizer."),Ut=d(),D=o("p"),Fe=o("code"),Kt=s("RetroBertTokenizer"),Wt=s(" is identical to "),ue=o("a"),Qt=s("BertTokenizer"),Gt=s(` and runs end-to-end tokenization: punctuation splitting and wordpiece.`),Jt=d(),ee=o("p"),Xt=s("Refer to superclass "),_e=o("a"),Yt=s("BertTokenizer"),Zt=s(" for usage examples and documentation concerning parameters."),Ye=d(),M=o("h2"),I=o("a"),Ne=o("span"),b(te.$$.fragment),er=d(),Se=o("span"),tr=s("RetriBertTokenizerFast"),Ze=d(),u=o("div"),b(re.$$.fragment),rr=d(),oe=o("p"),or=s("Construct a \u201Cfast\u201D RetriBERT tokenizer (backed by HuggingFace\u2019s "),De=o("em"),nr=s("tokenizers"),ar=s(" library)."),ir=d(),j=o("p"),ge=o("a"),sr=s("RetriBertTokenizerFast"),lr=s(" is identical to "),ve=o("a"),dr=s("BertTokenizerFast"),cr=s(` and runs end-to-end tokenization: punctuation splitting and wordpiece.`),mr=d(),ne=o("p"),fr=s("Refer to superclass "),be=o("a"),hr=s("BertTokenizerFast"),pr=s(" for usage examples and documentation concerning parameters."),et=d(),A=o("h2"),L=o("a"),Ie=o("span"),b(ae.$$.fragment),ur=d(),je=o("span"),_r=s("RetriBertModel"),tt=d(),h=o("div"),b(ie.$$.fragment),gr=d(),Le=o("p"),vr=s("Bert Based model to embed queries or document for document retrieval."),br=d(),se=o("p"),kr=s("This model inherits from "),ke=o("a"),yr=s("PreTrainedModel"),Br=s(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Tr=d(),le=o("p"),Rr=s("This model is also a PyTorch "),de=o("a"),wr=s("torch.nn.Module"),zr=s(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),$r=d(),ye=o("div"),b(ce.$$.fragment),this.h()},l(t){const m=po('[data-svelte="svelte-1phssyn"]',document.head);w=n(m,"META",{name:!0,content:!0}),m.forEach(r),me=c(t),_=n(t,"H1",{class:!0});var ot=a(_);g=n(ot,"A",{id:!0,class:!0,href:!0});var Er=a(g);ze=n(Er,"SPAN",{});var xr=a(ze);k(V.$$.fragment,xr),xr.forEach(r),Er.forEach(r),gt=c(ot),$e=n(ot,"SPAN",{});var Pr=a($e);vt=l(Pr,"RetriBERT"),Pr.forEach(r),ot.forEach(r),Ue=c(t),E=n(t,"H2",{class:!0});var nt=a(E);q=n(nt,"A",{id:!0,class:!0,href:!0});var Cr=a(q);Ee=n(Cr,"SPAN",{});var Mr=a(Ee);k(U.$$.fragment,Mr),Mr.forEach(r),Cr.forEach(r),bt=c(nt),xe=n(nt,"SPAN",{});var Ar=a(xe);kt=l(Ar,"Overview"),Ar.forEach(r),nt.forEach(r),Ke=c(t),F=n(t,"P",{});var at=a(F);yt=l(at,"The RetriBERT model was proposed in the blog post "),K=n(at,"A",{href:!0,rel:!0});var qr=a(K);Bt=l(qr,`Explain Anything Like I\u2019m Five: A Model for Open Domain Long Form Question Answering`),qr.forEach(r),Tt=l(at,`. RetriBERT is a small model that uses either a single or pair of BERT encoders with lower-dimension projection for dense semantic indexing of text.`),at.forEach(r),We=c(t),z=n(t,"P",{});var Be=a(z);Rt=l(Be,"This model was contributed by "),W=n(Be,"A",{href:!0,rel:!0});var Fr=a(W);wt=l(Fr,"yjernite"),Fr.forEach(r),zt=l(Be,`. 
Code to train and use the model can be found `),Q=n(Be,"A",{href:!0,rel:!0});var Nr=a(Q);$t=l(Nr,"here"),Nr.forEach(r),Et=l(Be,"."),Be.forEach(r),Qe=c(t),x=n(t,"H2",{class:!0});var it=a(x);N=n(it,"A",{id:!0,class:!0,href:!0});var Sr=a(N);Pe=n(Sr,"SPAN",{});var Dr=a(Pe);k(G.$$.fragment,Dr),Dr.forEach(r),Sr.forEach(r),xt=c(it),Ce=n(it,"SPAN",{});var Ir=a(Ce);Pt=l(Ir,"RetriBertConfig"),Ir.forEach(r),it.forEach(r),Ge=c(t),v=n(t,"DIV",{class:!0});var Te=a(v);k(J.$$.fragment,Te),Ct=c(Te),X=n(Te,"P",{});var st=a(X);Mt=l(st,"This is the configuration class to store the configuration of a "),fe=n(st,"A",{href:!0});var jr=a(fe);At=l(jr,"RetriBertModel"),jr.forEach(r),qt=l(st,`. It is used to instantiate a RetriBertModel model according to the specified arguments, defining the model architecture.`),st.forEach(r),Ft=c(Te),P=n(Te,"P",{});var Re=a(P);Nt=l(Re,"Configuration objects inherit from "),he=n(Re,"A",{href:!0});var Lr=a(he);St=l(Lr,"PretrainedConfig"),Lr.forEach(r),Dt=l(Re,` and can be used to control the model outputs. Read the documentation from `),pe=n(Re,"A",{href:!0});var Hr=a(pe);It=l(Hr,"PretrainedConfig"),Hr.forEach(r),jt=l(Re," for more information."),Re.forEach(r),Te.forEach(r),Je=c(t),C=n(t,"H2",{class:!0});var lt=a(C);S=n(lt,"A",{id:!0,class:!0,href:!0});var Or=a(S);Me=n(Or,"SPAN",{});var Vr=a(Me);k(Y.$$.fragment,Vr),Vr.forEach(r),Or.forEach(r),Lt=c(lt),Ae=n(lt,"SPAN",{});var Ur=a(Ae);Ht=l(Ur,"RetriBertTokenizer"),Ur.forEach(r),lt.forEach(r),Xe=c(t),p=n(t,"DIV",{class:!0});var H=a(p);k(Z.$$.fragment,H),Ot=c(H),qe=n(H,"P",{});var Kr=a(qe);Vt=l(Kr,"Constructs a RetriBERT tokenizer."),Kr.forEach(r),Ut=c(H),D=n(H,"P",{});var He=a(D);Fe=n(He,"CODE",{});var Wr=a(Fe);Kt=l(Wr,"RetroBertTokenizer"),Wr.forEach(r),Wt=l(He," is identical to "),ue=n(He,"A",{href:!0});var Qr=a(ue);Qt=l(Qr,"BertTokenizer"),Qr.forEach(r),Gt=l(He,` and runs end-to-end tokenization: punctuation splitting and wordpiece.`),He.forEach(r),Jt=c(H),ee=n(H,"P",{});var dt=a(ee);Xt=l(dt,"Refer to superclass "),_e=n(dt,"A",{href:!0});var Gr=a(_e);Yt=l(Gr,"BertTokenizer"),Gr.forEach(r),Zt=l(dt," for usage examples and documentation concerning parameters."),dt.forEach(r),H.forEach(r),Ye=c(t),M=n(t,"H2",{class:!0});var ct=a(M);I=n(ct,"A",{id:!0,class:!0,href:!0});var Jr=a(I);Ne=n(Jr,"SPAN",{});var Xr=a(Ne);k(te.$$.fragment,Xr),Xr.forEach(r),Jr.forEach(r),er=c(ct),Se=n(ct,"SPAN",{});var Yr=a(Se);tr=l(Yr,"RetriBertTokenizerFast"),Yr.forEach(r),ct.forEach(r),Ze=c(t),u=n(t,"DIV",{class:!0});var O=a(u);k(re.$$.fragment,O),rr=c(O),oe=n(O,"P",{});var mt=a(oe);or=l(mt,"Construct a \u201Cfast\u201D RetriBERT tokenizer (backed by HuggingFace\u2019s "),De=n(mt,"EM",{});var Zr=a(De);nr=l(Zr,"tokenizers"),Zr.forEach(r),ar=l(mt," library)."),mt.forEach(r),ir=c(O),j=n(O,"P",{});var Oe=a(j);ge=n(Oe,"A",{href:!0});var eo=a(ge);sr=l(eo,"RetriBertTokenizerFast"),eo.forEach(r),lr=l(Oe," is identical to "),ve=n(Oe,"A",{href:!0});var to=a(ve);dr=l(to,"BertTokenizerFast"),to.forEach(r),cr=l(Oe,` and runs end-to-end tokenization: punctuation splitting and wordpiece.`),Oe.forEach(r),mr=c(O),ne=n(O,"P",{});var ft=a(ne);fr=l(ft,"Refer to superclass "),be=n(ft,"A",{href:!0});var ro=a(be);hr=l(ro,"BertTokenizerFast"),ro.forEach(r),pr=l(ft," for usage examples and documentation concerning parameters."),ft.forEach(r),O.forEach(r),et=c(t),A=n(t,"H2",{class:!0});var ht=a(A);L=n(ht,"A",{id:!0,class:!0,href:!0});var oo=a(L);Ie=n(oo,"SPAN",{});var no=a(Ie);k(ae.$$.fragment,no),no.forEach(r),oo.forEach(r),ur=c(ht),je=n(ht,"SPAN",{});var 
ao=a(je);_r=l(ao,"RetriBertModel"),ao.forEach(r),ht.forEach(r),tt=c(t),h=n(t,"DIV",{class:!0});var $=a(h);k(ie.$$.fragment,$),gr=c($),Le=n($,"P",{});var io=a(Le);vr=l(io,"Bert Based model to embed queries or document for document retrieval."),io.forEach(r),br=c($),se=n($,"P",{});var pt=a(se);kr=l(pt,"This model inherits from "),ke=n(pt,"A",{href:!0});var so=a(ke);yr=l(so,"PreTrainedModel"),so.forEach(r),Br=l(pt,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),pt.forEach(r),Tr=c($),le=n($,"P",{});var ut=a(le);Rr=l(ut,"This model is also a PyTorch "),de=n(ut,"A",{href:!0,rel:!0});var lo=a(de);wr=l(lo,"torch.nn.Module"),lo.forEach(r),zr=l(ut,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),ut.forEach(r),$r=c($),ye=n($,"DIV",{class:!0});var co=a(ye);k(ce.$$.fragment,co),co.forEach(r),$.forEach(r),this.h()},h(){i(w,"name","hf:doc:metadata"),i(w,"content",JSON.stringify(go)),i(g,"id","retribert"),i(g,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),i(g,"href","#retribert"),i(_,"class","relative group"),i(q,"id","overview"),i(q,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),i(q,"href","#overview"),i(E,"class","relative group"),i(K,"href","https://yjernite.github.io/lfqa.html"),i(K,"rel","nofollow"),i(W,"href","https://huggingface.co/yjernite"),i(W,"rel","nofollow"),i(Q,"href","https://github.com/huggingface/transformers/tree/master/examples/research-projects/distillation"),i(Q,"rel","nofollow"),i(N,"id","transformers.RetriBertConfig"),i(N,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),i(N,"href","#transformers.RetriBertConfig"),i(x,"class","relative group"),i(fe,"href","/docs/transformers/pr_16143/en/model_doc/retribert#transformers.RetriBertModel"),i(he,"href","/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig"),i(pe,"href","/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig"),i(v,"class","docstring"),i(S,"id","transformers.RetriBertTokenizer"),i(S,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),i(S,"href","#transformers.RetriBertTokenizer"),i(C,"class","relative group"),i(ue,"href","/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertTokenizer"),i(_e,"href","/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertTokenizer"),i(p,"class","docstring"),i(I,"id","transformers.RetriBertTokenizerFast"),i(I,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),i(I,"href","#transformers.RetriBertTokenizerFast"),i(M,"class","relative 
group"),i(ge,"href","/docs/transformers/pr_16143/en/model_doc/retribert#transformers.RetriBertTokenizerFast"),i(ve,"href","/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertTokenizerFast"),i(be,"href","/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertTokenizerFast"),i(u,"class","docstring"),i(L,"id","transformers.RetriBertModel"),i(L,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),i(L,"href","#transformers.RetriBertModel"),i(A,"class","relative group"),i(ke,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel"),i(de,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),i(de,"rel","nofollow"),i(ye,"class","docstring"),i(h,"class","docstring")},m(t,m){e(document.head,w),f(t,me,m),f(t,_,m),e(_,g),e(g,ze),y(V,ze,null),e(_,gt),e(_,$e),e($e,vt),f(t,Ue,m),f(t,E,m),e(E,q),e(q,Ee),y(U,Ee,null),e(E,bt),e(E,xe),e(xe,kt),f(t,Ke,m),f(t,F,m),e(F,yt),e(F,K),e(K,Bt),e(F,Tt),f(t,We,m),f(t,z,m),e(z,Rt),e(z,W),e(W,wt),e(z,zt),e(z,Q),e(Q,$t),e(z,Et),f(t,Qe,m),f(t,x,m),e(x,N),e(N,Pe),y(G,Pe,null),e(x,xt),e(x,Ce),e(Ce,Pt),f(t,Ge,m),f(t,v,m),y(J,v,null),e(v,Ct),e(v,X),e(X,Mt),e(X,fe),e(fe,At),e(X,qt),e(v,Ft),e(v,P),e(P,Nt),e(P,he),e(he,St),e(P,Dt),e(P,pe),e(pe,It),e(P,jt),f(t,Je,m),f(t,C,m),e(C,S),e(S,Me),y(Y,Me,null),e(C,Lt),e(C,Ae),e(Ae,Ht),f(t,Xe,m),f(t,p,m),y(Z,p,null),e(p,Ot),e(p,qe),e(qe,Vt),e(p,Ut),e(p,D),e(D,Fe),e(Fe,Kt),e(D,Wt),e(D,ue),e(ue,Qt),e(D,Gt),e(p,Jt),e(p,ee),e(ee,Xt),e(ee,_e),e(_e,Yt),e(ee,Zt),f(t,Ye,m),f(t,M,m),e(M,I),e(I,Ne),y(te,Ne,null),e(M,er),e(M,Se),e(Se,tr),f(t,Ze,m),f(t,u,m),y(re,u,null),e(u,rr),e(u,oe),e(oe,or),e(oe,De),e(De,nr),e(oe,ar),e(u,ir),e(u,j),e(j,ge),e(ge,sr),e(j,lr),e(j,ve),e(ve,dr),e(j,cr),e(u,mr),e(u,ne),e(ne,fr),e(ne,be),e(be,hr),e(ne,pr),f(t,et,m),f(t,A,m),e(A,L),e(L,Ie),y(ae,Ie,null),e(A,ur),e(A,je),e(je,_r),f(t,tt,m),f(t,h,m),y(ie,h,null),e(h,gr),e(h,Le),e(Le,vr),e(h,br),e(h,se),e(se,kr),e(se,ke),e(ke,yr),e(se,Br),e(h,Tr),e(h,le),e(le,Rr),e(le,de),e(de,wr),e(le,zr),e(h,$r),e(h,ye),y(ce,ye,null),rt=!0},p:uo,i(t){rt||(B(V.$$.fragment,t),B(U.$$.fragment,t),B(G.$$.fragment,t),B(J.$$.fragment,t),B(Y.$$.fragment,t),B(Z.$$.fragment,t),B(te.$$.fragment,t),B(re.$$.fragment,t),B(ae.$$.fragment,t),B(ie.$$.fragment,t),B(ce.$$.fragment,t),rt=!0)},o(t){T(V.$$.fragment,t),T(U.$$.fragment,t),T(G.$$.fragment,t),T(J.$$.fragment,t),T(Y.$$.fragment,t),T(Z.$$.fragment,t),T(te.$$.fragment,t),T(re.$$.fragment,t),T(ae.$$.fragment,t),T(ie.$$.fragment,t),T(ce.$$.fragment,t),rt=!1},d(t){r(w),t&&r(me),t&&r(_),R(V),t&&r(Ue),t&&r(E),R(U),t&&r(Ke),t&&r(F),t&&r(We),t&&r(z),t&&r(Qe),t&&r(x),R(G),t&&r(Ge),t&&r(v),R(J),t&&r(Je),t&&r(C),R(Y),t&&r(Xe),t&&r(p),R(Z),t&&r(Ye),t&&r(M),R(te),t&&r(Ze),t&&r(u),R(re),t&&r(et),t&&r(A),R(ae),t&&r(tt),t&&r(h),R(ie),R(ce)}}}const go={local:"retribert",sections:[{local:"overview",title:"Overview"},{local:"transformers.RetriBertConfig",title:"RetriBertConfig"},{local:"transformers.RetriBertTokenizer",title:"RetriBertTokenizer"},{local:"transformers.RetriBertTokenizerFast",title:"RetriBertTokenizerFast"},{local:"transformers.RetriBertModel",title:"RetriBertModel"}],title:"RetriBERT"};function vo(_t,w,me){let{fw:_}=w;return _t.$$set=g=>{"fw"in g&&me(0,_=g.fw)},[_]}class Bo extends mo{constructor(w){super();fo(this,w,vo,_o,ho,{fw:0})}}export{Bo as default,go as metadata};
286
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages/model_doc/blenderbot.mdx-2f4c855d.js
import{S as Bf,i as zf,s as xf,e as n,k as i,w as _,t as a,M as Ff,c as r,d as t,m as l,a as s,x as g,h as d,b as c,F as e,g as p,y as b,q as k,o as v,B as y}from"../../chunks/vendor-4833417e.js";import{T as ho}from"../../chunks/Tip-fffd6df1.js";import{D as P}from"../../chunks/Docstring-4f315ed9.js";import{C as Ke}from"../../chunks/CodeBlock-6a3d1b46.js";import{I as pe}from"../../chunks/IconCopyLink-4b81c553.js";import"../../chunks/CopyButton-dacfbfaf.js";function $f(D){let u,x,f,T,z;return{c(){u=n("p"),x=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=n("code"),T=a("Module"),z=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(B){u=r(B,"P",{});var w=s(u);x=d(w,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=r(w,"CODE",{});var F=s(f);T=d(F,"Module"),F.forEach(t),z=d(w,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),w.forEach(t)},m(B,w){p(B,u,w),e(u,x),e(u,f),e(f,T),e(u,z)},d(B){B&&t(u)}}}function qf(D){let u,x,f,T,z;return{c(){u=n("p"),x=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=n("code"),T=a("Module"),z=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(B){u=r(B,"P",{});var w=s(u);x=d(w,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=r(w,"CODE",{});var F=s(f);T=d(F,"Module"),F.forEach(t),z=d(w,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),w.forEach(t)},m(B,w){p(B,u,w),e(u,x),e(u,f),e(f,T),e(u,z)},d(B){B&&t(u)}}}function Mf(D){let u,x,f,T,z,B,w,F,Ee,he,q,ve,te,Ce,oe,ne,je,Fe,G,N,ye,U,M,C,Pe,J,Q,Ne,W,Ae,Oe,A,ue,re,$e,me,H,Ie,qe,E,Se,R,Le;return{c(){u=n("p"),x=a("TF 2.0 models accepts two formats as inputs:"),f=i(),T=n("ul"),z=n("li"),B=a("having all inputs as keyword arguments (like PyTorch models), or"),w=i(),F=n("li"),Ee=a("having all inputs as a list, tuple or dict in the first positional arguments."),he=i(),q=n("p"),ve=a("This second option is useful when using "),te=n("code"),Ce=a("tf.keras.Model.fit"),oe=a(` method which currently requires having all the tensors in the first argument of the model call function: `),ne=n("code"),je=a("model(inputs)"),Fe=a("."),G=i(),N=n("p"),ye=a(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),U=i(),M=n("ul"),C=n("li"),Pe=a("a single Tensor with "),J=n("code"),Q=a("input_ids"),Ne=a(" only and nothing else: "),W=n("code"),Ae=a("model(input_ids)"),Oe=i(),A=n("li"),ue=a(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),re=n("code"),$e=a("model([input_ids, attention_mask])"),me=a(" or "),H=n("code"),Ie=a("model([input_ids, attention_mask, token_type_ids])"),qe=i(),E=n("li"),Se=a(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),R=n("code"),Le=a('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(m){u=r(m,"P",{});var $=s(u);x=d($,"TF 2.0 models accepts two formats as 
inputs:"),$.forEach(t),f=l(m),T=r(m,"UL",{});var Te=s(T);z=r(Te,"LI",{});var ht=s(z);B=d(ht,"having all inputs as keyword arguments (like PyTorch models), or"),ht.forEach(t),w=l(Te),F=r(Te,"LI",{});var Ve=s(F);Ee=d(Ve,"having all inputs as a list, tuple or dict in the first positional arguments."),Ve.forEach(t),Te.forEach(t),he=l(m),q=r(m,"P",{});var j=s(q);ve=d(j,"This second option is useful when using "),te=r(j,"CODE",{});var fe=s(te);Ce=d(fe,"tf.keras.Model.fit"),fe.forEach(t),oe=d(j,` method which currently requires having all the tensors in the first argument of the model call function: `),ne=r(j,"CODE",{});var De=s(ne);je=d(De,"model(inputs)"),De.forEach(t),Fe=d(j,"."),j.forEach(t),G=l(m),N=r(m,"P",{});var we=s(N);ye=d(we,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),we.forEach(t),U=l(m),M=r(m,"UL",{});var K=s(M);C=r(K,"LI",{});var V=s(C);Pe=d(V,"a single Tensor with "),J=r(V,"CODE",{});var ut=s(J);Q=d(ut,"input_ids"),ut.forEach(t),Ne=d(V," only and nothing else: "),W=r(V,"CODE",{});var Je=s(W);Ae=d(Je,"model(input_ids)"),Je.forEach(t),V.forEach(t),Oe=l(K),A=r(K,"LI",{});var O=s(A);ue=d(O,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),re=r(O,"CODE",{});var Be=s(re);$e=d(Be,"model([input_ids, attention_mask])"),Be.forEach(t),me=d(O," or "),H=r(O,"CODE",{});var mt=s(H);Ie=d(mt,"model([input_ids, attention_mask, token_type_ids])"),mt.forEach(t),O.forEach(t),qe=l(K),E=r(K,"LI",{});var se=s(E);Se=d(se,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),R=r(se,"CODE",{});var ft=s(R);Le=d(ft,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),ft.forEach(t),se.forEach(t),K.forEach(t)},m(m,$){p(m,u,$),e(u,x),p(m,f,$),p(m,T,$),e(T,z),e(z,B),e(T,w),e(T,F),e(F,Ee),p(m,he,$),p(m,q,$),e(q,ve),e(q,te),e(te,Ce),e(q,oe),e(q,ne),e(ne,je),e(q,Fe),p(m,G,$),p(m,N,$),e(N,ye),p(m,U,$),p(m,M,$),e(M,C),e(C,Pe),e(C,J),e(J,Q),e(C,Ne),e(C,W),e(W,Ae),e(M,Oe),e(M,A),e(A,ue),e(A,re),e(re,$e),e(A,me),e(A,H),e(H,Ie),e(M,qe),e(M,E),e(E,Se),e(E,R),e(R,Le)},d(m){m&&t(u),m&&t(f),m&&t(T),m&&t(he),m&&t(q),m&&t(G),m&&t(N),m&&t(U),m&&t(M)}}}function Ef(D){let u,x,f,T,z;return{c(){u=n("p"),x=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=n("code"),T=a("Module"),z=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(B){u=r(B,"P",{});var w=s(u);x=d(w,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=r(w,"CODE",{});var F=s(f);T=d(F,"Module"),F.forEach(t),z=d(w,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),w.forEach(t)},m(B,w){p(B,u,w),e(u,x),e(u,f),e(f,T),e(u,z)},d(B){B&&t(u)}}}function Cf(D){let u,x,f,T,z,B,w,F,Ee,he,q,ve,te,Ce,oe,ne,je,Fe,G,N,ye,U,M,C,Pe,J,Q,Ne,W,Ae,Oe,A,ue,re,$e,me,H,Ie,qe,E,Se,R,Le;return{c(){u=n("p"),x=a("TF 2.0 models accepts two formats as inputs:"),f=i(),T=n("ul"),z=n("li"),B=a("having all inputs as keyword arguments (like PyTorch models), or"),w=i(),F=n("li"),Ee=a("having all inputs as a list, tuple or dict in the first positional arguments."),he=i(),q=n("p"),ve=a("This second option is useful when using "),te=n("code"),Ce=a("tf.keras.Model.fit"),oe=a(` method 
which currently requires having all the tensors in the first argument of the model call function: `),ne=n("code"),je=a("model(inputs)"),Fe=a("."),G=i(),N=n("p"),ye=a(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),U=i(),M=n("ul"),C=n("li"),Pe=a("a single Tensor with "),J=n("code"),Q=a("input_ids"),Ne=a(" only and nothing else: "),W=n("code"),Ae=a("model(input_ids)"),Oe=i(),A=n("li"),ue=a(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),re=n("code"),$e=a("model([input_ids, attention_mask])"),me=a(" or "),H=n("code"),Ie=a("model([input_ids, attention_mask, token_type_ids])"),qe=i(),E=n("li"),Se=a(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),R=n("code"),Le=a('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(m){u=r(m,"P",{});var $=s(u);x=d($,"TF 2.0 models accepts two formats as inputs:"),$.forEach(t),f=l(m),T=r(m,"UL",{});var Te=s(T);z=r(Te,"LI",{});var ht=s(z);B=d(ht,"having all inputs as keyword arguments (like PyTorch models), or"),ht.forEach(t),w=l(Te),F=r(Te,"LI",{});var Ve=s(F);Ee=d(Ve,"having all inputs as a list, tuple or dict in the first positional arguments."),Ve.forEach(t),Te.forEach(t),he=l(m),q=r(m,"P",{});var j=s(q);ve=d(j,"This second option is useful when using "),te=r(j,"CODE",{});var fe=s(te);Ce=d(fe,"tf.keras.Model.fit"),fe.forEach(t),oe=d(j,` method which currently requires having all the tensors in the first argument of the model call function: `),ne=r(j,"CODE",{});var De=s(ne);je=d(De,"model(inputs)"),De.forEach(t),Fe=d(j,"."),j.forEach(t),G=l(m),N=r(m,"P",{});var we=s(N);ye=d(we,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),we.forEach(t),U=l(m),M=r(m,"UL",{});var K=s(M);C=r(K,"LI",{});var V=s(C);Pe=d(V,"a single Tensor with "),J=r(V,"CODE",{});var ut=s(J);Q=d(ut,"input_ids"),ut.forEach(t),Ne=d(V," only and nothing else: "),W=r(V,"CODE",{});var Je=s(W);Ae=d(Je,"model(input_ids)"),Je.forEach(t),V.forEach(t),Oe=l(K),A=r(K,"LI",{});var O=s(A);ue=d(O,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),re=r(O,"CODE",{});var Be=s(re);$e=d(Be,"model([input_ids, attention_mask])"),Be.forEach(t),me=d(O," or "),H=r(O,"CODE",{});var mt=s(H);Ie=d(mt,"model([input_ids, attention_mask, token_type_ids])"),mt.forEach(t),O.forEach(t),qe=l(K),E=r(K,"LI",{});var se=s(E);Se=d(se,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),R=r(se,"CODE",{});var ft=s(R);Le=d(ft,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),ft.forEach(t),se.forEach(t),K.forEach(t)},m(m,$){p(m,u,$),e(u,x),p(m,f,$),p(m,T,$),e(T,z),e(z,B),e(T,w),e(T,F),e(F,Ee),p(m,he,$),p(m,q,$),e(q,ve),e(q,te),e(te,Ce),e(q,oe),e(q,ne),e(ne,je),e(q,Fe),p(m,G,$),p(m,N,$),e(N,ye),p(m,U,$),p(m,M,$),e(M,C),e(C,Pe),e(C,J),e(J,Q),e(C,Ne),e(C,W),e(W,Ae),e(M,Oe),e(M,A),e(A,ue),e(A,re),e(re,$e),e(A,me),e(A,H),e(H,Ie),e(M,qe),e(M,E),e(E,Se),e(E,R),e(R,Le)},d(m){m&&t(u),m&&t(f),m&&t(T),m&&t(he),m&&t(q),m&&t(G),m&&t(N),m&&t(U),m&&t(M)}}}function jf(D){let u,x,f,T,z;return{c(){u=n("p"),x=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=n("code"),T=a("Module"),z=a(` instance afterwards instead of this since the former takes care of running the pre 
and post processing steps while the latter silently ignores them.`)},l(B){u=r(B,"P",{});var w=s(u);x=d(w,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=r(w,"CODE",{});var F=s(f);T=d(F,"Module"),F.forEach(t),z=d(w,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),w.forEach(t)},m(B,w){p(B,u,w),e(u,x),e(u,f),e(f,T),e(u,z)},d(B){B&&t(u)}}}function Pf(D){let u,x,f,T,z;return{c(){u=n("p"),x=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=n("code"),T=a("Module"),z=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(B){u=r(B,"P",{});var w=s(u);x=d(w,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=r(w,"CODE",{});var F=s(f);T=d(F,"Module"),F.forEach(t),z=d(w,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),w.forEach(t)},m(B,w){p(B,u,w),e(u,x),e(u,f),e(f,T),e(u,z)},d(B){B&&t(u)}}}function Nf(D){let u,x,f,T,z;return{c(){u=n("p"),x=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=n("code"),T=a("Module"),z=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(B){u=r(B,"P",{});var w=s(u);x=d(w,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=r(w,"CODE",{});var F=s(f);T=d(F,"Module"),F.forEach(t),z=d(w,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),w.forEach(t)},m(B,w){p(B,u,w),e(u,x),e(u,f),e(f,T),e(u,z)},d(B){B&&t(u)}}}function Af(D){let 
u,x,f,T,z,B,w,F,Ee,he,q,ve,te,Ce,oe,ne,je,Fe,G,N,ye,U,M,C,Pe,J,Q,Ne,W,Ae,Oe,A,ue,re,$e,me,H,Ie,qe,E,Se,R,Le,m,$,Te,ht,Ve,j,fe,De,we,K,V,ut,Je,O,Be,mt,se,ft,$d,qd,uo,Md,mo,Ed,Cd,jd,Ge,Pd,qr,Nd,Ad,Mr,Od,Id,Rn,Sd,Ld,Ta,_t,At,Er,fo,Dd,Cr,Gd,wa,Un,Wd,Ba,_o,za,gt,Ot,jr,go,Rd,Pr,Ud,xa,ae,bo,Hd,bt,Kd,Hn,Vd,Jd,ko,Qd,Xd,Yd,kt,Zd,Kn,ei,ti,Vn,oi,ni,ri,Nr,si,ai,vo,Fa,vt,It,Ar,yo,di,Or,ii,$a,de,To,li,Ir,ci,pi,St,Sr,hi,ui,Jn,mi,fi,_i,wo,gi,Qn,bi,ki,vi,Qe,Bo,yi,Lr,Ti,wi,Dr,Xn,Bi,Gr,zi,qa,yt,Lt,Wr,zo,xi,Rr,Fi,Ma,ie,xo,$i,Fo,qi,Ur,Mi,Ei,Ci,Dt,Hr,ji,Pi,Yn,Ni,Ai,Oi,$o,Ii,Zn,Si,Li,Di,Xe,qo,Gi,Kr,Wi,Ri,Vr,er,Ui,Jr,Hi,Ea,Tt,Gt,Qr,Mo,Ki,Xr,Vi,Ca,We,Ji,Yr,Qi,Xi,Zr,Yi,Zi,es,el,ja,ze,Eo,tl,Co,ol,tr,nl,rl,sl,jo,al,Po,dl,il,ll,_e,No,cl,wt,pl,or,hl,ul,ts,ml,fl,_l,Wt,gl,os,bl,kl,Ao,Pa,Bt,Rt,ns,Oo,vl,rs,yl,Na,Re,Tl,nr,wl,Bl,ss,zl,xl,as,Fl,Aa,xe,Io,$l,So,ql,rr,Ml,El,Cl,Lo,jl,Do,Pl,Nl,Al,ge,Go,Ol,zt,Il,sr,Sl,Ll,ds,Dl,Gl,Wl,Ut,Rl,is,Ul,Hl,Wo,Oa,xt,Ht,ls,Ro,Kl,cs,Vl,Ia,Ft,Uo,Jl,Ye,Ho,Ql,ps,Xl,Yl,Ko,Sa,$t,Kt,hs,Vo,Zl,us,ec,La,le,Jo,tc,Qo,oc,ar,nc,rc,sc,Xo,ac,Yo,dc,ic,lc,Vt,cc,be,Zo,pc,qt,hc,dr,uc,mc,ms,fc,_c,gc,Jt,bc,fs,kc,vc,en,Da,Mt,Qt,_s,tn,yc,gs,Tc,Ga,ce,on,wc,nn,Bc,ir,zc,xc,Fc,rn,$c,sn,qc,Mc,Ec,Xt,Cc,X,an,jc,Et,Pc,lr,Nc,Ac,bs,Oc,Ic,Sc,Yt,Lc,ks,Dc,Gc,vs,ys,Ts,ws,Wc,Rc,Bs,zs,xs,dn,Uc,Fs,Hc,Kc,Wa,Ct,Zt,$s,ln,Vc,qs,Jc,Ra,I,cn,Qc,pn,Xc,cr,Yc,Zc,ep,hn,tp,un,op,np,rp,Ms,sp,ap,Ue,Es,mn,dp,ip,Cs,fn,lp,cp,js,_n,pp,hp,Ps,gn,up,mp,ke,bn,fp,jt,_p,Ns,gp,bp,As,kp,vp,yp,eo,Tp,Os,wp,Bp,kn,zp,Ze,vn,xp,Is,Fp,$p,yn,qp,et,Tn,Mp,Ss,Ep,Cp,wn,Ua,Pt,to,Ls,Bn,jp,Ds,Pp,Ha,S,zn,Np,xn,Ap,pr,Op,Ip,Sp,Fn,Lp,$n,Dp,Gp,Wp,Gs,Rp,Up,He,Ws,qn,Hp,Kp,Rs,Mn,Vp,Jp,Us,En,Qp,Xp,Hs,Cn,Yp,Zp,L,jn,eh,Nt,th,Ks,oh,nh,Vs,rh,sh,ah,oo,dh,Js,ih,lh,Qs,Xs,Ys,Zs,ch,ph,ea,ta,oa,na,hh,uh,ra,sa,aa,da,mh,fh,ia,la,Pn,no,ro,ca,Nn,_h,pa,gh,bh,ha,kh,vh,tt,An,yh,ua,Th,wh,On,Bh,ot,In,zh,ma,xh,Fh,Sn,Ka;return B=new pe({}),U=new pe({}),we=new pe({}),fo=new pe({}),_o=new Ke({props:{code:`from transformers import BlenderbotTokenizer, BlenderbotForConditionalGeneration mname = "facebook/blenderbot-400M-distill" model = BlenderbotForConditionalGeneration.from_pretrained(mname) tokenizer = BlenderbotTokenizer.from_pretrained(mname) UTTERANCE = "My friends are cool but they eat too many carbs." inputs = tokenizer([UTTERANCE], return_tensors="pt") reply_ids = model.generate(**inputs) print(tokenizer.batch_decode(reply_ids))`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BlenderbotTokenizer, BlenderbotForConditionalGeneration <span class="hljs-meta">&gt;&gt;&gt; </span>mname = <span class="hljs-string">&quot;facebook/blenderbot-400M-distill&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = BlenderbotForConditionalGeneration.from_pretrained(mname) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BlenderbotTokenizer.from_pretrained(mname) <span class="hljs-meta">&gt;&gt;&gt; </span>UTTERANCE = <span class="hljs-string">&quot;My friends are cool but they eat too many carbs.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer([UTTERANCE], return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>reply_ids = model.generate(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(tokenizer.batch_decode(reply_ids)) [<span class="hljs-string">&quot;&lt;s&gt; That&#x27;s unfortunate. 
Are they trying to lose weight or are they just trying to be healthier?&lt;/s&gt;&quot;</span>]`}}),go=new pe({}),bo=new P({props:{name:"class transformers.BlenderbotConfig",anchor:"transformers.BlenderbotConfig",parameters:[{name:"vocab_size",val:" = 8008"},{name:"max_position_embeddings",val:" = 128"},{name:"encoder_layers",val:" = 2"},{name:"encoder_ffn_dim",val:" = 10240"},{name:"encoder_attention_heads",val:" = 32"},{name:"decoder_layers",val:" = 24"},{name:"decoder_ffn_dim",val:" = 10240"},{name:"decoder_attention_heads",val:" = 32"},{name:"encoder_layerdrop",val:" = 0.0"},{name:"decoder_layerdrop",val:" = 0.0"},{name:"use_cache",val:" = True"},{name:"is_encoder_decoder",val:" = True"},{name:"activation_function",val:" = 'gelu'"},{name:"d_model",val:" = 2560"},{name:"dropout",val:" = 0.1"},{name:"attention_dropout",val:" = 0.0"},{name:"activation_dropout",val:" = 0.0"},{name:"init_std",val:" = 0.02"},{name:"decoder_start_token_id",val:" = 1"},{name:"classifier_dropout",val:" = 0.0"},{name:"scale_embedding",val:" = False"},{name:"pad_token_id",val:" = 0"},{name:"bos_token_id",val:" = 1"},{name:"eos_token_id",val:" = 2"},{name:"encoder_no_repeat_ngram_size",val:" = 3"},{name:"forced_eos_token_id",val:" = 2"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/blenderbot/configuration_blenderbot.py#L29",parametersDescription:[{anchor:"transformers.BlenderbotConfig.vocab_size",description:`<strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 50265) &#x2014; Vocabulary size of the Blenderbot model. Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed when calling <a href="/docs/transformers/pr_16143/en/model_doc/blenderbot#transformers.BlenderbotModel">BlenderbotModel</a> or <a href="/docs/transformers/pr_16143/en/model_doc/blenderbot#transformers.TFBlenderbotModel">TFBlenderbotModel</a>.`,name:"vocab_size"},{anchor:"transformers.BlenderbotConfig.d_model",description:`<strong>d_model</strong> (<code>int</code>, <em>optional</em>, defaults to 1024) &#x2014; Dimensionality of the layers and the pooler layer.`,name:"d_model"},{anchor:"transformers.BlenderbotConfig.encoder_layers",description:`<strong>encoder_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of encoder layers.`,name:"encoder_layers"},{anchor:"transformers.BlenderbotConfig.decoder_layers",description:`<strong>decoder_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of decoder layers.`,name:"decoder_layers"},{anchor:"transformers.BlenderbotConfig.encoder_attention_heads",description:`<strong>encoder_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 16) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.`,name:"encoder_attention_heads"},{anchor:"transformers.BlenderbotConfig.decoder_attention_heads",description:`<strong>decoder_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 16) &#x2014; Number of attention heads for each attention layer in the Transformer decoder.`,name:"decoder_attention_heads"},{anchor:"transformers.BlenderbotConfig.decoder_ffn_dim",description:`<strong>decoder_ffn_dim</strong> (<code>int</code>, <em>optional</em>, defaults to 4096) &#x2014; Dimensionality of the &#x201C;intermediate&#x201D; (often named feed-forward) layer in 
decoder.`,name:"decoder_ffn_dim"},{anchor:"transformers.BlenderbotConfig.encoder_ffn_dim",description:`<strong>encoder_ffn_dim</strong> (<code>int</code>, <em>optional</em>, defaults to 4096) &#x2014; Dimensionality of the &#x201C;intermediate&#x201D; (often named feed-forward) layer in decoder.`,name:"encoder_ffn_dim"},{anchor:"transformers.BlenderbotConfig.activation_function",description:`<strong>activation_function</strong> (<code>str</code> or <code>function</code>, <em>optional</em>, defaults to <code>&quot;gelu&quot;</code>) &#x2014; The non-linear activation function (function or string) in the encoder and pooler. If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;silu&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.`,name:"activation_function"},{anchor:"transformers.BlenderbotConfig.dropout",description:`<strong>dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.`,name:"dropout"},{anchor:"transformers.BlenderbotConfig.attention_dropout",description:`<strong>attention_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout ratio for the attention probabilities.`,name:"attention_dropout"},{anchor:"transformers.BlenderbotConfig.activation_dropout",description:`<strong>activation_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout ratio for activations inside the fully connected layer.`,name:"activation_dropout"},{anchor:"transformers.BlenderbotConfig.classifier_dropout",description:`<strong>classifier_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout ratio for classifier.`,name:"classifier_dropout"},{anchor:"transformers.BlenderbotConfig.max_position_embeddings",description:`<strong>max_position_embeddings</strong> (<code>int</code>, <em>optional</em>, defaults to 128) &#x2014; The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048).`,name:"max_position_embeddings"},{anchor:"transformers.BlenderbotConfig.init_std",description:`<strong>init_std</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices. encoder_layerdrop &#x2014; (<code>float</code>, <em>optional</em>, defaults to 0.0): The LayerDrop probability for the encoder. See the [LayerDrop paper](see <a href="https://arxiv.org/abs/1909.11556" rel="nofollow">https://arxiv.org/abs/1909.11556</a>) for more details. decoder_layerdrop &#x2014; (<code>float</code>, <em>optional</em>, defaults to 0.0): The LayerDrop probability for the decoder. 
See the [LayerDrop paper](see <a href="https://arxiv.org/abs/1909.11556" rel="nofollow">https://arxiv.org/abs/1909.11556</a>) for more details.`,name:"init_std"},{anchor:"transformers.BlenderbotConfig.scale_embedding",description:`<strong>scale_embedding</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Scale embeddings by diving by sqrt(d_model).`,name:"scale_embedding"},{anchor:"transformers.BlenderbotConfig.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not the model should return the last key/values attentions (not used by all models)`,name:"use_cache"},{anchor:"transformers.BlenderbotConfig.forced_eos_token_id",description:`<strong>forced_eos_token_id</strong> (<code>int</code>, <em>optional</em>, defaults to 2) &#x2014; The id of the token to force as the last generated token when <code>max_length</code> is reached. Usually set to <code>eos_token_id</code>.`,name:"forced_eos_token_id"}]}}),vo=new Ke({props:{code:`from transformers import BlenderbotModel, BlenderbotConfig # Initializing a Blenderbot facebook/blenderbot-3B style configuration configuration = BlenderbotConfig() # Initializing a model from the facebook/blenderbot-3B style configuration model = BlenderbotModel(configuration) # Accessing the model configuration configuration = model.config`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BlenderbotModel, BlenderbotConfig <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a Blenderbot facebook/blenderbot-3B style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = BlenderbotConfig() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a model from the facebook/blenderbot-3B style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = BlenderbotModel(configuration) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = model.config`}}),yo=new pe({}),To=new P({props:{name:"class transformers.BlenderbotTokenizer",anchor:"transformers.BlenderbotTokenizer",parameters:[{name:"vocab_file",val:""},{name:"merges_file",val:""},{name:"errors",val:" = 'replace'"},{name:"bos_token",val:" = '<s>'"},{name:"eos_token",val:" = '</s>'"},{name:"sep_token",val:" = '</s>'"},{name:"cls_token",val:" = '<s>'"},{name:"unk_token",val:" = '<unk>'"},{name:"pad_token",val:" = '<pad>'"},{name:"mask_token",val:" = '<mask>'"},{name:"add_prefix_space",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/blenderbot/tokenization_blenderbot.py#L46"}}),Bo=new P({props:{name:"build_inputs_with_special_tokens",anchor:"transformers.BlenderbotTokenizer.build_inputs_with_special_tokens",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/blenderbot/tokenization_blenderbot.py#L59",parametersDescription:[{anchor:"transformers.BlenderbotTokenizer.build_inputs_with_special_tokens.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs to which the special tokens will be 
added`,name:"token_ids_0"},{anchor:"transformers.BlenderbotTokenizer.build_inputs_with_special_tokens.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Will be ignored`,name:"token_ids_1"}],returnDescription:` <p>list of <a href="../glossary#input-ids">input IDs</a> with the appropriate special tokens.</p> `,returnType:` <p><code>List[int]</code></p> `}}),zo=new pe({}),xo=new P({props:{name:"class transformers.BlenderbotTokenizerFast",anchor:"transformers.BlenderbotTokenizerFast",parameters:[{name:"vocab_file",val:" = None"},{name:"merges_file",val:" = None"},{name:"tokenizer_file",val:" = None"},{name:"errors",val:" = 'replace'"},{name:"bos_token",val:" = '<s>'"},{name:"eos_token",val:" = '</s>'"},{name:"sep_token",val:" = '</s>'"},{name:"cls_token",val:" = '<s>'"},{name:"unk_token",val:" = '<unk>'"},{name:"pad_token",val:" = '<pad>'"},{name:"mask_token",val:" = '<mask>'"},{name:"add_prefix_space",val:" = False"},{name:"trim_offsets",val:" = True"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/blenderbot/tokenization_blenderbot_fast.py#L47"}}),qo=new P({props:{name:"build_inputs_with_special_tokens",anchor:"transformers.BlenderbotTokenizerFast.build_inputs_with_special_tokens",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/blenderbot/tokenization_blenderbot_fast.py#L61",parametersDescription:[{anchor:"transformers.BlenderbotTokenizerFast.build_inputs_with_special_tokens.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs to which the special tokens will be added`,name:"token_ids_0"},{anchor:"transformers.BlenderbotTokenizerFast.build_inputs_with_special_tokens.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Will be ignored`,name:"token_ids_1"}],returnDescription:` <p>list of <a href="../glossary#input-ids">input IDs</a> with the appropriate special tokens.</p> `,returnType:` <p><code>List[int]</code></p> `}}),Mo=new pe({}),Eo=new P({props:{name:"class transformers.BlenderbotModel",anchor:"transformers.BlenderbotModel",parameters:[{name:"config",val:": BlenderbotConfig"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/blenderbot/modeling_blenderbot.py#L1080",parametersDescription:[{anchor:"transformers.BlenderbotModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/blenderbot#transformers.BlenderbotConfig">BlenderbotConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),No=new P({props:{name:"forward",anchor:"transformers.BlenderbotModel.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"decoder_input_ids",val:" = None"},{name:"decoder_attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"decoder_head_mask",val:" = None"},{name:"cross_attn_head_mask",val:" = None"},{name:"encoder_outputs",val:" = None"},{name:"past_key_values",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"decoder_inputs_embeds",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/blenderbot/modeling_blenderbot.py#L1118",parametersDescription:[{anchor:"transformers.BlenderbotModel.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/blenderbot#transformers.BlenderbotTokenizer">BlenderbotTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.BlenderbotModel.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.BlenderbotModel.forward.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/blenderbot#transformers.BlenderbotTokenizer">BlenderbotTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>Blenderbot uses the <code>bos_token_id</code> as the starting token for <code>decoder_input_ids</code> generation. 
If <code>past_key_values</code> is used, optionally only the last <code>decoder_input_ids</code> have to be input (see <code>past_key_values</code>).`,name:"decoder_input_ids"},{anchor:"transformers.BlenderbotModel.forward.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.`,name:"decoder_attention_mask"},{anchor:"transformers.BlenderbotModel.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.Tensor</code> of shape <code>(encoder_layers, encoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.BlenderbotModel.forward.decoder_head_mask",description:`<strong>decoder_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"decoder_head_mask"},{anchor:"transformers.BlenderbotModel.forward.cross_attn_head_mask",description:`<strong>cross_attn_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"cross_attn_head_mask"},{anchor:"transformers.BlenderbotModel.forward.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tuple(tuple(torch.FloatTensor)</code>, <em>optional</em>) &#x2014; Tuple consists of (<code>last_hidden_state</code>, <em>optional</em>: <code>hidden_states</code>, <em>optional</em>: <code>attentions</code>) <code>last_hidden_state</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) is a sequence of hidden-states at the output of the last layer of the encoder. 
Used in the cross-attention of the decoder.`,name:"encoder_outputs"},{anchor:"transformers.BlenderbotModel.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>. inputs_embeds (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>): Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"past_key_values"},{anchor:"transformers.BlenderbotModel.forward.decoder_inputs_embeds",description:`<strong>decoder_inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, target_sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>decoder_input_ids</code> you can choose to directly pass an embedded representation. If <code>past_key_values</code> is used, optionally only the last <code>decoder_inputs_embeds</code> have to be input (see <code>past_key_values</code>). This is useful if you want more control over how to convert <code>decoder_input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.</p> <p>If <code>decoder_input_ids</code> and <code>decoder_inputs_embeds</code> are both unset, <code>decoder_inputs_embeds</code> takes the value of <code>inputs_embeds</code>.`,name:"decoder_inputs_embeds"},{anchor:"transformers.BlenderbotModel.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"},{anchor:"transformers.BlenderbotModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.BlenderbotModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.BlenderbotModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.Seq2SeqModelOutput" >transformers.modeling_outputs.Seq2SeqModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/blenderbot#transformers.BlenderbotConfig" >BlenderbotConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the decoder of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the 
decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.Seq2SeqModelOutput" >transformers.modeling_outputs.Seq2SeqModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Wt=new ho({props:{$$slots:{default:[$f]},$$scope:{ctx:D}}}),Ao=new Ke({props:{code:`from transformers import BlenderbotTokenizer, BlenderbotModel model = BlenderbotModel.from_pretrained("facebook/blenderbot-400M-distill") tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-400M-distill") inputs = tokenizer("Studies have been shown that owning a dog is good for you", return_tensors="pt") decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1 outputs = model(input_ids=inputs.input_ids, decoder_input_ids=decoder_input_ids) last_hidden_states = outputs.last_hidden_state list(last_hidden_states.shape)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BlenderbotTokenizer, BlenderbotModel <span class="hljs-meta">&gt;&gt;&gt; </span>model = BlenderbotModel.from_pretrained(<span class="hljs-string">&quot;facebook/blenderbot-400M-distill&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BlenderbotTokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/blenderbot-400M-distill&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Studies have been shown that owning a dog is good for you&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>decoder_input_ids = tokenizer(<span class="hljs-string">&quot;Studies show that&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_ids=inputs.input_ids, decoder_input_ids=decoder_input_ids) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = 
outputs.last_hidden_state <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">list</span>(last_hidden_states.shape) [<span class="hljs-number">1</span>, <span class="hljs-number">6</span>, <span class="hljs-number">1280</span>]`}}),Oo=new pe({}),Io=new P({props:{name:"class transformers.BlenderbotForConditionalGeneration",anchor:"transformers.BlenderbotForConditionalGeneration",parameters:[{name:"config",val:": BlenderbotConfig"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/blenderbot/modeling_blenderbot.py#L1216",parametersDescription:[{anchor:"transformers.BlenderbotForConditionalGeneration.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/blenderbot#transformers.BlenderbotConfig">BlenderbotConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Go=new P({props:{name:"forward",anchor:"transformers.BlenderbotForConditionalGeneration.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"decoder_input_ids",val:" = None"},{name:"decoder_attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"decoder_head_mask",val:" = None"},{name:"cross_attn_head_mask",val:" = None"},{name:"encoder_outputs",val:" = None"},{name:"past_key_values",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"decoder_inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/blenderbot/modeling_blenderbot.py#L1273",parametersDescription:[{anchor:"transformers.BlenderbotForConditionalGeneration.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/blenderbot#transformers.BlenderbotTokenizer">BlenderbotTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.BlenderbotForConditionalGeneration.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.BlenderbotForConditionalGeneration.forward.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/blenderbot#transformers.BlenderbotTokenizer">BlenderbotTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>Blenderbot uses the <code>bos_token_id</code> as the starting token for <code>decoder_input_ids</code> generation. If <code>past_key_values</code> is used, optionally only the last <code>decoder_input_ids</code> have to be input (see <code>past_key_values</code>).`,name:"decoder_input_ids"},{anchor:"transformers.BlenderbotForConditionalGeneration.forward.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.`,name:"decoder_attention_mask"},{anchor:"transformers.BlenderbotForConditionalGeneration.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.Tensor</code> of shape <code>(encoder_layers, encoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.BlenderbotForConditionalGeneration.forward.decoder_head_mask",description:`<strong>decoder_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"decoder_head_mask"},{anchor:"transformers.BlenderbotForConditionalGeneration.forward.cross_attn_head_mask",description:`<strong>cross_attn_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the cross-attention modules in the decoder. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"cross_attn_head_mask"},{anchor:"transformers.BlenderbotForConditionalGeneration.forward.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tuple(tuple(torch.FloatTensor)</code>, <em>optional</em>) &#x2014; Tuple consists of (<code>last_hidden_state</code>, <em>optional</em>: <code>hidden_states</code>, <em>optional</em>: <code>attentions</code>) <code>last_hidden_state</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.`,name:"encoder_outputs"},{anchor:"transformers.BlenderbotForConditionalGeneration.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>. inputs_embeds (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>): Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"past_key_values"},{anchor:"transformers.BlenderbotForConditionalGeneration.forward.decoder_inputs_embeds",description:`<strong>decoder_inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, target_sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>decoder_input_ids</code> you can choose to directly pass an embedded representation. If <code>past_key_values</code> is used, optionally only the last <code>decoder_inputs_embeds</code> have to be input (see <code>past_key_values</code>). 
This is useful if you want more control over how to convert <code>decoder_input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.</p> <p>If <code>decoder_input_ids</code> and <code>decoder_inputs_embeds</code> are both unset, <code>decoder_inputs_embeds</code> takes the value of <code>inputs_embeds</code>.`,name:"decoder_inputs_embeds"},{anchor:"transformers.BlenderbotForConditionalGeneration.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"},{anchor:"transformers.BlenderbotForConditionalGeneration.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.BlenderbotForConditionalGeneration.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.BlenderbotForConditionalGeneration.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.BlenderbotForConditionalGeneration.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. Indices should either be in <code>[0, ..., config.vocab_size]</code> or -100 (see <code>input_ids</code> docstring). 
Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.Seq2SeqLMOutput" >transformers.modeling_outputs.Seq2SeqLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/blenderbot#transformers.BlenderbotConfig" >BlenderbotConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Language modeling loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> 
(<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.Seq2SeqLMOutput" >transformers.modeling_outputs.Seq2SeqLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Ut=new ho({props:{$$slots:{default:[qf]},$$scope:{ctx:D}}}),Wo=new Ke({props:{code:`from transformers import BlenderbotTokenizer, BlenderbotForConditionalGeneration mname = "facebook/blenderbot-400M-distill" model = BlenderbotForConditionalGeneration.from_pretrained(mname) tokenizer = BlenderbotTokenizer.from_pretrained(mname) UTTERANCE = "My friends are cool but they eat too many carbs." print("Human: ", UTTERANCE) inputs = tokenizer([UTTERANCE], return_tensors="pt") reply_ids = model.generate(**inputs) print("Bot: ", tokenizer.batch_decode(reply_ids, skip_special_tokens=True)[0]) REPLY = "I'm not sure" print("Human: ", REPLY) NEXT_UTTERANCE = ( "My friends are cool but they eat too many carbs.</s> <s>That's unfortunate. " "Are they trying to lose weight or are they just trying to be healthier?</s> " "<s> I'm not sure." ) inputs = tokenizer([NEXT_UTTERANCE], return_tensors="pt") next_reply_ids = model.generate(**inputs) print("Bot: ", tokenizer.batch_decode(next_reply_ids, skip_special_tokens=True)[0])`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BlenderbotTokenizer, BlenderbotForConditionalGeneration <span class="hljs-meta">&gt;&gt;&gt; </span>mname = <span class="hljs-string">&quot;facebook/blenderbot-400M-distill&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = BlenderbotForConditionalGeneration.from_pretrained(mname) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BlenderbotTokenizer.from_pretrained(mname) <span class="hljs-meta">&gt;&gt;&gt; </span>UTTERANCE = <span class="hljs-string">&quot;My friends are cool but they eat too many carbs.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(<span class="hljs-string">&quot;Human: &quot;</span>, UTTERANCE) Human: My friends are cool but they eat too many carbs. 
<span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer([UTTERANCE], return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>reply_ids = model.generate(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(<span class="hljs-string">&quot;Bot: &quot;</span>, tokenizer.batch_decode(reply_ids, skip_special_tokens=<span class="hljs-literal">True</span>)[<span class="hljs-number">0</span>]) Bot: That<span class="hljs-string">&#x27;s unfortunate. Are they trying to lose weight or are they just trying to be healthier? &gt;&gt;&gt; REPLY = &quot;I&#x27;</span>m <span class="hljs-keyword">not</span> sure<span class="hljs-string">&quot; &gt;&gt;&gt; print(&quot;</span>Human: <span class="hljs-string">&quot;, REPLY) Human: I&#x27;m not sure &gt;&gt;&gt; NEXT_UTTERANCE = ( ... &quot;</span>My friends are cool but they eat too many carbs.&lt;/s&gt; &lt;s&gt;That<span class="hljs-string">&#x27;s unfortunate. &quot; ... &quot;Are they trying to lose weight or are they just trying to be healthier?&lt;/s&gt; &quot; ... &quot;&lt;s&gt; I&#x27;</span>m <span class="hljs-keyword">not</span> sure.<span class="hljs-string">&quot; ... ) &gt;&gt;&gt; inputs = tokenizer([NEXT_UTTERANCE], return_tensors=&quot;</span>pt<span class="hljs-string">&quot;) &gt;&gt;&gt; next_reply_ids = model.generate(**inputs) &gt;&gt;&gt; print(&quot;</span>Bot: <span class="hljs-string">&quot;, tokenizer.batch_decode(next_reply_ids, skip_special_tokens=True)[0]) Bot: That&#x27;s too bad. Have you tried encouraging them to change their eating habits?</span>`}}),Ro=new pe({}),Uo=new P({props:{name:"class transformers.BlenderbotForCausalLM",anchor:"transformers.BlenderbotForCausalLM",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/blenderbot/modeling_blenderbot.py#L1409"}}),Ho=new P({props:{name:"forward",anchor:"transformers.BlenderbotForCausalLM.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"encoder_hidden_states",val:" = None"},{name:"encoder_attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"cross_attn_head_mask",val:" = None"},{name:"past_key_values",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/blenderbot/modeling_blenderbot.py#L1440",parametersDescription:[{anchor:"transformers.BlenderbotForCausalLM.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/blenderbot#transformers.BlenderbotTokenizer">BlenderbotTokenizer</a>. 
See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.BlenderbotForCausalLM.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.BlenderbotForCausalLM.forward.encoder_hidden_states",description:`<strong>encoder_hidden_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder.`,name:"encoder_hidden_states"},{anchor:"transformers.BlenderbotForCausalLM.forward.encoder_attention_mask",description:`<strong>encoder_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in <code>[0, 1]</code>:`,name:"encoder_attention_mask"},{anchor:"transformers.BlenderbotForCausalLM.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.BlenderbotForCausalLM.forward.cross_attn_head_mask",description:`<strong>cross_attn_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the cross-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"cross_attn_head_mask"},{anchor:"transformers.BlenderbotForCausalLM.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>. 
The two additional tensors are only required when the model is used as a decoder in a Sequence to Sequence model.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.`,name:"past_key_values"},{anchor:"transformers.BlenderbotForCausalLM.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. Indices should either be in <code>[0, ..., config.vocab_size]</code> or -100 (see <code>input_ids</code> docstring). Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>.`,name:"labels"},{anchor:"transformers.BlenderbotForCausalLM.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul>`,name:"use_cache"},{anchor:"transformers.BlenderbotForCausalLM.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.BlenderbotForCausalLM.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.BlenderbotForCausalLM.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.CausalLMOutputWithCrossAttentions" >transformers.modeling_outputs.CausalLMOutputWithCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/blenderbot#transformers.BlenderbotConfig" >BlenderbotConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Language modeling loss (for next-token prediction).</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Cross attentions weights after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> tuples of length <code>config.n_layers</code>, with each tuple containing the cached key, value states of the self-attention and the cross-attention layers if model is used in encoder-decoder setting. 
Only relevant if <code>config.is_decoder = True</code>.</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.CausalLMOutputWithCrossAttentions" >transformers.modeling_outputs.CausalLMOutputWithCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Ko=new Ke({props:{code:`from transformers import BlenderbotTokenizer, BlenderbotForCausalLM tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-400M-distill") model = BlenderbotForCausalLM.from_pretrained( "facebook/blenderbot-400M-distill", add_cross_attention=False ) assert model.config.is_decoder, f"{model.__class__} has to be configured as a decoder." inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") outputs = model(**inputs) logits = outputs.logits expected_shape = [1, inputs.input_ids.shape[-1], model.config.vocab_size] list(logits.shape) == expected_shape`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BlenderbotTokenizer, BlenderbotForCausalLM <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BlenderbotTokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/blenderbot-400M-distill&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = BlenderbotForCausalLM.from_pretrained( <span class="hljs-meta">... </span> <span class="hljs-string">&quot;facebook/blenderbot-400M-distill&quot;</span>, add_cross_attention=<span class="hljs-literal">False</span> <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">assert</span> model.config.is_decoder, <span class="hljs-string">f&quot;<span class="hljs-subst">{model.__class__}</span> has to be configured as a decoder.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits <span class="hljs-meta">&gt;&gt;&gt; </span>expected_shape = [<span class="hljs-number">1</span>, inputs.input_ids.shape[-<span class="hljs-number">1</span>], model.config.vocab_size] <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">list</span>(logits.shape) == expected_shape <span class="hljs-literal">True</span>`}}),Vo=new pe({}),Jo=new P({props:{name:"class transformers.TFBlenderbotModel",anchor:"transformers.TFBlenderbotModel",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/blenderbot/modeling_tf_blenderbot.py#L1166",parametersDescription:[{anchor:"transformers.TFBlenderbotModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/blenderbot#transformers.BlenderbotConfig">BlenderbotConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Vt=new ho({props:{$$slots:{default:[Mf]},$$scope:{ctx:D}}}),Zo=new P({props:{name:"call",anchor:"transformers.TFBlenderbotModel.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"decoder_input_ids",val:" = None"},{name:"decoder_attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"decoder_head_mask",val:" = None"},{name:"cross_attn_head_mask",val:" = None"},{name:"encoder_outputs",val:": typing.Union[typing.Tuple, transformers.modeling_tf_outputs.TFBaseModelOutput, NoneType] = None"},{name:"past_key_values",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"decoder_inputs_embeds",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/blenderbot/modeling_tf_blenderbot.py#L1191",parametersDescription:[{anchor:"transformers.TFBlenderbotModel.call.input_ids",description:`<strong>input_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/blenderbot#transformers.BlenderbotTokenizer">BlenderbotTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFBlenderbotModel.call.attention_mask",description:`<strong>attention_mask</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFBlenderbotModel.call.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/blenderbot#transformers.BlenderbotTokenizer">BlenderbotTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>Blenderbot uses the <code>bos_token_id</code> as the starting token for <code>decoder_input_ids</code> generation. 
If <code>past_key_values</code> is used, optionally only the last <code>decoder_input_ids</code> have to be input (see <code>past_key_values</code>).`,name:"decoder_input_ids"},{anchor:"transformers.TFBlenderbotModel.call.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; will be made by default and ignore pad tokens. It is not recommended to set this for most use cases.`,name:"decoder_attention_mask"},{anchor:"transformers.TFBlenderbotModel.call.head_mask",description:`<strong>head_mask</strong> (<code>tf.Tensor</code> of shape <code>(encoder_layers, encoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFBlenderbotModel.call.decoder_head_mask",description:`<strong>decoder_head_mask</strong> (<code>tf.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"decoder_head_mask"},{anchor:"transformers.TFBlenderbotModel.call.cross_attn_head_mask",description:`<strong>cross_attn_head_mask</strong> (<code>tf.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the cross-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"cross_attn_head_mask"},{anchor:"transformers.TFBlenderbotModel.call.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tf.FloatTensor</code>, <em>optional</em>) &#x2014; hidden states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. of shape <code>(batch_size, sequence_length, hidden_size)</code> is a sequence of`,name:"encoder_outputs"},{anchor:"transformers.TFBlenderbotModel.call.past_key_values",description:`<strong>past_key_values</strong> (<code>Tuple[Tuple[tf.Tensor]]</code> of length <code>config.n_layers</code>) &#x2014; contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.`,name:"past_key_values"},{anchor:"transformers.TFBlenderbotModel.call.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>). 
Set to <code>False</code> during training, <code>True</code> during generation`,name:"use_cache"},{anchor:"transformers.TFBlenderbotModel.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFBlenderbotModel.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFBlenderbotModel.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFBlenderbotModel.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFSeq2SeqModelOutput" >transformers.modeling_tf_outputs.TFSeq2SeqModelOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/blenderbot#transformers.BlenderbotConfig" >BlenderbotConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the decoder of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.</p> </li> <li> <p><strong>past_key_values</strong> (<code>List[tf.Tensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 List of <code>tf.Tensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape 
<code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFSeq2SeqModelOutput" >transformers.modeling_tf_outputs.TFSeq2SeqModelOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),Jt=new ho({props:{$$slots:{default:[Ef]},$$scope:{ctx:D}}}),en=new Ke({props:{code:`from transformers import BlenderbotTokenizer, TFBlenderbotModel import tensorflow as tf tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-400M-distill") model = TFBlenderbotModel.from_pretrained("facebook/blenderbot-400M-distill") inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") outputs = model(inputs) last_hidden_states = outputs.last_hidden_state`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BlenderbotTokenizer, TFBlenderbotModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = 
BlenderbotTokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/blenderbot-400M-distill&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFBlenderbotModel.from_pretrained(<span class="hljs-string">&quot;facebook/blenderbot-400M-distill&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),tn=new pe({}),on=new P({props:{name:"class transformers.TFBlenderbotForConditionalGeneration",anchor:"transformers.TFBlenderbotForConditionalGeneration",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/blenderbot/modeling_tf_blenderbot.py#L1286",parametersDescription:[{anchor:"transformers.TFBlenderbotForConditionalGeneration.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/blenderbot#transformers.BlenderbotConfig">BlenderbotConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Xt=new ho({props:{$$slots:{default:[Cf]},$$scope:{ctx:D}}}),an=new P({props:{name:"call",anchor:"transformers.TFBlenderbotForConditionalGeneration.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"decoder_input_ids",val:" = None"},{name:"decoder_attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"decoder_head_mask",val:" = None"},{name:"cross_attn_head_mask",val:" = None"},{name:"encoder_outputs",val:": typing.Optional[transformers.modeling_tf_outputs.TFBaseModelOutput] = None"},{name:"past_key_values",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"decoder_inputs_embeds",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"labels",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/blenderbot/modeling_tf_blenderbot.py#L1332",parametersDescription:[{anchor:"transformers.TFBlenderbotForConditionalGeneration.call.input_ids",description:`<strong>input_ids</strong> (<code>tf.Tensor</code> of shape <code>({0})</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/blenderbot#transformers.BlenderbotTokenizer">BlenderbotTokenizer</a>. 
See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFBlenderbotForConditionalGeneration.call.attention_mask",description:`<strong>attention_mask</strong> (<code>tf.Tensor</code> of shape <code>({0})</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFBlenderbotForConditionalGeneration.call.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/blenderbot#transformers.BlenderbotTokenizer">BlenderbotTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>Blenderbot uses the <code>bos_token_id</code> as the starting token for <code>decoder_input_ids</code> generation. If <code>past_key_values</code> is used, optionally only the last <code>decoder_input_ids</code> have to be input (see <code>past_key_values</code>).`,name:"decoder_input_ids"},{anchor:"transformers.TFBlenderbotForConditionalGeneration.call.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; will be made by default and ignore pad tokens. It is not recommended to set this for most use cases.`,name:"decoder_attention_mask"},{anchor:"transformers.TFBlenderbotForConditionalGeneration.call.head_mask",description:`<strong>head_mask</strong> (<code>tf.Tensor</code> of shape <code>(encoder_layers, encoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFBlenderbotForConditionalGeneration.call.decoder_head_mask",description:`<strong>decoder_head_mask</strong> (<code>tf.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the decoder. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"decoder_head_mask"},{anchor:"transformers.TFBlenderbotForConditionalGeneration.call.cross_attn_head_mask",description:`<strong>cross_attn_head_mask</strong> (<code>tf.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the cross-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"cross_attn_head_mask"},{anchor:"transformers.TFBlenderbotForConditionalGeneration.call.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tf.FloatTensor</code>, <em>optional</em>) &#x2014; hidden states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. of shape <code>(batch_size, sequence_length, hidden_size)</code> is a sequence of`,name:"encoder_outputs"},{anchor:"transformers.TFBlenderbotForConditionalGeneration.call.past_key_values",description:`<strong>past_key_values</strong> (<code>Tuple[Tuple[tf.Tensor]]</code> of length <code>config.n_layers</code>) &#x2014; contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.`,name:"past_key_values"},{anchor:"transformers.TFBlenderbotForConditionalGeneration.call.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>). Set to <code>False</code> during training, <code>True</code> during generation`,name:"use_cache"},{anchor:"transformers.TFBlenderbotForConditionalGeneration.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFBlenderbotForConditionalGeneration.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFBlenderbotForConditionalGeneration.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. 
This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFBlenderbotForConditionalGeneration.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFBlenderbotForConditionalGeneration.call.labels",description:`<strong>labels</strong> (<code>tf.tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. Indices should either be in <code>[0, ..., config.vocab_size]</code> or -100 (see <code>input_ids</code> docstring). Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFSeq2SeqLMOutput" >transformers.modeling_tf_outputs.TFSeq2SeqLMOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/blenderbot#transformers.BlenderbotConfig" >BlenderbotConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(n,)</code>, <em>optional</em>, where n is the number of non-masked labels, returned when <code>labels</code> is provided) \u2014 Language modeling loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>List[tf.Tensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 List of <code>tf.Tensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the 
weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFSeq2SeqLMOutput" >transformers.modeling_tf_outputs.TFSeq2SeqLMOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),Yt=new ho({props:{$$slots:{default:[jf]},$$scope:{ctx:D}}}),ln=new pe({}),cn=new P({props:{name:"class transformers.FlaxBlenderbotModel",anchor:"transformers.FlaxBlenderbotModel",parameters:[{name:"config",val:": BlenderbotConfig"},{name:"input_shape",val:": typing.Tuple[int] = (1, 1)"},{name:"seed",val:": int = 0"},{name:"dtype",val:": dtype = <class 'jax._src.numpy.lax_numpy.float32'>"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/blenderbot/modeling_flax_blenderbot.py#L1193",parametersDescription:[{anchor:"transformers.FlaxBlenderbotModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/blenderbot#transformers.BlenderbotConfig">BlenderbotConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),bn=new P({props:{name:"__call__",anchor:"transformers.FlaxBlenderbotPreTrainedModel.__call__",parameters:[{name:"input_ids",val:": ndarray"},{name:"attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_input_ids",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"position_ids",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_position_ids",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"train",val:": bool = False"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/blenderbot/modeling_flax_blenderbot.py#L1128",parametersDescription:[{anchor:"transformers.FlaxBlenderbotPreTrainedModel.__call__.input_ids",description:`<strong>input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/blenderbot#transformers.BlenderbotTokenizer">BlenderbotTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FlaxBlenderbotPreTrainedModel.__call__.attention_mask",description:`<strong>attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FlaxBlenderbotPreTrainedModel.__call__.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/blenderbot#transformers.BlenderbotTokenizer">BlenderbotTokenizer</a>. 
See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>For translation and summarization training, <code>decoder_input_ids</code> should be provided. If no <code>decoder_input_ids</code> is provided, the model will create this tensor by shifting the <code>input_ids</code> to the right for denoising pre-training following the paper.`,name:"decoder_input_ids"},{anchor:"transformers.FlaxBlenderbotPreTrainedModel.__call__.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.</p> <p>If you want to change padding behavior, you should modify to your needs. See diagram 1 in <a href="https://arxiv.org/abs/1910.13461" rel="nofollow">the paper</a> for more information on the default strategy.`,name:"decoder_attention_mask"},{anchor:"transformers.FlaxBlenderbotPreTrainedModel.__call__.position_ids",description:`<strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"position_ids"},{anchor:"transformers.FlaxBlenderbotPreTrainedModel.__call__.decoder_position_ids",description:`<strong>decoder_position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"decoder_position_ids"},{anchor:"transformers.FlaxBlenderbotPreTrainedModel.__call__.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.FlaxBlenderbotPreTrainedModel.__call__.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FlaxBlenderbotPreTrainedModel.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput" >transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/blenderbot#transformers.BlenderbotConfig" >BlenderbotConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the decoder of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(jnp.ndarray))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(jnp.ndarray)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention 
layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput" >transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),eo=new ho({props:{$$slots:{default:[Pf]},$$scope:{ctx:D}}}),kn=new Ke({props:{code:`from transformers import BlenderbotTokenizer, FlaxBlenderbotModel tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-400M-distill") model = FlaxBlenderbotModel.from_pretrained("facebook/blenderbot-400M-distill") inputs = tokenizer("Hello, my dog is cute", return_tensors="jax") outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BlenderbotTokenizer, FlaxBlenderbotModel <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BlenderbotTokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/blenderbot-400M-distill&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxBlenderbotModel.from_pretrained(<span class="hljs-string">&quot;facebook/blenderbot-400M-distill&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;jax&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),vn=new P({props:{name:"encode",anchor:"transformers.FlaxBlenderbotPreTrainedModel.encode",parameters:[{name:"input_ids",val:": ndarray"},{name:"attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"position_ids",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"train",val:": bool = False"},{name:"params",val:": dict = 
None"},{name:"dropout_rng",val:": PRNGKey = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/blenderbot/modeling_flax_blenderbot.py#L949",parametersDescription:[{anchor:"transformers.FlaxBlenderbotPreTrainedModel.encode.input_ids",description:`<strong>input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/blenderbot#transformers.BlenderbotTokenizer">BlenderbotTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FlaxBlenderbotPreTrainedModel.encode.attention_mask",description:`<strong>attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FlaxBlenderbotPreTrainedModel.encode.position_ids",description:`<strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"position_ids"},{anchor:"transformers.FlaxBlenderbotPreTrainedModel.encode.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.FlaxBlenderbotPreTrainedModel.encode.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FlaxBlenderbotPreTrainedModel.encode.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutput" >transformers.modeling_flax_outputs.FlaxBaseModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<code>&lt;class 'transformers.models.blenderbot.configuration_blenderbot.BlenderbotConfig'&gt;</code>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutput" >transformers.modeling_flax_outputs.FlaxBaseModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),yn=new Ke({props:{code:`from transformers import BlenderbotTokenizer, FlaxBlenderbotForConditionalGeneration model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-400M-distill") tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-400M-distill") text = "My friends are cool but they eat too many carbs." 
inputs = tokenizer(text, max_length=1024, return_tensors="jax") encoder_outputs = model.encode(**inputs)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BlenderbotTokenizer, FlaxBlenderbotForConditionalGeneration <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxBlenderbotForConditionalGeneration.from_pretrained(<span class="hljs-string">&quot;facebook/blenderbot-400M-distill&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BlenderbotTokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/blenderbot-400M-distill&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>text = <span class="hljs-string">&quot;My friends are cool but they eat too many carbs.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(text, max_length=<span class="hljs-number">1024</span>, return_tensors=<span class="hljs-string">&quot;jax&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>encoder_outputs = model.encode(**inputs)`}}),Tn=new P({props:{name:"decode",anchor:"transformers.FlaxBlenderbotPreTrainedModel.decode",parameters:[{name:"decoder_input_ids",val:""},{name:"encoder_outputs",val:""},{name:"encoder_attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_position_ids",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"past_key_values",val:": dict = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"train",val:": bool = False"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/blenderbot/modeling_flax_blenderbot.py#L1012",parametersDescription:[{anchor:"transformers.FlaxBlenderbotPreTrainedModel.decode.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/blenderbot#transformers.BlenderbotTokenizer">BlenderbotTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>For translation and summarization training, <code>decoder_input_ids</code> should be provided. 
If no <code>decoder_input_ids</code> is provided, the model will create this tensor by shifting the <code>input_ids</code> to the right for denoising pre-training following the paper.`,name:"decoder_input_ids"},{anchor:"transformers.FlaxBlenderbotPreTrainedModel.decode.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tuple(tuple(jnp.ndarray)</code>) &#x2014; Tuple consists of (<code>last_hidden_state</code>, <em>optional</em>: <code>hidden_states</code>, <em>optional</em>: <code>attentions</code>) <code>last_hidden_state</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.`,name:"encoder_outputs"},{anchor:"transformers.FlaxBlenderbotPreTrainedModel.decode.encoder_attention_mask",description:`<strong>encoder_attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"encoder_attention_mask"},{anchor:"transformers.FlaxBlenderbotPreTrainedModel.decode.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.</p> <p>If you want to change padding behavior, you should modify to your needs. See diagram 1 in <a href="https://arxiv.org/abs/1910.13461" rel="nofollow">the paper</a> for more information on the default strategy.`,name:"decoder_attention_mask"},{anchor:"transformers.FlaxBlenderbotPreTrainedModel.decode.decoder_position_ids",description:`<strong>decoder_position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"decoder_position_ids"},{anchor:"transformers.FlaxBlenderbotPreTrainedModel.decode.past_key_values",description:`<strong>past_key_values</strong> (<code>Dict[str, np.ndarray]</code>, <em>optional</em>, returned by <code>init_cache</code> or when passing previous <code>past_key_values</code>) &#x2014; Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast auto-regressive decoding. Pre-computed key and value hidden-states are of shape <em>[batch_size, max_length]</em>.`,name:"past_key_values"},{anchor:"transformers.FlaxBlenderbotPreTrainedModel.decode.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.FlaxBlenderbotPreTrainedModel.decode.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FlaxBlenderbotPreTrainedModel.decode.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions" >transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<code>&lt;class 'transformers.models.blenderbot.configuration_blenderbot.BlenderbotConfig'&gt;</code>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(jnp.ndarray))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(jnp.ndarray)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and optionally if <code>config.is_encoder_decoder=True</code> 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if <code>config.is_encoder_decoder=True</code> in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> and <code>config.add_cross_attention=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) 
of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions" >transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),wn=new Ke({props:{code:`import jax.numpy as jnp from transformers import BlenderbotTokenizer, FlaxBlenderbotForConditionalGeneration model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-400M-distill") tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-400M-distill") text = "My friends are cool but they eat too many carbs." inputs = tokenizer(text, max_length=1024, return_tensors="jax") encoder_outputs = model.encode(**inputs) decoder_start_token_id = model.config.decoder_start_token_id decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id outputs = model.decode(decoder_input_ids, encoder_outputs) last_decoder_hidden_states = outputs.last_hidden_state`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> jax.numpy <span class="hljs-keyword">as</span> jnp <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BlenderbotTokenizer, FlaxBlenderbotForConditionalGeneration <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxBlenderbotForConditionalGeneration.from_pretrained(<span class="hljs-string">&quot;facebook/blenderbot-400M-distill&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BlenderbotTokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/blenderbot-400M-distill&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>text = <span class="hljs-string">&quot;My friends are cool but they eat too many carbs.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(text, max_length=<span class="hljs-number">1024</span>, return_tensors=<span class="hljs-string">&quot;jax&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>encoder_outputs = model.encode(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>decoder_start_token_id = model.config.decoder_start_token_id <span class="hljs-meta">&gt;&gt;&gt; </span>decoder_input_ids = jnp.ones((inputs.input_ids.shape[<span class="hljs-number">0</span>], <span class="hljs-number">1</span>), dtype=<span class="hljs-string">&quot;i4&quot;</span>) * decoder_start_token_id <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model.decode(decoder_input_ids, encoder_outputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_decoder_hidden_states = outputs.last_hidden_state`}}),Bn=new pe({}),zn=new P({props:{name:"class transformers.FlaxBlenderbotForConditionalGeneration",anchor:"transformers.FlaxBlenderbotForConditionalGeneration",parameters:[{name:"config",val:": BlenderbotConfig"},{name:"input_shape",val:": typing.Tuple[int] = (1, 1)"},{name:"seed",val:": int = 0"},{name:"dtype",val:": dtype = <class 
'jax._src.numpy.lax_numpy.float32'>"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/blenderbot/modeling_flax_blenderbot.py#L1280",parametersDescription:[{anchor:"transformers.FlaxBlenderbotForConditionalGeneration.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/blenderbot#transformers.BlenderbotConfig">BlenderbotConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),jn=new P({props:{name:"__call__",anchor:"transformers.FlaxBlenderbotPreTrainedModel.__call__",parameters:[{name:"input_ids",val:": ndarray"},{name:"attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_input_ids",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"position_ids",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_position_ids",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"train",val:": bool = False"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/blenderbot/modeling_flax_blenderbot.py#L1128",parametersDescription:[{anchor:"transformers.FlaxBlenderbotPreTrainedModel.__call__.input_ids",description:`<strong>input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/blenderbot#transformers.BlenderbotTokenizer">BlenderbotTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FlaxBlenderbotPreTrainedModel.__call__.attention_mask",description:`<strong>attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FlaxBlenderbotPreTrainedModel.__call__.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/blenderbot#transformers.BlenderbotTokenizer">BlenderbotTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>For translation and summarization training, <code>decoder_input_ids</code> should be provided. If no <code>decoder_input_ids</code> is provided, the model will create this tensor by shifting the <code>input_ids</code> to the right for denoising pre-training following the paper.`,name:"decoder_input_ids"},{anchor:"transformers.FlaxBlenderbotPreTrainedModel.__call__.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.</p> <p>If you want to change padding behavior, you should modify to your needs. See diagram 1 in <a href="https://arxiv.org/abs/1910.13461" rel="nofollow">the paper</a> for more information on the default strategy.`,name:"decoder_attention_mask"},{anchor:"transformers.FlaxBlenderbotPreTrainedModel.__call__.position_ids",description:`<strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"position_ids"},{anchor:"transformers.FlaxBlenderbotPreTrainedModel.__call__.decoder_position_ids",description:`<strong>decoder_position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"decoder_position_ids"},{anchor:"transformers.FlaxBlenderbotPreTrainedModel.__call__.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.FlaxBlenderbotPreTrainedModel.__call__.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FlaxBlenderbotPreTrainedModel.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput" >transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/blenderbot#transformers.BlenderbotConfig" >BlenderbotConfig</a>) and inputs.</p> <ul> <li> <p><strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(jnp.ndarray))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(jnp.ndarray)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> 
<p><strong>encoder_last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput" >transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),oo=new ho({props:{$$slots:{default:[Nf]},$$scope:{ctx:D}}}),Nn=new pe({}),An=new P({props:{name:"encode",anchor:"transformers.FlaxBlenderbotPreTrainedModel.encode",parameters:[{name:"input_ids",val:": ndarray"},{name:"attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"position_ids",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"train",val:": bool = False"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/blenderbot/modeling_flax_blenderbot.py#L949",parametersDescription:[{anchor:"transformers.FlaxBlenderbotPreTrainedModel.encode.input_ids",description:`<strong>input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/blenderbot#transformers.BlenderbotTokenizer">BlenderbotTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FlaxBlenderbotPreTrainedModel.encode.attention_mask",description:`<strong>attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FlaxBlenderbotPreTrainedModel.encode.position_ids",description:`<strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"position_ids"},{anchor:"transformers.FlaxBlenderbotPreTrainedModel.encode.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.FlaxBlenderbotPreTrainedModel.encode.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FlaxBlenderbotPreTrainedModel.encode.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutput" >transformers.modeling_flax_outputs.FlaxBaseModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<code>&lt;class 'transformers.models.blenderbot.configuration_blenderbot.BlenderbotConfig'&gt;</code>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a 
href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutput" >transformers.modeling_flax_outputs.FlaxBaseModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),On=new Ke({props:{code:`from transformers import BlenderbotTokenizer, FlaxBlenderbotForConditionalGeneration model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-400M-distill") tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-400M-distill") text = "My friends are cool but they eat too many carbs." inputs = tokenizer(text, max_length=1024, return_tensors="jax") encoder_outputs = model.encode(**inputs)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BlenderbotTokenizer, FlaxBlenderbotForConditionalGeneration <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxBlenderbotForConditionalGeneration.from_pretrained(<span class="hljs-string">&quot;facebook/blenderbot-400M-distill&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BlenderbotTokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/blenderbot-400M-distill&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>text = <span class="hljs-string">&quot;My friends are cool but they eat too many carbs.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(text, max_length=<span class="hljs-number">1024</span>, return_tensors=<span class="hljs-string">&quot;jax&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>encoder_outputs = model.encode(**inputs)`}}),In=new P({props:{name:"decode",anchor:"transformers.FlaxBlenderbotForConditionalGeneration.decode",parameters:[{name:"decoder_input_ids",val:""},{name:"encoder_outputs",val:""},{name:"encoder_attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_position_ids",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"past_key_values",val:": dict = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"train",val:": bool = False"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/blenderbot/modeling_flax_blenderbot.py#L1284",parametersDescription:[{anchor:"transformers.FlaxBlenderbotForConditionalGeneration.decode.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/blenderbot#transformers.BlenderbotTokenizer">BlenderbotTokenizer</a>. 
See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>For translation and summarization training, <code>decoder_input_ids</code> should be provided. If no <code>decoder_input_ids</code> is provided, the model will create this tensor by shifting the <code>input_ids</code> to the right for denoising pre-training following the paper.`,name:"decoder_input_ids"},{anchor:"transformers.FlaxBlenderbotForConditionalGeneration.decode.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tuple(tuple(jnp.ndarray)</code>) &#x2014; Tuple consists of (<code>last_hidden_state</code>, <em>optional</em>: <code>hidden_states</code>, <em>optional</em>: <code>attentions</code>) <code>last_hidden_state</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.`,name:"encoder_outputs"},{anchor:"transformers.FlaxBlenderbotForConditionalGeneration.decode.encoder_attention_mask",description:`<strong>encoder_attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"encoder_attention_mask"},{anchor:"transformers.FlaxBlenderbotForConditionalGeneration.decode.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.</p> <p>If you want to change padding behavior, you should modify to your needs. See diagram 1 in <a href="https://arxiv.org/abs/1910.13461" rel="nofollow">the paper</a> for more information on the default strategy.`,name:"decoder_attention_mask"},{anchor:"transformers.FlaxBlenderbotForConditionalGeneration.decode.decoder_position_ids",description:`<strong>decoder_position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"decoder_position_ids"},{anchor:"transformers.FlaxBlenderbotForConditionalGeneration.decode.past_key_values",description:`<strong>past_key_values</strong> (<code>Dict[str, np.ndarray]</code>, <em>optional</em>, returned by <code>init_cache</code> or when passing previous <code>past_key_values</code>) &#x2014; Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast auto-regressive decoding. 
Pre-computed key and value hidden-states are of shape <em>[batch_size, max_length]</em>.`,name:"past_key_values"},{anchor:"transformers.FlaxBlenderbotForConditionalGeneration.decode.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.FlaxBlenderbotForConditionalGeneration.decode.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FlaxBlenderbotForConditionalGeneration.decode.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions" >transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<code>&lt;class 'transformers.models.blenderbot.configuration_blenderbot.BlenderbotConfig'&gt;</code>) and inputs.</p> <ul> <li> <p><strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Cross attentions weights after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(jnp.ndarray))</code>, <em>optional</em>, returned when 
<code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> tuples of length <code>config.n_layers</code>, with each tuple containing the cached key, value states of the self-attention and the cross-attention layers if model is used in encoder-decoder setting. Only relevant if <code>config.is_decoder = True</code>.</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions" >transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Sn=new Ke({props:{code:`import jax.numpy as jnp from transformers import BlenderbotTokenizer, FlaxBlenderbotForConditionalGeneration model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-400M-distill") tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-400M-distill") text = "My friends are cool but they eat too many carbs." inputs = tokenizer(text, max_length=1024, return_tensors="jax") encoder_outputs = model.encode(**inputs) decoder_start_token_id = model.config.decoder_start_token_id decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id outputs = model.decode(decoder_input_ids, encoder_outputs) logits = outputs.logits`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> jax.numpy <span class="hljs-keyword">as</span> jnp <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BlenderbotTokenizer, FlaxBlenderbotForConditionalGeneration <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxBlenderbotForConditionalGeneration.from_pretrained(<span class="hljs-string">&quot;facebook/blenderbot-400M-distill&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = BlenderbotTokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/blenderbot-400M-distill&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>text = <span class="hljs-string">&quot;My friends are cool but they eat too many carbs.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(text, max_length=<span class="hljs-number">1024</span>, return_tensors=<span class="hljs-string">&quot;jax&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>encoder_outputs = model.encode(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>decoder_start_token_id = model.config.decoder_start_token_id <span class="hljs-meta">&gt;&gt;&gt; </span>decoder_input_ids = jnp.ones((inputs.input_ids.shape[<span class="hljs-number">0</span>], <span class="hljs-number">1</span>), dtype=<span class="hljs-string">&quot;i4&quot;</span>) * decoder_start_token_id <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model.decode(decoder_input_ids, encoder_outputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),{c(){u=n("meta"),x=i(),f=n("h1"),T=n("a"),z=n("span"),_(B.$$.fragment),w=i(),F=n("span"),Ee=a("Blenderbot"),he=i(),q=n("p"),ve=n("strong"),te=a("DISCLAIMER:"),Ce=a(" If you see something strange, file a "),oe=n("a"),ne=a("Github Issue"),je=a(" 
."),Fe=i(),G=n("h2"),N=n("a"),ye=n("span"),_(U.$$.fragment),M=i(),C=n("span"),Pe=a("Overview"),J=i(),Q=n("p"),Ne=a("The Blender chatbot model was proposed in "),W=n("a"),Ae=a("Recipes for building an open-domain chatbot"),Oe=a(` Stephen Roller, Emily Dinan, Naman Goyal, Da Ju, Mary Williamson, Yinhan Liu, Jing Xu, Myle Ott, Kurt Shuster, Eric M. Smith, Y-Lan Boureau, Jason Weston on 30 Apr 2020.`),A=i(),ue=n("p"),re=a("The abstract of the paper is the following:"),$e=i(),me=n("p"),H=n("em"),Ie=a(`Building open-domain chatbots is a challenging area for machine learning research. While prior work has shown that scaling neural models in the number of parameters and the size of the data they are trained on gives improved results, we show that other ingredients are important for a high-performing chatbot. Good conversation requires a number of skills that an expert conversationalist blends in a seamless way: providing engaging talking points and listening to their partners, and displaying knowledge, empathy and personality appropriately, while maintaining a consistent persona. We show that large scale models can learn these skills when given appropriate training data and choice of generation strategy. We build variants of these recipes with 90M, 2.7B and 9.4B parameter models, and make our models and code publicly available. Human evaluations show our best models are superior to existing approaches in multi-turn dialogue in terms of engagingness and humanness measurements. We then discuss the limitations of this work by analyzing failure cases of our models.`),qe=i(),E=n("p"),Se=a("This model was contributed by "),R=n("a"),Le=a("sshleifer"),m=a(". The authors\u2019 code can be found "),$=n("a"),Te=a("here"),ht=a(" ."),Ve=i(),j=n("h2"),fe=n("a"),De=n("span"),_(we.$$.fragment),K=i(),V=n("span"),ut=a("Implementation Notes"),Je=i(),O=n("ul"),Be=n("li"),mt=a("Blenderbot uses a standard "),se=n("a"),ft=a("seq2seq model transformer"),$d=a(" based architecture."),qd=i(),uo=n("li"),Md=a("Available checkpoints can be found in the "),mo=n("a"),Ed=a("model hub"),Cd=a("."),jd=i(),Ge=n("li"),Pd=a("This is the "),qr=n("em"),Nd=a("default"),Ad=a(` Blenderbot model class. However, some smaller checkpoints, such as `),Mr=n("code"),Od=a("facebook/blenderbot_small_90M"),Id=a(`, have a different architecture and consequently should be used with `),Rn=n("a"),Sd=a("BlenderbotSmall"),Ld=a("."),Ta=i(),_t=n("h2"),At=n("a"),Er=n("span"),_(fo.$$.fragment),Dd=i(),Cr=n("span"),Gd=a("Usage"),wa=i(),Un=n("p"),Wd=a("Here is an example of model usage:"),Ba=i(),_(_o.$$.fragment),za=i(),gt=n("h2"),Ot=n("a"),jr=n("span"),_(go.$$.fragment),Rd=i(),Pr=n("span"),Ud=a("BlenderbotConfig"),xa=i(),ae=n("div"),_(bo.$$.fragment),Hd=i(),bt=n("p"),Kd=a("This is the configuration class to store the configuration of a "),Hn=n("a"),Vd=a("BlenderbotModel"),Jd=a(`. It is used to instantiate an Blenderbot model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Blenderbot `),ko=n("a"),Qd=a("facebook/blenderbot-3B"),Xd=a(" architecture."),Yd=i(),kt=n("p"),Zd=a("Configuration objects inherit from "),Kn=n("a"),ei=a("PretrainedConfig"),ti=a(` and can be used to control the model outputs. 
Read the documentation from `),Vn=n("a"),oi=a("PretrainedConfig"),ni=a(" for more information."),ri=i(),Nr=n("p"),si=a("Example:"),ai=i(),_(vo.$$.fragment),Fa=i(),vt=n("h2"),It=n("a"),Ar=n("span"),_(yo.$$.fragment),di=i(),Or=n("span"),ii=a("BlenderbotTokenizer"),$a=i(),de=n("div"),_(To.$$.fragment),li=i(),Ir=n("p"),ci=a("Construct a Blenderbot tokenizer."),pi=i(),St=n("p"),Sr=n("code"),hi=a("Blenderbot"),ui=a(" is nearly identical to "),Jn=n("a"),mi=a("RobertaTokenizer"),fi=a(` and runs end-to-end tokenization: punctuation splitting and wordpiece. The only difference is that it doesn\u2019t add BOS token to the beginning of sequences.`),_i=i(),wo=n("p"),gi=a("Refer to superclass "),Qn=n("a"),bi=a("RobertaTokenizer"),ki=a(" for usage examples and documentation concerning parameters."),vi=i(),Qe=n("div"),_(Bo.$$.fragment),yi=i(),Lr=n("p"),Ti=a(`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A Blenderbot sequence has the following format:`),wi=i(),Dr=n("ul"),Xn=n("li"),Bi=a("single sequence: "),Gr=n("code"),zi=a("X </s>"),qa=i(),yt=n("h2"),Lt=n("a"),Wr=n("span"),_(zo.$$.fragment),xi=i(),Rr=n("span"),Fi=a("BlenderbotTokenizerFast"),Ma=i(),ie=n("div"),_(xo.$$.fragment),$i=i(),Fo=n("p"),qi=a("Construct a \u201Cfast\u201D Blenderbot tokenizer (backed by HuggingFace\u2019s "),Ur=n("em"),Mi=a("tokenizers"),Ei=a(" library)."),Ci=i(),Dt=n("p"),Hr=n("code"),ji=a("BlenderbotFast"),Pi=a(" is nearly identical to "),Yn=n("a"),Ni=a("RobertaTokenizerFast"),Ai=a(` and runs end-to-end tokenization: punctuation splitting and wordpiece. The only difference is that it doesn\u2019t add BOS token to the beginning of sequences.`),Oi=i(),$o=n("p"),Ii=a("Refer to superclass "),Zn=n("a"),Si=a("RobertaTokenizerFast"),Li=a(" for usage examples and documentation concerning parameters."),Di=i(),Xe=n("div"),_(qo.$$.fragment),Gi=i(),Kr=n("p"),Wi=a(`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A Blenderbot sequence has the following format:`),Ri=i(),Vr=n("ul"),er=n("li"),Ui=a("single sequence: "),Jr=n("code"),Hi=a("X </s>"),Ea=i(),Tt=n("h2"),Gt=n("a"),Qr=n("span"),_(Mo.$$.fragment),Ki=i(),Xr=n("span"),Vi=a("BlenderbotModel"),Ca=i(),We=n("p"),Ji=a("See "),Yr=n("code"),Qi=a("transformers.BartModel"),Xi=a(" for arguments to "),Zr=n("em"),Yi=a("forward"),Zi=a(" and "),es=n("em"),el=a("generate"),ja=i(),ze=n("div"),_(Eo.$$.fragment),tl=i(),Co=n("p"),ol=a(`The bare Blenderbot Model outputting raw hidden-states without any specific head on top. This model inherits from `),tr=n("a"),nl=a("PreTrainedModel"),rl=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),sl=i(),jo=n("p"),al=a("This model is also a PyTorch "),Po=n("a"),dl=a("torch.nn.Module"),il=a(` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),ll=i(),_e=n("div"),_(No.$$.fragment),cl=i(),wt=n("p"),pl=a("The "),or=n("a"),hl=a("BlenderbotModel"),ul=a(" forward method, overrides the "),ts=n("code"),ml=a("__call__"),fl=a(" special method."),_l=i(),_(Wt.$$.fragment),gl=i(),os=n("p"),bl=a("Example:"),kl=i(),_(Ao.$$.fragment),Pa=i(),Bt=n("h2"),Rt=n("a"),ns=n("span"),_(Oo.$$.fragment),vl=i(),rs=n("span"),yl=a("BlenderbotForConditionalGeneration"),Na=i(),Re=n("p"),Tl=a("See "),nr=n("a"),wl=a("BartForConditionalGeneration"),Bl=a(" for arguments to "),ss=n("em"),zl=a("forward"),xl=a(" and "),as=n("em"),Fl=a("generate"),Aa=i(),xe=n("div"),_(Io.$$.fragment),$l=i(),So=n("p"),ql=a(`The Blenderbot Model with a language modeling head. Can be used for summarization. This model inherits from `),rr=n("a"),Ml=a("PreTrainedModel"),El=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Cl=i(),Lo=n("p"),jl=a("This model is also a PyTorch "),Do=n("a"),Pl=a("torch.nn.Module"),Nl=a(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Al=i(),ge=n("div"),_(Go.$$.fragment),Ol=i(),zt=n("p"),Il=a("The "),sr=n("a"),Sl=a("BlenderbotForConditionalGeneration"),Ll=a(" forward method, overrides the "),ds=n("code"),Dl=a("__call__"),Gl=a(" special method."),Wl=i(),_(Ut.$$.fragment),Rl=i(),is=n("p"),Ul=a("Conversation example:"),Hl=i(),_(Wo.$$.fragment),Oa=i(),xt=n("h2"),Ht=n("a"),ls=n("span"),_(Ro.$$.fragment),Kl=i(),cs=n("span"),Vl=a("BlenderbotForCausalLM"),Ia=i(),Ft=n("div"),_(Uo.$$.fragment),Jl=i(),Ye=n("div"),_(Ho.$$.fragment),Ql=i(),ps=n("p"),Xl=a("Example:"),Yl=i(),_(Ko.$$.fragment),Sa=i(),$t=n("h2"),Kt=n("a"),hs=n("span"),_(Vo.$$.fragment),Zl=i(),us=n("span"),ec=a("TFBlenderbotModel"),La=i(),le=n("div"),_(Jo.$$.fragment),tc=i(),Qo=n("p"),oc=a(`The bare BLENDERBOT Model outputting raw hidden-states without any specific head on top. This model inherits from `),ar=n("a"),nc=a("TFPreTrainedModel"),rc=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),sc=i(),Xo=n("p"),ac=a("This model is also a "),Yo=n("a"),dc=a("tf.keras.Model"),ic=a(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),lc=i(),_(Vt.$$.fragment),cc=i(),be=n("div"),_(Zo.$$.fragment),pc=i(),qt=n("p"),hc=a("The "),dr=n("a"),uc=a("TFBlenderbotModel"),mc=a(" forward method, overrides the "),ms=n("code"),fc=a("__call__"),_c=a(" special method."),gc=i(),_(Jt.$$.fragment),bc=i(),fs=n("p"),kc=a("Example:"),vc=i(),_(en.$$.fragment),Da=i(),Mt=n("h2"),Qt=n("a"),_s=n("span"),_(tn.$$.fragment),yc=i(),gs=n("span"),Tc=a("TFBlenderbotForConditionalGeneration"),Ga=i(),ce=n("div"),_(on.$$.fragment),wc=i(),nn=n("p"),Bc=a(`The BLENDERBOT Model with a language modeling head. Can be used for summarization. This model inherits from `),ir=n("a"),zc=a("TFPreTrainedModel"),xc=a(`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Fc=i(),rn=n("p"),$c=a("This model is also a "),sn=n("a"),qc=a("tf.keras.Model"),Mc=a(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Ec=i(),_(Xt.$$.fragment),Cc=i(),X=n("div"),_(an.$$.fragment),jc=i(),Et=n("p"),Pc=a("The "),lr=n("a"),Nc=a("TFBlenderbotForConditionalGeneration"),Ac=a(" forward method, overrides the "),bs=n("code"),Oc=a("__call__"),Ic=a(" special method."),Sc=i(),_(Yt.$$.fragment),Lc=i(),ks=n("p"),Dc=a("Conversation example::"),Gc=i(),vs=n("blockquote"),ys=n("blockquote"),Ts=n("blockquote"),ws=n("p"),Wc=a(`from transformers import BlenderbotTokenizer, TFBlenderbotForConditionalGeneration >>> mname = \u2018facebook/blenderbot-400M-distill\u2019 >>> model = TFBlenderbotForConditionalGeneration.from_pretrained(mname) >>> tokenizer = BlenderbotTokenizer.from_pretrained(mname) >>> UTTERANCE = \u201CMy friends are cool but they eat too many carbs.\u201D >>> print(\u201CHuman: \u201D, UTTERANCE) >>> inputs = tokenizer([UTTERANCE], return_tensors=\u2018tf\u2019) >>> reply_ids = model.generate(**inputs) >>> print(\u201CBot: \u201D, tokenizer.batch_decode(reply_ids, skip_special_tokens=True)[0])`),Rc=i(),Bs=n("blockquote"),zs=n("blockquote"),xs=n("blockquote"),dn=n("p"),Uc=a(`REPLY = \u201CI\u2019m not sure\u201D >>> print(\u201CHuman: \u201D, REPLY) >>> NEXT_UTTERANCE = ( \u2026 \u201CMy friends are cool but they eat too many carbs.</s> `),Fs=n("s"),Hc=a(`That\u2019s unfortunate. \u201D \u2026 \u201CAre they trying to lose weight or are they just trying to be healthier?`),Kc=a(` \u201D \u2026 \u201D<s> I\u2019m not sure.\u201D \u2026 ) >>> inputs = tokenizer([NEXT_UTTERANCE], return_tensors=\u2018tf\u2019) next_reply_ids = model.generate(**inputs) >>> print(\u201CBot: \u201D, tokenizer.batch_decode(next_reply_ids, skip_special_tokens=True)[0])`),Wa=i(),Ct=n("h2"),Zt=n("a"),$s=n("span"),_(ln.$$.fragment),Vc=i(),qs=n("span"),Jc=a("FlaxBlenderbotModel"),Ra=i(),I=n("div"),_(cn.$$.fragment),Qc=i(),pn=n("p"),Xc=a(`The bare MBart Model transformer outputting raw hidden-states without any specific head on top. This model inherits from `),cr=n("a"),Yc=a("FlaxPreTrainedModel"),Zc=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),ep=i(),hn=n("p"),tp=a(`This model is also a Flax Linen `),un=n("a"),op=a("flax.nn.Module"),np=a(` subclass. 
Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.`),rp=i(),Ms=n("p"),sp=a("Finally, this model supports inherent JAX features such as:"),ap=i(),Ue=n("ul"),Es=n("li"),mn=n("a"),dp=a("Just-In-Time (JIT) compilation"),ip=i(),Cs=n("li"),fn=n("a"),lp=a("Automatic Differentiation"),cp=i(),js=n("li"),_n=n("a"),pp=a("Vectorization"),hp=i(),Ps=n("li"),gn=n("a"),up=a("Parallelization"),mp=i(),ke=n("div"),_(bn.$$.fragment),fp=i(),jt=n("p"),_p=a("The "),Ns=n("code"),gp=a("FlaxBlenderbotPreTrainedModel"),bp=a("forward method, overrides the "),As=n("code"),kp=a("__call__"),vp=a(" special method."),yp=i(),_(eo.$$.fragment),Tp=i(),Os=n("p"),wp=a("Example:"),Bp=i(),_(kn.$$.fragment),zp=i(),Ze=n("div"),_(vn.$$.fragment),xp=i(),Is=n("p"),Fp=a("Example:"),$p=i(),_(yn.$$.fragment),qp=i(),et=n("div"),_(Tn.$$.fragment),Mp=i(),Ss=n("p"),Ep=a("Example:"),Cp=i(),_(wn.$$.fragment),Ua=i(),Pt=n("h2"),to=n("a"),Ls=n("span"),_(Bn.$$.fragment),jp=i(),Ds=n("span"),Pp=a("FlaxBlenderbotForConditionalGeneration"),Ha=i(),S=n("div"),_(zn.$$.fragment),Np=i(),xn=n("p"),Ap=a(`The Blenderbot Model with a language modeling head. Can be used for summarization. This model inherits from `),pr=n("a"),Op=a("FlaxPreTrainedModel"),Ip=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Sp=i(),Fn=n("p"),Lp=a(`This model is also a Flax Linen `),$n=n("a"),Dp=a("flax.nn.Module"),Gp=a(` subclass. Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.`),Wp=i(),Gs=n("p"),Rp=a("Finally, this model supports inherent JAX features such as:"),Up=i(),He=n("ul"),Ws=n("li"),qn=n("a"),Hp=a("Just-In-Time (JIT) compilation"),Kp=i(),Rs=n("li"),Mn=n("a"),Vp=a("Automatic Differentiation"),Jp=i(),Us=n("li"),En=n("a"),Qp=a("Vectorization"),Xp=i(),Hs=n("li"),Cn=n("a"),Yp=a("Parallelization"),Zp=i(),L=n("div"),_(jn.$$.fragment),eh=i(),Nt=n("p"),th=a("The "),Ks=n("code"),oh=a("FlaxBlenderbotPreTrainedModel"),nh=a("forward method, overrides the "),Vs=n("code"),rh=a("__call__"),sh=a(" special method."),ah=i(),_(oo.$$.fragment),dh=i(),Js=n("p"),ih=a("Conversation example::"),lh=i(),Qs=n("blockquote"),Xs=n("blockquote"),Ys=n("blockquote"),Zs=n("p"),ch=a("from transformers import BlenderbotTokenizer, FlaxBlenderbotForConditionalGeneration, BlenderbotConfig"),ph=i(),ea=n("blockquote"),ta=n("blockquote"),oa=n("blockquote"),na=n("p"),hh=a(`model = FlaxBlenderbotForConditionalGeneration.from_pretrained(\u2018facebook/blenderbot-400M-distill\u2019) >>> tokenizer = BlenderbotTokenizer.from_pretrained(\u2018facebook/blenderbot-400M-distill\u2019)`),uh=i(),ra=n("blockquote"),sa=n("blockquote"),aa=n("blockquote"),da=n("p"),mh=a(`UTTERANCE = \u201CMy friends are cool but they eat too many carbs.\u201D >>> inputs = tokenizer([UTTERANCE], max_length=1024, return_tensors=\u2018np\u2019)`),fh=i(),ia=n("blockquote"),la=n("blockquote"),Pn=n("blockquote"),no=n("h1"),ro=n("a"),ca=n("span"),_(Nn.$$.fragment),_h=i(),pa=n("span"),gh=a("Generate Reply >>> reply_ids = model.generate(inputs[\u2018input_ids\u2019], num_beams=4, max_length=5,"),bh=i(),ha=n("p"),kh=a(`early_stopping=True).sequences >>> print([tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=False) for g in 
reply_ids])`),vh=i(),tt=n("div"),_(An.$$.fragment),yh=i(),ua=n("p"),Th=a("Example:"),wh=i(),_(On.$$.fragment),Bh=i(),ot=n("div"),_(In.$$.fragment),zh=i(),ma=n("p"),xh=a("Example:"),Fh=i(),_(Sn.$$.fragment),this.h()},l(o){const h=Ff('[data-svelte="svelte-1phssyn"]',document.head);u=r(h,"META",{name:!0,content:!0}),h.forEach(t),x=l(o),f=r(o,"H1",{class:!0});var Ln=s(f);T=r(Ln,"A",{id:!0,class:!0,href:!0});var fa=s(T);z=r(fa,"SPAN",{});var _a=s(z);g(B.$$.fragment,_a),_a.forEach(t),fa.forEach(t),w=l(Ln),F=r(Ln,"SPAN",{});var ga=s(F);Ee=d(ga,"Blenderbot"),ga.forEach(t),Ln.forEach(t),he=l(o),q=r(o,"P",{});var so=s(q);ve=r(so,"STRONG",{});var ba=s(ve);te=d(ba,"DISCLAIMER:"),ba.forEach(t),Ce=d(so," If you see something strange, file a "),oe=r(so,"A",{href:!0,rel:!0});var ka=s(oe);ne=d(ka,"Github Issue"),ka.forEach(t),je=d(so," ."),so.forEach(t),Fe=l(o),G=r(o,"H2",{class:!0});var Dn=s(G);N=r(Dn,"A",{id:!0,class:!0,href:!0});var Mh=s(N);ye=r(Mh,"SPAN",{});var Eh=s(ye);g(U.$$.fragment,Eh),Eh.forEach(t),Mh.forEach(t),M=l(Dn),C=r(Dn,"SPAN",{});var Ch=s(C);Pe=d(Ch,"Overview"),Ch.forEach(t),Dn.forEach(t),J=l(o),Q=r(o,"P",{});var Va=s(Q);Ne=d(Va,"The Blender chatbot model was proposed in "),W=r(Va,"A",{href:!0,rel:!0});var jh=s(W);Ae=d(jh,"Recipes for building an open-domain chatbot"),jh.forEach(t),Oe=d(Va,` Stephen Roller, Emily Dinan, Naman Goyal, Da Ju, Mary Williamson, Yinhan Liu, Jing Xu, Myle Ott, Kurt Shuster, Eric M. Smith, Y-Lan Boureau, Jason Weston on 30 Apr 2020.`),Va.forEach(t),A=l(o),ue=r(o,"P",{});var Ph=s(ue);re=d(Ph,"The abstract of the paper is the following:"),Ph.forEach(t),$e=l(o),me=r(o,"P",{});var Nh=s(me);H=r(Nh,"EM",{});var Ah=s(H);Ie=d(Ah,`Building open-domain chatbots is a challenging area for machine learning research. While prior work has shown that scaling neural models in the number of parameters and the size of the data they are trained on gives improved results, we show that other ingredients are important for a high-performing chatbot. Good conversation requires a number of skills that an expert conversationalist blends in a seamless way: providing engaging talking points and listening to their partners, and displaying knowledge, empathy and personality appropriately, while maintaining a consistent persona. We show that large scale models can learn these skills when given appropriate training data and choice of generation strategy. We build variants of these recipes with 90M, 2.7B and 9.4B parameter models, and make our models and code publicly available. Human evaluations show our best models are superior to existing approaches in multi-turn dialogue in terms of engagingness and humanness measurements. We then discuss the limitations of this work by analyzing failure cases of our models.`),Ah.forEach(t),Nh.forEach(t),qe=l(o),E=r(o,"P",{});var hr=s(E);Se=d(hr,"This model was contributed by "),R=r(hr,"A",{href:!0,rel:!0});var Oh=s(R);Le=d(Oh,"sshleifer"),Oh.forEach(t),m=d(hr,". 
The authors\u2019 code can be found "),$=r(hr,"A",{href:!0,rel:!0});var Ih=s($);Te=d(Ih,"here"),Ih.forEach(t),ht=d(hr," ."),hr.forEach(t),Ve=l(o),j=r(o,"H2",{class:!0});var Ja=s(j);fe=r(Ja,"A",{id:!0,class:!0,href:!0});var Sh=s(fe);De=r(Sh,"SPAN",{});var Lh=s(De);g(we.$$.fragment,Lh),Lh.forEach(t),Sh.forEach(t),K=l(Ja),V=r(Ja,"SPAN",{});var Dh=s(V);ut=d(Dh,"Implementation Notes"),Dh.forEach(t),Ja.forEach(t),Je=l(o),O=r(o,"UL",{});var ur=s(O);Be=r(ur,"LI",{});var Qa=s(Be);mt=d(Qa,"Blenderbot uses a standard "),se=r(Qa,"A",{href:!0,rel:!0});var Gh=s(se);ft=d(Gh,"seq2seq model transformer"),Gh.forEach(t),$d=d(Qa," based architecture."),Qa.forEach(t),qd=l(ur),uo=r(ur,"LI",{});var Xa=s(uo);Md=d(Xa,"Available checkpoints can be found in the "),mo=r(Xa,"A",{href:!0,rel:!0});var Wh=s(mo);Ed=d(Wh,"model hub"),Wh.forEach(t),Cd=d(Xa,"."),Xa.forEach(t),jd=l(ur),Ge=r(ur,"LI",{});var ao=s(Ge);Pd=d(ao,"This is the "),qr=r(ao,"EM",{});var Rh=s(qr);Nd=d(Rh,"default"),Rh.forEach(t),Ad=d(ao,` Blenderbot model class. However, some smaller checkpoints, such as `),Mr=r(ao,"CODE",{});var Uh=s(Mr);Od=d(Uh,"facebook/blenderbot_small_90M"),Uh.forEach(t),Id=d(ao,`, have a different architecture and consequently should be used with `),Rn=r(ao,"A",{href:!0});var Hh=s(Rn);Sd=d(Hh,"BlenderbotSmall"),Hh.forEach(t),Ld=d(ao,"."),ao.forEach(t),ur.forEach(t),Ta=l(o),_t=r(o,"H2",{class:!0});var Ya=s(_t);At=r(Ya,"A",{id:!0,class:!0,href:!0});var Kh=s(At);Er=r(Kh,"SPAN",{});var Vh=s(Er);g(fo.$$.fragment,Vh),Vh.forEach(t),Kh.forEach(t),Dd=l(Ya),Cr=r(Ya,"SPAN",{});var Jh=s(Cr);Gd=d(Jh,"Usage"),Jh.forEach(t),Ya.forEach(t),wa=l(o),Un=r(o,"P",{});var Qh=s(Un);Wd=d(Qh,"Here is an example of model usage:"),Qh.forEach(t),Ba=l(o),g(_o.$$.fragment,o),za=l(o),gt=r(o,"H2",{class:!0});var Za=s(gt);Ot=r(Za,"A",{id:!0,class:!0,href:!0});var Xh=s(Ot);jr=r(Xh,"SPAN",{});var Yh=s(jr);g(go.$$.fragment,Yh),Yh.forEach(t),Xh.forEach(t),Rd=l(Za),Pr=r(Za,"SPAN",{});var Zh=s(Pr);Ud=d(Zh,"BlenderbotConfig"),Zh.forEach(t),Za.forEach(t),xa=l(o),ae=r(o,"DIV",{class:!0});var nt=s(ae);g(bo.$$.fragment,nt),Hd=l(nt),bt=r(nt,"P",{});var mr=s(bt);Kd=d(mr,"This is the configuration class to store the configuration of a "),Hn=r(mr,"A",{href:!0});var eu=s(Hn);Vd=d(eu,"BlenderbotModel"),eu.forEach(t),Jd=d(mr,`. It is used to instantiate an Blenderbot model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Blenderbot `),ko=r(mr,"A",{href:!0,rel:!0});var tu=s(ko);Qd=d(tu,"facebook/blenderbot-3B"),tu.forEach(t),Xd=d(mr," architecture."),mr.forEach(t),Yd=l(nt),kt=r(nt,"P",{});var fr=s(kt);Zd=d(fr,"Configuration objects inherit from "),Kn=r(fr,"A",{href:!0});var ou=s(Kn);ei=d(ou,"PretrainedConfig"),ou.forEach(t),ti=d(fr,` and can be used to control the model outputs. 
Read the documentation from `),Vn=r(fr,"A",{href:!0});var nu=s(Vn);oi=d(nu,"PretrainedConfig"),nu.forEach(t),ni=d(fr," for more information."),fr.forEach(t),ri=l(nt),Nr=r(nt,"P",{});var ru=s(Nr);si=d(ru,"Example:"),ru.forEach(t),ai=l(nt),g(vo.$$.fragment,nt),nt.forEach(t),Fa=l(o),vt=r(o,"H2",{class:!0});var ed=s(vt);It=r(ed,"A",{id:!0,class:!0,href:!0});var su=s(It);Ar=r(su,"SPAN",{});var au=s(Ar);g(yo.$$.fragment,au),au.forEach(t),su.forEach(t),di=l(ed),Or=r(ed,"SPAN",{});var du=s(Or);ii=d(du,"BlenderbotTokenizer"),du.forEach(t),ed.forEach(t),$a=l(o),de=r(o,"DIV",{class:!0});var rt=s(de);g(To.$$.fragment,rt),li=l(rt),Ir=r(rt,"P",{});var iu=s(Ir);ci=d(iu,"Construct a Blenderbot tokenizer."),iu.forEach(t),pi=l(rt),St=r(rt,"P",{});var va=s(St);Sr=r(va,"CODE",{});var lu=s(Sr);hi=d(lu,"Blenderbot"),lu.forEach(t),ui=d(va," is nearly identical to "),Jn=r(va,"A",{href:!0});var cu=s(Jn);mi=d(cu,"RobertaTokenizer"),cu.forEach(t),fi=d(va,` and runs end-to-end tokenization: punctuation splitting and wordpiece. The only difference is that it doesn\u2019t add BOS token to the beginning of sequences.`),va.forEach(t),_i=l(rt),wo=r(rt,"P",{});var td=s(wo);gi=d(td,"Refer to superclass "),Qn=r(td,"A",{href:!0});var pu=s(Qn);bi=d(pu,"RobertaTokenizer"),pu.forEach(t),ki=d(td," for usage examples and documentation concerning parameters."),td.forEach(t),vi=l(rt),Qe=r(rt,"DIV",{class:!0});var _r=s(Qe);g(Bo.$$.fragment,_r),yi=l(_r),Lr=r(_r,"P",{});var hu=s(Lr);Ti=d(hu,`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A Blenderbot sequence has the following format:`),hu.forEach(t),wi=l(_r),Dr=r(_r,"UL",{});var uu=s(Dr);Xn=r(uu,"LI",{});var $h=s(Xn);Bi=d($h,"single sequence: "),Gr=r($h,"CODE",{});var mu=s(Gr);zi=d(mu,"X </s>"),mu.forEach(t),$h.forEach(t),uu.forEach(t),_r.forEach(t),rt.forEach(t),qa=l(o),yt=r(o,"H2",{class:!0});var od=s(yt);Lt=r(od,"A",{id:!0,class:!0,href:!0});var fu=s(Lt);Wr=r(fu,"SPAN",{});var _u=s(Wr);g(zo.$$.fragment,_u),_u.forEach(t),fu.forEach(t),xi=l(od),Rr=r(od,"SPAN",{});var gu=s(Rr);Fi=d(gu,"BlenderbotTokenizerFast"),gu.forEach(t),od.forEach(t),Ma=l(o),ie=r(o,"DIV",{class:!0});var st=s(ie);g(xo.$$.fragment,st),$i=l(st),Fo=r(st,"P",{});var nd=s(Fo);qi=d(nd,"Construct a \u201Cfast\u201D Blenderbot tokenizer (backed by HuggingFace\u2019s "),Ur=r(nd,"EM",{});var bu=s(Ur);Mi=d(bu,"tokenizers"),bu.forEach(t),Ei=d(nd," library)."),nd.forEach(t),Ci=l(st),Dt=r(st,"P",{});var ya=s(Dt);Hr=r(ya,"CODE",{});var ku=s(Hr);ji=d(ku,"BlenderbotFast"),ku.forEach(t),Pi=d(ya," is nearly identical to "),Yn=r(ya,"A",{href:!0});var vu=s(Yn);Ni=d(vu,"RobertaTokenizerFast"),vu.forEach(t),Ai=d(ya,` and runs end-to-end tokenization: punctuation splitting and wordpiece. The only difference is that it doesn\u2019t add BOS token to the beginning of sequences.`),ya.forEach(t),Oi=l(st),$o=r(st,"P",{});var rd=s($o);Ii=d(rd,"Refer to superclass "),Zn=r(rd,"A",{href:!0});var yu=s(Zn);Si=d(yu,"RobertaTokenizerFast"),yu.forEach(t),Li=d(rd," for usage examples and documentation concerning parameters."),rd.forEach(t),Di=l(st),Xe=r(st,"DIV",{class:!0});var gr=s(Xe);g(qo.$$.fragment,gr),Gi=l(gr),Kr=r(gr,"P",{});var Tu=s(Kr);Wi=d(Tu,`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. 
A Blenderbot sequence has the following format:`),Tu.forEach(t),Ri=l(gr),Vr=r(gr,"UL",{});var wu=s(Vr);er=r(wu,"LI",{});var qh=s(er);Ui=d(qh,"single sequence: "),Jr=r(qh,"CODE",{});var Bu=s(Jr);Hi=d(Bu,"X </s>"),Bu.forEach(t),qh.forEach(t),wu.forEach(t),gr.forEach(t),st.forEach(t),Ea=l(o),Tt=r(o,"H2",{class:!0});var sd=s(Tt);Gt=r(sd,"A",{id:!0,class:!0,href:!0});var zu=s(Gt);Qr=r(zu,"SPAN",{});var xu=s(Qr);g(Mo.$$.fragment,xu),xu.forEach(t),zu.forEach(t),Ki=l(sd),Xr=r(sd,"SPAN",{});var Fu=s(Xr);Vi=d(Fu,"BlenderbotModel"),Fu.forEach(t),sd.forEach(t),Ca=l(o),We=r(o,"P",{});var Gn=s(We);Ji=d(Gn,"See "),Yr=r(Gn,"CODE",{});var $u=s(Yr);Qi=d($u,"transformers.BartModel"),$u.forEach(t),Xi=d(Gn," for arguments to "),Zr=r(Gn,"EM",{});var qu=s(Zr);Yi=d(qu,"forward"),qu.forEach(t),Zi=d(Gn," and "),es=r(Gn,"EM",{});var Mu=s(es);el=d(Mu,"generate"),Mu.forEach(t),Gn.forEach(t),ja=l(o),ze=r(o,"DIV",{class:!0});var io=s(ze);g(Eo.$$.fragment,io),tl=l(io),Co=r(io,"P",{});var ad=s(Co);ol=d(ad,`The bare Blenderbot Model outputting raw hidden-states without any specific head on top. This model inherits from `),tr=r(ad,"A",{href:!0});var Eu=s(tr);nl=d(Eu,"PreTrainedModel"),Eu.forEach(t),rl=d(ad,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),ad.forEach(t),sl=l(io),jo=r(io,"P",{});var dd=s(jo);al=d(dd,"This model is also a PyTorch "),Po=r(dd,"A",{href:!0,rel:!0});var Cu=s(Po);dl=d(Cu,"torch.nn.Module"),Cu.forEach(t),il=d(dd,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),dd.forEach(t),ll=l(io),_e=r(io,"DIV",{class:!0});var at=s(_e);g(No.$$.fragment,at),cl=l(at),wt=r(at,"P",{});var br=s(wt);pl=d(br,"The "),or=r(br,"A",{href:!0});var ju=s(or);hl=d(ju,"BlenderbotModel"),ju.forEach(t),ul=d(br," forward method, overrides the "),ts=r(br,"CODE",{});var Pu=s(ts);ml=d(Pu,"__call__"),Pu.forEach(t),fl=d(br," special method."),br.forEach(t),_l=l(at),g(Wt.$$.fragment,at),gl=l(at),os=r(at,"P",{});var Nu=s(os);bl=d(Nu,"Example:"),Nu.forEach(t),kl=l(at),g(Ao.$$.fragment,at),at.forEach(t),io.forEach(t),Pa=l(o),Bt=r(o,"H2",{class:!0});var id=s(Bt);Rt=r(id,"A",{id:!0,class:!0,href:!0});var Au=s(Rt);ns=r(Au,"SPAN",{});var Ou=s(ns);g(Oo.$$.fragment,Ou),Ou.forEach(t),Au.forEach(t),vl=l(id),rs=r(id,"SPAN",{});var Iu=s(rs);yl=d(Iu,"BlenderbotForConditionalGeneration"),Iu.forEach(t),id.forEach(t),Na=l(o),Re=r(o,"P",{});var Wn=s(Re);Tl=d(Wn,"See "),nr=r(Wn,"A",{href:!0});var Su=s(nr);wl=d(Su,"BartForConditionalGeneration"),Su.forEach(t),Bl=d(Wn," for arguments to "),ss=r(Wn,"EM",{});var Lu=s(ss);zl=d(Lu,"forward"),Lu.forEach(t),xl=d(Wn," and "),as=r(Wn,"EM",{});var Du=s(as);Fl=d(Du,"generate"),Du.forEach(t),Wn.forEach(t),Aa=l(o),xe=r(o,"DIV",{class:!0});var lo=s(xe);g(Io.$$.fragment,lo),$l=l(lo),So=r(lo,"P",{});var ld=s(So);ql=d(ld,`The Blenderbot Model with a language modeling head. Can be used for summarization. This model inherits from `),rr=r(ld,"A",{href:!0});var Gu=s(rr);Ml=d(Gu,"PreTrainedModel"),Gu.forEach(t),El=d(ld,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),ld.forEach(t),Cl=l(lo),Lo=r(lo,"P",{});var cd=s(Lo);jl=d(cd,"This model is also a PyTorch "),Do=r(cd,"A",{href:!0,rel:!0});var Wu=s(Do);Pl=d(Wu,"torch.nn.Module"),Wu.forEach(t),Nl=d(cd,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),cd.forEach(t),Al=l(lo),ge=r(lo,"DIV",{class:!0});var dt=s(ge);g(Go.$$.fragment,dt),Ol=l(dt),zt=r(dt,"P",{});var kr=s(zt);Il=d(kr,"The "),sr=r(kr,"A",{href:!0});var Ru=s(sr);Sl=d(Ru,"BlenderbotForConditionalGeneration"),Ru.forEach(t),Ll=d(kr," forward method, overrides the "),ds=r(kr,"CODE",{});var Uu=s(ds);Dl=d(Uu,"__call__"),Uu.forEach(t),Gl=d(kr," special method."),kr.forEach(t),Wl=l(dt),g(Ut.$$.fragment,dt),Rl=l(dt),is=r(dt,"P",{});var Hu=s(is);Ul=d(Hu,"Conversation example:"),Hu.forEach(t),Hl=l(dt),g(Wo.$$.fragment,dt),dt.forEach(t),lo.forEach(t),Oa=l(o),xt=r(o,"H2",{class:!0});var pd=s(xt);Ht=r(pd,"A",{id:!0,class:!0,href:!0});var Ku=s(Ht);ls=r(Ku,"SPAN",{});var Vu=s(ls);g(Ro.$$.fragment,Vu),Vu.forEach(t),Ku.forEach(t),Kl=l(pd),cs=r(pd,"SPAN",{});var Ju=s(cs);Vl=d(Ju,"BlenderbotForCausalLM"),Ju.forEach(t),pd.forEach(t),Ia=l(o),Ft=r(o,"DIV",{class:!0});var hd=s(Ft);g(Uo.$$.fragment,hd),Jl=l(hd),Ye=r(hd,"DIV",{class:!0});var vr=s(Ye);g(Ho.$$.fragment,vr),Ql=l(vr),ps=r(vr,"P",{});var Qu=s(ps);Xl=d(Qu,"Example:"),Qu.forEach(t),Yl=l(vr),g(Ko.$$.fragment,vr),vr.forEach(t),hd.forEach(t),Sa=l(o),$t=r(o,"H2",{class:!0});var ud=s($t);Kt=r(ud,"A",{id:!0,class:!0,href:!0});var Xu=s(Kt);hs=r(Xu,"SPAN",{});var Yu=s(hs);g(Vo.$$.fragment,Yu),Yu.forEach(t),Xu.forEach(t),Zl=l(ud),us=r(ud,"SPAN",{});var Zu=s(us);ec=d(Zu,"TFBlenderbotModel"),Zu.forEach(t),ud.forEach(t),La=l(o),le=r(o,"DIV",{class:!0});var it=s(le);g(Jo.$$.fragment,it),tc=l(it),Qo=r(it,"P",{});var md=s(Qo);oc=d(md,`The bare BLENDERBOT Model outputting raw hidden-states without any specific head on top. This model inherits from `),ar=r(md,"A",{href:!0});var em=s(ar);nc=d(em,"TFPreTrainedModel"),em.forEach(t),rc=d(md,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),md.forEach(t),sc=l(it),Xo=r(it,"P",{});var fd=s(Xo);ac=d(fd,"This model is also a "),Yo=r(fd,"A",{href:!0,rel:!0});var tm=s(Yo);dc=d(tm,"tf.keras.Model"),tm.forEach(t),ic=d(fd,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),fd.forEach(t),lc=l(it),g(Vt.$$.fragment,it),cc=l(it),be=r(it,"DIV",{class:!0});var lt=s(be);g(Zo.$$.fragment,lt),pc=l(lt),qt=r(lt,"P",{});var yr=s(qt);hc=d(yr,"The "),dr=r(yr,"A",{href:!0});var om=s(dr);uc=d(om,"TFBlenderbotModel"),om.forEach(t),mc=d(yr," forward method, overrides the "),ms=r(yr,"CODE",{});var nm=s(ms);fc=d(nm,"__call__"),nm.forEach(t),_c=d(yr," special method."),yr.forEach(t),gc=l(lt),g(Jt.$$.fragment,lt),bc=l(lt),fs=r(lt,"P",{});var rm=s(fs);kc=d(rm,"Example:"),rm.forEach(t),vc=l(lt),g(en.$$.fragment,lt),lt.forEach(t),it.forEach(t),Da=l(o),Mt=r(o,"H2",{class:!0});var _d=s(Mt);Qt=r(_d,"A",{id:!0,class:!0,href:!0});var sm=s(Qt);_s=r(sm,"SPAN",{});var am=s(_s);g(tn.$$.fragment,am),am.forEach(t),sm.forEach(t),yc=l(_d),gs=r(_d,"SPAN",{});var dm=s(gs);Tc=d(dm,"TFBlenderbotForConditionalGeneration"),dm.forEach(t),_d.forEach(t),Ga=l(o),ce=r(o,"DIV",{class:!0});var ct=s(ce);g(on.$$.fragment,ct),wc=l(ct),nn=r(ct,"P",{});var gd=s(nn);Bc=d(gd,`The BLENDERBOT Model with a language modeling head. Can be used for summarization. This model inherits from `),ir=r(gd,"A",{href:!0});var im=s(ir);zc=d(im,"TFPreTrainedModel"),im.forEach(t),xc=d(gd,`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),gd.forEach(t),Fc=l(ct),rn=r(ct,"P",{});var bd=s(rn);$c=d(bd,"This model is also a "),sn=r(bd,"A",{href:!0,rel:!0});var lm=s(sn);qc=d(lm,"tf.keras.Model"),lm.forEach(t),Mc=d(bd,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),bd.forEach(t),Ec=l(ct),g(Xt.$$.fragment,ct),Cc=l(ct),X=r(ct,"DIV",{class:!0});var Me=s(X);g(an.$$.fragment,Me),jc=l(Me),Et=r(Me,"P",{});var Tr=s(Et);Pc=d(Tr,"The "),lr=r(Tr,"A",{href:!0});var cm=s(lr);Nc=d(cm,"TFBlenderbotForConditionalGeneration"),cm.forEach(t),Ac=d(Tr," forward method, overrides the "),bs=r(Tr,"CODE",{});var pm=s(bs);Oc=d(pm,"__call__"),pm.forEach(t),Ic=d(Tr," special method."),Tr.forEach(t),Sc=l(Me),g(Yt.$$.fragment,Me),Lc=l(Me),ks=r(Me,"P",{});var hm=s(ks);Dc=d(hm,"Conversation example::"),hm.forEach(t),Gc=l(Me),vs=r(Me,"BLOCKQUOTE",{});var um=s(vs);ys=r(um,"BLOCKQUOTE",{});var mm=s(ys);Ts=r(mm,"BLOCKQUOTE",{});var fm=s(Ts);ws=r(fm,"P",{});var _m=s(ws);Wc=d(_m,`from transformers import BlenderbotTokenizer, TFBlenderbotForConditionalGeneration >>> mname = \u2018facebook/blenderbot-400M-distill\u2019 >>> model = TFBlenderbotForConditionalGeneration.from_pretrained(mname) >>> tokenizer = BlenderbotTokenizer.from_pretrained(mname) >>> UTTERANCE = \u201CMy friends are cool but they eat too many carbs.\u201D >>> print(\u201CHuman: \u201D, UTTERANCE) >>> inputs = tokenizer([UTTERANCE], return_tensors=\u2018tf\u2019) >>> reply_ids = model.generate(**inputs) >>> print(\u201CBot: \u201D, tokenizer.batch_decode(reply_ids, skip_special_tokens=True)[0])`),_m.forEach(t),fm.forEach(t),mm.forEach(t),um.forEach(t),Rc=l(Me),Bs=r(Me,"BLOCKQUOTE",{});var gm=s(Bs);zs=r(gm,"BLOCKQUOTE",{});var bm=s(zs);xs=r(bm,"BLOCKQUOTE",{});var km=s(xs);dn=r(km,"P",{});var kd=s(dn);Uc=d(kd,`REPLY = \u201CI\u2019m not sure\u201D >>> print(\u201CHuman: \u201D, REPLY) >>> NEXT_UTTERANCE = ( \u2026 \u201CMy friends are cool but they eat too many carbs.</s> `),Fs=r(kd,"S",{});var vm=s(Fs);Hc=d(vm,`That\u2019s unfortunate. \u201D \u2026 \u201CAre they trying to lose weight or are they just trying to be healthier?`),vm.forEach(t),Kc=d(kd,` \u201D \u2026 \u201D<s> I\u2019m not sure.\u201D \u2026 ) >>> inputs = tokenizer([NEXT_UTTERANCE], return_tensors=\u2018tf\u2019) next_reply_ids = model.generate(**inputs) >>> print(\u201CBot: \u201D, tokenizer.batch_decode(next_reply_ids, skip_special_tokens=True)[0])`),kd.forEach(t),km.forEach(t),bm.forEach(t),gm.forEach(t),Me.forEach(t),ct.forEach(t),Wa=l(o),Ct=r(o,"H2",{class:!0});var vd=s(Ct);Zt=r(vd,"A",{id:!0,class:!0,href:!0});var ym=s(Zt);$s=r(ym,"SPAN",{});var Tm=s($s);g(ln.$$.fragment,Tm),Tm.forEach(t),ym.forEach(t),Vc=l(vd),qs=r(vd,"SPAN",{});var wm=s(qs);Jc=d(wm,"FlaxBlenderbotModel"),wm.forEach(t),vd.forEach(t),Ra=l(o),I=r(o,"DIV",{class:!0});var Y=s(I);g(cn.$$.fragment,Y),Qc=l(Y),pn=r(Y,"P",{});var yd=s(pn);Xc=d(yd,`The bare MBart Model transformer outputting raw hidden-states without any specific head on top. This model inherits from `),cr=r(yd,"A",{href:!0});var Bm=s(cr);Yc=d(Bm,"FlaxPreTrainedModel"),Bm.forEach(t),Zc=d(yd,`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),yd.forEach(t),ep=l(Y),hn=r(Y,"P",{});var Td=s(hn);tp=d(Td,`This model is also a Flax Linen `),un=r(Td,"A",{href:!0,rel:!0});var zm=s(un);op=d(zm,"flax.nn.Module"),zm.forEach(t),np=d(Td,` subclass. Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.`),Td.forEach(t),rp=l(Y),Ms=r(Y,"P",{});var xm=s(Ms);sp=d(xm,"Finally, this model supports inherent JAX features such as:"),xm.forEach(t),ap=l(Y),Ue=r(Y,"UL",{});var co=s(Ue);Es=r(co,"LI",{});var Fm=s(Es);mn=r(Fm,"A",{href:!0,rel:!0});var $m=s(mn);dp=d($m,"Just-In-Time (JIT) compilation"),$m.forEach(t),Fm.forEach(t),ip=l(co),Cs=r(co,"LI",{});var qm=s(Cs);fn=r(qm,"A",{href:!0,rel:!0});var Mm=s(fn);lp=d(Mm,"Automatic Differentiation"),Mm.forEach(t),qm.forEach(t),cp=l(co),js=r(co,"LI",{});var Em=s(js);_n=r(Em,"A",{href:!0,rel:!0});var Cm=s(_n);pp=d(Cm,"Vectorization"),Cm.forEach(t),Em.forEach(t),hp=l(co),Ps=r(co,"LI",{});var jm=s(Ps);gn=r(jm,"A",{href:!0,rel:!0});var Pm=s(gn);up=d(Pm,"Parallelization"),Pm.forEach(t),jm.forEach(t),co.forEach(t),mp=l(Y),ke=r(Y,"DIV",{class:!0});var pt=s(ke);g(bn.$$.fragment,pt),fp=l(pt),jt=r(pt,"P",{});var wr=s(jt);_p=d(wr,"The "),Ns=r(wr,"CODE",{});var Nm=s(Ns);gp=d(Nm,"FlaxBlenderbotPreTrainedModel"),Nm.forEach(t),bp=d(wr,"forward method, overrides the "),As=r(wr,"CODE",{});var Am=s(As);kp=d(Am,"__call__"),Am.forEach(t),vp=d(wr," special method."),wr.forEach(t),yp=l(pt),g(eo.$$.fragment,pt),Tp=l(pt),Os=r(pt,"P",{});var Om=s(Os);wp=d(Om,"Example:"),Om.forEach(t),Bp=l(pt),g(kn.$$.fragment,pt),pt.forEach(t),zp=l(Y),Ze=r(Y,"DIV",{class:!0});var Br=s(Ze);g(vn.$$.fragment,Br),xp=l(Br),Is=r(Br,"P",{});var Im=s(Is);Fp=d(Im,"Example:"),Im.forEach(t),$p=l(Br),g(yn.$$.fragment,Br),Br.forEach(t),qp=l(Y),et=r(Y,"DIV",{class:!0});var zr=s(et);g(Tn.$$.fragment,zr),Mp=l(zr),Ss=r(zr,"P",{});var Sm=s(Ss);Ep=d(Sm,"Example:"),Sm.forEach(t),Cp=l(zr),g(wn.$$.fragment,zr),zr.forEach(t),Y.forEach(t),Ua=l(o),Pt=r(o,"H2",{class:!0});var wd=s(Pt);to=r(wd,"A",{id:!0,class:!0,href:!0});var Lm=s(to);Ls=r(Lm,"SPAN",{});var Dm=s(Ls);g(Bn.$$.fragment,Dm),Dm.forEach(t),Lm.forEach(t),jp=l(wd),Ds=r(wd,"SPAN",{});var Gm=s(Ds);Pp=d(Gm,"FlaxBlenderbotForConditionalGeneration"),Gm.forEach(t),wd.forEach(t),Ha=l(o),S=r(o,"DIV",{class:!0});var Z=s(S);g(zn.$$.fragment,Z),Np=l(Z),xn=r(Z,"P",{});var Bd=s(xn);Ap=d(Bd,`The Blenderbot Model with a language modeling head. Can be used for summarization. This model inherits from `),pr=r(Bd,"A",{href:!0});var Wm=s(pr);Op=d(Wm,"FlaxPreTrainedModel"),Wm.forEach(t),Ip=d(Bd,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Bd.forEach(t),Sp=l(Z),Fn=r(Z,"P",{});var zd=s(Fn);Lp=d(zd,`This model is also a Flax Linen `),$n=r(zd,"A",{href:!0,rel:!0});var Rm=s($n);Dp=d(Rm,"flax.nn.Module"),Rm.forEach(t),Gp=d(zd,` subclass. 
Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.`),zd.forEach(t),Wp=l(Z),Gs=r(Z,"P",{});var Um=s(Gs);Rp=d(Um,"Finally, this model supports inherent JAX features such as:"),Um.forEach(t),Up=l(Z),He=r(Z,"UL",{});var po=s(He);Ws=r(po,"LI",{});var Hm=s(Ws);qn=r(Hm,"A",{href:!0,rel:!0});var Km=s(qn);Hp=d(Km,"Just-In-Time (JIT) compilation"),Km.forEach(t),Hm.forEach(t),Kp=l(po),Rs=r(po,"LI",{});var Vm=s(Rs);Mn=r(Vm,"A",{href:!0,rel:!0});var Jm=s(Mn);Vp=d(Jm,"Automatic Differentiation"),Jm.forEach(t),Vm.forEach(t),Jp=l(po),Us=r(po,"LI",{});var Qm=s(Us);En=r(Qm,"A",{href:!0,rel:!0});var Xm=s(En);Qp=d(Xm,"Vectorization"),Xm.forEach(t),Qm.forEach(t),Xp=l(po),Hs=r(po,"LI",{});var Ym=s(Hs);Cn=r(Ym,"A",{href:!0,rel:!0});var Zm=s(Cn);Yp=d(Zm,"Parallelization"),Zm.forEach(t),Ym.forEach(t),po.forEach(t),Zp=l(Z),L=r(Z,"DIV",{class:!0});var ee=s(L);g(jn.$$.fragment,ee),eh=l(ee),Nt=r(ee,"P",{});var xr=s(Nt);th=d(xr,"The "),Ks=r(xr,"CODE",{});var ef=s(Ks);oh=d(ef,"FlaxBlenderbotPreTrainedModel"),ef.forEach(t),nh=d(xr,"forward method, overrides the "),Vs=r(xr,"CODE",{});var tf=s(Vs);rh=d(tf,"__call__"),tf.forEach(t),sh=d(xr," special method."),xr.forEach(t),ah=l(ee),g(oo.$$.fragment,ee),dh=l(ee),Js=r(ee,"P",{});var of=s(Js);ih=d(of,"Conversation example::"),of.forEach(t),lh=l(ee),Qs=r(ee,"BLOCKQUOTE",{});var nf=s(Qs);Xs=r(nf,"BLOCKQUOTE",{});var rf=s(Xs);Ys=r(rf,"BLOCKQUOTE",{});var sf=s(Ys);Zs=r(sf,"P",{});var af=s(Zs);ch=d(af,"from transformers import BlenderbotTokenizer, FlaxBlenderbotForConditionalGeneration, BlenderbotConfig"),af.forEach(t),sf.forEach(t),rf.forEach(t),nf.forEach(t),ph=l(ee),ea=r(ee,"BLOCKQUOTE",{});var df=s(ea);ta=r(df,"BLOCKQUOTE",{});var lf=s(ta);oa=r(lf,"BLOCKQUOTE",{});var cf=s(oa);na=r(cf,"P",{});var pf=s(na);hh=d(pf,`model = FlaxBlenderbotForConditionalGeneration.from_pretrained(\u2018facebook/blenderbot-400M-distill\u2019) >>> tokenizer = BlenderbotTokenizer.from_pretrained(\u2018facebook/blenderbot-400M-distill\u2019)`),pf.forEach(t),cf.forEach(t),lf.forEach(t),df.forEach(t),uh=l(ee),ra=r(ee,"BLOCKQUOTE",{});var hf=s(ra);sa=r(hf,"BLOCKQUOTE",{});var uf=s(sa);aa=r(uf,"BLOCKQUOTE",{});var mf=s(aa);da=r(mf,"P",{});var ff=s(da);mh=d(ff,`UTTERANCE = \u201CMy friends are cool but they eat too many carbs.\u201D >>> inputs = tokenizer([UTTERANCE], max_length=1024, return_tensors=\u2018np\u2019)`),ff.forEach(t),mf.forEach(t),uf.forEach(t),hf.forEach(t),fh=l(ee),ia=r(ee,"BLOCKQUOTE",{});var _f=s(ia);la=r(_f,"BLOCKQUOTE",{});var gf=s(la);Pn=r(gf,"BLOCKQUOTE",{});var xd=s(Pn);no=r(xd,"H1",{class:!0});var Fd=s(no);ro=r(Fd,"A",{id:!0,class:!0,href:!0});var bf=s(ro);ca=r(bf,"SPAN",{});var kf=s(ca);g(Nn.$$.fragment,kf),kf.forEach(t),bf.forEach(t),_h=l(Fd),pa=r(Fd,"SPAN",{});var vf=s(pa);gh=d(vf,"Generate Reply >>> reply_ids = model.generate(inputs[\u2018input_ids\u2019], num_beams=4, max_length=5,"),vf.forEach(t),Fd.forEach(t),bh=l(xd),ha=r(xd,"P",{});var yf=s(ha);kh=d(yf,`early_stopping=True).sequences >>> print([tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=False) for g in reply_ids])`),yf.forEach(t),xd.forEach(t),gf.forEach(t),_f.forEach(t),ee.forEach(t),vh=l(Z),tt=r(Z,"DIV",{class:!0});var Fr=s(tt);g(An.$$.fragment,Fr),yh=l(Fr),ua=r(Fr,"P",{});var Tf=s(ua);Th=d(Tf,"Example:"),Tf.forEach(t),wh=l(Fr),g(On.$$.fragment,Fr),Fr.forEach(t),Bh=l(Z),ot=r(Z,"DIV",{class:!0});var $r=s(ot);g(In.$$.fragment,$r),zh=l($r),ma=r($r,"P",{});var 
wf=s(ma);xh=d(wf,"Example:"),wf.forEach(t),Fh=l($r),g(Sn.$$.fragment,$r),$r.forEach(t),Z.forEach(t),this.h()},h(){c(u,"name","hf:doc:metadata"),c(u,"content",JSON.stringify(Of)),c(T,"id","blenderbot"),c(T,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(T,"href","#blenderbot"),c(f,"class","relative group"),c(oe,"href","https://github.com/huggingface/transformers/issues/new?assignees=&labels=&template=bug-report.md&title"),c(oe,"rel","nofollow"),c(N,"id","overview"),c(N,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(N,"href","#overview"),c(G,"class","relative group"),c(W,"href","https://arxiv.org/pdf/2004.13637.pdf"),c(W,"rel","nofollow"),c(R,"href","https://huggingface.co/sshleifer"),c(R,"rel","nofollow"),c($,"href","https://github.com/facebookresearch/ParlAI"),c($,"rel","nofollow"),c(fe,"id","implementation-notes"),c(fe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(fe,"href","#implementation-notes"),c(j,"class","relative group"),c(se,"href","https://arxiv.org/pdf/1706.03762.pdf"),c(se,"rel","nofollow"),c(mo,"href","https://huggingface.co/models?search=blenderbot"),c(mo,"rel","nofollow"),c(Rn,"href","blenderbot-small"),c(At,"id","usage"),c(At,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(At,"href","#usage"),c(_t,"class","relative group"),c(Ot,"id","transformers.BlenderbotConfig"),c(Ot,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Ot,"href","#transformers.BlenderbotConfig"),c(gt,"class","relative group"),c(Hn,"href","/docs/transformers/pr_16143/en/model_doc/blenderbot#transformers.BlenderbotModel"),c(ko,"href","https://huggingface.co/facebook/blenderbot-3B"),c(ko,"rel","nofollow"),c(Kn,"href","/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig"),c(Vn,"href","/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig"),c(ae,"class","docstring"),c(It,"id","transformers.BlenderbotTokenizer"),c(It,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(It,"href","#transformers.BlenderbotTokenizer"),c(vt,"class","relative group"),c(Jn,"href","/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaTokenizer"),c(Qn,"href","/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaTokenizer"),c(Qe,"class","docstring"),c(de,"class","docstring"),c(Lt,"id","transformers.BlenderbotTokenizerFast"),c(Lt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Lt,"href","#transformers.BlenderbotTokenizerFast"),c(yt,"class","relative 
group"),c(Yn,"href","/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaTokenizerFast"),c(Zn,"href","/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaTokenizerFast"),c(Xe,"class","docstring"),c(ie,"class","docstring"),c(Gt,"id","transformers.BlenderbotModel"),c(Gt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Gt,"href","#transformers.BlenderbotModel"),c(Tt,"class","relative group"),c(tr,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel"),c(Po,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(Po,"rel","nofollow"),c(or,"href","/docs/transformers/pr_16143/en/model_doc/blenderbot#transformers.BlenderbotModel"),c(_e,"class","docstring"),c(ze,"class","docstring"),c(Rt,"id","transformers.BlenderbotForConditionalGeneration"),c(Rt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Rt,"href","#transformers.BlenderbotForConditionalGeneration"),c(Bt,"class","relative group"),c(nr,"href","/docs/transformers/pr_16143/en/model_doc/bart#transformers.BartForConditionalGeneration"),c(rr,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel"),c(Do,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(Do,"rel","nofollow"),c(sr,"href","/docs/transformers/pr_16143/en/model_doc/blenderbot#transformers.BlenderbotForConditionalGeneration"),c(ge,"class","docstring"),c(xe,"class","docstring"),c(Ht,"id","transformers.BlenderbotForCausalLM"),c(Ht,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Ht,"href","#transformers.BlenderbotForCausalLM"),c(xt,"class","relative group"),c(Ye,"class","docstring"),c(Ft,"class","docstring"),c(Kt,"id","transformers.TFBlenderbotModel"),c(Kt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Kt,"href","#transformers.TFBlenderbotModel"),c($t,"class","relative group"),c(ar,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel"),c(Yo,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(Yo,"rel","nofollow"),c(dr,"href","/docs/transformers/pr_16143/en/model_doc/blenderbot#transformers.TFBlenderbotModel"),c(be,"class","docstring"),c(le,"class","docstring"),c(Qt,"id","transformers.TFBlenderbotForConditionalGeneration"),c(Qt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Qt,"href","#transformers.TFBlenderbotForConditionalGeneration"),c(Mt,"class","relative group"),c(ir,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel"),c(sn,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(sn,"rel","nofollow"),c(lr,"href","/docs/transformers/pr_16143/en/model_doc/blenderbot#transformers.TFBlenderbotForConditionalGeneration"),c(X,"class","docstring"),c(ce,"class","docstring"),c(Zt,"id","transformers.FlaxBlenderbotModel"),c(Zt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 
with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Zt,"href","#transformers.FlaxBlenderbotModel"),c(Ct,"class","relative group"),c(cr,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel"),c(un,"href","https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html"),c(un,"rel","nofollow"),c(mn,"href","https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit"),c(mn,"rel","nofollow"),c(fn,"href","https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation"),c(fn,"rel","nofollow"),c(_n,"href","https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap"),c(_n,"rel","nofollow"),c(gn,"href","https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap"),c(gn,"rel","nofollow"),c(ke,"class","docstring"),c(Ze,"class","docstring"),c(et,"class","docstring"),c(I,"class","docstring"),c(to,"id","transformers.FlaxBlenderbotForConditionalGeneration"),c(to,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(to,"href","#transformers.FlaxBlenderbotForConditionalGeneration"),c(Pt,"class","relative group"),c(pr,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel"),c($n,"href","https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html"),c($n,"rel","nofollow"),c(qn,"href","https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit"),c(qn,"rel","nofollow"),c(Mn,"href","https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation"),c(Mn,"rel","nofollow"),c(En,"href","https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap"),c(En,"rel","nofollow"),c(Cn,"href","https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap"),c(Cn,"rel","nofollow"),c(ro,"id","generate-reply->>>-reply_ids-=-model.generate(inputs[\u2018input_ids\u2019],-num_beams=4,-max_length=5,"),c(ro,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(ro,"href","#generate-reply->>>-reply_ids-=-model.generate(inputs[\u2018input_ids\u2019],-num_beams=4,-max_length=5,"),c(no,"class","relative 
group"),c(L,"class","docstring"),c(tt,"class","docstring"),c(ot,"class","docstring"),c(S,"class","docstring")},m(o,h){e(document.head,u),p(o,x,h),p(o,f,h),e(f,T),e(T,z),b(B,z,null),e(f,w),e(f,F),e(F,Ee),p(o,he,h),p(o,q,h),e(q,ve),e(ve,te),e(q,Ce),e(q,oe),e(oe,ne),e(q,je),p(o,Fe,h),p(o,G,h),e(G,N),e(N,ye),b(U,ye,null),e(G,M),e(G,C),e(C,Pe),p(o,J,h),p(o,Q,h),e(Q,Ne),e(Q,W),e(W,Ae),e(Q,Oe),p(o,A,h),p(o,ue,h),e(ue,re),p(o,$e,h),p(o,me,h),e(me,H),e(H,Ie),p(o,qe,h),p(o,E,h),e(E,Se),e(E,R),e(R,Le),e(E,m),e(E,$),e($,Te),e(E,ht),p(o,Ve,h),p(o,j,h),e(j,fe),e(fe,De),b(we,De,null),e(j,K),e(j,V),e(V,ut),p(o,Je,h),p(o,O,h),e(O,Be),e(Be,mt),e(Be,se),e(se,ft),e(Be,$d),e(O,qd),e(O,uo),e(uo,Md),e(uo,mo),e(mo,Ed),e(uo,Cd),e(O,jd),e(O,Ge),e(Ge,Pd),e(Ge,qr),e(qr,Nd),e(Ge,Ad),e(Ge,Mr),e(Mr,Od),e(Ge,Id),e(Ge,Rn),e(Rn,Sd),e(Ge,Ld),p(o,Ta,h),p(o,_t,h),e(_t,At),e(At,Er),b(fo,Er,null),e(_t,Dd),e(_t,Cr),e(Cr,Gd),p(o,wa,h),p(o,Un,h),e(Un,Wd),p(o,Ba,h),b(_o,o,h),p(o,za,h),p(o,gt,h),e(gt,Ot),e(Ot,jr),b(go,jr,null),e(gt,Rd),e(gt,Pr),e(Pr,Ud),p(o,xa,h),p(o,ae,h),b(bo,ae,null),e(ae,Hd),e(ae,bt),e(bt,Kd),e(bt,Hn),e(Hn,Vd),e(bt,Jd),e(bt,ko),e(ko,Qd),e(bt,Xd),e(ae,Yd),e(ae,kt),e(kt,Zd),e(kt,Kn),e(Kn,ei),e(kt,ti),e(kt,Vn),e(Vn,oi),e(kt,ni),e(ae,ri),e(ae,Nr),e(Nr,si),e(ae,ai),b(vo,ae,null),p(o,Fa,h),p(o,vt,h),e(vt,It),e(It,Ar),b(yo,Ar,null),e(vt,di),e(vt,Or),e(Or,ii),p(o,$a,h),p(o,de,h),b(To,de,null),e(de,li),e(de,Ir),e(Ir,ci),e(de,pi),e(de,St),e(St,Sr),e(Sr,hi),e(St,ui),e(St,Jn),e(Jn,mi),e(St,fi),e(de,_i),e(de,wo),e(wo,gi),e(wo,Qn),e(Qn,bi),e(wo,ki),e(de,vi),e(de,Qe),b(Bo,Qe,null),e(Qe,yi),e(Qe,Lr),e(Lr,Ti),e(Qe,wi),e(Qe,Dr),e(Dr,Xn),e(Xn,Bi),e(Xn,Gr),e(Gr,zi),p(o,qa,h),p(o,yt,h),e(yt,Lt),e(Lt,Wr),b(zo,Wr,null),e(yt,xi),e(yt,Rr),e(Rr,Fi),p(o,Ma,h),p(o,ie,h),b(xo,ie,null),e(ie,$i),e(ie,Fo),e(Fo,qi),e(Fo,Ur),e(Ur,Mi),e(Fo,Ei),e(ie,Ci),e(ie,Dt),e(Dt,Hr),e(Hr,ji),e(Dt,Pi),e(Dt,Yn),e(Yn,Ni),e(Dt,Ai),e(ie,Oi),e(ie,$o),e($o,Ii),e($o,Zn),e(Zn,Si),e($o,Li),e(ie,Di),e(ie,Xe),b(qo,Xe,null),e(Xe,Gi),e(Xe,Kr),e(Kr,Wi),e(Xe,Ri),e(Xe,Vr),e(Vr,er),e(er,Ui),e(er,Jr),e(Jr,Hi),p(o,Ea,h),p(o,Tt,h),e(Tt,Gt),e(Gt,Qr),b(Mo,Qr,null),e(Tt,Ki),e(Tt,Xr),e(Xr,Vi),p(o,Ca,h),p(o,We,h),e(We,Ji),e(We,Yr),e(Yr,Qi),e(We,Xi),e(We,Zr),e(Zr,Yi),e(We,Zi),e(We,es),e(es,el),p(o,ja,h),p(o,ze,h),b(Eo,ze,null),e(ze,tl),e(ze,Co),e(Co,ol),e(Co,tr),e(tr,nl),e(Co,rl),e(ze,sl),e(ze,jo),e(jo,al),e(jo,Po),e(Po,dl),e(jo,il),e(ze,ll),e(ze,_e),b(No,_e,null),e(_e,cl),e(_e,wt),e(wt,pl),e(wt,or),e(or,hl),e(wt,ul),e(wt,ts),e(ts,ml),e(wt,fl),e(_e,_l),b(Wt,_e,null),e(_e,gl),e(_e,os),e(os,bl),e(_e,kl),b(Ao,_e,null),p(o,Pa,h),p(o,Bt,h),e(Bt,Rt),e(Rt,ns),b(Oo,ns,null),e(Bt,vl),e(Bt,rs),e(rs,yl),p(o,Na,h),p(o,Re,h),e(Re,Tl),e(Re,nr),e(nr,wl),e(Re,Bl),e(Re,ss),e(ss,zl),e(Re,xl),e(Re,as),e(as,Fl),p(o,Aa,h),p(o,xe,h),b(Io,xe,null),e(xe,$l),e(xe,So),e(So,ql),e(So,rr),e(rr,Ml),e(So,El),e(xe,Cl),e(xe,Lo),e(Lo,jl),e(Lo,Do),e(Do,Pl),e(Lo,Nl),e(xe,Al),e(xe,ge),b(Go,ge,null),e(ge,Ol),e(ge,zt),e(zt,Il),e(zt,sr),e(sr,Sl),e(zt,Ll),e(zt,ds),e(ds,Dl),e(zt,Gl),e(ge,Wl),b(Ut,ge,null),e(ge,Rl),e(ge,is),e(is,Ul),e(ge,Hl),b(Wo,ge,null),p(o,Oa,h),p(o,xt,h),e(xt,Ht),e(Ht,ls),b(Ro,ls,null),e(xt,Kl),e(xt,cs),e(cs,Vl),p(o,Ia,h),p(o,Ft,h),b(Uo,Ft,null),e(Ft,Jl),e(Ft,Ye),b(Ho,Ye,null),e(Ye,Ql),e(Ye,ps),e(ps,Xl),e(Ye,Yl),b(Ko,Ye,null),p(o,Sa,h),p(o,$t,h),e($t,Kt),e(Kt,hs),b(Vo,hs,null),e($t,Zl),e($t,us),e(us,ec),p(o,La,h),p(o,le,h),b(Jo,le,null),e(le,tc),e(le,Qo),e(Qo,oc),e(Qo,ar),e(ar,nc),e(Qo,rc),e(le,sc),e(le,Xo),e(Xo,ac),e(Xo,Yo),e(Yo,dc),e(Xo,ic),e(le,lc),b(Vt,le,null),e(le,cc),e(le,be),b(Zo,be,null),e(be,pc),e(
be,qt),e(qt,hc),e(qt,dr),e(dr,uc),e(qt,mc),e(qt,ms),e(ms,fc),e(qt,_c),e(be,gc),b(Jt,be,null),e(be,bc),e(be,fs),e(fs,kc),e(be,vc),b(en,be,null),p(o,Da,h),p(o,Mt,h),e(Mt,Qt),e(Qt,_s),b(tn,_s,null),e(Mt,yc),e(Mt,gs),e(gs,Tc),p(o,Ga,h),p(o,ce,h),b(on,ce,null),e(ce,wc),e(ce,nn),e(nn,Bc),e(nn,ir),e(ir,zc),e(nn,xc),e(ce,Fc),e(ce,rn),e(rn,$c),e(rn,sn),e(sn,qc),e(rn,Mc),e(ce,Ec),b(Xt,ce,null),e(ce,Cc),e(ce,X),b(an,X,null),e(X,jc),e(X,Et),e(Et,Pc),e(Et,lr),e(lr,Nc),e(Et,Ac),e(Et,bs),e(bs,Oc),e(Et,Ic),e(X,Sc),b(Yt,X,null),e(X,Lc),e(X,ks),e(ks,Dc),e(X,Gc),e(X,vs),e(vs,ys),e(ys,Ts),e(Ts,ws),e(ws,Wc),e(X,Rc),e(X,Bs),e(Bs,zs),e(zs,xs),e(xs,dn),e(dn,Uc),e(dn,Fs),e(Fs,Hc),e(dn,Kc),p(o,Wa,h),p(o,Ct,h),e(Ct,Zt),e(Zt,$s),b(ln,$s,null),e(Ct,Vc),e(Ct,qs),e(qs,Jc),p(o,Ra,h),p(o,I,h),b(cn,I,null),e(I,Qc),e(I,pn),e(pn,Xc),e(pn,cr),e(cr,Yc),e(pn,Zc),e(I,ep),e(I,hn),e(hn,tp),e(hn,un),e(un,op),e(hn,np),e(I,rp),e(I,Ms),e(Ms,sp),e(I,ap),e(I,Ue),e(Ue,Es),e(Es,mn),e(mn,dp),e(Ue,ip),e(Ue,Cs),e(Cs,fn),e(fn,lp),e(Ue,cp),e(Ue,js),e(js,_n),e(_n,pp),e(Ue,hp),e(Ue,Ps),e(Ps,gn),e(gn,up),e(I,mp),e(I,ke),b(bn,ke,null),e(ke,fp),e(ke,jt),e(jt,_p),e(jt,Ns),e(Ns,gp),e(jt,bp),e(jt,As),e(As,kp),e(jt,vp),e(ke,yp),b(eo,ke,null),e(ke,Tp),e(ke,Os),e(Os,wp),e(ke,Bp),b(kn,ke,null),e(I,zp),e(I,Ze),b(vn,Ze,null),e(Ze,xp),e(Ze,Is),e(Is,Fp),e(Ze,$p),b(yn,Ze,null),e(I,qp),e(I,et),b(Tn,et,null),e(et,Mp),e(et,Ss),e(Ss,Ep),e(et,Cp),b(wn,et,null),p(o,Ua,h),p(o,Pt,h),e(Pt,to),e(to,Ls),b(Bn,Ls,null),e(Pt,jp),e(Pt,Ds),e(Ds,Pp),p(o,Ha,h),p(o,S,h),b(zn,S,null),e(S,Np),e(S,xn),e(xn,Ap),e(xn,pr),e(pr,Op),e(xn,Ip),e(S,Sp),e(S,Fn),e(Fn,Lp),e(Fn,$n),e($n,Dp),e(Fn,Gp),e(S,Wp),e(S,Gs),e(Gs,Rp),e(S,Up),e(S,He),e(He,Ws),e(Ws,qn),e(qn,Hp),e(He,Kp),e(He,Rs),e(Rs,Mn),e(Mn,Vp),e(He,Jp),e(He,Us),e(Us,En),e(En,Qp),e(He,Xp),e(He,Hs),e(Hs,Cn),e(Cn,Yp),e(S,Zp),e(S,L),b(jn,L,null),e(L,eh),e(L,Nt),e(Nt,th),e(Nt,Ks),e(Ks,oh),e(Nt,nh),e(Nt,Vs),e(Vs,rh),e(Nt,sh),e(L,ah),b(oo,L,null),e(L,dh),e(L,Js),e(Js,ih),e(L,lh),e(L,Qs),e(Qs,Xs),e(Xs,Ys),e(Ys,Zs),e(Zs,ch),e(L,ph),e(L,ea),e(ea,ta),e(ta,oa),e(oa,na),e(na,hh),e(L,uh),e(L,ra),e(ra,sa),e(sa,aa),e(aa,da),e(da,mh),e(L,fh),e(L,ia),e(ia,la),e(la,Pn),e(Pn,no),e(no,ro),e(ro,ca),b(Nn,ca,null),e(no,_h),e(no,pa),e(pa,gh),e(Pn,bh),e(Pn,ha),e(ha,kh),e(S,vh),e(S,tt),b(An,tt,null),e(tt,yh),e(tt,ua),e(ua,Th),e(tt,wh),b(On,tt,null),e(S,Bh),e(S,ot),b(In,ot,null),e(ot,zh),e(ot,ma),e(ma,xh),e(ot,Fh),b(Sn,ot,null),Ka=!0},p(o,[h]){const Ln={};h&2&&(Ln.$$scope={dirty:h,ctx:o}),Wt.$set(Ln);const fa={};h&2&&(fa.$$scope={dirty:h,ctx:o}),Ut.$set(fa);const _a={};h&2&&(_a.$$scope={dirty:h,ctx:o}),Vt.$set(_a);const ga={};h&2&&(ga.$$scope={dirty:h,ctx:o}),Jt.$set(ga);const so={};h&2&&(so.$$scope={dirty:h,ctx:o}),Xt.$set(so);const ba={};h&2&&(ba.$$scope={dirty:h,ctx:o}),Yt.$set(ba);const ka={};h&2&&(ka.$$scope={dirty:h,ctx:o}),eo.$set(ka);const 
Dn={};h&2&&(Dn.$$scope={dirty:h,ctx:o}),oo.$set(Dn)},i(o){Ka||(k(B.$$.fragment,o),k(U.$$.fragment,o),k(we.$$.fragment,o),k(fo.$$.fragment,o),k(_o.$$.fragment,o),k(go.$$.fragment,o),k(bo.$$.fragment,o),k(vo.$$.fragment,o),k(yo.$$.fragment,o),k(To.$$.fragment,o),k(Bo.$$.fragment,o),k(zo.$$.fragment,o),k(xo.$$.fragment,o),k(qo.$$.fragment,o),k(Mo.$$.fragment,o),k(Eo.$$.fragment,o),k(No.$$.fragment,o),k(Wt.$$.fragment,o),k(Ao.$$.fragment,o),k(Oo.$$.fragment,o),k(Io.$$.fragment,o),k(Go.$$.fragment,o),k(Ut.$$.fragment,o),k(Wo.$$.fragment,o),k(Ro.$$.fragment,o),k(Uo.$$.fragment,o),k(Ho.$$.fragment,o),k(Ko.$$.fragment,o),k(Vo.$$.fragment,o),k(Jo.$$.fragment,o),k(Vt.$$.fragment,o),k(Zo.$$.fragment,o),k(Jt.$$.fragment,o),k(en.$$.fragment,o),k(tn.$$.fragment,o),k(on.$$.fragment,o),k(Xt.$$.fragment,o),k(an.$$.fragment,o),k(Yt.$$.fragment,o),k(ln.$$.fragment,o),k(cn.$$.fragment,o),k(bn.$$.fragment,o),k(eo.$$.fragment,o),k(kn.$$.fragment,o),k(vn.$$.fragment,o),k(yn.$$.fragment,o),k(Tn.$$.fragment,o),k(wn.$$.fragment,o),k(Bn.$$.fragment,o),k(zn.$$.fragment,o),k(jn.$$.fragment,o),k(oo.$$.fragment,o),k(Nn.$$.fragment,o),k(An.$$.fragment,o),k(On.$$.fragment,o),k(In.$$.fragment,o),k(Sn.$$.fragment,o),Ka=!0)},o(o){v(B.$$.fragment,o),v(U.$$.fragment,o),v(we.$$.fragment,o),v(fo.$$.fragment,o),v(_o.$$.fragment,o),v(go.$$.fragment,o),v(bo.$$.fragment,o),v(vo.$$.fragment,o),v(yo.$$.fragment,o),v(To.$$.fragment,o),v(Bo.$$.fragment,o),v(zo.$$.fragment,o),v(xo.$$.fragment,o),v(qo.$$.fragment,o),v(Mo.$$.fragment,o),v(Eo.$$.fragment,o),v(No.$$.fragment,o),v(Wt.$$.fragment,o),v(Ao.$$.fragment,o),v(Oo.$$.fragment,o),v(Io.$$.fragment,o),v(Go.$$.fragment,o),v(Ut.$$.fragment,o),v(Wo.$$.fragment,o),v(Ro.$$.fragment,o),v(Uo.$$.fragment,o),v(Ho.$$.fragment,o),v(Ko.$$.fragment,o),v(Vo.$$.fragment,o),v(Jo.$$.fragment,o),v(Vt.$$.fragment,o),v(Zo.$$.fragment,o),v(Jt.$$.fragment,o),v(en.$$.fragment,o),v(tn.$$.fragment,o),v(on.$$.fragment,o),v(Xt.$$.fragment,o),v(an.$$.fragment,o),v(Yt.$$.fragment,o),v(ln.$$.fragment,o),v(cn.$$.fragment,o),v(bn.$$.fragment,o),v(eo.$$.fragment,o),v(kn.$$.fragment,o),v(vn.$$.fragment,o),v(yn.$$.fragment,o),v(Tn.$$.fragment,o),v(wn.$$.fragment,o),v(Bn.$$.fragment,o),v(zn.$$.fragment,o),v(jn.$$.fragment,o),v(oo.$$.fragment,o),v(Nn.$$.fragment,o),v(An.$$.fragment,o),v(On.$$.fragment,o),v(In.$$.fragment,o),v(Sn.$$.fragment,o),Ka=!1},d(o){t(u),o&&t(x),o&&t(f),y(B),o&&t(he),o&&t(q),o&&t(Fe),o&&t(G),y(U),o&&t(J),o&&t(Q),o&&t(A),o&&t(ue),o&&t($e),o&&t(me),o&&t(qe),o&&t(E),o&&t(Ve),o&&t(j),y(we),o&&t(Je),o&&t(O),o&&t(Ta),o&&t(_t),y(fo),o&&t(wa),o&&t(Un),o&&t(Ba),y(_o,o),o&&t(za),o&&t(gt),y(go),o&&t(xa),o&&t(ae),y(bo),y(vo),o&&t(Fa),o&&t(vt),y(yo),o&&t($a),o&&t(de),y(To),y(Bo),o&&t(qa),o&&t(yt),y(zo),o&&t(Ma),o&&t(ie),y(xo),y(qo),o&&t(Ea),o&&t(Tt),y(Mo),o&&t(Ca),o&&t(We),o&&t(ja),o&&t(ze),y(Eo),y(No),y(Wt),y(Ao),o&&t(Pa),o&&t(Bt),y(Oo),o&&t(Na),o&&t(Re),o&&t(Aa),o&&t(xe),y(Io),y(Go),y(Ut),y(Wo),o&&t(Oa),o&&t(xt),y(Ro),o&&t(Ia),o&&t(Ft),y(Uo),y(Ho),y(Ko),o&&t(Sa),o&&t($t),y(Vo),o&&t(La),o&&t(le),y(Jo),y(Vt),y(Zo),y(Jt),y(en),o&&t(Da),o&&t(Mt),y(tn),o&&t(Ga),o&&t(ce),y(on),y(Xt),y(an),y(Yt),o&&t(Wa),o&&t(Ct),y(ln),o&&t(Ra),o&&t(I),y(cn),y(bn),y(eo),y(kn),y(vn),y(yn),y(Tn),y(wn),o&&t(Ua),o&&t(Pt),y(Bn),o&&t(Ha),o&&t(S),y(zn),y(jn),y(oo),y(Nn),y(An),y(On),y(In),y(Sn)}}}const Of={local:"blenderbot",sections:[{local:"overview",title:"Overview"},{local:"implementation-notes",title:"Implementation 
Notes"},{local:"usage",title:"Usage"},{local:"transformers.BlenderbotConfig",title:"BlenderbotConfig"},{local:"transformers.BlenderbotTokenizer",title:"BlenderbotTokenizer"},{local:"transformers.BlenderbotTokenizerFast",title:"BlenderbotTokenizerFast"},{local:"transformers.BlenderbotModel",title:"BlenderbotModel"},{local:"transformers.BlenderbotForConditionalGeneration",title:"BlenderbotForConditionalGeneration"},{local:"transformers.BlenderbotForCausalLM",title:"BlenderbotForCausalLM"},{local:"transformers.TFBlenderbotModel",title:"TFBlenderbotModel"},{local:"transformers.TFBlenderbotForConditionalGeneration",title:"TFBlenderbotForConditionalGeneration"},{local:"transformers.FlaxBlenderbotModel",title:"FlaxBlenderbotModel"},{local:"transformers.FlaxBlenderbotForConditionalGeneration",title:"FlaxBlenderbotForConditionalGeneration"}],title:"Blenderbot"};function If(D,u,x){let{fw:f}=u;return D.$$set=T=>{"fw"in T&&x(0,f=T.fw)},[f]}class Uf extends Bf{constructor(u){super();zf(this,u,If,Af,xf,{fw:0})}}export{Uf as default,Of as metadata};
287
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages/model_doc/realm.mdx-d3389e24.js
import{S as Jm,i as Zm,s as Ym,e as n,k as d,w as f,t as s,M as ep,c as r,d as o,m as l,a,x as u,h as i,b as c,F as e,g as h,y as g,q as _,o as k,B as v}from"../../chunks/vendor-4833417e.js";import{T as Rr}from"../../chunks/Tip-fffd6df1.js";import{D as z}from"../../chunks/Docstring-4f315ed9.js";import{C as Xe}from"../../chunks/CodeBlock-6a3d1b46.js";import{I as J}from"../../chunks/IconCopyLink-4b81c553.js";import"../../chunks/CopyButton-dacfbfaf.js";function tp(N){let p,$,w,T,y;return{c(){p=n("p"),$=s("Although the recipe for forward pass needs to be defined within this function, one should call the "),w=n("code"),T=s("Module"),y=s(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(R){p=r(R,"P",{});var b=a(p);$=i(b,"Although the recipe for forward pass needs to be defined within this function, one should call the "),w=r(b,"CODE",{});var q=a(w);T=i(q,"Module"),q.forEach(o),y=i(b,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),b.forEach(o)},m(R,b){h(R,p,b),e(p,$),e(p,w),e(w,T),e(p,y)},d(R){R&&o(p)}}}function op(N){let p,$,w,T,y;return{c(){p=n("p"),$=s("Although the recipe for forward pass needs to be defined within this function, one should call the "),w=n("code"),T=s("Module"),y=s(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(R){p=r(R,"P",{});var b=a(p);$=i(b,"Although the recipe for forward pass needs to be defined within this function, one should call the "),w=r(b,"CODE",{});var q=a(w);T=i(q,"Module"),q.forEach(o),y=i(b,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),b.forEach(o)},m(R,b){h(R,p,b),e(p,$),e(p,w),e(w,T),e(p,y)},d(R){R&&o(p)}}}function np(N){let p,$,w,T,y;return{c(){p=n("p"),$=s("Although the recipe for forward pass needs to be defined within this function, one should call the "),w=n("code"),T=s("Module"),y=s(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(R){p=r(R,"P",{});var b=a(p);$=i(b,"Although the recipe for forward pass needs to be defined within this function, one should call the "),w=r(b,"CODE",{});var q=a(w);T=i(q,"Module"),q.forEach(o),y=i(b,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),b.forEach(o)},m(R,b){h(R,p,b),e(p,$),e(p,w),e(w,T),e(p,y)},d(R){R&&o(p)}}}function rp(N){let p,$,w,T,y;return{c(){p=n("p"),$=s("Although the recipe for forward pass needs to be defined within this function, one should call the "),w=n("code"),T=s("Module"),y=s(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(R){p=r(R,"P",{});var b=a(p);$=i(b,"Although the recipe for forward pass needs to be defined within this function, one should call the "),w=r(b,"CODE",{});var q=a(w);T=i(q,"Module"),q.forEach(o),y=i(b,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),b.forEach(o)},m(R,b){h(R,p,b),e(p,$),e(p,w),e(w,T),e(p,y)},d(R){R&&o(p)}}}function ap(N){let 
p,$,w,T,y;return{c(){p=n("p"),$=s("Although the recipe for forward pass needs to be defined within this function, one should call the "),w=n("code"),T=s("Module"),y=s(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(R){p=r(R,"P",{});var b=a(p);$=i(b,"Although the recipe for forward pass needs to be defined within this function, one should call the "),w=r(b,"CODE",{});var q=a(w);T=i(q,"Module"),q.forEach(o),y=i(b,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),b.forEach(o)},m(R,b){h(R,p,b),e(p,$),e(p,w),e(w,T),e(p,y)},d(R){R&&o(p)}}}function sp(N){let p,$,w,T,y,R,b,q,ya,Tr,se,ye,rn,Ge,Ea,an,za,$r,Ee,qa,Je,xa,Aa,yr,ho,Pa,Er,fo,sn,ja,zr,Z,La,Ze,Ma,Fa,Ye,Sa,Ca,qr,ie,ze,dn,et,Ia,ln,Da,xr,x,tt,Na,cn,Oa,Wa,L,mn,uo,Ka,Ba,pn,go,Qa,Ha,hn,_o,Va,Ua,fn,ko,Xa,Ga,un,vo,Ja,Za,gn,wo,Ya,es,ot,ts,nt,os,ns,rs,de,as,bo,ss,is,Ro,ds,ls,cs,_n,ms,ps,rt,Ar,le,qe,kn,at,hs,vn,fs,Pr,E,st,us,wn,gs,_s,xe,To,ks,vs,$o,ws,bs,Rs,it,Ts,yo,$s,ys,Es,Y,dt,zs,bn,qs,xs,lt,Eo,As,Rn,Ps,js,zo,Ls,Tn,Ms,Fs,Ae,ct,Ss,mt,Cs,$n,Is,Ds,Ns,W,pt,Os,yn,Ws,Ks,ht,Bs,ce,Qs,En,Hs,Vs,zn,Us,Xs,Gs,qo,ft,Js,P,ut,Zs,gt,Ys,qn,ei,ti,oi,me,xn,ni,ri,_t,ai,An,si,ii,di,kt,li,Pn,ci,mi,pi,vt,xo,hi,jn,fi,ui,Ao,gi,Ln,_i,ki,Mn,vi,wi,wt,jr,pe,Pe,Fn,bt,bi,Sn,Ri,Lr,M,Rt,Ti,Tt,$i,Cn,yi,Ei,zi,je,Po,qi,xi,jo,Ai,Pi,ji,$t,Li,Lo,Mi,Fi,Si,j,yt,Ci,Et,Ii,In,Di,Ni,Oi,he,Dn,Wi,Ki,zt,Bi,Nn,Qi,Hi,Vi,qt,Ui,On,Xi,Gi,Ji,xt,Mo,Zi,Wn,Yi,ed,Fo,td,Kn,od,nd,Bn,rd,ad,At,Mr,fe,Le,Qn,Pt,sd,Hn,id,Fr,H,jt,dd,Vn,ld,cd,Me,Lt,md,Un,pd,Sr,ue,Fe,Xn,Mt,hd,Gn,fd,Cr,V,Ft,ud,St,gd,Ct,_d,kd,vd,F,It,wd,ge,bd,So,Rd,Td,Jn,$d,yd,Ed,Se,zd,Zn,qd,xd,Dt,Ir,_e,Ce,Yn,Nt,Ad,er,Pd,Dr,U,Ot,jd,Wt,Ld,Kt,Md,Fd,Sd,S,Bt,Cd,ke,Id,Co,Dd,Nd,tr,Od,Wd,Kd,Ie,Bd,or,Qd,Hd,Qt,Nr,ve,De,nr,Ht,Vd,rr,Ud,Or,X,Vt,Xd,Ut,Gd,Xt,Jd,Zd,Yd,C,Gt,el,we,tl,Io,ol,nl,ar,rl,al,sl,Ne,il,sr,dl,ll,Jt,Wr,be,Oe,ir,Zt,cl,dr,ml,Kr,G,Yt,pl,eo,hl,to,fl,ul,gl,ee,oo,_l,Re,kl,Do,vl,wl,lr,bl,Rl,Tl,We,Br,Te,Ke,cr,no,$l,mr,yl,Qr,O,ro,El,Be,pr,zl,ql,ao,xl,Al,Pl,Qe,so,jl,io,Ll,hr,Ml,Fl,Sl,I,lo,Cl,$e,Il,No,Dl,Nl,fr,Ol,Wl,Kl,He,Bl,ur,Ql,Hl,co,Hr;return R=new J({}),Ge=new J({}),et=new J({}),tt=new z({props:{name:"class transformers.RealmConfig",anchor:"transformers.RealmConfig",parameters:[{name:"vocab_size",val:" = 30522"},{name:"hidden_size",val:" = 768"},{name:"retriever_proj_size",val:" = 128"},{name:"num_hidden_layers",val:" = 12"},{name:"num_attention_heads",val:" = 12"},{name:"num_candidates",val:" = 8"},{name:"intermediate_size",val:" = 3072"},{name:"hidden_act",val:" = 'gelu_new'"},{name:"hidden_dropout_prob",val:" = 0.1"},{name:"attention_probs_dropout_prob",val:" = 0.1"},{name:"max_position_embeddings",val:" = 512"},{name:"type_vocab_size",val:" = 2"},{name:"initializer_range",val:" = 0.02"},{name:"layer_norm_eps",val:" = 1e-12"},{name:"span_hidden_size",val:" = 256"},{name:"max_span_width",val:" = 10"},{name:"reader_layer_norm_eps",val:" = 0.001"},{name:"reader_beam_size",val:" = 5"},{name:"reader_seq_len",val:" = 320"},{name:"num_block_records",val:" = 13353718"},{name:"searcher_beam_size",val:" = 5000"},{name:"searcher_seq_len",val:" = 64"},{name:"pad_token_id",val:" = 1"},{name:"bos_token_id",val:" = 0"},{name:"eos_token_id",val:" = 
2"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/realm/configuration_realm.py#L36",parametersDescription:[{anchor:"transformers.RealmConfig.vocab_size",description:`<strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 30522) &#x2014; Vocabulary size of the REALM model. Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed when calling <a href="/docs/transformers/pr_16143/en/model_doc/realm#transformers.RealmEmbedder">RealmEmbedder</a>, <a href="/docs/transformers/pr_16143/en/model_doc/realm#transformers.RealmScorer">RealmScorer</a>, <a href="/docs/transformers/pr_16143/en/model_doc/realm#transformers.RealmKnowledgeAugEncoder">RealmKnowledgeAugEncoder</a>, or <a href="/docs/transformers/pr_16143/en/model_doc/realm#transformers.RealmReader">RealmReader</a>.`,name:"vocab_size"},{anchor:"transformers.RealmConfig.hidden_size",description:`<strong>hidden_size</strong> (<code>int</code>, <em>optional</em>, defaults to 768) &#x2014; Dimension of the encoder layers and the pooler layer.`,name:"hidden_size"},{anchor:"transformers.RealmConfig.retriever_proj_size",description:`<strong>retriever_proj_size</strong> (<code>int</code>, <em>optional</em>, defaults to 128) &#x2014; Dimension of the retriever(embedder) projection.`,name:"retriever_proj_size"},{anchor:"transformers.RealmConfig.num_hidden_layers",description:`<strong>num_hidden_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of hidden layers in the Transformer encoder.`,name:"num_hidden_layers"},{anchor:"transformers.RealmConfig.num_attention_heads",description:`<strong>num_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.`,name:"num_attention_heads"},{anchor:"transformers.RealmConfig.num_candidates",description:`<strong>num_candidates</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; Number of candidates inputted to the RealmScorer or RealmKnowledgeAugEncoder.`,name:"num_candidates"},{anchor:"transformers.RealmConfig.intermediate_size",description:`<strong>intermediate_size</strong> (<code>int</code>, <em>optional</em>, defaults to 3072) &#x2014; Dimension of the &#x201C;intermediate&#x201D; (i.e., feed-forward) layer in the Transformer encoder.`,name:"intermediate_size"},{anchor:"transformers.RealmConfig.hidden_act",description:`<strong>hidden_act</strong> (<code>str</code> or <code>function</code>, <em>optional</em>, defaults to <code>&quot;gelu_new&quot;</code>) &#x2014; The non-linear activation function (function or string) in the encoder and pooler. 
If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;selu&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.`,name:"hidden_act"},{anchor:"transformers.RealmConfig.hidden_dropout_prob",description:`<strong>hidden_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler.`,name:"hidden_dropout_prob"},{anchor:"transformers.RealmConfig.attention_probs_dropout_prob",description:`<strong>attention_probs_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout ratio for the attention probabilities.`,name:"attention_probs_dropout_prob"},{anchor:"transformers.RealmConfig.max_position_embeddings",description:`<strong>max_position_embeddings</strong> (<code>int</code>, <em>optional</em>, defaults to 512) &#x2014; The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048).`,name:"max_position_embeddings"},{anchor:"transformers.RealmConfig.type_vocab_size",description:`<strong>type_vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 2) &#x2014; The vocabulary size of the <code>token_type_ids</code> passed when calling <a href="/docs/transformers/pr_16143/en/model_doc/realm#transformers.RealmEmbedder">RealmEmbedder</a>, <a href="/docs/transformers/pr_16143/en/model_doc/realm#transformers.RealmScorer">RealmScorer</a>, <a href="/docs/transformers/pr_16143/en/model_doc/realm#transformers.RealmKnowledgeAugEncoder">RealmKnowledgeAugEncoder</a>, or <a href="/docs/transformers/pr_16143/en/model_doc/realm#transformers.RealmReader">RealmReader</a>.`,name:"type_vocab_size"},{anchor:"transformers.RealmConfig.initializer_range",description:`<strong>initializer_range</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices.`,name:"initializer_range"},{anchor:"transformers.RealmConfig.layer_norm_eps",description:`<strong>layer_norm_eps</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-12) &#x2014; The epsilon used by the layer normalization layers.`,name:"layer_norm_eps"},{anchor:"transformers.RealmConfig.span_hidden_size",description:`<strong>span_hidden_size</strong> (<code>int</code>, <em>optional</em>, defaults to 256) &#x2014; Dimension of the reader&#x2019;s spans.`,name:"span_hidden_size"},{anchor:"transformers.RealmConfig.max_span_width",description:`<strong>max_span_width</strong> (<code>int</code>, <em>optional</em>, defaults to 10) &#x2014; Max span width of the reader.`,name:"max_span_width"},{anchor:"transformers.RealmConfig.reader_layer_norm_eps",description:`<strong>reader_layer_norm_eps</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-3) &#x2014; The epsilon used by the reader&#x2019;s layer normalization layers.`,name:"reader_layer_norm_eps"},{anchor:"transformers.RealmConfig.reader_beam_size",description:`<strong>reader_beam_size</strong> (<code>int</code>, <em>optional</em>, defaults to 5) &#x2014; Beam size of the reader.`,name:"reader_beam_size"},{anchor:"transformers.RealmConfig.reader_seq_len",description:`<strong>reader_seq_len</strong> (<code>int</code>, <em>optional</em>, defaults to 288+32) &#x2014; Maximum sequence length of the 
reader.`,name:"reader_seq_len"},{anchor:"transformers.RealmConfig.num_block_records",description:`<strong>num_block_records</strong> (<code>int</code>, <em>optional</em>, defaults to 13353718) &#x2014; Number of block records.`,name:"num_block_records"},{anchor:"transformers.RealmConfig.searcher_beam_size",description:`<strong>searcher_beam_size</strong> (<code>int</code>, <em>optional</em>, defaults to 5000) &#x2014; Beam size of the searcher. Note that when eval mode is enabled, <em>searcher_beam_size</em> will be the same as <em>reader_beam_size</em>.`,name:"searcher_beam_size"},{anchor:"transformers.RealmConfig.searcher_seq_len",description:`<strong>searcher_seq_len</strong> (<code>int</code>, <em>optional</em>, defaults to 64) &#x2014; Maximum sequence length of the searcher.`,name:"searcher_seq_len"}]}}),rt=new Xe({props:{code:`from transformers import RealmEmbedder, RealmConfig # Initializing a REALM realm-cc-news-pretrained-* style configuration configuration = RealmConfig() # Initializing a model from the google/realm-cc-news-pretrained-embedder style configuration model = RealmEmbedder(configuration) # Accessing the model configuration configuration = model.config`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RealmEmbedder, RealmConfig <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a REALM realm-cc-news-pretrained-* style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = RealmConfig() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a model from the google/realm-cc-news-pretrained-embedder style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = RealmEmbedder(configuration) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = model.config`}}),at=new J({}),st=new z({props:{name:"class transformers.RealmTokenizer",anchor:"transformers.RealmTokenizer",parameters:[{name:"vocab_file",val:""},{name:"do_lower_case",val:" = True"},{name:"do_basic_tokenize",val:" = True"},{name:"never_split",val:" = None"},{name:"unk_token",val:" = '[UNK]'"},{name:"sep_token",val:" = '[SEP]'"},{name:"pad_token",val:" = '[PAD]'"},{name:"cls_token",val:" = '[CLS]'"},{name:"mask_token",val:" = '[MASK]'"},{name:"tokenize_chinese_chars",val:" = True"},{name:"strip_accents",val:" = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/realm/tokenization_realm.py#L88",parametersDescription:[{anchor:"transformers.RealmTokenizer.vocab_file",description:`<strong>vocab_file</strong> (<code>str</code>) &#x2014; File containing the vocabulary.`,name:"vocab_file"},{anchor:"transformers.RealmTokenizer.do_lower_case",description:`<strong>do_lower_case</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to lowercase the input when tokenizing.`,name:"do_lower_case"},{anchor:"transformers.RealmTokenizer.do_basic_tokenize",description:`<strong>do_basic_tokenize</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to do basic tokenization before WordPiece.`,name:"do_basic_tokenize"},{anchor:"transformers.RealmTokenizer.never_split",description:`<strong>never_split</strong> (<code>Iterable</code>, 
<em>optional</em>) &#x2014; Collection of tokens which will never be split during tokenization. Only has an effect when <code>do_basic_tokenize=True</code>`,name:"never_split"},{anchor:"transformers.RealmTokenizer.unk_token",description:`<strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[UNK]&quot;</code>) &#x2014; The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.`,name:"unk_token"},{anchor:"transformers.RealmTokenizer.sep_token",description:`<strong>sep_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[SEP]&quot;</code>) &#x2014; The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens.`,name:"sep_token"},{anchor:"transformers.RealmTokenizer.pad_token",description:`<strong>pad_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[PAD]&quot;</code>) &#x2014; The token used for padding, for example when batching sequences of different lengths.`,name:"pad_token"},{anchor:"transformers.RealmTokenizer.cls_token",description:`<strong>cls_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[CLS]&quot;</code>) &#x2014; The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens.`,name:"cls_token"},{anchor:"transformers.RealmTokenizer.mask_token",description:`<strong>mask_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[MASK]&quot;</code>) &#x2014; The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict.`,name:"mask_token"},{anchor:"transformers.RealmTokenizer.tokenize_chinese_chars",description:`<strong>tokenize_chinese_chars</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to tokenize Chinese characters.</p> <p>This should likely be deactivated for Japanese (see this <a href="https://github.com/huggingface/transformers/issues/328" rel="nofollow">issue</a>). strip_accents &#x2014; (<code>bool</code>, <em>optional</em>): Whether or not to strip all accents. 
If this option is not specified, then it will be determined by the value for <code>lowercase</code> (as in the original BERT).`,name:"tokenize_chinese_chars"}]}}),dt=new z({props:{name:"build_inputs_with_special_tokens",anchor:"transformers.RealmTokenizer.build_inputs_with_special_tokens",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/realm/tokenization_realm.py#L295",parametersDescription:[{anchor:"transformers.RealmTokenizer.build_inputs_with_special_tokens.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs to which the special tokens will be added.`,name:"token_ids_0"},{anchor:"transformers.RealmTokenizer.build_inputs_with_special_tokens.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of <a href="../glossary#input-ids">input IDs</a> with the appropriate special tokens.</p> `,returnType:` <p><code>List[int]</code></p> `}}),ct=new z({props:{name:"get_special_tokens_mask",anchor:"transformers.RealmTokenizer.get_special_tokens_mask",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"},{name:"already_has_special_tokens",val:": bool = False"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/realm/tokenization_realm.py#L320",parametersDescription:[{anchor:"transformers.RealmTokenizer.get_special_tokens_mask.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.`,name:"token_ids_0"},{anchor:"transformers.RealmTokenizer.get_special_tokens_mask.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"},{anchor:"transformers.RealmTokenizer.get_special_tokens_mask.already_has_special_tokens",description:`<strong>already_has_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the token list is already formatted with special tokens for the model.`,name:"already_has_special_tokens"}],returnDescription:` <p>A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.</p> `,returnType:` <p><code>List[int]</code></p> `}}),pt=new z({props:{name:"create_token_type_ids_from_sequences",anchor:"transformers.RealmTokenizer.create_token_type_ids_from_sequences",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/realm/tokenization_realm.py#L348",parametersDescription:[{anchor:"transformers.RealmTokenizer.create_token_type_ids_from_sequences.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.`,name:"token_ids_0"},{anchor:"transformers.RealmTokenizer.create_token_type_ids_from_sequences.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of <a href="../glossary#token-type-ids">token type IDs</a> according to 
the given sequence(s).</p> `,returnType:` <p><code>List[int]</code></p> `}}),ht=new Xe({props:{code:`0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence |`,highlighted:`0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 0 </span>0<span class="hljs-number"> 1 </span>1<span class="hljs-number"> 1 </span>1<span class="hljs-number"> 1 </span>1<span class="hljs-number"> 1 </span>1 1 | first sequence | second sequence |`}}),ft=new z({props:{name:"save_vocabulary",anchor:"transformers.RealmTokenizer.save_vocabulary",parameters:[{name:"save_directory",val:": str"},{name:"filename_prefix",val:": typing.Optional[str] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/realm/tokenization_realm.py#L377"}}),ut=new z({props:{name:"batch_encode_candidates",anchor:"transformers.RealmTokenizer.batch_encode_candidates",parameters:[{name:"text",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/realm/tokenization_realm.py#L222",parametersDescription:[{anchor:"transformers.RealmTokenizer.batch_encode_candidates.text",description:`<strong>text</strong> (<code>List[List[str]]</code>) &#x2014; The batch of sequences to be encoded. Each sequence must be in this format: (batch_size, num_candidates, text).`,name:"text"},{anchor:"transformers.RealmTokenizer.batch_encode_candidates.text_pair",description:`<strong>text_pair</strong> (<code>List[List[str]]</code>, <em>optional</em>) &#x2014; The batch of sequences to be encoded. Each sequence must be in this format: (batch_size, num_candidates, text). **kwargs &#x2014; Keyword arguments of the <strong>call</strong> method.`,name:"text_pair"}],returnDescription:` <p>Encoded text or text pair.</p> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a></p> `}}),wt=new Xe({props:{code:`from transformers import RealmTokenizer # batch_size = 2, num_candidates = 2 text = [["Hello world!", "Nice to meet you!"], ["The cute cat.", "The adorable dog."]] tokenizer = RealmTokenizer.from_pretrained("google/realm-cc-news-pretrained-encoder") tokenized_text = tokenizer.batch_encode_candidates(text, max_length=10, return_tensors="pt")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RealmTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># batch_size = 2, num_candidates = 2</span> <span class="hljs-meta">&gt;&gt;&gt; </span>text = [[<span class="hljs-string">&quot;Hello world!&quot;</span>, <span class="hljs-string">&quot;Nice to meet you!&quot;</span>], [<span class="hljs-string">&quot;The cute cat.&quot;</span>, <span class="hljs-string">&quot;The adorable dog.&quot;</span>]] <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RealmTokenizer.from_pretrained(<span class="hljs-string">&quot;google/realm-cc-news-pretrained-encoder&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenized_text = tokenizer.batch_encode_candidates(text, max_length=<span class="hljs-number">10</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>)`}}),bt=new J({}),Rt=new z({props:{name:"class transformers.RealmTokenizerFast",anchor:"transformers.RealmTokenizerFast",parameters:[{name:"vocab_file",val:" = 
None"},{name:"tokenizer_file",val:" = None"},{name:"do_lower_case",val:" = True"},{name:"unk_token",val:" = '[UNK]'"},{name:"sep_token",val:" = '[SEP]'"},{name:"pad_token",val:" = '[PAD]'"},{name:"cls_token",val:" = '[CLS]'"},{name:"mask_token",val:" = '[MASK]'"},{name:"tokenize_chinese_chars",val:" = True"},{name:"strip_accents",val:" = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/realm/tokenization_realm_fast.py#L79",parametersDescription:[{anchor:"transformers.RealmTokenizerFast.vocab_file",description:`<strong>vocab_file</strong> (<code>str</code>) &#x2014; File containing the vocabulary.`,name:"vocab_file"},{anchor:"transformers.RealmTokenizerFast.do_lower_case",description:`<strong>do_lower_case</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to lowercase the input when tokenizing.`,name:"do_lower_case"},{anchor:"transformers.RealmTokenizerFast.unk_token",description:`<strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[UNK]&quot;</code>) &#x2014; The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.`,name:"unk_token"},{anchor:"transformers.RealmTokenizerFast.sep_token",description:`<strong>sep_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[SEP]&quot;</code>) &#x2014; The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens.`,name:"sep_token"},{anchor:"transformers.RealmTokenizerFast.pad_token",description:`<strong>pad_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[PAD]&quot;</code>) &#x2014; The token used for padding, for example when batching sequences of different lengths.`,name:"pad_token"},{anchor:"transformers.RealmTokenizerFast.cls_token",description:`<strong>cls_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[CLS]&quot;</code>) &#x2014; The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens.`,name:"cls_token"},{anchor:"transformers.RealmTokenizerFast.mask_token",description:`<strong>mask_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;[MASK]&quot;</code>) &#x2014; The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict.`,name:"mask_token"},{anchor:"transformers.RealmTokenizerFast.clean_text",description:`<strong>clean_text</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to clean the text before tokenization by removing any control characters and replacing all whitespaces by the classic one.`,name:"clean_text"},{anchor:"transformers.RealmTokenizerFast.tokenize_chinese_chars",description:`<strong>tokenize_chinese_chars</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to tokenize Chinese characters. 
This should likely be deactivated for Japanese (see <a href="https://github.com/huggingface/transformers/issues/328" rel="nofollow">this issue</a>).`,name:"tokenize_chinese_chars"},{anchor:"transformers.RealmTokenizerFast.strip_accents",description:`<strong>strip_accents</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for <code>lowercase</code> (as in the original BERT).`,name:"strip_accents"},{anchor:"transformers.RealmTokenizerFast.wordpieces_prefix",description:`<strong>wordpieces_prefix</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;##&quot;</code>) &#x2014; The prefix for subwords.`,name:"wordpieces_prefix"}]}}),yt=new z({props:{name:"batch_encode_candidates",anchor:"transformers.RealmTokenizerFast.batch_encode_candidates",parameters:[{name:"text",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/realm/tokenization_realm_fast.py#L170",parametersDescription:[{anchor:"transformers.RealmTokenizerFast.batch_encode_candidates.text",description:`<strong>text</strong> (<code>List[List[str]]</code>) &#x2014; The batch of sequences to be encoded. Each sequence must be in this format: (batch_size, num_candidates, text).`,name:"text"},{anchor:"transformers.RealmTokenizerFast.batch_encode_candidates.text_pair",description:`<strong>text_pair</strong> (<code>List[List[str]]</code>, <em>optional</em>) &#x2014; The batch of sequences to be encoded. Each sequence must be in this format: (batch_size, num_candidates, text). **kwargs &#x2014; Keyword arguments of the <strong>call</strong> method.`,name:"text_pair"}],returnDescription:` <p>Encoded text or text pair.</p> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a></p> `}}),At=new Xe({props:{code:`from transformers import RealmTokenizerFast # batch_size = 2, num_candidates = 2 text = [["Hello world!", "Nice to meet you!"], ["The cute cat.", "The adorable dog."]] tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder") tokenized_text = tokenizer.batch_encode_candidates(text, max_length=10, return_tensors="pt")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RealmTokenizerFast <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># batch_size = 2, num_candidates = 2</span> <span class="hljs-meta">&gt;&gt;&gt; </span>text = [[<span class="hljs-string">&quot;Hello world!&quot;</span>, <span class="hljs-string">&quot;Nice to meet you!&quot;</span>], [<span class="hljs-string">&quot;The cute cat.&quot;</span>, <span class="hljs-string">&quot;The adorable dog.&quot;</span>]] <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RealmTokenizerFast.from_pretrained(<span class="hljs-string">&quot;google/realm-cc-news-pretrained-encoder&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenized_text = tokenizer.batch_encode_candidates(text, max_length=<span class="hljs-number">10</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>)`}}),Pt=new J({}),jt=new z({props:{name:"class 
transformers.RealmRetriever",anchor:"transformers.RealmRetriever",parameters:[{name:"block_records",val:""},{name:"tokenizer",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/realm/retrieval_realm.py#L73",parametersDescription:[{anchor:"transformers.RealmRetriever.block_records",description:`<strong>block_records</strong> (<code>np.ndarray</code>) &#x2014; A numpy array which cantains evidence texts.`,name:"block_records"},{anchor:"transformers.RealmRetriever.tokenizer",description:`<strong>tokenizer</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/realm#transformers.RealmTokenizer">RealmTokenizer</a>) &#x2014; The tokenizer to encode retrieved texts.`,name:"tokenizer"}]}}),Lt=new z({props:{name:"block_has_answer",anchor:"transformers.RealmRetriever.block_has_answer",parameters:[{name:"concat_inputs",val:""},{name:"answer_ids",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/realm/retrieval_realm.py#L130"}}),Mt=new J({}),Ft=new z({props:{name:"class transformers.RealmEmbedder",anchor:"transformers.RealmEmbedder",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/realm/modeling_realm.py#L1145",parametersDescription:[{anchor:"transformers.RealmEmbedder.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/realm#transformers.RealmConfig">RealmConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),It=new z({props:{name:"forward",anchor:"transformers.RealmEmbedder.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/realm/modeling_realm.py#L1159",parametersDescription:[{anchor:"transformers.RealmEmbedder.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/realm#transformers.RealmTokenizer">RealmTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.RealmEmbedder.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.RealmEmbedder.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.RealmEmbedder.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.RealmEmbedder.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.RealmEmbedder.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.RealmEmbedder.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.RealmEmbedder.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.RealmEmbedder.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <code>transformers.models.realm.modeling_realm.RealmEmbedderOutput</code>or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/realm#transformers.RealmConfig" >RealmConfig</a>) and inputs.</p> <ul> <li> <p><strong>projected_score</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.retriever_proj_size)</code>) \u2014 Projected score.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><code>transformers.models.realm.modeling_realm.RealmEmbedderOutput</code>or <code>tuple(torch.FloatTensor)</code></p> `}}),Se=new Rr({props:{$$slots:{default:[tp]},$$scope:{ctx:N}}}),Dt=new Xe({props:{code:`from transformers import RealmTokenizer, RealmEmbedder import torch tokenizer = RealmTokenizer.from_pretrained("google/realm-cc-news-pretrained-embedder") model = RealmEmbedder.from_pretrained("google/realm-cc-news-pretrained-embedder") inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") outputs = model(**inputs) projected_score = outputs.projected_score`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RealmTokenizer, RealmEmbedder <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RealmTokenizer.from_pretrained(<span class="hljs-string">&quot;google/realm-cc-news-pretrained-embedder&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = RealmEmbedder.from_pretrained(<span class="hljs-string">&quot;google/realm-cc-news-pretrained-embedder&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; 
</span>projected_score = outputs.projected_score`}}),Nt=new J({}),Ot=new z({props:{name:"class transformers.RealmScorer",anchor:"transformers.RealmScorer",parameters:[{name:"config",val:""},{name:"query_embedder",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/realm/modeling_realm.py#L1225",parametersDescription:[{anchor:"transformers.RealmScorer.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/realm#transformers.RealmConfig">RealmConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"},{anchor:"transformers.RealmScorer.query_embedder",description:`<strong>query_embedder</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/realm#transformers.RealmEmbedder">RealmEmbedder</a>) &#x2014; Embedder for input sequences. If not specified, it will use the same embedder as candidate sequences.`,name:"query_embedder"}]}}),Bt=new z({props:{name:"forward",anchor:"transformers.RealmScorer.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"candidate_input_ids",val:" = None"},{name:"candidate_attention_mask",val:" = None"},{name:"candidate_token_type_ids",val:" = None"},{name:"candidate_inputs_embeds",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/realm/modeling_realm.py#L1241",parametersDescription:[{anchor:"transformers.RealmScorer.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/realm#transformers.RealmTokenizer">RealmTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.RealmScorer.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.RealmScorer.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.RealmScorer.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.RealmScorer.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.RealmScorer.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.RealmScorer.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.RealmScorer.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.RealmScorer.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.RealmScorer.forward.candidate_input_ids",description:`<strong>candidate_input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_candidates, sequence_length)</code>) &#x2014; Indices of candidate input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/realm#transformers.RealmTokenizer">RealmTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"candidate_input_ids"},{anchor:"transformers.RealmScorer.forward.candidate_attention_mask",description:`<strong>candidate_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_candidates, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"candidate_attention_mask"},{anchor:"transformers.RealmScorer.forward.candidate_token_type_ids",description:`<strong>candidate_token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_candidates, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"candidate_token_type_ids"},{anchor:"transformers.RealmScorer.forward.candidate_inputs_embeds",description:`<strong>candidate_inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size * num_candidates, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>candidate_input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <em>candidate_input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"candidate_inputs_embeds"}],returnDescription:` <p>A <code>transformers.models.realm.modeling_realm.RealmScorerOutput</code>or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/realm#transformers.RealmConfig" >RealmConfig</a>) and inputs.</p> <ul> <li><strong>relevance_score</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_candidates)</code>) \u2014 The relevance score of document candidates (before softmax).</li> <li><strong>query_score</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.retriever_proj_size)</code>) \u2014 Query score derived from the query embedder.</li> <li><strong>candidate_score</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_candidates, config.retriever_proj_size)</code>) \u2014 Candidate score derived from the embedder.</li> </ul> `,returnType:` <p><code>transformers.models.realm.modeling_realm.RealmScorerOutput</code>or <code>tuple(torch.FloatTensor)</code></p> `}}),Ie=new Rr({props:{$$slots:{default:[op]},$$scope:{ctx:N}}}),Qt=new Xe({props:{code:`import torch from transformers import RealmTokenizer, RealmScorer tokenizer = RealmTokenizer.from_pretrained("google/realm-cc-news-pretrained-scorer") model = RealmScorer.from_pretrained("google/realm-cc-news-pretrained-scorer", num_candidates=2) # batch_size = 2, num_candidates = 2 input_texts = ["How are you?", "What is the item in the picture?"] candidates_texts = [["Hello world!", "Nice to meet you!"], ["A cute cat.", "An adorable dog."]] inputs = tokenizer(input_texts, return_tensors="pt") candidates_inputs = tokenizer.batch_encode_candidates(candidates_texts, max_length=10, return_tensors="pt") outputs = model( **inputs, candidate_input_ids=candidates_inputs.input_ids, candidate_attention_mask=candidates_inputs.attention_mask, candidate_token_type_ids=candidates_inputs.token_type_ids, ) relevance_score = outputs.relevance_score`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RealmTokenizer, RealmScorer <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RealmTokenizer.from_pretrained(<span class="hljs-string">&quot;google/realm-cc-news-pretrained-scorer&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = RealmScorer.from_pretrained(<span class="hljs-string">&quot;google/realm-cc-news-pretrained-scorer&quot;</span>, num_candidates=<span class="hljs-number">2</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># batch_size = 2, num_candidates = 2</span> <span class="hljs-meta">&gt;&gt;&gt; </span>input_texts = [<span class="hljs-string">&quot;How are you?&quot;</span>, <span class="hljs-string">&quot;What is the item in the picture?&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>candidates_texts = [[<span class="hljs-string">&quot;Hello world!&quot;</span>, <span class="hljs-string">&quot;Nice to meet you!&quot;</span>], [<span class="hljs-string">&quot;A cute cat.&quot;</span>, <span class="hljs-string">&quot;An 
adorable dog.&quot;</span>]] <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(input_texts, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>candidates_inputs = tokenizer.batch_encode_candidates(candidates_texts, max_length=<span class="hljs-number">10</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model( <span class="hljs-meta">... </span> **inputs, <span class="hljs-meta">... </span> candidate_input_ids=candidates_inputs.input_ids, <span class="hljs-meta">... </span> candidate_attention_mask=candidates_inputs.attention_mask, <span class="hljs-meta">... </span> candidate_token_type_ids=candidates_inputs.token_type_ids, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>relevance_score = outputs.relevance_score`}}),Ht=new J({}),Vt=new z({props:{name:"class transformers.RealmKnowledgeAugEncoder",anchor:"transformers.RealmKnowledgeAugEncoder",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/realm/modeling_realm.py#L1372",parametersDescription:[{anchor:"transformers.RealmKnowledgeAugEncoder.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/realm#transformers.RealmConfig">RealmConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Gt=new z({props:{name:"forward",anchor:"transformers.RealmKnowledgeAugEncoder.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"relevance_score",val:" = None"},{name:"labels",val:" = None"},{name:"mlm_mask",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/realm/modeling_realm.py#L1391",parametersDescription:[{anchor:"transformers.RealmKnowledgeAugEncoder.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_candidates, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/realm#transformers.RealmTokenizer">RealmTokenizer</a>. 
See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.RealmKnowledgeAugEncoder.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_candidates, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.RealmKnowledgeAugEncoder.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_candidates, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.RealmKnowledgeAugEncoder.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_candidates, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.RealmKnowledgeAugEncoder.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.RealmKnowledgeAugEncoder.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_candidates, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.RealmKnowledgeAugEncoder.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.RealmKnowledgeAugEncoder.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.RealmKnowledgeAugEncoder.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.RealmKnowledgeAugEncoder.forward.relevance_score",description:`<strong>relevance_score</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_candidates)</code>, <em>optional</em>) &#x2014; Relevance score derived from RealmScorer, must be specified if you want to compute the masked language modeling loss.`,name:"relevance_score"},{anchor:"transformers.RealmKnowledgeAugEncoder.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>`,name:"labels"},{anchor:"transformers.RealmKnowledgeAugEncoder.forward.mlm_mask",description:`<strong>mlm_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid calculating joint loss on certain positions. If not specified, the loss will not be masked. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul>`,name:"mlm_mask"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.MaskedLMOutput" >transformers.modeling_outputs.MaskedLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/realm#transformers.RealmConfig" >RealmConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Masked language modeling (MLM) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.MaskedLMOutput" >transformers.modeling_outputs.MaskedLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Ne=new Rr({props:{$$slots:{default:[np]},$$scope:{ctx:N}}}),Jt=new Xe({props:{code:`import torch from transformers import RealmTokenizer, RealmKnowledgeAugEncoder tokenizer = RealmTokenizer.from_pretrained("google/realm-cc-news-pretrained-encoder") model = RealmKnowledgeAugEncoder.from_pretrained( "google/realm-cc-news-pretrained-encoder", num_candidates=2 ) # batch_size = 2, num_candidates = 2 text = [["Hello world!", "Nice to meet you!"], ["The cute cat.", "The adorable dog."]] inputs = tokenizer.batch_encode_candidates(text, max_length=10, return_tensors="pt") outputs = model(**inputs) logits = outputs.logits`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RealmTokenizer, RealmKnowledgeAugEncoder <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RealmTokenizer.from_pretrained(<span class="hljs-string">&quot;google/realm-cc-news-pretrained-encoder&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = RealmKnowledgeAugEncoder.from_pretrained( 
<span class="hljs-meta">... </span> <span class="hljs-string">&quot;google/realm-cc-news-pretrained-encoder&quot;</span>, num_candidates=<span class="hljs-number">2</span> <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># batch_size = 2, num_candidates = 2</span> <span class="hljs-meta">&gt;&gt;&gt; </span>text = [[<span class="hljs-string">&quot;Hello world!&quot;</span>, <span class="hljs-string">&quot;Nice to meet you!&quot;</span>], [<span class="hljs-string">&quot;The cute cat.&quot;</span>, <span class="hljs-string">&quot;The adorable dog.&quot;</span>]] <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer.batch_encode_candidates(text, max_length=<span class="hljs-number">10</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Zt=new J({}),Yt=new z({props:{name:"class transformers.RealmReader",anchor:"transformers.RealmReader",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/realm/modeling_realm.py#L1520",parametersDescription:[{anchor:"transformers.RealmReader.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/realm#transformers.RealmConfig">RealmConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),oo=new z({props:{name:"forward",anchor:"transformers.RealmReader.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"relevance_score",val:" = None"},{name:"block_mask",val:" = None"},{name:"start_positions",val:" = None"},{name:"end_positions",val:" = None"},{name:"has_answers",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/realm/modeling_realm.py#L1534",parametersDescription:[{anchor:"transformers.RealmReader.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(reader_beam_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/realm#transformers.RealmTokenizer">RealmTokenizer</a>. 
See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.RealmReader.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(reader_beam_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.RealmReader.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(reader_beam_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.RealmReader.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(reader_beam_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.RealmReader.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.RealmReader.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(reader_beam_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.RealmReader.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.RealmReader.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.RealmReader.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.RealmReader.forward.relevance_score",description:`<strong>relevance_score</strong> (<code>torch.FloatTensor</code> of shape <code>(searcher_beam_size,)</code>, <em>optional</em>) &#x2014; Relevance score, which must be specified if you want to compute the logits and marginal log loss.`,name:"relevance_score"},{anchor:"transformers.RealmReader.forward.block_mask",description:`<strong>block_mask</strong> (<code>torch.BoolTensor</code> of shape <code>(searcher_beam_size, sequence_length)</code>, <em>optional</em>) &#x2014; The mask of the evidence block, which must be specified if you want to compute the logits and marginal log loss.`,name:"block_mask"},{anchor:"transformers.RealmReader.forward.start_positions",description:`<strong>start_positions</strong> (<code>torch.LongTensor</code> of shape <code>(searcher_beam_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). Position outside of the sequence are not taken into account for computing the loss.`,name:"start_positions"},{anchor:"transformers.RealmReader.forward.end_positions",description:`<strong>end_positions</strong> (<code>torch.LongTensor</code> of shape <code>(searcher_beam_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). 
Position outside of the sequence are not taken into account for computing the loss.`,name:"end_positions"},{anchor:"transformers.RealmReader.forward.has_answers",description:`<strong>has_answers</strong> (<code>torch.BoolTensor</code> of shape <code>(searcher_beam_size,)</code>, <em>optional</em>) &#x2014; Whether or not the evidence block has answer(s).`,name:"has_answers"}],returnDescription:` <p>A <code>transformers.models.realm.modeling_realm.RealmReaderOutput</code>or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/realm#transformers.RealmConfig" >RealmConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>start_positions</code>, <code>end_positions</code>, <code>has_answers</code> are provided) \u2014 Total loss.</p> </li> <li> <p><strong>retriever_loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>start_positions</code>, <code>end_positions</code>, <code>has_answers</code> are provided) \u2014 Retriever loss.</p> </li> <li> <p><strong>reader_loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>start_positions</code>, <code>end_positions</code>, <code>has_answers</code> are provided) \u2014 Reader loss.</p> </li> <li> <p><strong>retriever_correct</strong> (<code>torch.BoolTensor</code> of shape <code>(config.searcher_beam_size,)</code>, <em>optional</em>) \u2014 Whether or not an evidence block contains answer.</p> </li> <li> <p><strong>reader_correct</strong> (<code>torch.BoolTensor</code> of shape <code>(config.reader_beam_size, num_candidates)</code>, <em>optional</em>) \u2014 Whether or not a span candidate contains answer.</p> </li> <li> <p><strong>block_idx</strong> (<code>torch.LongTensor</code> of shape <code>()</code>) \u2014 The index of the retrieved evidence block in which the predicted answer is most likely.</p> </li> <li> <p><strong>candidate</strong> (<code>torch.LongTensor</code> of shape <code>()</code>) \u2014 The index of the retrieved span candidates in which the predicted answer is most likely.</p> </li> <li> <p><strong>start_pos</strong> (<code>torch.IntTensor</code> of shape <code>()</code>) \u2014 Predicted answer starting position in <em>RealmReader</em>\u2019s inputs.</p> </li> <li> <p><strong>end_pos:</strong> (<code>torch.IntTensor</code> of shape <code>()</code>) \u2014 Predicted answer ending position in <em>RealmReader</em>\u2019s inputs.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape 
<code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><code>transformers.models.realm.modeling_realm.RealmReaderOutput</code>or <code>tuple(torch.FloatTensor)</code></p> `}}),We=new Rr({props:{$$slots:{default:[rp]},$$scope:{ctx:N}}}),no=new J({}),ro=new z({props:{name:"class transformers.RealmForOpenQA",anchor:"transformers.RealmForOpenQA",parameters:[{name:"config",val:""},{name:"retriever",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/realm/modeling_realm.py#L1727",parametersDescription:[{anchor:"transformers.RealmForOpenQA.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/realm#transformers.RealmConfig">RealmConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),so=new z({props:{name:"block_embedding_to",anchor:"transformers.RealmForOpenQA.block_embedding_to",parameters:[{name:"device",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/realm/modeling_realm.py#L1750",parametersDescription:[{anchor:"transformers.RealmForOpenQA.block_embedding_to.device",description:`<strong>device</strong> (<code>str</code> or <code>torch.device</code>) &#x2014; The device to which <code>self.block_emb</code> will be sent.`,name:"device"}]}}),lo=new z({props:{name:"forward",anchor:"transformers.RealmForOpenQA.forward",parameters:[{name:"input_ids",val:""},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"answer_ids",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/realm/modeling_realm.py#L1760",parametersDescription:[{anchor:"transformers.RealmForOpenQA.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(1, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/realm#transformers.RealmTokenizer">RealmTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.RealmForOpenQA.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(1, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.RealmForOpenQA.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(1, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token (should not be used in this model by design).</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.RealmForOpenQA.forward.answer_ids",description:`<strong>answer_ids</strong> (<code>list</code> of shape <code>(num_answers, answer_length)</code>, <em>optional</em>) &#x2014; Answer ids for computing the marginal log-likelihood loss. Indices should be in <code>[-1, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring) Tokens with indices set to <code>-1</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>`,name:"answer_ids"},{anchor:"transformers.RealmForOpenQA.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <code>transformers.models.realm.modeling_realm.RealmForOpenQAOutput</code>or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/realm#transformers.RealmConfig" >RealmConfig</a>) and inputs.</p> <ul> <li><strong>reader_output</strong> (<code>dict</code>) \u2014 Reader output.</li> <li><strong>predicted_answer_ids</strong> (<code>torch.LongTensor</code> of shape <code>(answer_sequence_length)</code>) \u2014 Predicted answer ids.</li> </ul> `,returnType:` <p><code>transformers.models.realm.modeling_realm.RealmForOpenQAOutput</code>or <code>tuple(torch.FloatTensor)</code></p> `}}),He=new Rr({props:{$$slots:{default:[ap]},$$scope:{ctx:N}}}),co=new Xe({props:{code:`import torch from transformers import RealmForOpenQA, RealmRetriever, RealmTokenizer retriever = RealmRetriever.from_pretrained("google/realm-orqa-nq-openqa") tokenizer = RealmTokenizer.from_pretrained("google/realm-orqa-nq-openqa") model = RealmForOpenQA.from_pretrained("google/realm-orqa-nq-openqa", retriever=retriever) question = "Who is the pioneer in modern computer science?" 
question_ids = tokenizer([question], return_tensors="pt") answer_ids = tokenizer( ["alan mathison turing"], add_special_tokens=False, return_token_type_ids=False, return_attention_mask=False, ).input_ids reader_output, predicted_answer_ids = model(**question_ids, answer_ids=answer_ids, return_dict=False) predicted_answer = tokenizer.decode(predicted_answer_ids) loss = reader_output.loss`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RealmForOpenQA, RealmRetriever, RealmTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>retriever = RealmRetriever.from_pretrained(<span class="hljs-string">&quot;google/realm-orqa-nq-openqa&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RealmTokenizer.from_pretrained(<span class="hljs-string">&quot;google/realm-orqa-nq-openqa&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = RealmForOpenQA.from_pretrained(<span class="hljs-string">&quot;google/realm-orqa-nq-openqa&quot;</span>, retriever=retriever) <span class="hljs-meta">&gt;&gt;&gt; </span>question = <span class="hljs-string">&quot;Who is the pioneer in modern computer science?&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>question_ids = tokenizer([question], return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>answer_ids = tokenizer( <span class="hljs-meta">... </span> [<span class="hljs-string">&quot;alan mathison turing&quot;</span>], <span class="hljs-meta">... </span> add_special_tokens=<span class="hljs-literal">False</span>, <span class="hljs-meta">... </span> return_token_type_ids=<span class="hljs-literal">False</span>, <span class="hljs-meta">... </span> return_attention_mask=<span class="hljs-literal">False</span>, <span class="hljs-meta">&gt;&gt;&gt; </span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span>reader_output, predicted_answer_ids = model(**question_ids, answer_ids=answer_ids, return_dict=<span class="hljs-literal">False</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>predicted_answer = tokenizer.decode(predicted_answer_ids) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = reader_output.loss`}}),{c(){p=n("meta"),$=d(),w=n("h1"),T=n("a"),y=n("span"),f(R.$$.fragment),b=d(),q=n("span"),ya=s("REALM"),Tr=d(),se=n("h2"),ye=n("a"),rn=n("span"),f(Ge.$$.fragment),Ea=d(),an=n("span"),za=s("Overview"),$r=d(),Ee=n("p"),qa=s("The REALM model was proposed in "),Je=n("a"),xa=s("REALM: Retrieval-Augmented Language Model Pre-Training"),Aa=s(` by Kelvin Guu, Kenton Lee, Zora Tung, Panupong Pasupat and Ming-Wei Chang. It\u2019s a retrieval-augmented language model that firstly retrieves documents from a textual knowledge corpus and then utilizes retrieved documents to process question answering tasks.`),yr=d(),ho=n("p"),Pa=s("The abstract from the paper is the following:"),Er=d(),fo=n("p"),sn=n("em"),ja=s(`Language model pre-training has been shown to capture a surprising amount of world knowledge, crucial for NLP tasks such as question answering. However, this knowledge is stored implicitly in the parameters of a neural network, requiring ever-larger networks to cover more facts. 
To capture knowledge in a more modular and interpretable way, we augment language model pre-training with a latent knowledge retriever, which allows the model to retrieve and attend over documents from a large corpus such as Wikipedia, used during pre-training, fine-tuning and inference. For the first time, we show how to pre-train such a knowledge retriever in an unsupervised manner, using masked language modeling as the learning signal and backpropagating through a retrieval step that considers millions of documents. We demonstrate the effectiveness of Retrieval-Augmented Language Model pre-training (REALM) by fine-tuning on the challenging task of Open-domain Question Answering (Open-QA). We compare against state-of-the-art models for both explicit and implicit knowledge storage on three popular Open-QA benchmarks, and find that we outperform all previous methods by a significant margin (4-16% absolute accuracy), while also providing qualitative benefits such as interpretability and modularity.`),zr=d(),Z=n("p"),La=s("This model was contributed by "),Ze=n("a"),Ma=s("qqaatw"),Fa=s(`. The original code can be found `),Ye=n("a"),Sa=s("here"),Ca=s("."),qr=d(),ie=n("h2"),ze=n("a"),dn=n("span"),f(et.$$.fragment),Ia=d(),ln=n("span"),Da=s("RealmConfig"),xr=d(),x=n("div"),f(tt.$$.fragment),Na=d(),cn=n("p"),Oa=s("This is the configuration class to store the configuration of"),Wa=d(),L=n("ol"),mn=n("li"),uo=n("a"),Ka=s("RealmEmbedder"),Ba=d(),pn=n("li"),go=n("a"),Qa=s("RealmScorer"),Ha=d(),hn=n("li"),_o=n("a"),Va=s("RealmKnowledgeAugEncoder"),Ua=d(),fn=n("li"),ko=n("a"),Xa=s("RealmRetriever"),Ga=d(),un=n("li"),vo=n("a"),Ja=s("RealmReader"),Za=d(),gn=n("li"),wo=n("a"),Ya=s("RealmForOpenQA"),es=d(),ot=n("p"),ts=s(`It is used to instantiate an REALM model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the REALM `),nt=n("a"),os=s("realm-cc-news-pretrained"),ns=s(" architecture."),rs=d(),de=n("p"),as=s("Configuration objects inherit from "),bo=n("a"),ss=s("PretrainedConfig"),is=s(` and can be used to control the model outputs. Read the documentation from `),Ro=n("a"),ds=s("PretrainedConfig"),ls=s(" for more information."),cs=d(),_n=n("p"),ms=s("Example:"),ps=d(),f(rt.$$.fragment),Ar=d(),le=n("h2"),qe=n("a"),kn=n("span"),f(at.$$.fragment),hs=d(),vn=n("span"),fs=s("RealmTokenizer"),Pr=d(),E=n("div"),f(st.$$.fragment),us=d(),wn=n("p"),gs=s("Construct a REALM tokenizer."),_s=d(),xe=n("p"),To=n("a"),ks=s("RealmTokenizer"),vs=s(" is identical to "),$o=n("a"),ws=s("BertTokenizer"),bs=s(` and runs end-to-end tokenization: punctuation splitting and wordpiece.`),Rs=d(),it=n("p"),Ts=s("This tokenizer inherits from "),yo=n("a"),$s=s("PreTrainedTokenizer"),ys=s(` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),Es=d(),Y=n("div"),f(dt.$$.fragment),zs=d(),bn=n("p"),qs=s(`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A REALM sequence has the following format:`),xs=d(),lt=n("ul"),Eo=n("li"),As=s("single sequence: "),Rn=n("code"),Ps=s("[CLS] X [SEP]"),js=d(),zo=n("li"),Ls=s("pair of sequences: "),Tn=n("code"),Ms=s("[CLS] A [SEP] B [SEP]"),Fs=d(),Ae=n("div"),f(ct.$$.fragment),Ss=d(),mt=n("p"),Cs=s(`Retrieve sequence ids from a token list that has no special tokens added. 
This method is called when adding special tokens using the tokenizer `),$n=n("code"),Is=s("prepare_for_model"),Ds=s(" method."),Ns=d(),W=n("div"),f(pt.$$.fragment),Os=d(),yn=n("p"),Ws=s(`Create a mask from the two sequences passed to be used in a sequence-pair classification task. A REALM sequence pair mask has the following format:`),Ks=d(),f(ht.$$.fragment),Bs=d(),ce=n("p"),Qs=s("If "),En=n("code"),Hs=s("token_ids_1"),Vs=s(" is "),zn=n("code"),Us=s("None"),Xs=s(", this method only returns the first portion of the mask (0s)."),Gs=d(),qo=n("div"),f(ft.$$.fragment),Js=d(),P=n("div"),f(ut.$$.fragment),Zs=d(),gt=n("p"),Ys=s("Encode a batch of text or text pair. This method is similar to regular "),qn=n("strong"),ei=s("call"),ti=s(` method but has the following differences:`),oi=d(),me=n("ol"),xn=n("li"),ni=s("Handle additional num_candidate axis. (batch_size, num_candidates, text)"),ri=d(),_t=n("li"),ai=s("Always pad the sequences to "),An=n("em"),si=s("max_length"),ii=s("."),di=d(),kt=n("li"),li=s("Must specify "),Pn=n("em"),ci=s("max_length"),mi=s(" in order to stack packs of candidates into a batch."),pi=d(),vt=n("ul"),xo=n("li"),hi=s("single sequence: "),jn=n("code"),fi=s("[CLS] X [SEP]"),ui=d(),Ao=n("li"),gi=s("pair of sequences: "),Ln=n("code"),_i=s("[CLS] A [SEP] B [SEP]"),ki=d(),Mn=n("p"),vi=s("Example:"),wi=d(),f(wt.$$.fragment),jr=d(),pe=n("h2"),Pe=n("a"),Fn=n("span"),f(bt.$$.fragment),bi=d(),Sn=n("span"),Ri=s("RealmTokenizerFast"),Lr=d(),M=n("div"),f(Rt.$$.fragment),Ti=d(),Tt=n("p"),$i=s("Construct a \u201Cfast\u201D REALM tokenizer (backed by HuggingFace\u2019s "),Cn=n("em"),yi=s("tokenizers"),Ei=s(" library). Based on WordPiece."),zi=d(),je=n("p"),Po=n("a"),qi=s("RealmTokenizerFast"),xi=s(" is identical to "),jo=n("a"),Ai=s("BertTokenizerFast"),Pi=s(` and runs end-to-end tokenization: punctuation splitting and wordpiece.`),ji=d(),$t=n("p"),Li=s("This tokenizer inherits from "),Lo=n("a"),Mi=s("PreTrainedTokenizerFast"),Fi=s(` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),Si=d(),j=n("div"),f(yt.$$.fragment),Ci=d(),Et=n("p"),Ii=s("Encode a batch of text or text pair. This method is similar to regular "),In=n("strong"),Di=s("call"),Ni=s(` method but has the following differences:`),Oi=d(),he=n("ol"),Dn=n("li"),Wi=s("Handle additional num_candidate axis. (batch_size, num_candidates, text)"),Ki=d(),zt=n("li"),Bi=s("Always pad the sequences to "),Nn=n("em"),Qi=s("max_length"),Hi=s("."),Vi=d(),qt=n("li"),Ui=s("Must specify "),On=n("em"),Xi=s("max_length"),Gi=s(" in order to stack packs of candidates into a batch."),Ji=d(),xt=n("ul"),Mo=n("li"),Zi=s("single sequence: "),Wn=n("code"),Yi=s("[CLS] X [SEP]"),ed=d(),Fo=n("li"),td=s("pair of sequences: "),Kn=n("code"),od=s("[CLS] A [SEP] B [SEP]"),nd=d(),Bn=n("p"),rd=s("Example:"),ad=d(),f(At.$$.fragment),Mr=d(),fe=n("h2"),Le=n("a"),Qn=n("span"),f(Pt.$$.fragment),sd=d(),Hn=n("span"),id=s("RealmRetriever"),Fr=d(),H=n("div"),f(jt.$$.fragment),dd=d(),Vn=n("p"),ld=s(`The retriever of REALM outputting the retrieved evidence block and whether the block has answers as well as answer positions.\u201D`),cd=d(),Me=n("div"),f(Lt.$$.fragment),md=d(),Un=n("p"),pd=s("check if retrieved_blocks has answers."),Sr=d(),ue=n("h2"),Fe=n("a"),Xn=n("span"),f(Mt.$$.fragment),hd=d(),Gn=n("span"),fd=s("RealmEmbedder"),Cr=d(),V=n("div"),f(Ft.$$.fragment),ud=d(),St=n("p"),gd=s(`The embedder of REALM outputting projected score that will be used to calculate relevance score. 
This model is a PyTorch `),Ct=n("a"),_d=s("torch.nn.Module"),kd=s(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),vd=d(),F=n("div"),f(It.$$.fragment),wd=d(),ge=n("p"),bd=s("The "),So=n("a"),Rd=s("RealmEmbedder"),Td=s(" forward method, overrides the "),Jn=n("code"),$d=s("__call__"),yd=s(" special method."),Ed=d(),f(Se.$$.fragment),zd=d(),Zn=n("p"),qd=s("Example:"),xd=d(),f(Dt.$$.fragment),Ir=d(),_e=n("h2"),Ce=n("a"),Yn=n("span"),f(Nt.$$.fragment),Ad=d(),er=n("span"),Pd=s("RealmScorer"),Dr=d(),U=n("div"),f(Ot.$$.fragment),jd=d(),Wt=n("p"),Ld=s(`The scorer of REALM outputting relevance scores representing the score of document candidates (before softmax). This model is a PyTorch `),Kt=n("a"),Md=s("torch.nn.Module"),Fd=s(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Sd=d(),S=n("div"),f(Bt.$$.fragment),Cd=d(),ke=n("p"),Id=s("The "),Co=n("a"),Dd=s("RealmScorer"),Nd=s(" forward method, overrides the "),tr=n("code"),Od=s("__call__"),Wd=s(" special method."),Kd=d(),f(Ie.$$.fragment),Bd=d(),or=n("p"),Qd=s("Example:"),Hd=d(),f(Qt.$$.fragment),Nr=d(),ve=n("h2"),De=n("a"),nr=n("span"),f(Ht.$$.fragment),Vd=d(),rr=n("span"),Ud=s("RealmKnowledgeAugEncoder"),Or=d(),X=n("div"),f(Vt.$$.fragment),Xd=d(),Ut=n("p"),Gd=s(`The knowledge-augmented encoder of REALM outputting masked language model logits and marginal log-likelihood loss. This model is a PyTorch `),Xt=n("a"),Jd=s("torch.nn.Module"),Zd=s(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Yd=d(),C=n("div"),f(Gt.$$.fragment),el=d(),we=n("p"),tl=s("The "),Io=n("a"),ol=s("RealmKnowledgeAugEncoder"),nl=s(" forward method, overrides the "),ar=n("code"),rl=s("__call__"),al=s(" special method."),sl=d(),f(Ne.$$.fragment),il=d(),sr=n("p"),dl=s("Example:"),ll=d(),f(Jt.$$.fragment),Wr=d(),be=n("h2"),Oe=n("a"),ir=n("span"),f(Zt.$$.fragment),cl=d(),dr=n("span"),ml=s("RealmReader"),Kr=d(),G=n("div"),f(Yt.$$.fragment),pl=d(),eo=n("p"),hl=s(`The reader of REALM. This model is a PyTorch `),to=n("a"),fl=s("torch.nn.Module"),ul=s(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),gl=d(),ee=n("div"),f(oo.$$.fragment),_l=d(),Re=n("p"),kl=s("The "),Do=n("a"),vl=s("RealmReader"),wl=s(" forward method, overrides the "),lr=n("code"),bl=s("__call__"),Rl=s(" special method."),Tl=d(),f(We.$$.fragment),Br=d(),Te=n("h2"),Ke=n("a"),cr=n("span"),f(no.$$.fragment),$l=d(),mr=n("span"),yl=s("RealmForOpenQA"),Qr=d(),O=n("div"),f(ro.$$.fragment),El=d(),Be=n("p"),pr=n("code"),zl=s("RealmForOpenQA"),ql=s(` for end-to-end open domain question answering. This model is a PyTorch `),ao=n("a"),xl=s("torch.nn.Module"),Al=s(` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Pl=d(),Qe=n("div"),f(so.$$.fragment),jl=d(),io=n("p"),Ll=s("Send "),hr=n("code"),Ml=s("self.block_emb"),Fl=s(" to a specific device."),Sl=d(),I=n("div"),f(lo.$$.fragment),Cl=d(),$e=n("p"),Il=s("The "),No=n("a"),Dl=s("RealmForOpenQA"),Nl=s(" forward method, overrides the "),fr=n("code"),Ol=s("__call__"),Wl=s(" special method."),Kl=d(),f(He.$$.fragment),Bl=d(),ur=n("p"),Ql=s("Example:"),Hl=d(),f(co.$$.fragment),this.h()},l(t){const m=ep('[data-svelte="svelte-1phssyn"]',document.head);p=r(m,"META",{name:!0,content:!0}),m.forEach(o),$=l(t),w=r(t,"H1",{class:!0});var mo=a(w);T=r(mo,"A",{id:!0,class:!0,href:!0});var gr=a(T);y=r(gr,"SPAN",{});var _r=a(y);u(R.$$.fragment,_r),_r.forEach(o),gr.forEach(o),b=l(mo),q=r(mo,"SPAN",{});var kr=a(q);ya=i(kr,"REALM"),kr.forEach(o),mo.forEach(o),Tr=l(t),se=r(t,"H2",{class:!0});var po=a(se);ye=r(po,"A",{id:!0,class:!0,href:!0});var Yl=a(ye);rn=r(Yl,"SPAN",{});var ec=a(rn);u(Ge.$$.fragment,ec),ec.forEach(o),Yl.forEach(o),Ea=l(po),an=r(po,"SPAN",{});var tc=a(an);za=i(tc,"Overview"),tc.forEach(o),po.forEach(o),$r=l(t),Ee=r(t,"P",{});var Vr=a(Ee);qa=i(Vr,"The REALM model was proposed in "),Je=r(Vr,"A",{href:!0,rel:!0});var oc=a(Je);xa=i(oc,"REALM: Retrieval-Augmented Language Model Pre-Training"),oc.forEach(o),Aa=i(Vr,` by Kelvin Guu, Kenton Lee, Zora Tung, Panupong Pasupat and Ming-Wei Chang. It\u2019s a retrieval-augmented language model that firstly retrieves documents from a textual knowledge corpus and then utilizes retrieved documents to process question answering tasks.`),Vr.forEach(o),yr=l(t),ho=r(t,"P",{});var nc=a(ho);Pa=i(nc,"The abstract from the paper is the following:"),nc.forEach(o),Er=l(t),fo=r(t,"P",{});var rc=a(fo);sn=r(rc,"EM",{});var ac=a(sn);ja=i(ac,`Language model pre-training has been shown to capture a surprising amount of world knowledge, crucial for NLP tasks such as question answering. However, this knowledge is stored implicitly in the parameters of a neural network, requiring ever-larger networks to cover more facts. To capture knowledge in a more modular and interpretable way, we augment language model pre-training with a latent knowledge retriever, which allows the model to retrieve and attend over documents from a large corpus such as Wikipedia, used during pre-training, fine-tuning and inference. For the first time, we show how to pre-train such a knowledge retriever in an unsupervised manner, using masked language modeling as the learning signal and backpropagating through a retrieval step that considers millions of documents. We demonstrate the effectiveness of Retrieval-Augmented Language Model pre-training (REALM) by fine-tuning on the challenging task of Open-domain Question Answering (Open-QA). We compare against state-of-the-art models for both explicit and implicit knowledge storage on three popular Open-QA benchmarks, and find that we outperform all previous methods by a significant margin (4-16% absolute accuracy), while also providing qualitative benefits such as interpretability and modularity.`),ac.forEach(o),rc.forEach(o),zr=l(t),Z=r(t,"P",{});var Oo=a(Z);La=i(Oo,"This model was contributed by "),Ze=r(Oo,"A",{href:!0,rel:!0});var sc=a(Ze);Ma=i(sc,"qqaatw"),sc.forEach(o),Fa=i(Oo,`. 
The original code can be found `),Ye=r(Oo,"A",{href:!0,rel:!0});var ic=a(Ye);Sa=i(ic,"here"),ic.forEach(o),Ca=i(Oo,"."),Oo.forEach(o),qr=l(t),ie=r(t,"H2",{class:!0});var Ur=a(ie);ze=r(Ur,"A",{id:!0,class:!0,href:!0});var dc=a(ze);dn=r(dc,"SPAN",{});var lc=a(dn);u(et.$$.fragment,lc),lc.forEach(o),dc.forEach(o),Ia=l(Ur),ln=r(Ur,"SPAN",{});var cc=a(ln);Da=i(cc,"RealmConfig"),cc.forEach(o),Ur.forEach(o),xr=l(t),x=r(t,"DIV",{class:!0});var D=a(x);u(tt.$$.fragment,D),Na=l(D),cn=r(D,"P",{});var mc=a(cn);Oa=i(mc,"This is the configuration class to store the configuration of"),mc.forEach(o),Wa=l(D),L=r(D,"OL",{});var K=a(L);mn=r(K,"LI",{});var pc=a(mn);uo=r(pc,"A",{href:!0});var hc=a(uo);Ka=i(hc,"RealmEmbedder"),hc.forEach(o),pc.forEach(o),Ba=l(K),pn=r(K,"LI",{});var fc=a(pn);go=r(fc,"A",{href:!0});var uc=a(go);Qa=i(uc,"RealmScorer"),uc.forEach(o),fc.forEach(o),Ha=l(K),hn=r(K,"LI",{});var gc=a(hn);_o=r(gc,"A",{href:!0});var _c=a(_o);Va=i(_c,"RealmKnowledgeAugEncoder"),_c.forEach(o),gc.forEach(o),Ua=l(K),fn=r(K,"LI",{});var kc=a(fn);ko=r(kc,"A",{href:!0});var vc=a(ko);Xa=i(vc,"RealmRetriever"),vc.forEach(o),kc.forEach(o),Ga=l(K),un=r(K,"LI",{});var wc=a(un);vo=r(wc,"A",{href:!0});var bc=a(vo);Ja=i(bc,"RealmReader"),bc.forEach(o),wc.forEach(o),Za=l(K),gn=r(K,"LI",{});var Rc=a(gn);wo=r(Rc,"A",{href:!0});var Tc=a(wo);Ya=i(Tc,"RealmForOpenQA"),Tc.forEach(o),Rc.forEach(o),K.forEach(o),es=l(D),ot=r(D,"P",{});var Xr=a(ot);ts=i(Xr,`It is used to instantiate an REALM model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the REALM `),nt=r(Xr,"A",{href:!0,rel:!0});var $c=a(nt);os=i($c,"realm-cc-news-pretrained"),$c.forEach(o),ns=i(Xr," architecture."),Xr.forEach(o),rs=l(D),de=r(D,"P",{});var Wo=a(de);as=i(Wo,"Configuration objects inherit from "),bo=r(Wo,"A",{href:!0});var yc=a(bo);ss=i(yc,"PretrainedConfig"),yc.forEach(o),is=i(Wo,` and can be used to control the model outputs. Read the documentation from `),Ro=r(Wo,"A",{href:!0});var Ec=a(Ro);ds=i(Ec,"PretrainedConfig"),Ec.forEach(o),ls=i(Wo," for more information."),Wo.forEach(o),cs=l(D),_n=r(D,"P",{});var zc=a(_n);ms=i(zc,"Example:"),zc.forEach(o),ps=l(D),u(rt.$$.fragment,D),D.forEach(o),Ar=l(t),le=r(t,"H2",{class:!0});var Gr=a(le);qe=r(Gr,"A",{id:!0,class:!0,href:!0});var qc=a(qe);kn=r(qc,"SPAN",{});var xc=a(kn);u(at.$$.fragment,xc),xc.forEach(o),qc.forEach(o),hs=l(Gr),vn=r(Gr,"SPAN",{});var Ac=a(vn);fs=i(Ac,"RealmTokenizer"),Ac.forEach(o),Gr.forEach(o),Pr=l(t),E=r(t,"DIV",{class:!0});var A=a(E);u(st.$$.fragment,A),us=l(A),wn=r(A,"P",{});var Pc=a(wn);gs=i(Pc,"Construct a REALM tokenizer."),Pc.forEach(o),_s=l(A),xe=r(A,"P",{});var vr=a(xe);To=r(vr,"A",{href:!0});var jc=a(To);ks=i(jc,"RealmTokenizer"),jc.forEach(o),vs=i(vr," is identical to "),$o=r(vr,"A",{href:!0});var Lc=a($o);ws=i(Lc,"BertTokenizer"),Lc.forEach(o),bs=i(vr,` and runs end-to-end tokenization: punctuation splitting and wordpiece.`),vr.forEach(o),Rs=l(A),it=r(A,"P",{});var Jr=a(it);Ts=i(Jr,"This tokenizer inherits from "),yo=r(Jr,"A",{href:!0});var Mc=a(yo);$s=i(Mc,"PreTrainedTokenizer"),Mc.forEach(o),ys=i(Jr,` which contains most of the main methods. 
Users should refer to this superclass for more information regarding those methods.`),Jr.forEach(o),Es=l(A),Y=r(A,"DIV",{class:!0});var Ko=a(Y);u(dt.$$.fragment,Ko),zs=l(Ko),bn=r(Ko,"P",{});var Fc=a(bn);qs=i(Fc,`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A REALM sequence has the following format:`),Fc.forEach(o),xs=l(Ko),lt=r(Ko,"UL",{});var Zr=a(lt);Eo=r(Zr,"LI",{});var Vl=a(Eo);As=i(Vl,"single sequence: "),Rn=r(Vl,"CODE",{});var Sc=a(Rn);Ps=i(Sc,"[CLS] X [SEP]"),Sc.forEach(o),Vl.forEach(o),js=l(Zr),zo=r(Zr,"LI",{});var Ul=a(zo);Ls=i(Ul,"pair of sequences: "),Tn=r(Ul,"CODE",{});var Cc=a(Tn);Ms=i(Cc,"[CLS] A [SEP] B [SEP]"),Cc.forEach(o),Ul.forEach(o),Zr.forEach(o),Ko.forEach(o),Fs=l(A),Ae=r(A,"DIV",{class:!0});var Yr=a(Ae);u(ct.$$.fragment,Yr),Ss=l(Yr),mt=r(Yr,"P",{});var ea=a(mt);Cs=i(ea,`Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `),$n=r(ea,"CODE",{});var Ic=a($n);Is=i(Ic,"prepare_for_model"),Ic.forEach(o),Ds=i(ea," method."),ea.forEach(o),Yr.forEach(o),Ns=l(A),W=r(A,"DIV",{class:!0});var Ve=a(W);u(pt.$$.fragment,Ve),Os=l(Ve),yn=r(Ve,"P",{});var Dc=a(yn);Ws=i(Dc,`Create a mask from the two sequences passed to be used in a sequence-pair classification task. A REALM sequence pair mask has the following format:`),Dc.forEach(o),Ks=l(Ve),u(ht.$$.fragment,Ve),Bs=l(Ve),ce=r(Ve,"P",{});var Bo=a(ce);Qs=i(Bo,"If "),En=r(Bo,"CODE",{});var Nc=a(En);Hs=i(Nc,"token_ids_1"),Nc.forEach(o),Vs=i(Bo," is "),zn=r(Bo,"CODE",{});var Oc=a(zn);Us=i(Oc,"None"),Oc.forEach(o),Xs=i(Bo,", this method only returns the first portion of the mask (0s)."),Bo.forEach(o),Ve.forEach(o),Gs=l(A),qo=r(A,"DIV",{class:!0});var Wc=a(qo);u(ft.$$.fragment,Wc),Wc.forEach(o),Js=l(A),P=r(A,"DIV",{class:!0});var B=a(P);u(ut.$$.fragment,B),Zs=l(B),gt=r(B,"P",{});var ta=a(gt);Ys=i(ta,"Encode a batch of text or text pair. This method is similar to regular "),qn=r(ta,"STRONG",{});var Kc=a(qn);ei=i(Kc,"call"),Kc.forEach(o),ti=i(ta,` method but has the following differences:`),ta.forEach(o),oi=l(B),me=r(B,"OL",{});var Qo=a(me);xn=r(Qo,"LI",{});var Bc=a(xn);ni=i(Bc,"Handle additional num_candidate axis. 
(batch_size, num_candidates, text)"),Bc.forEach(o),ri=l(Qo),_t=r(Qo,"LI",{});var oa=a(_t);ai=i(oa,"Always pad the sequences to "),An=r(oa,"EM",{});var Qc=a(An);si=i(Qc,"max_length"),Qc.forEach(o),ii=i(oa,"."),oa.forEach(o),di=l(Qo),kt=r(Qo,"LI",{});var na=a(kt);li=i(na,"Must specify "),Pn=r(na,"EM",{});var Hc=a(Pn);ci=i(Hc,"max_length"),Hc.forEach(o),mi=i(na," in order to stack packs of candidates into a batch."),na.forEach(o),Qo.forEach(o),pi=l(B),vt=r(B,"UL",{});var ra=a(vt);xo=r(ra,"LI",{});var Xl=a(xo);hi=i(Xl,"single sequence: "),jn=r(Xl,"CODE",{});var Vc=a(jn);fi=i(Vc,"[CLS] X [SEP]"),Vc.forEach(o),Xl.forEach(o),ui=l(ra),Ao=r(ra,"LI",{});var Gl=a(Ao);gi=i(Gl,"pair of sequences: "),Ln=r(Gl,"CODE",{});var Uc=a(Ln);_i=i(Uc,"[CLS] A [SEP] B [SEP]"),Uc.forEach(o),Gl.forEach(o),ra.forEach(o),ki=l(B),Mn=r(B,"P",{});var Xc=a(Mn);vi=i(Xc,"Example:"),Xc.forEach(o),wi=l(B),u(wt.$$.fragment,B),B.forEach(o),A.forEach(o),jr=l(t),pe=r(t,"H2",{class:!0});var aa=a(pe);Pe=r(aa,"A",{id:!0,class:!0,href:!0});var Gc=a(Pe);Fn=r(Gc,"SPAN",{});var Jc=a(Fn);u(bt.$$.fragment,Jc),Jc.forEach(o),Gc.forEach(o),bi=l(aa),Sn=r(aa,"SPAN",{});var Zc=a(Sn);Ri=i(Zc,"RealmTokenizerFast"),Zc.forEach(o),aa.forEach(o),Lr=l(t),M=r(t,"DIV",{class:!0});var te=a(M);u(Rt.$$.fragment,te),Ti=l(te),Tt=r(te,"P",{});var sa=a(Tt);$i=i(sa,"Construct a \u201Cfast\u201D REALM tokenizer (backed by HuggingFace\u2019s "),Cn=r(sa,"EM",{});var Yc=a(Cn);yi=i(Yc,"tokenizers"),Yc.forEach(o),Ei=i(sa," library). Based on WordPiece."),sa.forEach(o),zi=l(te),je=r(te,"P",{});var wr=a(je);Po=r(wr,"A",{href:!0});var em=a(Po);qi=i(em,"RealmTokenizerFast"),em.forEach(o),xi=i(wr," is identical to "),jo=r(wr,"A",{href:!0});var tm=a(jo);Ai=i(tm,"BertTokenizerFast"),tm.forEach(o),Pi=i(wr,` and runs end-to-end tokenization: punctuation splitting and wordpiece.`),wr.forEach(o),ji=l(te),$t=r(te,"P",{});var ia=a($t);Li=i(ia,"This tokenizer inherits from "),Lo=r(ia,"A",{href:!0});var om=a(Lo);Mi=i(om,"PreTrainedTokenizerFast"),om.forEach(o),Fi=i(ia,` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),ia.forEach(o),Si=l(te),j=r(te,"DIV",{class:!0});var Q=a(j);u(yt.$$.fragment,Q),Ci=l(Q),Et=r(Q,"P",{});var da=a(Et);Ii=i(da,"Encode a batch of text or text pair. This method is similar to regular "),In=r(da,"STRONG",{});var nm=a(In);Di=i(nm,"call"),nm.forEach(o),Ni=i(da,` method but has the following differences:`),da.forEach(o),Oi=l(Q),he=r(Q,"OL",{});var Ho=a(he);Dn=r(Ho,"LI",{});var rm=a(Dn);Wi=i(rm,"Handle additional num_candidate axis. 
(batch_size, num_candidates, text)"),rm.forEach(o),Ki=l(Ho),zt=r(Ho,"LI",{});var la=a(zt);Bi=i(la,"Always pad the sequences to "),Nn=r(la,"EM",{});var am=a(Nn);Qi=i(am,"max_length"),am.forEach(o),Hi=i(la,"."),la.forEach(o),Vi=l(Ho),qt=r(Ho,"LI",{});var ca=a(qt);Ui=i(ca,"Must specify "),On=r(ca,"EM",{});var sm=a(On);Xi=i(sm,"max_length"),sm.forEach(o),Gi=i(ca," in order to stack packs of candidates into a batch."),ca.forEach(o),Ho.forEach(o),Ji=l(Q),xt=r(Q,"UL",{});var ma=a(xt);Mo=r(ma,"LI",{});var Jl=a(Mo);Zi=i(Jl,"single sequence: "),Wn=r(Jl,"CODE",{});var im=a(Wn);Yi=i(im,"[CLS] X [SEP]"),im.forEach(o),Jl.forEach(o),ed=l(ma),Fo=r(ma,"LI",{});var Zl=a(Fo);td=i(Zl,"pair of sequences: "),Kn=r(Zl,"CODE",{});var dm=a(Kn);od=i(dm,"[CLS] A [SEP] B [SEP]"),dm.forEach(o),Zl.forEach(o),ma.forEach(o),nd=l(Q),Bn=r(Q,"P",{});var lm=a(Bn);rd=i(lm,"Example:"),lm.forEach(o),ad=l(Q),u(At.$$.fragment,Q),Q.forEach(o),te.forEach(o),Mr=l(t),fe=r(t,"H2",{class:!0});var pa=a(fe);Le=r(pa,"A",{id:!0,class:!0,href:!0});var cm=a(Le);Qn=r(cm,"SPAN",{});var mm=a(Qn);u(Pt.$$.fragment,mm),mm.forEach(o),cm.forEach(o),sd=l(pa),Hn=r(pa,"SPAN",{});var pm=a(Hn);id=i(pm,"RealmRetriever"),pm.forEach(o),pa.forEach(o),Fr=l(t),H=r(t,"DIV",{class:!0});var Vo=a(H);u(jt.$$.fragment,Vo),dd=l(Vo),Vn=r(Vo,"P",{});var hm=a(Vn);ld=i(hm,`The retriever of REALM outputting the retrieved evidence block and whether the block has answers as well as answer positions.\u201D`),hm.forEach(o),cd=l(Vo),Me=r(Vo,"DIV",{class:!0});var ha=a(Me);u(Lt.$$.fragment,ha),md=l(ha),Un=r(ha,"P",{});var fm=a(Un);pd=i(fm,"check if retrieved_blocks has answers."),fm.forEach(o),ha.forEach(o),Vo.forEach(o),Sr=l(t),ue=r(t,"H2",{class:!0});var fa=a(ue);Fe=r(fa,"A",{id:!0,class:!0,href:!0});var um=a(Fe);Xn=r(um,"SPAN",{});var gm=a(Xn);u(Mt.$$.fragment,gm),gm.forEach(o),um.forEach(o),hd=l(fa),Gn=r(fa,"SPAN",{});var _m=a(Gn);fd=i(_m,"RealmEmbedder"),_m.forEach(o),fa.forEach(o),Cr=l(t),V=r(t,"DIV",{class:!0});var Uo=a(V);u(Ft.$$.fragment,Uo),ud=l(Uo),St=r(Uo,"P",{});var ua=a(St);gd=i(ua,`The embedder of REALM outputting projected score that will be used to calculate relevance score. This model is a PyTorch `),Ct=r(ua,"A",{href:!0,rel:!0});var km=a(Ct);_d=i(km,"torch.nn.Module"),km.forEach(o),kd=i(ua,` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),ua.forEach(o),vd=l(Uo),F=r(Uo,"DIV",{class:!0});var oe=a(F);u(It.$$.fragment,oe),wd=l(oe),ge=r(oe,"P",{});var Xo=a(ge);bd=i(Xo,"The "),So=r(Xo,"A",{href:!0});var vm=a(So);Rd=i(vm,"RealmEmbedder"),vm.forEach(o),Td=i(Xo," forward method, overrides the "),Jn=r(Xo,"CODE",{});var wm=a(Jn);$d=i(wm,"__call__"),wm.forEach(o),yd=i(Xo," special method."),Xo.forEach(o),Ed=l(oe),u(Se.$$.fragment,oe),zd=l(oe),Zn=r(oe,"P",{});var bm=a(Zn);qd=i(bm,"Example:"),bm.forEach(o),xd=l(oe),u(Dt.$$.fragment,oe),oe.forEach(o),Uo.forEach(o),Ir=l(t),_e=r(t,"H2",{class:!0});var ga=a(_e);Ce=r(ga,"A",{id:!0,class:!0,href:!0});var Rm=a(Ce);Yn=r(Rm,"SPAN",{});var Tm=a(Yn);u(Nt.$$.fragment,Tm),Tm.forEach(o),Rm.forEach(o),Ad=l(ga),er=r(ga,"SPAN",{});var $m=a(er);Pd=i($m,"RealmScorer"),$m.forEach(o),ga.forEach(o),Dr=l(t),U=r(t,"DIV",{class:!0});var Go=a(U);u(Ot.$$.fragment,Go),jd=l(Go),Wt=r(Go,"P",{});var _a=a(Wt);Ld=i(_a,`The scorer of REALM outputting relevance scores representing the score of document candidates (before softmax). This model is a PyTorch `),Kt=r(_a,"A",{href:!0,rel:!0});var ym=a(Kt);Md=i(ym,"torch.nn.Module"),ym.forEach(o),Fd=i(_a,` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),_a.forEach(o),Sd=l(Go),S=r(Go,"DIV",{class:!0});var ne=a(S);u(Bt.$$.fragment,ne),Cd=l(ne),ke=r(ne,"P",{});var Jo=a(ke);Id=i(Jo,"The "),Co=r(Jo,"A",{href:!0});var Em=a(Co);Dd=i(Em,"RealmScorer"),Em.forEach(o),Nd=i(Jo," forward method, overrides the "),tr=r(Jo,"CODE",{});var zm=a(tr);Od=i(zm,"__call__"),zm.forEach(o),Wd=i(Jo," special method."),Jo.forEach(o),Kd=l(ne),u(Ie.$$.fragment,ne),Bd=l(ne),or=r(ne,"P",{});var qm=a(or);Qd=i(qm,"Example:"),qm.forEach(o),Hd=l(ne),u(Qt.$$.fragment,ne),ne.forEach(o),Go.forEach(o),Nr=l(t),ve=r(t,"H2",{class:!0});var ka=a(ve);De=r(ka,"A",{id:!0,class:!0,href:!0});var xm=a(De);nr=r(xm,"SPAN",{});var Am=a(nr);u(Ht.$$.fragment,Am),Am.forEach(o),xm.forEach(o),Vd=l(ka),rr=r(ka,"SPAN",{});var Pm=a(rr);Ud=i(Pm,"RealmKnowledgeAugEncoder"),Pm.forEach(o),ka.forEach(o),Or=l(t),X=r(t,"DIV",{class:!0});var Zo=a(X);u(Vt.$$.fragment,Zo),Xd=l(Zo),Ut=r(Zo,"P",{});var va=a(Ut);Gd=i(va,`The knowledge-augmented encoder of REALM outputting masked language model logits and marginal log-likelihood loss. This model is a PyTorch `),Xt=r(va,"A",{href:!0,rel:!0});var jm=a(Xt);Jd=i(jm,"torch.nn.Module"),jm.forEach(o),Zd=i(va,` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),va.forEach(o),Yd=l(Zo),C=r(Zo,"DIV",{class:!0});var re=a(C);u(Gt.$$.fragment,re),el=l(re),we=r(re,"P",{});var Yo=a(we);tl=i(Yo,"The "),Io=r(Yo,"A",{href:!0});var Lm=a(Io);ol=i(Lm,"RealmKnowledgeAugEncoder"),Lm.forEach(o),nl=i(Yo," forward method, overrides the "),ar=r(Yo,"CODE",{});var Mm=a(ar);rl=i(Mm,"__call__"),Mm.forEach(o),al=i(Yo," special method."),Yo.forEach(o),sl=l(re),u(Ne.$$.fragment,re),il=l(re),sr=r(re,"P",{});var Fm=a(sr);dl=i(Fm,"Example:"),Fm.forEach(o),ll=l(re),u(Jt.$$.fragment,re),re.forEach(o),Zo.forEach(o),Wr=l(t),be=r(t,"H2",{class:!0});var wa=a(be);Oe=r(wa,"A",{id:!0,class:!0,href:!0});var Sm=a(Oe);ir=r(Sm,"SPAN",{});var Cm=a(ir);u(Zt.$$.fragment,Cm),Cm.forEach(o),Sm.forEach(o),cl=l(wa),dr=r(wa,"SPAN",{});var Im=a(dr);ml=i(Im,"RealmReader"),Im.forEach(o),wa.forEach(o),Kr=l(t),G=r(t,"DIV",{class:!0});var en=a(G);u(Yt.$$.fragment,en),pl=l(en),eo=r(en,"P",{});var ba=a(eo);hl=i(ba,`The reader of REALM. This model is a PyTorch `),to=r(ba,"A",{href:!0,rel:!0});var Dm=a(to);fl=i(Dm,"torch.nn.Module"),Dm.forEach(o),ul=i(ba,` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),ba.forEach(o),gl=l(en),ee=r(en,"DIV",{class:!0});var tn=a(ee);u(oo.$$.fragment,tn),_l=l(tn),Re=r(tn,"P",{});var on=a(Re);kl=i(on,"The "),Do=r(on,"A",{href:!0});var Nm=a(Do);vl=i(Nm,"RealmReader"),Nm.forEach(o),wl=i(on," forward method, overrides the "),lr=r(on,"CODE",{});var Om=a(lr);bl=i(Om,"__call__"),Om.forEach(o),Rl=i(on," special method."),on.forEach(o),Tl=l(tn),u(We.$$.fragment,tn),tn.forEach(o),en.forEach(o),Br=l(t),Te=r(t,"H2",{class:!0});var Ra=a(Te);Ke=r(Ra,"A",{id:!0,class:!0,href:!0});var Wm=a(Ke);cr=r(Wm,"SPAN",{});var Km=a(cr);u(no.$$.fragment,Km),Km.forEach(o),Wm.forEach(o),$l=l(Ra),mr=r(Ra,"SPAN",{});var Bm=a(mr);yl=i(Bm,"RealmForOpenQA"),Bm.forEach(o),Ra.forEach(o),Qr=l(t),O=r(t,"DIV",{class:!0});var Ue=a(O);u(ro.$$.fragment,Ue),El=l(Ue),Be=r(Ue,"P",{});var br=a(Be);pr=r(br,"CODE",{});var Qm=a(pr);zl=i(Qm,"RealmForOpenQA"),Qm.forEach(o),ql=i(br,` for end-to-end open domain question answering. 
This model is a PyTorch `),ao=r(br,"A",{href:!0,rel:!0});var Hm=a(ao);xl=i(Hm,"torch.nn.Module"),Hm.forEach(o),Al=i(br,` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),br.forEach(o),Pl=l(Ue),Qe=r(Ue,"DIV",{class:!0});var Ta=a(Qe);u(so.$$.fragment,Ta),jl=l(Ta),io=r(Ta,"P",{});var $a=a(io);Ll=i($a,"Send "),hr=r($a,"CODE",{});var Vm=a(hr);Ml=i(Vm,"self.block_emb"),Vm.forEach(o),Fl=i($a," to a specific device."),$a.forEach(o),Ta.forEach(o),Sl=l(Ue),I=r(Ue,"DIV",{class:!0});var ae=a(I);u(lo.$$.fragment,ae),Cl=l(ae),$e=r(ae,"P",{});var nn=a($e);Il=i(nn,"The "),No=r(nn,"A",{href:!0});var Um=a(No);Dl=i(Um,"RealmForOpenQA"),Um.forEach(o),Nl=i(nn," forward method, overrides the "),fr=r(nn,"CODE",{});var Xm=a(fr);Ol=i(Xm,"__call__"),Xm.forEach(o),Wl=i(nn," special method."),nn.forEach(o),Kl=l(ae),u(He.$$.fragment,ae),Bl=l(ae),ur=r(ae,"P",{});var Gm=a(ur);Ql=i(Gm,"Example:"),Gm.forEach(o),Hl=l(ae),u(co.$$.fragment,ae),ae.forEach(o),Ue.forEach(o),this.h()},h(){c(p,"name","hf:doc:metadata"),c(p,"content",JSON.stringify(ip)),c(T,"id","realm"),c(T,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(T,"href","#realm"),c(w,"class","relative group"),c(ye,"id","overview"),c(ye,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(ye,"href","#overview"),c(se,"class","relative group"),c(Je,"href","https://arxiv.org/abs/2002.08909"),c(Je,"rel","nofollow"),c(Ze,"href","https://huggingface.co/qqaatw"),c(Ze,"rel","nofollow"),c(Ye,"href","https://github.com/google-research/language/tree/master/language/realm"),c(Ye,"rel","nofollow"),c(ze,"id","transformers.RealmConfig"),c(ze,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(ze,"href","#transformers.RealmConfig"),c(ie,"class","relative group"),c(uo,"href","/docs/transformers/pr_16143/en/model_doc/realm#transformers.RealmEmbedder"),c(go,"href","/docs/transformers/pr_16143/en/model_doc/realm#transformers.RealmScorer"),c(_o,"href","/docs/transformers/pr_16143/en/model_doc/realm#transformers.RealmKnowledgeAugEncoder"),c(ko,"href","/docs/transformers/pr_16143/en/model_doc/realm#transformers.RealmRetriever"),c(vo,"href","/docs/transformers/pr_16143/en/model_doc/realm#transformers.RealmReader"),c(wo,"href","/docs/transformers/pr_16143/en/model_doc/realm#transformers.RealmForOpenQA"),c(nt,"href","https://huggingface.co/google/realm-cc-news-pretrained-embedder"),c(nt,"rel","nofollow"),c(bo,"href","/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig"),c(Ro,"href","/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig"),c(x,"class","docstring"),c(qe,"id","transformers.RealmTokenizer"),c(qe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(qe,"href","#transformers.RealmTokenizer"),c(le,"class","relative 
group"),c(To,"href","/docs/transformers/pr_16143/en/model_doc/realm#transformers.RealmTokenizer"),c($o,"href","/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertTokenizer"),c(yo,"href","/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer"),c(Y,"class","docstring"),c(Ae,"class","docstring"),c(W,"class","docstring"),c(qo,"class","docstring"),c(P,"class","docstring"),c(E,"class","docstring"),c(Pe,"id","transformers.RealmTokenizerFast"),c(Pe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Pe,"href","#transformers.RealmTokenizerFast"),c(pe,"class","relative group"),c(Po,"href","/docs/transformers/pr_16143/en/model_doc/realm#transformers.RealmTokenizerFast"),c(jo,"href","/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertTokenizerFast"),c(Lo,"href","/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast"),c(j,"class","docstring"),c(M,"class","docstring"),c(Le,"id","transformers.RealmRetriever"),c(Le,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Le,"href","#transformers.RealmRetriever"),c(fe,"class","relative group"),c(Me,"class","docstring"),c(H,"class","docstring"),c(Fe,"id","transformers.RealmEmbedder"),c(Fe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Fe,"href","#transformers.RealmEmbedder"),c(ue,"class","relative group"),c(Ct,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(Ct,"rel","nofollow"),c(So,"href","/docs/transformers/pr_16143/en/model_doc/realm#transformers.RealmEmbedder"),c(F,"class","docstring"),c(V,"class","docstring"),c(Ce,"id","transformers.RealmScorer"),c(Ce,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Ce,"href","#transformers.RealmScorer"),c(_e,"class","relative group"),c(Kt,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(Kt,"rel","nofollow"),c(Co,"href","/docs/transformers/pr_16143/en/model_doc/realm#transformers.RealmScorer"),c(S,"class","docstring"),c(U,"class","docstring"),c(De,"id","transformers.RealmKnowledgeAugEncoder"),c(De,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(De,"href","#transformers.RealmKnowledgeAugEncoder"),c(ve,"class","relative group"),c(Xt,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(Xt,"rel","nofollow"),c(Io,"href","/docs/transformers/pr_16143/en/model_doc/realm#transformers.RealmKnowledgeAugEncoder"),c(C,"class","docstring"),c(X,"class","docstring"),c(Oe,"id","transformers.RealmReader"),c(Oe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Oe,"href","#transformers.RealmReader"),c(be,"class","relative 
group"),c(to,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(to,"rel","nofollow"),c(Do,"href","/docs/transformers/pr_16143/en/model_doc/realm#transformers.RealmReader"),c(ee,"class","docstring"),c(G,"class","docstring"),c(Ke,"id","transformers.RealmForOpenQA"),c(Ke,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Ke,"href","#transformers.RealmForOpenQA"),c(Te,"class","relative group"),c(ao,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(ao,"rel","nofollow"),c(Qe,"class","docstring"),c(No,"href","/docs/transformers/pr_16143/en/model_doc/realm#transformers.RealmForOpenQA"),c(I,"class","docstring"),c(O,"class","docstring")},m(t,m){e(document.head,p),h(t,$,m),h(t,w,m),e(w,T),e(T,y),g(R,y,null),e(w,b),e(w,q),e(q,ya),h(t,Tr,m),h(t,se,m),e(se,ye),e(ye,rn),g(Ge,rn,null),e(se,Ea),e(se,an),e(an,za),h(t,$r,m),h(t,Ee,m),e(Ee,qa),e(Ee,Je),e(Je,xa),e(Ee,Aa),h(t,yr,m),h(t,ho,m),e(ho,Pa),h(t,Er,m),h(t,fo,m),e(fo,sn),e(sn,ja),h(t,zr,m),h(t,Z,m),e(Z,La),e(Z,Ze),e(Ze,Ma),e(Z,Fa),e(Z,Ye),e(Ye,Sa),e(Z,Ca),h(t,qr,m),h(t,ie,m),e(ie,ze),e(ze,dn),g(et,dn,null),e(ie,Ia),e(ie,ln),e(ln,Da),h(t,xr,m),h(t,x,m),g(tt,x,null),e(x,Na),e(x,cn),e(cn,Oa),e(x,Wa),e(x,L),e(L,mn),e(mn,uo),e(uo,Ka),e(L,Ba),e(L,pn),e(pn,go),e(go,Qa),e(L,Ha),e(L,hn),e(hn,_o),e(_o,Va),e(L,Ua),e(L,fn),e(fn,ko),e(ko,Xa),e(L,Ga),e(L,un),e(un,vo),e(vo,Ja),e(L,Za),e(L,gn),e(gn,wo),e(wo,Ya),e(x,es),e(x,ot),e(ot,ts),e(ot,nt),e(nt,os),e(ot,ns),e(x,rs),e(x,de),e(de,as),e(de,bo),e(bo,ss),e(de,is),e(de,Ro),e(Ro,ds),e(de,ls),e(x,cs),e(x,_n),e(_n,ms),e(x,ps),g(rt,x,null),h(t,Ar,m),h(t,le,m),e(le,qe),e(qe,kn),g(at,kn,null),e(le,hs),e(le,vn),e(vn,fs),h(t,Pr,m),h(t,E,m),g(st,E,null),e(E,us),e(E,wn),e(wn,gs),e(E,_s),e(E,xe),e(xe,To),e(To,ks),e(xe,vs),e(xe,$o),e($o,ws),e(xe,bs),e(E,Rs),e(E,it),e(it,Ts),e(it,yo),e(yo,$s),e(it,ys),e(E,Es),e(E,Y),g(dt,Y,null),e(Y,zs),e(Y,bn),e(bn,qs),e(Y,xs),e(Y,lt),e(lt,Eo),e(Eo,As),e(Eo,Rn),e(Rn,Ps),e(lt,js),e(lt,zo),e(zo,Ls),e(zo,Tn),e(Tn,Ms),e(E,Fs),e(E,Ae),g(ct,Ae,null),e(Ae,Ss),e(Ae,mt),e(mt,Cs),e(mt,$n),e($n,Is),e(mt,Ds),e(E,Ns),e(E,W),g(pt,W,null),e(W,Os),e(W,yn),e(yn,Ws),e(W,Ks),g(ht,W,null),e(W,Bs),e(W,ce),e(ce,Qs),e(ce,En),e(En,Hs),e(ce,Vs),e(ce,zn),e(zn,Us),e(ce,Xs),e(E,Gs),e(E,qo),g(ft,qo,null),e(E,Js),e(E,P),g(ut,P,null),e(P,Zs),e(P,gt),e(gt,Ys),e(gt,qn),e(qn,ei),e(gt,ti),e(P,oi),e(P,me),e(me,xn),e(xn,ni),e(me,ri),e(me,_t),e(_t,ai),e(_t,An),e(An,si),e(_t,ii),e(me,di),e(me,kt),e(kt,li),e(kt,Pn),e(Pn,ci),e(kt,mi),e(P,pi),e(P,vt),e(vt,xo),e(xo,hi),e(xo,jn),e(jn,fi),e(vt,ui),e(vt,Ao),e(Ao,gi),e(Ao,Ln),e(Ln,_i),e(P,ki),e(P,Mn),e(Mn,vi),e(P,wi),g(wt,P,null),h(t,jr,m),h(t,pe,m),e(pe,Pe),e(Pe,Fn),g(bt,Fn,null),e(pe,bi),e(pe,Sn),e(Sn,Ri),h(t,Lr,m),h(t,M,m),g(Rt,M,null),e(M,Ti),e(M,Tt),e(Tt,$i),e(Tt,Cn),e(Cn,yi),e(Tt,Ei),e(M,zi),e(M,je),e(je,Po),e(Po,qi),e(je,xi),e(je,jo),e(jo,Ai),e(je,Pi),e(M,ji),e(M,$t),e($t,Li),e($t,Lo),e(Lo,Mi),e($t,Fi),e(M,Si),e(M,j),g(yt,j,null),e(j,Ci),e(j,Et),e(Et,Ii),e(Et,In),e(In,Di),e(Et,Ni),e(j,Oi),e(j,he),e(he,Dn),e(Dn,Wi),e(he,Ki),e(he,zt),e(zt,Bi),e(zt,Nn),e(Nn,Qi),e(zt,Hi),e(he,Vi),e(he,qt),e(qt,Ui),e(qt,On),e(On,Xi),e(qt,Gi),e(j,Ji),e(j,xt),e(xt,Mo),e(Mo,Zi),e(Mo,Wn),e(Wn,Yi),e(xt,ed),e(xt,Fo),e(Fo,td),e(Fo,Kn),e(Kn,od),e(j,nd),e(j,Bn),e(Bn,rd),e(j,ad),g(At,j,null),h(t,Mr,m),h(t,fe,m),e(fe,Le),e(Le,Qn),g(Pt,Qn,null),e(fe,sd),e(fe,Hn),e(Hn,id),h(t,Fr,m),h(t,H,m),g(jt,H,null),e(H,dd),e(H,Vn),e(Vn,ld),e(H,cd),e(H,Me),g(Lt,Me,null),e(Me,md
),e(Me,Un),e(Un,pd),h(t,Sr,m),h(t,ue,m),e(ue,Fe),e(Fe,Xn),g(Mt,Xn,null),e(ue,hd),e(ue,Gn),e(Gn,fd),h(t,Cr,m),h(t,V,m),g(Ft,V,null),e(V,ud),e(V,St),e(St,gd),e(St,Ct),e(Ct,_d),e(St,kd),e(V,vd),e(V,F),g(It,F,null),e(F,wd),e(F,ge),e(ge,bd),e(ge,So),e(So,Rd),e(ge,Td),e(ge,Jn),e(Jn,$d),e(ge,yd),e(F,Ed),g(Se,F,null),e(F,zd),e(F,Zn),e(Zn,qd),e(F,xd),g(Dt,F,null),h(t,Ir,m),h(t,_e,m),e(_e,Ce),e(Ce,Yn),g(Nt,Yn,null),e(_e,Ad),e(_e,er),e(er,Pd),h(t,Dr,m),h(t,U,m),g(Ot,U,null),e(U,jd),e(U,Wt),e(Wt,Ld),e(Wt,Kt),e(Kt,Md),e(Wt,Fd),e(U,Sd),e(U,S),g(Bt,S,null),e(S,Cd),e(S,ke),e(ke,Id),e(ke,Co),e(Co,Dd),e(ke,Nd),e(ke,tr),e(tr,Od),e(ke,Wd),e(S,Kd),g(Ie,S,null),e(S,Bd),e(S,or),e(or,Qd),e(S,Hd),g(Qt,S,null),h(t,Nr,m),h(t,ve,m),e(ve,De),e(De,nr),g(Ht,nr,null),e(ve,Vd),e(ve,rr),e(rr,Ud),h(t,Or,m),h(t,X,m),g(Vt,X,null),e(X,Xd),e(X,Ut),e(Ut,Gd),e(Ut,Xt),e(Xt,Jd),e(Ut,Zd),e(X,Yd),e(X,C),g(Gt,C,null),e(C,el),e(C,we),e(we,tl),e(we,Io),e(Io,ol),e(we,nl),e(we,ar),e(ar,rl),e(we,al),e(C,sl),g(Ne,C,null),e(C,il),e(C,sr),e(sr,dl),e(C,ll),g(Jt,C,null),h(t,Wr,m),h(t,be,m),e(be,Oe),e(Oe,ir),g(Zt,ir,null),e(be,cl),e(be,dr),e(dr,ml),h(t,Kr,m),h(t,G,m),g(Yt,G,null),e(G,pl),e(G,eo),e(eo,hl),e(eo,to),e(to,fl),e(eo,ul),e(G,gl),e(G,ee),g(oo,ee,null),e(ee,_l),e(ee,Re),e(Re,kl),e(Re,Do),e(Do,vl),e(Re,wl),e(Re,lr),e(lr,bl),e(Re,Rl),e(ee,Tl),g(We,ee,null),h(t,Br,m),h(t,Te,m),e(Te,Ke),e(Ke,cr),g(no,cr,null),e(Te,$l),e(Te,mr),e(mr,yl),h(t,Qr,m),h(t,O,m),g(ro,O,null),e(O,El),e(O,Be),e(Be,pr),e(pr,zl),e(Be,ql),e(Be,ao),e(ao,xl),e(Be,Al),e(O,Pl),e(O,Qe),g(so,Qe,null),e(Qe,jl),e(Qe,io),e(io,Ll),e(io,hr),e(hr,Ml),e(io,Fl),e(O,Sl),e(O,I),g(lo,I,null),e(I,Cl),e(I,$e),e($e,Il),e($e,No),e(No,Dl),e($e,Nl),e($e,fr),e(fr,Ol),e($e,Wl),e(I,Kl),g(He,I,null),e(I,Bl),e(I,ur),e(ur,Ql),e(I,Hl),g(co,I,null),Hr=!0},p(t,[m]){const mo={};m&2&&(mo.$$scope={dirty:m,ctx:t}),Se.$set(mo);const gr={};m&2&&(gr.$$scope={dirty:m,ctx:t}),Ie.$set(gr);const _r={};m&2&&(_r.$$scope={dirty:m,ctx:t}),Ne.$set(_r);const kr={};m&2&&(kr.$$scope={dirty:m,ctx:t}),We.$set(kr);const 
po={};m&2&&(po.$$scope={dirty:m,ctx:t}),He.$set(po)},i(t){Hr||(_(R.$$.fragment,t),_(Ge.$$.fragment,t),_(et.$$.fragment,t),_(tt.$$.fragment,t),_(rt.$$.fragment,t),_(at.$$.fragment,t),_(st.$$.fragment,t),_(dt.$$.fragment,t),_(ct.$$.fragment,t),_(pt.$$.fragment,t),_(ht.$$.fragment,t),_(ft.$$.fragment,t),_(ut.$$.fragment,t),_(wt.$$.fragment,t),_(bt.$$.fragment,t),_(Rt.$$.fragment,t),_(yt.$$.fragment,t),_(At.$$.fragment,t),_(Pt.$$.fragment,t),_(jt.$$.fragment,t),_(Lt.$$.fragment,t),_(Mt.$$.fragment,t),_(Ft.$$.fragment,t),_(It.$$.fragment,t),_(Se.$$.fragment,t),_(Dt.$$.fragment,t),_(Nt.$$.fragment,t),_(Ot.$$.fragment,t),_(Bt.$$.fragment,t),_(Ie.$$.fragment,t),_(Qt.$$.fragment,t),_(Ht.$$.fragment,t),_(Vt.$$.fragment,t),_(Gt.$$.fragment,t),_(Ne.$$.fragment,t),_(Jt.$$.fragment,t),_(Zt.$$.fragment,t),_(Yt.$$.fragment,t),_(oo.$$.fragment,t),_(We.$$.fragment,t),_(no.$$.fragment,t),_(ro.$$.fragment,t),_(so.$$.fragment,t),_(lo.$$.fragment,t),_(He.$$.fragment,t),_(co.$$.fragment,t),Hr=!0)},o(t){k(R.$$.fragment,t),k(Ge.$$.fragment,t),k(et.$$.fragment,t),k(tt.$$.fragment,t),k(rt.$$.fragment,t),k(at.$$.fragment,t),k(st.$$.fragment,t),k(dt.$$.fragment,t),k(ct.$$.fragment,t),k(pt.$$.fragment,t),k(ht.$$.fragment,t),k(ft.$$.fragment,t),k(ut.$$.fragment,t),k(wt.$$.fragment,t),k(bt.$$.fragment,t),k(Rt.$$.fragment,t),k(yt.$$.fragment,t),k(At.$$.fragment,t),k(Pt.$$.fragment,t),k(jt.$$.fragment,t),k(Lt.$$.fragment,t),k(Mt.$$.fragment,t),k(Ft.$$.fragment,t),k(It.$$.fragment,t),k(Se.$$.fragment,t),k(Dt.$$.fragment,t),k(Nt.$$.fragment,t),k(Ot.$$.fragment,t),k(Bt.$$.fragment,t),k(Ie.$$.fragment,t),k(Qt.$$.fragment,t),k(Ht.$$.fragment,t),k(Vt.$$.fragment,t),k(Gt.$$.fragment,t),k(Ne.$$.fragment,t),k(Jt.$$.fragment,t),k(Zt.$$.fragment,t),k(Yt.$$.fragment,t),k(oo.$$.fragment,t),k(We.$$.fragment,t),k(no.$$.fragment,t),k(ro.$$.fragment,t),k(so.$$.fragment,t),k(lo.$$.fragment,t),k(He.$$.fragment,t),k(co.$$.fragment,t),Hr=!1},d(t){o(p),t&&o($),t&&o(w),v(R),t&&o(Tr),t&&o(se),v(Ge),t&&o($r),t&&o(Ee),t&&o(yr),t&&o(ho),t&&o(Er),t&&o(fo),t&&o(zr),t&&o(Z),t&&o(qr),t&&o(ie),v(et),t&&o(xr),t&&o(x),v(tt),v(rt),t&&o(Ar),t&&o(le),v(at),t&&o(Pr),t&&o(E),v(st),v(dt),v(ct),v(pt),v(ht),v(ft),v(ut),v(wt),t&&o(jr),t&&o(pe),v(bt),t&&o(Lr),t&&o(M),v(Rt),v(yt),v(At),t&&o(Mr),t&&o(fe),v(Pt),t&&o(Fr),t&&o(H),v(jt),v(Lt),t&&o(Sr),t&&o(ue),v(Mt),t&&o(Cr),t&&o(V),v(Ft),v(It),v(Se),v(Dt),t&&o(Ir),t&&o(_e),v(Nt),t&&o(Dr),t&&o(U),v(Ot),v(Bt),v(Ie),v(Qt),t&&o(Nr),t&&o(ve),v(Ht),t&&o(Or),t&&o(X),v(Vt),v(Gt),v(Ne),v(Jt),t&&o(Wr),t&&o(be),v(Zt),t&&o(Kr),t&&o(G),v(Yt),v(oo),v(We),t&&o(Br),t&&o(Te),v(no),t&&o(Qr),t&&o(O),v(ro),v(so),v(lo),v(He),v(co)}}}const ip={local:"realm",sections:[{local:"overview",title:"Overview"},{local:"transformers.RealmConfig",title:"RealmConfig"},{local:"transformers.RealmTokenizer",title:"RealmTokenizer"},{local:"transformers.RealmTokenizerFast",title:"RealmTokenizerFast"},{local:"transformers.RealmRetriever",title:"RealmRetriever"},{local:"transformers.RealmEmbedder",title:"RealmEmbedder"},{local:"transformers.RealmScorer",title:"RealmScorer"},{local:"transformers.RealmKnowledgeAugEncoder",title:"RealmKnowledgeAugEncoder"},{local:"transformers.RealmReader",title:"RealmReader"},{local:"transformers.RealmForOpenQA",title:"RealmForOpenQA"}],title:"REALM"};function dp(N,p,$){let{fw:w}=p;return N.$$set=T=>{"fw"in T&&$(0,w=T.fw)},[w]}class up extends Jm{constructor(p){super();Zm(this,p,dp,sp,Ym,{fw:0})}}export{up as default,ip as metadata};
288
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages/model_doc/t5v1.1.mdx-bd30f4e4.js
import{S as Po,i as Ao,s as Lo,e as r,k as p,w as St,t as n,M as Co,c as l,d as o,m as d,a,x as Gt,h as i,b as s,F as e,g as h,y as Nt,L as Io,q as qt,o as Dt,B as Bt}from"../../chunks/vendor-4833417e.js";import{I as $o}from"../../chunks/IconCopyLink-4b81c553.js";import{C as Oo}from"../../chunks/CodeBlock-6a3d1b46.js";import"../../chunks/CopyButton-dacfbfaf.js";function So(De){let _,V,c,m,Q,A,Be,W,Ue,we,x,y,X,L,Fe,Y,Re,Ee,T,Ve,C,je,He,be,j,Me,xe,I,ye,H,Je,Te,u,Z,O,ze,S,Ke,Qe,We,ee,te,Xe,Ye,oe,re,Ze,et,le,ae,tt,ot,ne,g,rt,ie,lt,at,se,nt,it,fe,st,ft,ke,k,ht,G,pt,dt,$e,M,ut,Pe,v,he,pe,N,vt,ct,de,ue,q,mt,gt,ve,ce,D,_t,wt,me,ge,B,Et,bt,_e,J,U,xt,yt,Ae,$,Tt,z,kt,$t,Le,w,Pt,F,At,Lt,R,Ct,It,Ce;return A=new $o({}),L=new $o({}),I=new Oo({props:{code:`from transformers import T5ForConditionalGeneration model = T5ForConditionalGeneration.from_pretrained("google/t5-v1_1-base")`,highlighted:`<span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> T5ForConditionalGeneration model = T5ForConditionalGeneration.from_pretrained(<span class="hljs-string">&quot;google/t5-v1_1-base&quot;</span>)`}}),{c(){_=r("meta"),V=p(),c=r("h1"),m=r("a"),Q=r("span"),St(A.$$.fragment),Be=p(),W=r("span"),Ue=n("T5v1.1"),we=p(),x=r("h2"),y=r("a"),X=r("span"),St(L.$$.fragment),Fe=p(),Y=r("span"),Re=n("Overview"),Ee=p(),T=r("p"),Ve=n("T5v1.1 was released in the "),C=r("a"),je=n("google-research/text-to-text-transfer-transformer"),He=n(` repository by Colin Raffel et al. It\u2019s an improved version of the original T5 model.`),be=p(),j=r("p"),Me=n("One can directly plug in the weights of T5v1.1 into a T5 model, like so:"),xe=p(),St(I.$$.fragment),ye=p(),H=r("p"),Je=n("T5 Version 1.1 includes the following improvements compared to the original T5 model:"),Te=p(),u=r("ul"),Z=r("li"),O=r("p"),ze=n("GEGLU activation in the feed-forward hidden layer, rather than ReLU. See "),S=r("a"),Ke=n("this paper"),Qe=n("."),We=p(),ee=r("li"),te=r("p"),Xe=n("Dropout was turned off in pre-training (quality win). Dropout should be re-enabled during fine-tuning."),Ye=p(),oe=r("li"),re=r("p"),Ze=n("Pre-trained on C4 only without mixing in the downstream tasks."),et=p(),le=r("li"),ae=r("p"),tt=n("No parameter sharing between the embedding and classifier layer."),ot=p(),ne=r("li"),g=r("p"),rt=n("\u201Cxl\u201D and \u201Cxxl\u201D replace \u201C3B\u201D and \u201C11B\u201D. The model shapes are a bit different - larger "),ie=r("code"),lt=n("d_model"),at=n(` and smaller `),se=r("code"),nt=n("num_heads"),it=n(" and "),fe=r("code"),st=n("d_ff"),ft=n("."),ke=p(),k=r("p"),ht=n("Note: T5 Version 1.1 was only pre-trained on "),G=r("a"),pt=n("C4"),dt=n(` excluding any supervised training. Therefore, this model has to be fine-tuned before it is useable on a downstream task, unlike the original T5 model. Since t5v1.1 was pre-trained unsupervisedly, there\u2019s no real advantage to using a task prefix during single-task fine-tuning. 
If you are doing multi-task fine-tuning, you should use a prefix.`),$e=p(),M=r("p"),ut=n("Google has released the following variants:"),Pe=p(),v=r("ul"),he=r("li"),pe=r("p"),N=r("a"),vt=n("google/t5-v1_1-small"),ct=p(),de=r("li"),ue=r("p"),q=r("a"),mt=n("google/t5-v1_1-base"),gt=p(),ve=r("li"),ce=r("p"),D=r("a"),_t=n("google/t5-v1_1-large"),wt=p(),me=r("li"),ge=r("p"),B=r("a"),Et=n("google/t5-v1_1-xl"),bt=p(),_e=r("li"),J=r("p"),U=r("a"),xt=n("google/t5-v1_1-xxl"),yt=n("."),Ae=p(),$=r("p"),Tt=n("One can refer to "),z=r("a"),kt=n("T5\u2019s documentation page"),$t=n(" for all tips, code examples and notebooks."),Le=p(),w=r("p"),Pt=n("This model was contributed by "),F=r("a"),At=n("patrickvonplaten"),Lt=n(`. The original code can be found `),R=r("a"),Ct=n("here"),It=n("."),this.h()},l(t){const f=Co('[data-svelte="svelte-1phssyn"]',document.head);_=l(f,"META",{name:!0,content:!0}),f.forEach(o),V=d(t),c=l(t,"H1",{class:!0});var Ie=a(c);m=l(Ie,"A",{id:!0,class:!0,href:!0});var Ut=a(m);Q=l(Ut,"SPAN",{});var Ft=a(Q);Gt(A.$$.fragment,Ft),Ft.forEach(o),Ut.forEach(o),Be=d(Ie),W=l(Ie,"SPAN",{});var Rt=a(W);Ue=i(Rt,"T5v1.1"),Rt.forEach(o),Ie.forEach(o),we=d(t),x=l(t,"H2",{class:!0});var Oe=a(x);y=l(Oe,"A",{id:!0,class:!0,href:!0});var Vt=a(y);X=l(Vt,"SPAN",{});var jt=a(X);Gt(L.$$.fragment,jt),jt.forEach(o),Vt.forEach(o),Fe=d(Oe),Y=l(Oe,"SPAN",{});var Ht=a(Y);Re=i(Ht,"Overview"),Ht.forEach(o),Oe.forEach(o),Ee=d(t),T=l(t,"P",{});var Se=a(T);Ve=i(Se,"T5v1.1 was released in the "),C=l(Se,"A",{href:!0,rel:!0});var Mt=a(C);je=i(Mt,"google-research/text-to-text-transfer-transformer"),Mt.forEach(o),He=i(Se,` repository by Colin Raffel et al. It\u2019s an improved version of the original T5 model.`),Se.forEach(o),be=d(t),j=l(t,"P",{});var Jt=a(j);Me=i(Jt,"One can directly plug in the weights of T5v1.1 into a T5 model, like so:"),Jt.forEach(o),xe=d(t),Gt(I.$$.fragment,t),ye=d(t),H=l(t,"P",{});var zt=a(H);Je=i(zt,"T5 Version 1.1 includes the following improvements compared to the original T5 model:"),zt.forEach(o),Te=d(t),u=l(t,"UL",{});var E=a(u);Z=l(E,"LI",{});var Kt=a(Z);O=l(Kt,"P",{});var Ge=a(O);ze=i(Ge,"GEGLU activation in the feed-forward hidden layer, rather than ReLU. See "),S=l(Ge,"A",{href:!0,rel:!0});var Qt=a(S);Ke=i(Qt,"this paper"),Qt.forEach(o),Qe=i(Ge,"."),Ge.forEach(o),Kt.forEach(o),We=d(E),ee=l(E,"LI",{});var Wt=a(ee);te=l(Wt,"P",{});var Xt=a(te);Xe=i(Xt,"Dropout was turned off in pre-training (quality win). Dropout should be re-enabled during fine-tuning."),Xt.forEach(o),Wt.forEach(o),Ye=d(E),oe=l(E,"LI",{});var Yt=a(oe);re=l(Yt,"P",{});var Zt=a(re);Ze=i(Zt,"Pre-trained on C4 only without mixing in the downstream tasks."),Zt.forEach(o),Yt.forEach(o),et=d(E),le=l(E,"LI",{});var eo=a(le);ae=l(eo,"P",{});var to=a(ae);tt=i(to,"No parameter sharing between the embedding and classifier layer."),to.forEach(o),eo.forEach(o),ot=d(E),ne=l(E,"LI",{});var oo=a(ne);g=l(oo,"P",{});var P=a(g);rt=i(P,"\u201Cxl\u201D and \u201Cxxl\u201D replace \u201C3B\u201D and \u201C11B\u201D. The model shapes are a bit different - larger "),ie=l(P,"CODE",{});var ro=a(ie);lt=i(ro,"d_model"),ro.forEach(o),at=i(P,` and smaller `),se=l(P,"CODE",{});var lo=a(se);nt=i(lo,"num_heads"),lo.forEach(o),it=i(P," and "),fe=l(P,"CODE",{});var ao=a(fe);st=i(ao,"d_ff"),ao.forEach(o),ft=i(P,"."),P.forEach(o),oo.forEach(o),E.forEach(o),ke=d(t),k=l(t,"P",{});var Ne=a(k);ht=i(Ne,"Note: T5 Version 1.1 was only pre-trained on "),G=l(Ne,"A",{href:!0,rel:!0});var no=a(G);pt=i(no,"C4"),no.forEach(o),dt=i(Ne,` excluding any supervised training. 
Therefore, this model has to be fine-tuned before it is useable on a downstream task, unlike the original T5 model. Since t5v1.1 was pre-trained unsupervisedly, there\u2019s no real advantage to using a task prefix during single-task fine-tuning. If you are doing multi-task fine-tuning, you should use a prefix.`),Ne.forEach(o),$e=d(t),M=l(t,"P",{});var io=a(M);ut=i(io,"Google has released the following variants:"),io.forEach(o),Pe=d(t),v=l(t,"UL",{});var b=a(v);he=l(b,"LI",{});var so=a(he);pe=l(so,"P",{});var fo=a(pe);N=l(fo,"A",{href:!0,rel:!0});var ho=a(N);vt=i(ho,"google/t5-v1_1-small"),ho.forEach(o),fo.forEach(o),so.forEach(o),ct=d(b),de=l(b,"LI",{});var po=a(de);ue=l(po,"P",{});var uo=a(ue);q=l(uo,"A",{href:!0,rel:!0});var vo=a(q);mt=i(vo,"google/t5-v1_1-base"),vo.forEach(o),uo.forEach(o),po.forEach(o),gt=d(b),ve=l(b,"LI",{});var co=a(ve);ce=l(co,"P",{});var mo=a(ce);D=l(mo,"A",{href:!0,rel:!0});var go=a(D);_t=i(go,"google/t5-v1_1-large"),go.forEach(o),mo.forEach(o),co.forEach(o),wt=d(b),me=l(b,"LI",{});var _o=a(me);ge=l(_o,"P",{});var wo=a(ge);B=l(wo,"A",{href:!0,rel:!0});var Eo=a(B);Et=i(Eo,"google/t5-v1_1-xl"),Eo.forEach(o),wo.forEach(o),_o.forEach(o),bt=d(b),_e=l(b,"LI",{});var bo=a(_e);J=l(bo,"P",{});var Ot=a(J);U=l(Ot,"A",{href:!0,rel:!0});var xo=a(U);xt=i(xo,"google/t5-v1_1-xxl"),xo.forEach(o),yt=i(Ot,"."),Ot.forEach(o),bo.forEach(o),b.forEach(o),Ae=d(t),$=l(t,"P",{});var qe=a($);Tt=i(qe,"One can refer to "),z=l(qe,"A",{href:!0});var yo=a(z);kt=i(yo,"T5\u2019s documentation page"),yo.forEach(o),$t=i(qe," for all tips, code examples and notebooks."),qe.forEach(o),Le=d(t),w=l(t,"P",{});var K=a(w);Pt=i(K,"This model was contributed by "),F=l(K,"A",{href:!0,rel:!0});var To=a(F);At=i(To,"patrickvonplaten"),To.forEach(o),Lt=i(K,`. The original code can be found `),R=l(K,"A",{href:!0,rel:!0});var ko=a(R);Ct=i(ko,"here"),ko.forEach(o),It=i(K,"."),K.forEach(o),this.h()},h(){s(_,"name","hf:doc:metadata"),s(_,"content",JSON.stringify(Go)),s(m,"id","t5v11"),s(m,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),s(m,"href","#t5v11"),s(c,"class","relative group"),s(y,"id","overview"),s(y,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),s(y,"href","#overview"),s(x,"class","relative 
group"),s(C,"href","https://github.com/google-research/text-to-text-transfer-transformer/blob/main/released_checkpoints.md#t511"),s(C,"rel","nofollow"),s(S,"href","https://arxiv.org/abs/2002.05202"),s(S,"rel","nofollow"),s(G,"href","https://huggingface.co/datasets/c4"),s(G,"rel","nofollow"),s(N,"href","https://huggingface.co/google/t5-v1_1-small"),s(N,"rel","nofollow"),s(q,"href","https://huggingface.co/google/t5-v1_1-base"),s(q,"rel","nofollow"),s(D,"href","https://huggingface.co/google/t5-v1_1-large"),s(D,"rel","nofollow"),s(B,"href","https://huggingface.co/google/t5-v1_1-xl"),s(B,"rel","nofollow"),s(U,"href","https://huggingface.co/google/t5-v1_1-xxl"),s(U,"rel","nofollow"),s(z,"href","t5"),s(F,"href","https://huggingface.co/patrickvonplaten"),s(F,"rel","nofollow"),s(R,"href","https://github.com/google-research/text-to-text-transfer-transformer/blob/main/released_checkpoints.md#t511"),s(R,"rel","nofollow")},m(t,f){e(document.head,_),h(t,V,f),h(t,c,f),e(c,m),e(m,Q),Nt(A,Q,null),e(c,Be),e(c,W),e(W,Ue),h(t,we,f),h(t,x,f),e(x,y),e(y,X),Nt(L,X,null),e(x,Fe),e(x,Y),e(Y,Re),h(t,Ee,f),h(t,T,f),e(T,Ve),e(T,C),e(C,je),e(T,He),h(t,be,f),h(t,j,f),e(j,Me),h(t,xe,f),Nt(I,t,f),h(t,ye,f),h(t,H,f),e(H,Je),h(t,Te,f),h(t,u,f),e(u,Z),e(Z,O),e(O,ze),e(O,S),e(S,Ke),e(O,Qe),e(u,We),e(u,ee),e(ee,te),e(te,Xe),e(u,Ye),e(u,oe),e(oe,re),e(re,Ze),e(u,et),e(u,le),e(le,ae),e(ae,tt),e(u,ot),e(u,ne),e(ne,g),e(g,rt),e(g,ie),e(ie,lt),e(g,at),e(g,se),e(se,nt),e(g,it),e(g,fe),e(fe,st),e(g,ft),h(t,ke,f),h(t,k,f),e(k,ht),e(k,G),e(G,pt),e(k,dt),h(t,$e,f),h(t,M,f),e(M,ut),h(t,Pe,f),h(t,v,f),e(v,he),e(he,pe),e(pe,N),e(N,vt),e(v,ct),e(v,de),e(de,ue),e(ue,q),e(q,mt),e(v,gt),e(v,ve),e(ve,ce),e(ce,D),e(D,_t),e(v,wt),e(v,me),e(me,ge),e(ge,B),e(B,Et),e(v,bt),e(v,_e),e(_e,J),e(J,U),e(U,xt),e(J,yt),h(t,Ae,f),h(t,$,f),e($,Tt),e($,z),e(z,kt),e($,$t),h(t,Le,f),h(t,w,f),e(w,Pt),e(w,F),e(F,At),e(w,Lt),e(w,R),e(R,Ct),e(w,It),Ce=!0},p:Io,i(t){Ce||(qt(A.$$.fragment,t),qt(L.$$.fragment,t),qt(I.$$.fragment,t),Ce=!0)},o(t){Dt(A.$$.fragment,t),Dt(L.$$.fragment,t),Dt(I.$$.fragment,t),Ce=!1},d(t){o(_),t&&o(V),t&&o(c),Bt(A),t&&o(we),t&&o(x),Bt(L),t&&o(Ee),t&&o(T),t&&o(be),t&&o(j),t&&o(xe),Bt(I,t),t&&o(ye),t&&o(H),t&&o(Te),t&&o(u),t&&o(ke),t&&o(k),t&&o($e),t&&o(M),t&&o(Pe),t&&o(v),t&&o(Ae),t&&o($),t&&o(Le),t&&o(w)}}}const Go={local:"t5v11",sections:[{local:"overview",title:"Overview"}],title:"T5v1.1"};function No(De,_,V){let{fw:c}=_;return De.$$set=m=>{"fw"in m&&V(0,c=m.fw)},[c]}class Fo extends Po{constructor(_){super();Ao(this,_,No,So,Lo,{fw:0})}}export{Fo as default,Go as metadata};
289
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages/model_doc/vision-encoder-decoder.mdx-b5cd5518.js
import{S as rc,i as ac,s as sc,e as r,k as c,w as u,t,M as dc,c as a,d as o,m as l,a as s,x as g,h as n,b as i,F as e,g as w,y as _,q as v,o as b,B as T}from"../../chunks/vendor-4833417e.js";import{T as Vd}from"../../chunks/Tip-fffd6df1.js";import{D as B}from"../../chunks/Docstring-4f315ed9.js";import{C as yo}from"../../chunks/CodeBlock-6a3d1b46.js";import{I as Yt}from"../../chunks/IconCopyLink-4b81c553.js";import"../../chunks/CopyButton-dacfbfaf.js";function ic(Q){let p,P,m,M,F;return{c(){p=r("p"),P=t("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r("code"),M=t("Module"),F=t(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(y){p=a(y,"P",{});var j=s(p);P=n(j,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=a(j,"CODE",{});var C=s(m);M=n(C,"Module"),C.forEach(o),F=n(j,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),j.forEach(o)},m(y,j){w(y,p,j),e(p,P),e(p,m),e(m,M),e(p,F)},d(y){y&&o(p)}}}function cc(Q){let p,P,m,M,F;return{c(){p=r("p"),P=t("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r("code"),M=t("Module"),F=t(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(y){p=a(y,"P",{});var j=s(p);P=n(j,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=a(j,"CODE",{});var C=s(m);M=n(C,"Module"),C.forEach(o),F=n(j,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),j.forEach(o)},m(y,j){w(y,p,j),e(p,P),e(p,m),e(m,M),e(p,F)},d(y){y&&o(p)}}}function lc(Q){let p,P,m,M,F;return{c(){p=r("p"),P=t("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r("code"),M=t("Module"),F=t(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(y){p=a(y,"P",{});var j=s(p);P=n(j,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=a(j,"CODE",{});var C=s(m);M=n(C,"Module"),C.forEach(o),F=n(j,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),j.forEach(o)},m(y,j){w(y,p,j),e(p,P),e(p,m),e(m,M),e(p,F)},d(y){y&&o(p)}}}function pc(Q){let 
p,P,m,M,F,y,j,C,Vn,Jt,f,$n,Eo,qn,zn,rt,Pn,Fn,ko,Cn,An,xo,Sn,In,jo,Ln,Nn,Do,On,Rn,at,Bn,Gn,Mo,Wn,Un,Vo,Hn,Zn,$o,Yn,Jn,qo,Kn,Qn,Kt,he,Xn,xe,er,or,Qt,G,tr,zo,nr,rr,Po,ar,sr,Xt,X,me,st,je,dr,dt,ir,en,z,De,cr,fe,Fo,lr,pr,Co,hr,mr,fr,ee,ur,Ao,gr,_r,So,vr,br,Tr,it,wr,yr,Me,Er,ue,Ve,kr,$e,xr,Io,jr,Dr,Mr,ge,qe,Vr,oe,$r,ct,qr,zr,lt,Pr,Fr,on,te,_e,pt,ze,Cr,ht,Ar,tn,E,Pe,Sr,ne,Ir,mt,Lr,Nr,ft,Or,Rr,Br,Fe,Gr,Ce,Wr,Ur,Hr,Ae,Zr,Se,Yr,Jr,Kr,ut,Qr,Xr,Ie,ea,Lo,oa,ta,na,Le,ra,Ne,aa,sa,da,W,No,ia,ca,gt,la,pa,_t,ha,ma,fa,A,Oe,ua,re,ga,Oo,_a,va,vt,ba,Ta,wa,ve,ya,bt,Ea,ka,Re,xa,S,Be,ja,Tt,Da,Ma,ae,Va,wt,$a,qa,yt,za,Pa,Fa,Et,Ca,Aa,Ge,nn,se,be,kt,We,Sa,xt,Ia,rn,k,Ue,La,de,Na,jt,Oa,Ra,Dt,Ba,Ga,Wa,He,Ua,Ze,Ha,Za,Ya,Ye,Ja,Je,Ka,Qa,Xa,Mt,es,os,Ke,ts,Ro,ns,rs,as,Qe,ss,Xe,ds,is,cs,U,Bo,ls,ps,Vt,hs,ms,$t,fs,us,gs,I,eo,_s,ie,vs,Go,bs,Ts,qt,ws,ys,Es,Te,ks,zt,xs,js,oo,Ds,O,to,Ms,Pt,Vs,$s,Ft,qs,zs,no,an,ce,we,Ct,ro,Ps,At,Fs,sn,x,ao,Cs,le,As,St,Ss,Is,It,Ls,Ns,Os,so,Rs,io,Bs,Gs,Ws,co,Us,lo,Hs,Zs,Ys,Lt,Js,Ks,po,Qs,Wo,Xs,ed,od,ho,td,mo,nd,rd,ad,H,Uo,sd,dd,Nt,id,cd,Ot,ld,pd,hd,L,fo,md,pe,fd,Ho,ud,gd,Rt,_d,vd,bd,ye,Td,Bt,wd,yd,uo,Ed,R,go,kd,Gt,xd,jd,Wt,Dd,Md,_o,dn;return y=new Yt({}),je=new Yt({}),De=new B({props:{name:"class transformers.VisionEncoderDecoderConfig",anchor:"transformers.VisionEncoderDecoderConfig",parameters:[{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/vision_encoder_decoder/configuration_vision_encoder_decoder.py#L27",parametersDescription:[{anchor:"transformers.VisionEncoderDecoderConfig.kwargs",description:`<strong>kwargs</strong> (<em>optional</em>) &#x2014; Dictionary of keyword arguments. Notably:</p> <ul> <li><strong>encoder</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a>, <em>optional</em>) &#x2014; An instance of a configuration object that defines the encoder config.</li> <li><strong>decoder</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a>, <em>optional</em>) &#x2014; An instance of a configuration object that defines the decoder config.</li> </ul>`,name:"kwargs"}]}}),Me=new yo({props:{code:`from transformers import BertConfig, ViTConfig, VisionEncoderDecoderConfig, VisionEncoderDecoderModel # Initializing a ViT & BERT style configuration config_encoder = ViTConfig() config_decoder = BertConfig() config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(config_encoder, config_decoder) # Initializing a ViTBert model from a ViT & bert-base-uncased style configurations model = VisionEncoderDecoderModel(config=config) # Accessing the model configuration config_encoder = model.config.encoder config_decoder = model.config.decoder # set decoder config to causal lm config_decoder.is_decoder = True config_decoder.add_cross_attention = True # Saving the model, including its configuration model.save_pretrained("my-model") # loading model and config from pretrained folder encoder_decoder_config = VisionEncoderDecoderConfig.from_pretrained("my-model") model = VisionEncoderDecoderModel.from_pretrained("my-model", config=encoder_decoder_config)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BertConfig, ViTConfig, VisionEncoderDecoderConfig, VisionEncoderDecoderModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a ViT &amp; BERT style 
configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config_encoder = ViTConfig() <span class="hljs-meta">&gt;&gt;&gt; </span>config_decoder = BertConfig() <span class="hljs-meta">&gt;&gt;&gt; </span>config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(config_encoder, config_decoder) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a ViTBert model from a ViT &amp; bert-base-uncased style configurations</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = VisionEncoderDecoderModel(config=config) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config_encoder = model.config.encoder <span class="hljs-meta">&gt;&gt;&gt; </span>config_decoder = model.config.decoder <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># set decoder config to causal lm</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config_decoder.is_decoder = <span class="hljs-literal">True</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config_decoder.add_cross_attention = <span class="hljs-literal">True</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Saving the model, including its configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model.save_pretrained(<span class="hljs-string">&quot;my-model&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># loading model and config from pretrained folder</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoder_decoder_config = VisionEncoderDecoderConfig.from_pretrained(<span class="hljs-string">&quot;my-model&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = VisionEncoderDecoderModel.from_pretrained(<span class="hljs-string">&quot;my-model&quot;</span>, config=encoder_decoder_config)`}}),Ve=new B({props:{name:"from_encoder_decoder_configs",anchor:"transformers.VisionEncoderDecoderConfig.from_encoder_decoder_configs",parameters:[{name:"encoder_config",val:": PretrainedConfig"},{name:"decoder_config",val:": PretrainedConfig"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/vision_encoder_decoder/configuration_vision_encoder_decoder.py#L93",returnDescription:` <p>An instance of a configuration object</p> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/model_doc/vision-encoder-decoder#transformers.VisionEncoderDecoderConfig" >VisionEncoderDecoderConfig</a></p> `}}),qe=new B({props:{name:"to_dict",anchor:"transformers.VisionEncoderDecoderConfig.to_dict",parameters:[],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/vision_encoder_decoder/configuration_vision_encoder_decoder.py#L110",returnDescription:` <p>Dictionary of all the attributes that make up this configuration instance,</p> `,returnType:` <p><code>Dict[str, any]</code></p> `}}),ze=new Yt({}),Pe=new B({props:{name:"class transformers.VisionEncoderDecoderModel",anchor:"transformers.VisionEncoderDecoderModel",parameters:[{name:"config",val:": typing.Optional[transformers.configuration_utils.PretrainedConfig] = None"},{name:"encoder",val:": typing.Optional[transformers.modeling_utils.PreTrainedModel] = None"},{name:"decoder",val:": typing.Optional[transformers.modeling_utils.PreTrainedModel] = 
None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/vision_encoder_decoder/modeling_vision_encoder_decoder.py#L149",parametersDescription:[{anchor:"transformers.VisionEncoderDecoderModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/vision-encoder-decoder#transformers.VisionEncoderDecoderConfig">VisionEncoderDecoderConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Oe=new B({props:{name:"forward",anchor:"transformers.VisionEncoderDecoderModel.forward",parameters:[{name:"pixel_values",val:" = None"},{name:"decoder_input_ids",val:" = None"},{name:"decoder_attention_mask",val:" = None"},{name:"encoder_outputs",val:" = None"},{name:"past_key_values",val:" = None"},{name:"decoder_inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/vision_encoder_decoder/modeling_vision_encoder_decoder.py#L401",parametersDescription:[{anchor:"transformers.VisionEncoderDecoderModel.forward.pixel_values",description:`<strong>pixel_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_channels, height, width)</code>) &#x2014; Pixel values. Pixel values can be obtained using a feature extractor (e.g. if you use ViT as the encoder, you should use <a href="/docs/transformers/pr_16143/en/model_doc/vit#transformers.ViTFeatureExtractor">ViTFeatureExtractor</a>). See <a href="/docs/transformers/pr_16143/en/model_doc/vit#transformers.ViTFeatureExtractor.__call__">ViTFeatureExtractor.<strong>call</strong>()</a> for details.`,name:"pixel_values"},{anchor:"transformers.VisionEncoderDecoderModel.forward.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>. 
See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a></p> <p>If <code>past_key_values</code> is used, optionally only the last <code>decoder_input_ids</code> have to be input (see <code>past_key_values</code>).</p> <p>For training, <code>decoder_input_ids</code> are automatically created by the model by shifting the <code>labels</code> to the right, replacing -100 by the <code>pad_token_id</code> and prepending them with the <code>decoder_start_token_id</code>.`,name:"decoder_input_ids"},{anchor:"transformers.VisionEncoderDecoderModel.forward.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>torch.BoolTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.`,name:"decoder_attention_mask"},{anchor:"transformers.VisionEncoderDecoderModel.forward.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>) &#x2014; This tuple must consist of (<code>last_hidden_state</code>, <em>optional</em>: <code>hidden_states</code>, <em>optional</em>: <code>attentions</code>) <code>last_hidden_state</code> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) is a tensor of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.`,name:"encoder_outputs"},{anchor:"transformers.VisionEncoderDecoderModel.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code> of length <code>config.n_layers</code> with each tuple having 4 tensors of shape <code>(batch_size, num_heads, sequence_length - 1, embed_size_per_head)</code>) &#x2014; Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.`,name:"past_key_values"},{anchor:"transformers.VisionEncoderDecoderModel.forward.decoder_inputs_embeds",description:`<strong>decoder_inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, target_sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>decoder_input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <code>decoder_input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"decoder_inputs_embeds"},{anchor:"transformers.VisionEncoderDecoderModel.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss for the decoder. Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>`,name:"labels"},{anchor:"transformers.VisionEncoderDecoderModel.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"},{anchor:"transformers.VisionEncoderDecoderModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.VisionEncoderDecoderModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.VisionEncoderDecoderModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, the model will return a <code>Seq2SeqLMOutput</code> instead of a plain tuple. kwargs &#x2014; (<em>optional</em>) Remaining dictionary of keyword arguments. 
Keyword arguments come in two flavors:</p> <ul> <li>Without a prefix which will be input as <code>**encoder_kwargs</code> for the encoder forward function.</li> <li>With a <em>decoder_</em> prefix which will be input as <code>**decoder_kwargs</code> for the decoder forward function.</li> </ul>`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.Seq2SeqLMOutput" >transformers.modeling_outputs.Seq2SeqLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/vision-encoder-decoder#transformers.VisionEncoderDecoderConfig" >VisionEncoderDecoderConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Language modeling loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the 
attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.Seq2SeqLMOutput" >transformers.modeling_outputs.Seq2SeqLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),ve=new Vd({props:{$$slots:{default:[ic]},$$scope:{ctx:Q}}}),Re=new yo({props:{code:`from transformers import TrOCRProcessor, VisionEncoderDecoderModel import requests from PIL import Image import torch processor = TrOCRProcessor.from_pretrained("microsoft/trocr-base-handwritten") model = VisionEncoderDecoderModel.from_pretrained("microsoft/trocr-base-handwritten") # load image from the IAM dataset url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" image = Image.open(requests.get(url, stream=True).raw).convert("RGB") # training model.config.decoder_start_token_id = processor.tokenizer.cls_token_id model.config.pad_token_id = processor.tokenizer.pad_token_id model.config.vocab_size = model.config.decoder.vocab_size pixel_values = processor(image, return_tensors="pt").pixel_values text = "hello world" labels = processor.tokenizer(text, return_tensors="pt").input_ids outputs = model(pixel_values=pixel_values, labels=labels) loss = outputs.loss # inference (generation) generated_ids = model.generate(pixel_values) generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TrOCRProcessor, VisionEncoderDecoderModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> requests <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>processor = TrOCRProcessor.from_pretrained(<span class="hljs-string">&quot;microsoft/trocr-base-handwritten&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = VisionEncoderDecoderModel.from_pretrained(<span 
class="hljs-string">&quot;microsoft/trocr-base-handwritten&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># load image from the IAM dataset</span> <span class="hljs-meta">&gt;&gt;&gt; </span>url = <span class="hljs-string">&quot;https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>image = Image.<span class="hljs-built_in">open</span>(requests.get(url, stream=<span class="hljs-literal">True</span>).raw).convert(<span class="hljs-string">&quot;RGB&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># training</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model.config.decoder_start_token_id = processor.tokenizer.cls_token_id <span class="hljs-meta">&gt;&gt;&gt; </span>model.config.pad_token_id = processor.tokenizer.pad_token_id <span class="hljs-meta">&gt;&gt;&gt; </span>model.config.vocab_size = model.config.decoder.vocab_size <span class="hljs-meta">&gt;&gt;&gt; </span>pixel_values = processor(image, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).pixel_values <span class="hljs-meta">&gt;&gt;&gt; </span>text = <span class="hljs-string">&quot;hello world&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>labels = processor.tokenizer(text, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(pixel_values=pixel_values, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># inference (generation)</span> <span class="hljs-meta">&gt;&gt;&gt; </span>generated_ids = model.generate(pixel_values) <span class="hljs-meta">&gt;&gt;&gt; </span>generated_text = processor.batch_decode(generated_ids, skip_special_tokens=<span class="hljs-literal">True</span>)[<span class="hljs-number">0</span>]`}}),Be=new B({props:{name:"from_encoder_decoder_pretrained",anchor:"transformers.VisionEncoderDecoderModel.from_encoder_decoder_pretrained",parameters:[{name:"encoder_pretrained_model_name_or_path",val:": str = None"},{name:"decoder_pretrained_model_name_or_path",val:": str = None"},{name:"*model_args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/vision_encoder_decoder/modeling_vision_encoder_decoder.py#L246",parametersDescription:[{anchor:"transformers.VisionEncoderDecoderModel.from_encoder_decoder_pretrained.encoder_pretrained_model_name_or_path",description:`<strong>encoder_pretrained_model_name_or_path</strong> (<code>str</code>, <em>optional</em>) &#x2014; Information necessary to initiate the image encoder. Can be either:</p> <ul> <li>A string, the <em>model id</em> of a pretrained model hosted inside a model repo on huggingface.co. An example is <code>google/vit-base-patch16-224-in21k</code>.</li> <li>A path to a <em>directory</em> containing model weights saved using <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.save_pretrained">save_pretrained()</a>, e.g., <code>./my_model_directory/</code>.</li> <li>A path or url to a <em>tensorflow index checkpoint file</em> (e.g, <code>./tf_model/model.ckpt.index</code>). In this case, <code>from_tf</code> should be set to <code>True</code> and a configuration object should be provided as <code>config</code> argument. 
This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.</li> </ul>`,name:"encoder_pretrained_model_name_or_path"},{anchor:"transformers.VisionEncoderDecoderModel.from_encoder_decoder_pretrained.decoder_pretrained_model_name_or_path",description:`<strong>decoder_pretrained_model_name_or_path</strong> (<code>str</code>, <em>optional</em>, defaults to <code>None</code>) &#x2014; Information necessary to initiate the text decoder. Can be either:</p> <ul> <li>A string, the <em>model id</em> of a pretrained model hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like <code>bert-base-uncased</code>, or namespaced under a user or organization name, like <code>dbmdz/bert-base-german-cased</code>.</li> <li>A path to a <em>directory</em> containing model weights saved using <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.save_pretrained">save_pretrained()</a>, e.g., <code>./my_model_directory/</code>.</li> <li>A path or url to a <em>tensorflow index checkpoint file</em> (e.g, <code>./tf_model/model.ckpt.index</code>). In this case, <code>from_tf</code> should be set to <code>True</code> and a configuration object should be provided as <code>config</code> argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.</li> </ul>`,name:"decoder_pretrained_model_name_or_path"},{anchor:"transformers.VisionEncoderDecoderModel.from_encoder_decoder_pretrained.model_args",description:`<strong>model_args</strong> (remaining positional arguments, <em>optional</em>) &#x2014; All remaning positional arguments will be passed to the underlying model&#x2019;s <code>__init__</code> method.`,name:"model_args"},{anchor:"transformers.VisionEncoderDecoderModel.from_encoder_decoder_pretrained.kwargs",description:`<strong>kwargs</strong> (remaining dictionary of keyword arguments, <em>optional</em>) &#x2014; Can be used to update the configuration object (after it being loaded) and initiate the model (e.g., <code>output_attentions=True</code>).</p> <ul> <li>To update the encoder configuration, use the prefix <em>encoder_</em> for each configuration parameter.</li> <li>To update the decoder configuration, use the prefix <em>decoder_</em> for each configuration parameter.</li> <li>To update the parent model configuration, do not use a prefix for each configuration parameter.</li> </ul> <p>Behaves differently depending on whether a <code>config</code> is provided or automatically loaded.`,name:"kwargs"}]}}),Ge=new yo({props:{code:`from transformers import VisionEncoderDecoderModel # initialize a vit-bert from a pretrained ViT and a pretrained BERT model. Note that the cross-attention layers will be randomly initialized model = VisionEncoderDecoderModel.from_encoder_decoder_pretrained( "google/vit-base-patch16-224-in21k", "bert-base-uncased" ) # saving model after fine-tuning model.save_pretrained("./vit-bert") # load fine-tuned model model = VisionEncoderDecoderModel.from_pretrained("./vit-bert")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> VisionEncoderDecoderModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># initialize a vit-bert from a pretrained ViT and a pretrained BERT model. 
Note that the cross-attention layers will be randomly initialized</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = VisionEncoderDecoderModel.from_encoder_decoder_pretrained( <span class="hljs-meta">... </span> <span class="hljs-string">&quot;google/vit-base-patch16-224-in21k&quot;</span>, <span class="hljs-string">&quot;bert-base-uncased&quot;</span> <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># saving model after fine-tuning</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model.save_pretrained(<span class="hljs-string">&quot;./vit-bert&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># load fine-tuned model</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = VisionEncoderDecoderModel.from_pretrained(<span class="hljs-string">&quot;./vit-bert&quot;</span>)`}}),We=new Yt({}),Ue=new B({props:{name:"class transformers.TFVisionEncoderDecoderModel",anchor:"transformers.TFVisionEncoderDecoderModel",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/vision_encoder_decoder/modeling_tf_vision_encoder_decoder.py#L176",parametersDescription:[{anchor:"transformers.TFVisionEncoderDecoderModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/vision-encoder-decoder#transformers.VisionEncoderDecoderConfig">VisionEncoderDecoderConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),eo=new B({props:{name:"call",anchor:"transformers.TFVisionEncoderDecoderModel.call",parameters:[{name:"pixel_values",val:" = None"},{name:"decoder_input_ids",val:" = None"},{name:"decoder_attention_mask",val:" = None"},{name:"encoder_outputs",val:" = None"},{name:"past_key_values",val:" = None"},{name:"decoder_inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/vision_encoder_decoder/modeling_tf_vision_encoder_decoder.py#L513",parametersDescription:[{anchor:"transformers.TFVisionEncoderDecoderModel.call.pixel_values",description:`<strong>pixel_values</strong> (<code>np.ndarray</code>, <code>tf.Tensor</code>, <code>List[tf.Tensor]</code> \`<code>Dict[str, tf.Tensor]</code> or <code>Dict[str, np.ndarray]</code> and each example must have the shape <code>(batch_size, num_channels, height, width)</code>) &#x2014; Pixel values. Pixel values can be obtained using the vision&#x2019;s model&#x2019;s feature extractor. For example, using <a href="/docs/transformers/pr_16143/en/model_doc/vit#transformers.ViTFeatureExtractor">ViTFeatureExtractor</a>. 
See <a href="/docs/transformers/pr_16143/en/model_doc/vit#transformers.ViTFeatureExtractor.__call__">ViTFeatureExtractor.<strong>call</strong>()</a> for details.`,name:"pixel_values"},{anchor:"transformers.TFVisionEncoderDecoderModel.call.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a></p> <p>If <code>past_key_values</code> is used, optionally only the last <code>decoder_input_ids</code> have to be input (see <code>past_key_values</code>).</p> <p>Provide for sequence to sequence training to the decoder. Indices can be obtained using <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.`,name:"decoder_input_ids"},{anchor:"transformers.TFVisionEncoderDecoderModel.call.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.`,name:"decoder_attention_mask"},{anchor:"transformers.TFVisionEncoderDecoderModel.call.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tuple(tuple(tf.Tensor)</code>, <em>optional</em>) &#x2014; This tuple must consist of (<code>last_hidden_state</code>, <em>optional</em>: <code>hidden_states</code>, <em>optional</em>: <code>attentions</code>) <code>last_hidden_state</code> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) is a tensor of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.`,name:"encoder_outputs"},{anchor:"transformers.TFVisionEncoderDecoderModel.call.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(tf.Tensor))</code> of length <code>config.n_layers</code> with each tuple having 4 tensors of shape <code>(batch_size, num_heads, sequence_length - 1, embed_size_per_head)</code>) &#x2014; Contains precomputed key and value hidden states of the attention blocks. 
Can be used to speed up decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.`,name:"past_key_values"},{anchor:"transformers.TFVisionEncoderDecoderModel.call.decoder_inputs_embeds",description:`<strong>decoder_inputs_embeds</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, target_sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>decoder_input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>decoder_input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"decoder_inputs_embeds"},{anchor:"transformers.TFVisionEncoderDecoderModel.call.labels",description:`<strong>labels</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss for the decoder. Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>`,name:"labels"},{anchor:"transformers.TFVisionEncoderDecoderModel.call.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"},{anchor:"transformers.TFVisionEncoderDecoderModel.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.TFVisionEncoderDecoderModel.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.TFVisionEncoderDecoderModel.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, the model will return a <code>Seq2SeqLMOutput</code> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.TFVisionEncoderDecoderModel.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation). kwargs &#x2014; (<em>optional</em>) Remaining dictionary of keyword arguments. 
Keyword arguments come in two flavors:</p> <ul> <li>Without a prefix which will be input as <code>**encoder_kwargs</code> for the encoder forward function.</li> <li>With a <em>decoder_</em> prefix which will be input as <code>**decoder_kwargs</code> for the decoder forward function.</li> </ul>`,name:"training"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFSeq2SeqLMOutput" >transformers.modeling_tf_outputs.TFSeq2SeqLMOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/vision-encoder-decoder#transformers.VisionEncoderDecoderConfig" >VisionEncoderDecoderConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(n,)</code>, <em>optional</em>, where n is the number of non-masked labels, returned when <code>labels</code> is provided) \u2014 Language modeling loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>List[tf.Tensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 List of <code>tf.Tensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, 
hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFSeq2SeqLMOutput" >transformers.modeling_tf_outputs.TFSeq2SeqLMOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),Te=new Vd({props:{$$slots:{default:[cc]},$$scope:{ctx:Q}}}),oo=new yo({props:{code:`from transformers import AutoFeatureExtractor, AutoTokenizer, TFVisionEncoderDecoderModel from PIL import Image import requests feature_extractor = AutoFeatureExtractor.from_pretrained("google/vit-base-patch16-224-in21k") decoder_tokenizer = AutoTokenizer.from_pretrained("gpt2") # initialize a bert2gpt2 from a pretrained BERT and GPT2 models. 
Note that the cross-attention layers will be randomly initialized model = TFVisionEncoderDecoderModel.from_encoder_decoder_pretrained( "google/vit-base-patch16-224-in21k", "gpt2" ) url = "http://images.cocodataset.org/val2017/000000039769.jpg" img = Image.open(requests.get(url, stream=True).raw) # forward pixel_values = feature_extractor(images=img, return_tensors="tf").pixel_values # Batch size 1 decoder_input_ids = decoder_tokenizer("Linda Davis", return_tensors="tf").input_ids # Batch size 1 outputs = model(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids) # training outputs = model(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids, labels=decoder_input_ids) loss, logits = outputs.loss, outputs.logits # save and load from pretrained model.save_pretrained("vit-gpt2") model = TFVisionEncoderDecoderModel.from_pretrained("vit-gpt2") # generation generated = model.generate(pixel_values, decoder_start_token_id=model.config.decoder.bos_token_id)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoFeatureExtractor, AutoTokenizer, TFVisionEncoderDecoderModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> requests <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = AutoFeatureExtractor.from_pretrained(<span class="hljs-string">&quot;google/vit-base-patch16-224-in21k&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>decoder_tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;gpt2&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># initialize a bert2gpt2 from a pretrained BERT and GPT2 models. Note that the cross-attention layers will be randomly initialized</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFVisionEncoderDecoderModel.from_encoder_decoder_pretrained( <span class="hljs-meta">... </span> <span class="hljs-string">&quot;google/vit-base-patch16-224-in21k&quot;</span>, <span class="hljs-string">&quot;gpt2&quot;</span> <span class="hljs-meta">... 
</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>url = <span class="hljs-string">&quot;http://images.cocodataset.org/val2017/000000039769.jpg&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>img = Image.<span class="hljs-built_in">open</span>(requests.get(url, stream=<span class="hljs-literal">True</span>).raw) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># forward</span> <span class="hljs-meta">&gt;&gt;&gt; </span>pixel_values = feature_extractor(images=img, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>).pixel_values <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>decoder_input_ids = decoder_tokenizer(<span class="hljs-string">&quot;Linda Davis&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>).input_ids <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># training</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids, labels=decoder_input_ids) <span class="hljs-meta">&gt;&gt;&gt; </span>loss, logits = outputs.loss, outputs.logits <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># save and load from pretrained</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model.save_pretrained(<span class="hljs-string">&quot;vit-gpt2&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFVisionEncoderDecoderModel.from_pretrained(<span class="hljs-string">&quot;vit-gpt2&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># generation</span> <span class="hljs-meta">&gt;&gt;&gt; </span>generated = model.generate(pixel_values, decoder_start_token_id=model.config.decoder.bos_token_id)`}}),to=new B({props:{name:"from_encoder_decoder_pretrained",anchor:"transformers.TFVisionEncoderDecoderModel.from_encoder_decoder_pretrained",parameters:[{name:"encoder_pretrained_model_name_or_path",val:": str = None"},{name:"decoder_pretrained_model_name_or_path",val:": str = None"},{name:"*model_args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/vision_encoder_decoder/modeling_tf_vision_encoder_decoder.py#L348",parametersDescription:[{anchor:"transformers.TFVisionEncoderDecoderModel.from_encoder_decoder_pretrained.encoder_pretrained_model_name_or_path",description:`<strong>encoder_pretrained_model_name_or_path</strong> (<code>str</code>, <em>optional</em>) &#x2014; Information necessary to initiate the encoder. Can be either:</p> <ul> <li>A string, the <em>model id</em> of a pretrained model hosted inside a model repo on huggingface.co. An example is <code>google/vit-base-patch16-224-in21k</code>.</li> <li>A path to a <em>directory</em> containing model weights saved using <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel.save_pretrained">save_pretrained()</a>, e.g., <code>./my_model_directory/</code>.</li> <li>A path or url to a <em>pytorch index checkpoint file</em> (e.g, <code>./pt_model/</code>). 
In this case, <code>encoder_from_pt</code> should be set to <code>True</code>.</li> </ul>`,name:"encoder_pretrained_model_name_or_path"},{anchor:"transformers.TFVisionEncoderDecoderModel.from_encoder_decoder_pretrained.decoder_pretrained_model_name_or_path",description:`<strong>decoder_pretrained_model_name_or_path</strong> (<code>str</code>, <em>optional</em>, defaults to <em>None</em>) &#x2014; Information necessary to initiate the decoder. Can be either:</p> <ul> <li>A string, the <em>model id</em> of a pretrained model hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like <code>bert-base-uncased</code>, or namespaced under a user or organization name, like <code>dbmdz/bert-base-german-cased</code>.</li> <li>A path to a <em>directory</em> containing model weights saved using <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel.save_pretrained">save_pretrained()</a>, e.g., <code>./my_model_directory/</code>.</li> <li>A path or url to a <em>pytorch checkpoint file</em> (e.g, <code>./pt_model/</code>). In this case, <code>decoder_from_pt</code> should be set to <code>True</code>.</li> </ul>`,name:"decoder_pretrained_model_name_or_path"},{anchor:"transformers.TFVisionEncoderDecoderModel.from_encoder_decoder_pretrained.model_args",description:`<strong>model_args</strong> (remaining positional arguments, <em>optional</em>) &#x2014; All remaning positional arguments will be passed to the underlying model&#x2019;s <code>__init__</code> method.`,name:"model_args"},{anchor:"transformers.TFVisionEncoderDecoderModel.from_encoder_decoder_pretrained.kwargs",description:`<strong>kwargs</strong> (remaining dictionary of keyword arguments, <em>optional</em>) &#x2014; Can be used to update the configuration object (after it being loaded) and initiate the model (e.g., <code>output_attentions=True</code>).</p> <ul> <li>To update the encoder configuration, use the prefix <em>encoder_</em> for each configuration parameter.</li> <li>To update the decoder configuration, use the prefix <em>decoder_</em> for each configuration parameter.</li> <li>To update the parent model configuration, do not use a prefix for each configuration parameter.</li> </ul> <p>Behaves differently depending on whether a <code>config</code> is provided or automatically loaded.`,name:"kwargs"}]}}),no=new yo({props:{code:`from transformers import TFVisionEncoderDecoderModel # initialize a vit-bert from a pretrained ViT and a pretrained BERT model. Note that the cross-attention layers will be randomly initialized model = TFVisionEncoderDecoderModel.from_encoder_decoder_pretrained( "google/vit-base-patch16-224-in21k", "bert-base-uncased" ) # saving model after fine-tuning model.save_pretrained("./vit-bert") # load fine-tuned model model = TFVisionEncoderDecoderModel.from_pretrained("./vit-bert")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TFVisionEncoderDecoderModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># initialize a vit-bert from a pretrained ViT and a pretrained BERT model. Note that the cross-attention layers will be randomly initialized</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFVisionEncoderDecoderModel.from_encoder_decoder_pretrained( <span class="hljs-meta">... 
</span> <span class="hljs-string">&quot;google/vit-base-patch16-224-in21k&quot;</span>, <span class="hljs-string">&quot;bert-base-uncased&quot;</span> <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># saving model after fine-tuning</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model.save_pretrained(<span class="hljs-string">&quot;./vit-bert&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># load fine-tuned model</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFVisionEncoderDecoderModel.from_pretrained(<span class="hljs-string">&quot;./vit-bert&quot;</span>)`}}),ro=new Yt({}),ao=new B({props:{name:"class transformers.FlaxVisionEncoderDecoderModel",anchor:"transformers.FlaxVisionEncoderDecoderModel",parameters:[{name:"config",val:": VisionEncoderDecoderConfig"},{name:"input_shape",val:": typing.Optional[typing.Tuple] = None"},{name:"seed",val:": int = 0"},{name:"dtype",val:": dtype = <class 'jax._src.numpy.lax_numpy.float32'>"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/vision_encoder_decoder/modeling_flax_vision_encoder_decoder.py#L268",parametersDescription:[{anchor:"transformers.FlaxVisionEncoderDecoderModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/vision-encoder-decoder#transformers.VisionEncoderDecoderConfig">VisionEncoderDecoderConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"},{anchor:"transformers.FlaxVisionEncoderDecoderModel.dtype",description:`<strong>dtype</strong> (<code>jax.numpy.dtype</code>, <em>optional</em>, defaults to <code>jax.numpy.float32</code>) &#x2014; The data type of the computation. Can be one of <code>jax.numpy.float32</code>, <code>jax.numpy.float16</code> (on GPUs) and <code>jax.numpy.bfloat16</code> (on TPUs).</p> <p>This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. 
If specified all the computation will be performed with the given <code>dtype</code>.</p> <p><strong>Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.</strong></p> <p>If you wish to change the dtype of the model parameters, see <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.to_fp16">to_fp16()</a> and <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.to_bf16">to_bf16()</a>.`,name:"dtype"}]}}),fo=new B({props:{name:"__call__",anchor:"transformers.FlaxVisionEncoderDecoderModel.__call__",parameters:[{name:"pixel_values",val:": ndarray"},{name:"decoder_input_ids",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_attention_mask",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"decoder_position_ids",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"train",val:": bool = False"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/vision_encoder_decoder/modeling_flax_vision_encoder_decoder.py#L583",parametersDescription:[{anchor:"transformers.FlaxVisionEncoderDecoderModel.__call__.pixel_values",description:`<strong>pixel_values</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, num_channels, height, width)</code>) &#x2014; Pixel values. Pixel values can be obtained using the vision model&#x2019;s feature extractor. For example, using <a href="/docs/transformers/pr_16143/en/model_doc/vit#transformers.ViTFeatureExtractor">ViTFeatureExtractor</a>. See <a href="/docs/transformers/pr_16143/en/model_doc/vit#transformers.ViTFeatureExtractor.__call__">ViTFeatureExtractor.<strong>call</strong>()</a> for details.`,name:"pixel_values"},{anchor:"transformers.FlaxVisionEncoderDecoderModel.__call__.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a>`,name:"decoder_input_ids"},{anchor:"transformers.FlaxVisionEncoderDecoderModel.__call__.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. 
Causal mask will also be used by default.`,name:"decoder_attention_mask"},{anchor:"transformers.FlaxVisionEncoderDecoderModel.__call__.decoder_position_ids",description:`<strong>decoder_position_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the range <code>[0, config.decoder.max_position_embeddings - 1]</code>.`,name:"decoder_position_ids"},{anchor:"transformers.FlaxVisionEncoderDecoderModel.__call__.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.FlaxVisionEncoderDecoderModel.__call__.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FlaxVisionEncoderDecoderModel.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, the model will return a <code>FlaxSeq2SeqLMOutput</code> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput" >transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/vision-encoder-decoder#transformers.VisionEncoderDecoderConfig" >VisionEncoderDecoderConfig</a>) and inputs.</p> <ul> <li> <p><strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(jnp.ndarray))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(jnp.ndarray)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding 
outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput" >transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),ye=new Vd({props:{$$slots:{default:[lc]},$$scope:{ctx:Q}}}),uo=new yo({props:{code:`from transformers import FlaxVisionEncoderDecoderModel, ViTFeatureExtractor, GPT2Tokenizer from PIL import Image import requests url = "http://images.cocodataset.org/val2017/000000039769.jpg" image = Image.open(requests.get(url, stream=True).raw) feature_extractor = ViTFeatureExtractor.from_pretrained("google/vit-base-patch16-224-in21k") # load output tokenizer tokenizer_output = GPT2Tokenizer.from_pretrained("gpt2") # initialize a vit-gpt2 from pretrained ViT and GPT2 models. 
Note that the cross-attention layers will be randomly initialized model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained( "google/vit-base-patch16-224-in21k", "gpt2" ) pixel_values = feature_extractor(images=image, return_tensors="np").pixel_values # use GPT2's eos_token as the pad as well as eos token model.config.eos_token_id = model.config.decoder.eos_token_id model.config.pad_token_id = model.config.eos_token_id # generation sequences = model.generate(pixel_values, num_beams=4, max_length=12).sequences captions = tokenizer_output.batch_decode(sequences, skip_special_tokens=True)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> FlaxVisionEncoderDecoderModel, ViTFeatureExtractor, GPT2Tokenizer <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> requests <span class="hljs-meta">&gt;&gt;&gt; </span>url = <span class="hljs-string">&quot;http://images.cocodataset.org/val2017/000000039769.jpg&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>image = Image.<span class="hljs-built_in">open</span>(requests.get(url, stream=<span class="hljs-literal">True</span>).raw) <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = ViTFeatureExtractor.from_pretrained(<span class="hljs-string">&quot;google/vit-base-patch16-224-in21k&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># load output tokenizer</span> <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer_output = GPT2Tokenizer.from_pretrained(<span class="hljs-string">&quot;gpt2&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># initialize a vit-gpt2 from pretrained ViT and GPT2 models. Note that the cross-attention layers will be randomly initialized</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained( <span class="hljs-meta">... </span> <span class="hljs-string">&quot;google/vit-base-patch16-224-in21k&quot;</span>, <span class="hljs-string">&quot;gpt2&quot;</span> <span class="hljs-meta">... 
</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>pixel_values = feature_extractor(images=image, return_tensors=<span class="hljs-string">&quot;np&quot;</span>).pixel_values <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># use GPT2&#x27;s eos_token as the pad as well as eos token</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model.config.eos_token_id = model.config.decoder.eos_token_id <span class="hljs-meta">&gt;&gt;&gt; </span>model.config.pad_token_id = model.config.eos_token_id <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># generation</span> <span class="hljs-meta">&gt;&gt;&gt; </span>sequences = model.generate(pixel_values, num_beams=<span class="hljs-number">4</span>, max_length=<span class="hljs-number">12</span>).sequences <span class="hljs-meta">&gt;&gt;&gt; </span>captions = tokenizer_output.batch_decode(sequences, skip_special_tokens=<span class="hljs-literal">True</span>)`}}),go=new B({props:{name:"from_encoder_decoder_pretrained",anchor:"transformers.FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained",parameters:[{name:"encoder_pretrained_model_name_or_path",val:": typing.Union[str, os.PathLike, NoneType] = None"},{name:"decoder_pretrained_model_name_or_path",val:": typing.Union[str, os.PathLike, NoneType] = None"},{name:"*model_args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/vision_encoder_decoder/modeling_flax_vision_encoder_decoder.py#L708",parametersDescription:[{anchor:"transformers.FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained.encoder_pretrained_model_name_or_path",description:`<strong>encoder_pretrained_model_name_or_path</strong> (<code>Union[str, os.PathLike]</code>, <em>optional</em>) &#x2014; Information necessary to initiate the encoder. Can be either:</p> <ul> <li>A string, the <em>model id</em> of a pretrained model hosted inside a model repo on huggingface.co. An example is <code>google/vit-base-patch16-224-in21k</code>.</li> <li>A path to a <em>directory</em> containing model weights saved using <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.save_pretrained">save_pretrained()</a>, e.g., <code>./my_model_directory/</code>.</li> </ul>`,name:"encoder_pretrained_model_name_or_path"},{anchor:"transformers.FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained.decoder_pretrained_model_name_or_path",description:`<strong>decoder_pretrained_model_name_or_path</strong> (<code>Union[str, os.PathLike]</code>, <em>optional</em>, defaults to <code>None</code>) &#x2014; Information necessary to initiate the decoder. Can be either:</p> <ul> <li>A string, the <em>model id</em> of a pretrained model hosted inside a model repo on huggingface.co. 
Valid model ids can be located at the root-level, like <code>bert-base-uncased</code>, or namespaced under a user or organization name, like <code>dbmdz/bert-base-german-cased</code>.</li> <li>A path to a <em>directory</em> containing model weights saved using <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.save_pretrained">save_pretrained()</a>, e.g., <code>./my_model_directory/</code>.</li> </ul>`,name:"decoder_pretrained_model_name_or_path"},{anchor:"transformers.FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained.model_args",description:`<strong>model_args</strong> (remaining positional arguments, <em>optional</em>) &#x2014; All remaning positional arguments will be passed to the underlying model&#x2019;s <code>__init__</code> method.`,name:"model_args"},{anchor:"transformers.FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained.kwargs",description:`<strong>kwargs</strong> (remaining dictionary of keyword arguments, <em>optional</em>) &#x2014; Can be used to update the configuration object (after it being loaded) and initiate the model (e.g., <code>output_attentions=True</code>).</p> <ul> <li>To update the encoder configuration, use the prefix <em>encoder_</em> for each configuration parameter.</li> <li>To update the decoder configuration, use the prefix <em>decoder_</em> for each configuration parameter.</li> <li>To update the parent model configuration, do not use a prefix for each configuration parameter.</li> </ul> <p>Behaves differently depending on whether a <code>config</code> is provided or automatically loaded.`,name:"kwargs"}]}}),_o=new yo({props:{code:`from transformers import FlaxVisionEncoderDecoderModel # initialize a vit-gpt2 from a pretrained ViT and a pretrained GPT2 model. Note that the cross-attention layers will be randomly initialized model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained( "google/vit-base-patch16-224-in21k", "gpt2" ) # saving model after fine-tuning model.save_pretrained("./vit-gpt2") # load fine-tuned model model = FlaxVisionEncoderDecoderModel.from_pretrained("./vit-gpt2")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> FlaxVisionEncoderDecoderModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># initialize a vit-gpt2 from a pretrained ViT and a pretrained GPT2 model. Note that the cross-attention layers will be randomly initialized</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained( <span class="hljs-meta">... </span> <span class="hljs-string">&quot;google/vit-base-patch16-224-in21k&quot;</span>, <span class="hljs-string">&quot;gpt2&quot;</span> <span class="hljs-meta">... 
</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># saving model after fine-tuning</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model.save_pretrained(<span class="hljs-string">&quot;./vit-gpt2&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># load fine-tuned model</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxVisionEncoderDecoderModel.from_pretrained(<span class="hljs-string">&quot;./vit-gpt2&quot;</span>)`}}),{c(){p=r("meta"),P=c(),m=r("h1"),M=r("a"),F=r("span"),u(y.$$.fragment),j=c(),C=r("span"),Vn=t("Vision Encoder Decoder Models"),Jt=c(),f=r("p"),$n=t("The "),Eo=r("a"),qn=t("VisionEncoderDecoderModel"),zn=t(` can be used to initialize an image-to-text-sequence model with any pretrained Transformer-based vision autoencoding model as the encoder (`),rt=r("em"),Pn=t("e.g."),Fn=c(),ko=r("a"),Cn=t("ViT"),An=t(", "),xo=r("a"),Sn=t("BEiT"),In=t(", "),jo=r("a"),Ln=t("DeiT"),Nn=t(", "),Do=r("a"),On=t("Swin"),Rn=t(`) and any pretrained language model as the decoder (`),at=r("em"),Bn=t("e.g."),Gn=c(),Mo=r("a"),Wn=t("RoBERTa"),Un=t(", "),Vo=r("a"),Hn=t("GPT2"),Zn=t(", "),$o=r("a"),Yn=t("BERT"),Jn=t(", "),qo=r("a"),Kn=t("DistilBERT"),Qn=t(")."),Kt=c(),he=r("p"),Xn=t(`The effectiveness of initializing image-to-text-sequence models with pretrained checkpoints has been shown in (for example) `),xe=r("a"),er=t("TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models"),or=t(` by Minghao Li, Tengchao Lv, Lei Cui, Yijuan Lu, Dinei Florencio, Cha Zhang, Zhoujun Li, Furu Wei.`),Qt=c(),G=r("p"),tr=t("An example of how to use a "),zo=r("a"),nr=t("VisionEncoderDecoderModel"),rr=t(" for inference can be seen in "),Po=r("a"),ar=t("TrOCR"),sr=t("."),Xt=c(),X=r("h2"),me=r("a"),st=r("span"),u(je.$$.fragment),dr=c(),dt=r("span"),ir=t("VisionEncoderDecoderConfig"),en=c(),z=r("div"),u(De.$$.fragment),cr=c(),fe=r("p"),Fo=r("a"),lr=t("VisionEncoderDecoderConfig"),pr=t(` is the configuration class to store the configuration of a `),Co=r("a"),hr=t("VisionEncoderDecoderModel"),mr=t(`. It is used to instantiate a Vision-Encoder-Text-Decoder model according to the specified arguments, defining the encoder and decoder configs.`),fr=c(),ee=r("p"),ur=t("Configuration objects inherit from "),Ao=r("a"),gr=t("PretrainedConfig"),_r=t(` and can be used to control the model outputs. Read the documentation from `),So=r("a"),vr=t("PretrainedConfig"),br=t(" for more information."),Tr=c(),it=r("p"),wr=t("Examples:"),yr=c(),u(Me.$$.fragment),Er=c(),ue=r("div"),u(Ve.$$.fragment),kr=c(),$e=r("p"),xr=t("Instantiate a "),Io=r("a"),jr=t("VisionEncoderDecoderConfig"),Dr=t(` (or a derived class) from a pre-trained encoder model configuration and decoder model configuration.`),Mr=c(),ge=r("div"),u(qe.$$.fragment),Vr=c(),oe=r("p"),$r=t("Serializes this instance to a Python dictionary. Override the default "),ct=r("em"),qr=t("to_dict()"),zr=t(" from "),lt=r("em"),Pr=t("PretrainedConfig"),Fr=t("."),on=c(),te=r("h2"),_e=r("a"),pt=r("span"),u(ze.$$.fragment),Cr=c(),ht=r("span"),Ar=t("VisionEncoderDecoderModel"),tn=c(),E=r("div"),u(Pe.$$.fragment),Sr=c(),ne=r("p"),Ir=t(`This class can be used to initialize an image-to-text-sequence model with any pretrained vision autoencoding model as the encoder and any pretrained text autoregressive model as the decoder. The encoder is loaded via `),mt=r("code"),Lr=t("from_pretrained()"),Nr=t("function and the decoder is loaded via "),ft=r("code"),Or=t("from_pretrained()"),Rr=t(`function. 
Cross-attention layers are automatically added to the decoder and should be fine-tuned on a downstream generative task, like image captioning.`),Br=c(),Fe=r("p"),Gr=t(`The effectiveness of initializing sequence-to-sequence models with pretrained checkpoints for sequence generation tasks was shown in `),Ce=r("a"),Wr=t(`Leveraging Pre-trained Checkpoints for Sequence Generation Tasks`),Ur=t(` by Sascha Rothe, Shashi Narayan, Aliaksei Severyn. Michael Matena, Yanqi Zhou, Wei Li, Peter J. Liu.`),Hr=c(),Ae=r("p"),Zr=t("Additionally, in "),Se=r("a"),Yr=t(`TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models`),Jr=t(` it is shown how leveraging large pretrained vision models for optical character recognition (OCR) yields a significant performance improvement.`),Kr=c(),ut=r("p"),Qr=t(`After such a Vision-Encoder-Text-Decoder model has been trained/fine-tuned, it can be saved/loaded just like any other models (see the examples for more information).`),Xr=c(),Ie=r("p"),ea=t("This model inherits from "),Lo=r("a"),oa=t("PreTrainedModel"),ta=t(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),na=c(),Le=r("p"),ra=t("This model is also a PyTorch "),Ne=r("a"),aa=t("torch.nn.Module"),sa=t(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),da=c(),W=r("p"),No=r("a"),ia=t("VisionEncoderDecoderModel"),ca=t(` is a generic model class that will be instantiated as a transformer architecture with one of the base vision model classes of the library as encoder and another one as decoder when created with the :meth`),gt=r("em"),la=t("~transformers.AutoModel.from_pretrained"),pa=t(` class method for the encoder and :meth`),_t=r("em"),ha=t("~transformers.AutoModelForCausalLM.from_pretrained"),ma=t(" class method for the decoder."),fa=c(),A=r("div"),u(Oe.$$.fragment),ua=c(),re=r("p"),ga=t("The "),Oo=r("a"),_a=t("VisionEncoderDecoderModel"),va=t(" forward method, overrides the "),vt=r("code"),ba=t("__call__"),Ta=t(" special method."),wa=c(),u(ve.$$.fragment),ya=c(),bt=r("p"),Ea=t("Examples:"),ka=c(),u(Re.$$.fragment),xa=c(),S=r("div"),u(Be.$$.fragment),ja=c(),Tt=r("p"),Da=t(`Instantiate an encoder and a decoder from one or two base classes of the library from pretrained model checkpoints.`),Ma=c(),ae=r("p"),Va=t("The model is set in evaluation mode by default using "),wt=r("code"),$a=t("model.eval()"),qa=t(` (Dropout modules are deactivated). To train the model, you need to first set it back in training mode with `),yt=r("code"),za=t("model.train()"),Pa=t("."),Fa=c(),Et=r("p"),Ca=t("Example:"),Aa=c(),u(Ge.$$.fragment),nn=c(),se=r("h2"),be=r("a"),kt=r("span"),u(We.$$.fragment),Sa=c(),xt=r("span"),Ia=t("TFVisionEncoderDecoderModel"),rn=c(),k=r("div"),u(Ue.$$.fragment),La=c(),de=r("p"),Na=t(`This class can be used to initialize an image-to-text-sequence model with any pretrained vision autoencoding model as the encoder and any pretrained text autoregressive model as the decoder. The encoder is loaded via `),jt=r("code"),Oa=t("from_pretrained()"),Ra=t("function and the decoder is loaded via "),Dt=r("code"),Ba=t("from_pretrained()"),Ga=t(`function. 
Cross-attention layers are automatically added to the decoder and should be fine-tuned on a downstream generative task, like image captioning.`),Wa=c(),He=r("p"),Ua=t(`The effectiveness of initializing sequence-to-sequence models with pretrained checkpoints for sequence generation tasks was shown in `),Ze=r("a"),Ha=t(`Leveraging Pre-trained Checkpoints for Sequence Generation Tasks`),Za=t(` by Sascha Rothe, Shashi Narayan, Aliaksei Severyn. Michael Matena, Yanqi Zhou, Wei Li, Peter J. Liu.`),Ya=c(),Ye=r("p"),Ja=t("Additionally, in "),Je=r("a"),Ka=t(`TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models`),Qa=t(` it is shown how leveraging large pretrained vision models for optical character recognition (OCR) yields a significant performance improvement.`),Xa=c(),Mt=r("p"),es=t(`After such a Vision-Encoder-Text-Decoder model has been trained/fine-tuned, it can be saved/loaded just like any other models (see the examples for more information).`),os=c(),Ke=r("p"),ts=t("This model inherits from "),Ro=r("a"),ns=t("TFPreTrainedModel"),rs=t(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),as=c(),Qe=r("p"),ss=t("This model is also a "),Xe=r("a"),ds=t("tf.keras.Model"),is=t(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),cs=c(),U=r("p"),Bo=r("a"),ls=t("TFVisionEncoderDecoderModel"),ps=t(` is a generic model class that will be instantiated as a transformer architecture with one of the base vision model classes of the library as encoder and another one of the base model classes as decoder when created with the `),Vt=r("code"),hs=t("from_pretrained()"),ms=t(`class method for the encoder and `),$t=r("code"),fs=t("from_pretrained()"),us=t("class method for the decoder."),gs=c(),I=r("div"),u(eo.$$.fragment),_s=c(),ie=r("p"),vs=t("The "),Go=r("a"),bs=t("TFVisionEncoderDecoderModel"),Ts=t(" forward method, overrides the "),qt=r("code"),ws=t("__call__"),ys=t(" special method."),Es=c(),u(Te.$$.fragment),ks=c(),zt=r("p"),xs=t("Examples:"),js=c(),u(oo.$$.fragment),Ds=c(),O=r("div"),u(to.$$.fragment),Ms=c(),Pt=r("p"),Vs=t(`Instantiate an encoder and a decoder from one or two base classes of the library from pretrained model checkpoints.`),$s=c(),Ft=r("p"),qs=t("Example:"),zs=c(),u(no.$$.fragment),an=c(),ce=r("h2"),we=r("a"),Ct=r("span"),u(ro.$$.fragment),Ps=c(),At=r("span"),Fs=t("FlaxVisionEncoderDecoderModel"),sn=c(),x=r("div"),u(ao.$$.fragment),Cs=c(),le=r("p"),As=t(`This class can be used to initialize an image-to-text-sequence model with any pretrained vision autoencoding model as the encoder and any pretrained text autoregressive model as the decoder. The encoder is loaded via `),St=r("code"),Ss=t("from_pretrained()"),Is=t("function and the decoder is loaded via "),It=r("code"),Ls=t("from_pretrained()"),Ns=t(`function. Cross-attention layers are automatically added to the decoder and should be fine-tuned on a downstream generative task, like image captioning.`),Os=c(),so=r("p"),Rs=t(`The effectiveness of initializing sequence-to-sequence models with pretrained checkpoints for sequence generation tasks was shown in `),io=r("a"),Bs=t(`Leveraging Pre-trained Checkpoints for Sequence Generation Tasks`),Gs=t(` by Sascha Rothe, Shashi Narayan, Aliaksei Severyn. Michael Matena, Yanqi Zhou, Wei Li, Peter J. 
Liu.`),Ws=c(),co=r("p"),Us=t("Additionally, in "),lo=r("a"),Hs=t(`TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models`),Zs=t(` it is shown how leveraging large pretrained vision models for optical character recognition (OCR) yields a significant performance improvement.`),Ys=c(),Lt=r("p"),Js=t(`After such a Vision-Encoder-Text-Decoder model has been trained/fine-tuned, it can be saved/loaded just like any other models (see the examples for more information).`),Ks=c(),po=r("p"),Qs=t("This model inherits from "),Wo=r("a"),Xs=t("FlaxPreTrainedModel"),ed=t(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),od=c(),ho=r("p"),td=t(`This model is also a Flax Linen `),mo=r("a"),nd=t("flax.nn.Module"),rd=t(` subclass. Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.`),ad=c(),H=r("p"),Uo=r("a"),sd=t("FlaxVisionEncoderDecoderModel"),dd=t(` is a generic model class that will be instantiated as a transformer architecture with the module (flax.nn.Module) of one of the base vision model classes of the library as encoder module and another one as decoder module when created with the :meth`),Nt=r("em"),id=t("~transformers.FlaxAutoModel.from_pretrained"),cd=t(` class method for the encoder and :meth`),Ot=r("em"),ld=t("~transformers.FlaxAutoModelForCausalLM.from_pretrained"),pd=t(" class method for the decoder."),hd=c(),L=r("div"),u(fo.$$.fragment),md=c(),pe=r("p"),fd=t("The "),Ho=r("a"),ud=t("FlaxVisionEncoderDecoderModel"),gd=t(" forward method, overrides the "),Rt=r("code"),_d=t("__call__"),vd=t(" special method."),bd=c(),u(ye.$$.fragment),Td=c(),Bt=r("p"),wd=t("Examples:"),yd=c(),u(uo.$$.fragment),Ed=c(),R=r("div"),u(go.$$.fragment),kd=c(),Gt=r("p"),xd=t(`Instantiate an encoder and a decoder from one or two base classes of the library from pretrained model checkpoints.`),jd=c(),Wt=r("p"),Dd=t("Example:"),Md=c(),u(_o.$$.fragment),this.h()},l(d){const h=dc('[data-svelte="svelte-1phssyn"]',document.head);p=a(h,"META",{name:!0,content:!0}),h.forEach(o),P=l(d),m=a(d,"H1",{class:!0});var vo=s(m);M=a(vo,"A",{id:!0,class:!0,href:!0});var Ut=s(M);F=a(Ut,"SPAN",{});var Ht=s(F);g(y.$$.fragment,Ht),Ht.forEach(o),Ut.forEach(o),j=l(vo),C=a(vo,"SPAN",{});var $d=s(C);Vn=n($d,"Vision Encoder Decoder Models"),$d.forEach(o),vo.forEach(o),Jt=l(d),f=a(d,"P",{});var D=s(f);$n=n(D,"The "),Eo=a(D,"A",{href:!0});var qd=s(Eo);qn=n(qd,"VisionEncoderDecoderModel"),qd.forEach(o),zn=n(D,` can be used to initialize an image-to-text-sequence model with any pretrained Transformer-based vision autoencoding model as the encoder (`),rt=a(D,"EM",{});var zd=s(rt);Pn=n(zd,"e.g."),zd.forEach(o),Fn=l(D),ko=a(D,"A",{href:!0});var Pd=s(ko);Cn=n(Pd,"ViT"),Pd.forEach(o),An=n(D,", "),xo=a(D,"A",{href:!0});var Fd=s(xo);Sn=n(Fd,"BEiT"),Fd.forEach(o),In=n(D,", "),jo=a(D,"A",{href:!0});var Cd=s(jo);Ln=n(Cd,"DeiT"),Cd.forEach(o),Nn=n(D,", "),Do=a(D,"A",{href:!0});var Ad=s(Do);On=n(Ad,"Swin"),Ad.forEach(o),Rn=n(D,`) and any pretrained language model as the decoder (`),at=a(D,"EM",{});var Sd=s(at);Bn=n(Sd,"e.g."),Sd.forEach(o),Gn=l(D),Mo=a(D,"A",{href:!0});var Id=s(Mo);Wn=n(Id,"RoBERTa"),Id.forEach(o),Un=n(D,", "),Vo=a(D,"A",{href:!0});var Ld=s(Vo);Hn=n(Ld,"GPT2"),Ld.forEach(o),Zn=n(D,", "),$o=a(D,"A",{href:!0});var Nd=s($o);Yn=n(Nd,"BERT"),Nd.forEach(o),Jn=n(D,", "),qo=a(D,"A",{href:!0});var 
Od=s(qo);Kn=n(Od,"DistilBERT"),Od.forEach(o),Qn=n(D,")."),D.forEach(o),Kt=l(d),he=a(d,"P",{});var cn=s(he);Xn=n(cn,`The effectiveness of initializing image-to-text-sequence models with pretrained checkpoints has been shown in (for example) `),xe=a(cn,"A",{href:!0,rel:!0});var Rd=s(xe);er=n(Rd,"TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models"),Rd.forEach(o),or=n(cn,` by Minghao Li, Tengchao Lv, Lei Cui, Yijuan Lu, Dinei Florencio, Cha Zhang, Zhoujun Li, Furu Wei.`),cn.forEach(o),Qt=l(d),G=a(d,"P",{});var Zo=s(G);tr=n(Zo,"An example of how to use a "),zo=a(Zo,"A",{href:!0});var Bd=s(zo);nr=n(Bd,"VisionEncoderDecoderModel"),Bd.forEach(o),rr=n(Zo," for inference can be seen in "),Po=a(Zo,"A",{href:!0});var Gd=s(Po);ar=n(Gd,"TrOCR"),Gd.forEach(o),sr=n(Zo,"."),Zo.forEach(o),Xt=l(d),X=a(d,"H2",{class:!0});var ln=s(X);me=a(ln,"A",{id:!0,class:!0,href:!0});var Wd=s(me);st=a(Wd,"SPAN",{});var Ud=s(st);g(je.$$.fragment,Ud),Ud.forEach(o),Wd.forEach(o),dr=l(ln),dt=a(ln,"SPAN",{});var Hd=s(dt);ir=n(Hd,"VisionEncoderDecoderConfig"),Hd.forEach(o),ln.forEach(o),en=l(d),z=a(d,"DIV",{class:!0});var N=s(z);g(De.$$.fragment,N),cr=l(N),fe=a(N,"P",{});var Zt=s(fe);Fo=a(Zt,"A",{href:!0});var Zd=s(Fo);lr=n(Zd,"VisionEncoderDecoderConfig"),Zd.forEach(o),pr=n(Zt,` is the configuration class to store the configuration of a `),Co=a(Zt,"A",{href:!0});var Yd=s(Co);hr=n(Yd,"VisionEncoderDecoderModel"),Yd.forEach(o),mr=n(Zt,`. It is used to instantiate a Vision-Encoder-Text-Decoder model according to the specified arguments, defining the encoder and decoder configs.`),Zt.forEach(o),fr=l(N),ee=a(N,"P",{});var Yo=s(ee);ur=n(Yo,"Configuration objects inherit from "),Ao=a(Yo,"A",{href:!0});var Jd=s(Ao);gr=n(Jd,"PretrainedConfig"),Jd.forEach(o),_r=n(Yo,` and can be used to control the model outputs. Read the documentation from `),So=a(Yo,"A",{href:!0});var Kd=s(So);vr=n(Kd,"PretrainedConfig"),Kd.forEach(o),br=n(Yo," for more information."),Yo.forEach(o),Tr=l(N),it=a(N,"P",{});var Qd=s(it);wr=n(Qd,"Examples:"),Qd.forEach(o),yr=l(N),g(Me.$$.fragment,N),Er=l(N),ue=a(N,"DIV",{class:!0});var pn=s(ue);g(Ve.$$.fragment,pn),kr=l(pn),$e=a(pn,"P",{});var hn=s($e);xr=n(hn,"Instantiate a "),Io=a(hn,"A",{href:!0});var Xd=s(Io);jr=n(Xd,"VisionEncoderDecoderConfig"),Xd.forEach(o),Dr=n(hn,` (or a derived class) from a pre-trained encoder model configuration and decoder model configuration.`),hn.forEach(o),pn.forEach(o),Mr=l(N),ge=a(N,"DIV",{class:!0});var mn=s(ge);g(qe.$$.fragment,mn),Vr=l(mn),oe=a(mn,"P",{});var Jo=s(oe);$r=n(Jo,"Serializes this instance to a Python dictionary. Override the default "),ct=a(Jo,"EM",{});var ei=s(ct);qr=n(ei,"to_dict()"),ei.forEach(o),zr=n(Jo," from "),lt=a(Jo,"EM",{});var oi=s(lt);Pr=n(oi,"PretrainedConfig"),oi.forEach(o),Fr=n(Jo,"."),Jo.forEach(o),mn.forEach(o),N.forEach(o),on=l(d),te=a(d,"H2",{class:!0});var fn=s(te);_e=a(fn,"A",{id:!0,class:!0,href:!0});var ti=s(_e);pt=a(ti,"SPAN",{});var ni=s(pt);g(ze.$$.fragment,ni),ni.forEach(o),ti.forEach(o),Cr=l(fn),ht=a(fn,"SPAN",{});var ri=s(ht);Ar=n(ri,"VisionEncoderDecoderModel"),ri.forEach(o),fn.forEach(o),tn=l(d),E=a(d,"DIV",{class:!0});var V=s(E);g(Pe.$$.fragment,V),Sr=l(V),ne=a(V,"P",{});var Ko=s(ne);Ir=n(Ko,`This class can be used to initialize an image-to-text-sequence model with any pretrained vision autoencoding model as the encoder and any pretrained text autoregressive model as the decoder. 
The encoder is loaded via `),mt=a(Ko,"CODE",{});var ai=s(mt);Lr=n(ai,"from_pretrained()"),ai.forEach(o),Nr=n(Ko,"function and the decoder is loaded via "),ft=a(Ko,"CODE",{});var si=s(ft);Or=n(si,"from_pretrained()"),si.forEach(o),Rr=n(Ko,`function. Cross-attention layers are automatically added to the decoder and should be fine-tuned on a downstream generative task, like image captioning.`),Ko.forEach(o),Br=l(V),Fe=a(V,"P",{});var un=s(Fe);Gr=n(un,`The effectiveness of initializing sequence-to-sequence models with pretrained checkpoints for sequence generation tasks was shown in `),Ce=a(un,"A",{href:!0,rel:!0});var di=s(Ce);Wr=n(di,`Leveraging Pre-trained Checkpoints for Sequence Generation Tasks`),di.forEach(o),Ur=n(un,` by Sascha Rothe, Shashi Narayan, Aliaksei Severyn. Michael Matena, Yanqi Zhou, Wei Li, Peter J. Liu.`),un.forEach(o),Hr=l(V),Ae=a(V,"P",{});var gn=s(Ae);Zr=n(gn,"Additionally, in "),Se=a(gn,"A",{href:!0,rel:!0});var ii=s(Se);Yr=n(ii,`TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models`),ii.forEach(o),Jr=n(gn,` it is shown how leveraging large pretrained vision models for optical character recognition (OCR) yields a significant performance improvement.`),gn.forEach(o),Kr=l(V),ut=a(V,"P",{});var ci=s(ut);Qr=n(ci,`After such a Vision-Encoder-Text-Decoder model has been trained/fine-tuned, it can be saved/loaded just like any other models (see the examples for more information).`),ci.forEach(o),Xr=l(V),Ie=a(V,"P",{});var _n=s(Ie);ea=n(_n,"This model inherits from "),Lo=a(_n,"A",{href:!0});var li=s(Lo);oa=n(li,"PreTrainedModel"),li.forEach(o),ta=n(_n,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),_n.forEach(o),na=l(V),Le=a(V,"P",{});var vn=s(Le);ra=n(vn,"This model is also a PyTorch "),Ne=a(vn,"A",{href:!0,rel:!0});var pi=s(Ne);aa=n(pi,"torch.nn.Module"),pi.forEach(o),sa=n(vn,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),vn.forEach(o),da=l(V),W=a(V,"P",{});var bo=s(W);No=a(bo,"A",{href:!0});var hi=s(No);ia=n(hi,"VisionEncoderDecoderModel"),hi.forEach(o),ca=n(bo,` is a generic model class that will be instantiated as a transformer architecture with one of the base vision model classes of the library as encoder and another one as decoder when created with the :meth`),gt=a(bo,"EM",{});var mi=s(gt);la=n(mi,"~transformers.AutoModel.from_pretrained"),mi.forEach(o),pa=n(bo,` class method for the encoder and :meth`),_t=a(bo,"EM",{});var fi=s(_t);ha=n(fi,"~transformers.AutoModelForCausalLM.from_pretrained"),fi.forEach(o),ma=n(bo," class method for the decoder."),bo.forEach(o),fa=l(V),A=a(V,"DIV",{class:!0});var Z=s(A);g(Oe.$$.fragment,Z),ua=l(Z),re=a(Z,"P",{});var Qo=s(re);ga=n(Qo,"The "),Oo=a(Qo,"A",{href:!0});var ui=s(Oo);_a=n(ui,"VisionEncoderDecoderModel"),ui.forEach(o),va=n(Qo," forward method, overrides the "),vt=a(Qo,"CODE",{});var gi=s(vt);ba=n(gi,"__call__"),gi.forEach(o),Ta=n(Qo," special method."),Qo.forEach(o),wa=l(Z),g(ve.$$.fragment,Z),ya=l(Z),bt=a(Z,"P",{});var _i=s(bt);Ea=n(_i,"Examples:"),_i.forEach(o),ka=l(Z),g(Re.$$.fragment,Z),Z.forEach(o),xa=l(V),S=a(V,"DIV",{class:!0});var Y=s(S);g(Be.$$.fragment,Y),ja=l(Y),Tt=a(Y,"P",{});var vi=s(Tt);Da=n(vi,`Instantiate an encoder and a decoder from one or two base classes of the library from pretrained model checkpoints.`),vi.forEach(o),Ma=l(Y),ae=a(Y,"P",{});var Xo=s(ae);Va=n(Xo,"The model is set in evaluation mode by default using "),wt=a(Xo,"CODE",{});var bi=s(wt);$a=n(bi,"model.eval()"),bi.forEach(o),qa=n(Xo,` (Dropout modules are deactivated). To train the model, you need to first set it back in training mode with `),yt=a(Xo,"CODE",{});var Ti=s(yt);za=n(Ti,"model.train()"),Ti.forEach(o),Pa=n(Xo,"."),Xo.forEach(o),Fa=l(Y),Et=a(Y,"P",{});var wi=s(Et);Ca=n(wi,"Example:"),wi.forEach(o),Aa=l(Y),g(Ge.$$.fragment,Y),Y.forEach(o),V.forEach(o),nn=l(d),se=a(d,"H2",{class:!0});var bn=s(se);be=a(bn,"A",{id:!0,class:!0,href:!0});var yi=s(be);kt=a(yi,"SPAN",{});var Ei=s(kt);g(We.$$.fragment,Ei),Ei.forEach(o),yi.forEach(o),Sa=l(bn),xt=a(bn,"SPAN",{});var ki=s(xt);Ia=n(ki,"TFVisionEncoderDecoderModel"),ki.forEach(o),bn.forEach(o),rn=l(d),k=a(d,"DIV",{class:!0});var $=s(k);g(Ue.$$.fragment,$),La=l($),de=a($,"P",{});var et=s(de);Na=n(et,`This class can be used to initialize an image-to-text-sequence model with any pretrained vision autoencoding model as the encoder and any pretrained text autoregressive model as the decoder. The encoder is loaded via `),jt=a(et,"CODE",{});var xi=s(jt);Oa=n(xi,"from_pretrained()"),xi.forEach(o),Ra=n(et,"function and the decoder is loaded via "),Dt=a(et,"CODE",{});var ji=s(Dt);Ba=n(ji,"from_pretrained()"),ji.forEach(o),Ga=n(et,`function. Cross-attention layers are automatically added to the decoder and should be fine-tuned on a downstream generative task, like image captioning.`),et.forEach(o),Wa=l($),He=a($,"P",{});var Tn=s(He);Ua=n(Tn,`The effectiveness of initializing sequence-to-sequence models with pretrained checkpoints for sequence generation tasks was shown in `),Ze=a(Tn,"A",{href:!0,rel:!0});var Di=s(Ze);Ha=n(Di,`Leveraging Pre-trained Checkpoints for Sequence Generation Tasks`),Di.forEach(o),Za=n(Tn,` by Sascha Rothe, Shashi Narayan, Aliaksei Severyn. Michael Matena, Yanqi Zhou, Wei Li, Peter J. 
Liu.`),Tn.forEach(o),Ya=l($),Ye=a($,"P",{});var wn=s(Ye);Ja=n(wn,"Additionally, in "),Je=a(wn,"A",{href:!0,rel:!0});var Mi=s(Je);Ka=n(Mi,`TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models`),Mi.forEach(o),Qa=n(wn,` it is shown how leveraging large pretrained vision models for optical character recognition (OCR) yields a significant performance improvement.`),wn.forEach(o),Xa=l($),Mt=a($,"P",{});var Vi=s(Mt);es=n(Vi,`After such a Vision-Encoder-Text-Decoder model has been trained/fine-tuned, it can be saved/loaded just like any other models (see the examples for more information).`),Vi.forEach(o),os=l($),Ke=a($,"P",{});var yn=s(Ke);ts=n(yn,"This model inherits from "),Ro=a(yn,"A",{href:!0});var $i=s(Ro);ns=n($i,"TFPreTrainedModel"),$i.forEach(o),rs=n(yn,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),yn.forEach(o),as=l($),Qe=a($,"P",{});var En=s(Qe);ss=n(En,"This model is also a "),Xe=a(En,"A",{href:!0,rel:!0});var qi=s(Xe);ds=n(qi,"tf.keras.Model"),qi.forEach(o),is=n(En,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),En.forEach(o),cs=l($),U=a($,"P",{});var To=s(U);Bo=a(To,"A",{href:!0});var zi=s(Bo);ls=n(zi,"TFVisionEncoderDecoderModel"),zi.forEach(o),ps=n(To,` is a generic model class that will be instantiated as a transformer architecture with one of the base vision model classes of the library as encoder and another one of the base model classes as decoder when created with the `),Vt=a(To,"CODE",{});var Pi=s(Vt);hs=n(Pi,"from_pretrained()"),Pi.forEach(o),ms=n(To,`class method for the encoder and `),$t=a(To,"CODE",{});var Fi=s($t);fs=n(Fi,"from_pretrained()"),Fi.forEach(o),us=n(To,"class method for the decoder."),To.forEach(o),gs=l($),I=a($,"DIV",{class:!0});var J=s(I);g(eo.$$.fragment,J),_s=l(J),ie=a(J,"P",{});var ot=s(ie);vs=n(ot,"The "),Go=a(ot,"A",{href:!0});var Ci=s(Go);bs=n(Ci,"TFVisionEncoderDecoderModel"),Ci.forEach(o),Ts=n(ot," forward method, overrides the "),qt=a(ot,"CODE",{});var Ai=s(qt);ws=n(Ai,"__call__"),Ai.forEach(o),ys=n(ot," special method."),ot.forEach(o),Es=l(J),g(Te.$$.fragment,J),ks=l(J),zt=a(J,"P",{});var Si=s(zt);xs=n(Si,"Examples:"),Si.forEach(o),js=l(J),g(oo.$$.fragment,J),J.forEach(o),Ds=l($),O=a($,"DIV",{class:!0});var Ee=s(O);g(to.$$.fragment,Ee),Ms=l(Ee),Pt=a(Ee,"P",{});var Ii=s(Pt);Vs=n(Ii,`Instantiate an encoder and a decoder from one or two base classes of the library from pretrained model checkpoints.`),Ii.forEach(o),$s=l(Ee),Ft=a(Ee,"P",{});var Li=s(Ft);qs=n(Li,"Example:"),Li.forEach(o),zs=l(Ee),g(no.$$.fragment,Ee),Ee.forEach(o),$.forEach(o),an=l(d),ce=a(d,"H2",{class:!0});var kn=s(ce);we=a(kn,"A",{id:!0,class:!0,href:!0});var Ni=s(we);Ct=a(Ni,"SPAN",{});var Oi=s(Ct);g(ro.$$.fragment,Oi),Oi.forEach(o),Ni.forEach(o),Ps=l(kn),At=a(kn,"SPAN",{});var Ri=s(At);Fs=n(Ri,"FlaxVisionEncoderDecoderModel"),Ri.forEach(o),kn.forEach(o),sn=l(d),x=a(d,"DIV",{class:!0});var q=s(x);g(ao.$$.fragment,q),Cs=l(q),le=a(q,"P",{});var tt=s(le);As=n(tt,`This class can be used to initialize an image-to-text-sequence model with any pretrained vision autoencoding model as the encoder and any pretrained text autoregressive model as the decoder. 
The encoder is loaded via `),St=a(tt,"CODE",{});var Bi=s(St);Ss=n(Bi,"from_pretrained()"),Bi.forEach(o),Is=n(tt,"function and the decoder is loaded via "),It=a(tt,"CODE",{});var Gi=s(It);Ls=n(Gi,"from_pretrained()"),Gi.forEach(o),Ns=n(tt,`function. Cross-attention layers are automatically added to the decoder and should be fine-tuned on a downstream generative task, like image captioning.`),tt.forEach(o),Os=l(q),so=a(q,"P",{});var xn=s(so);Rs=n(xn,`The effectiveness of initializing sequence-to-sequence models with pretrained checkpoints for sequence generation tasks was shown in `),io=a(xn,"A",{href:!0,rel:!0});var Wi=s(io);Bs=n(Wi,`Leveraging Pre-trained Checkpoints for Sequence Generation Tasks`),Wi.forEach(o),Gs=n(xn,` by Sascha Rothe, Shashi Narayan, Aliaksei Severyn. Michael Matena, Yanqi Zhou, Wei Li, Peter J. Liu.`),xn.forEach(o),Ws=l(q),co=a(q,"P",{});var jn=s(co);Us=n(jn,"Additionally, in "),lo=a(jn,"A",{href:!0,rel:!0});var Ui=s(lo);Hs=n(Ui,`TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models`),Ui.forEach(o),Zs=n(jn,` it is shown how leveraging large pretrained vision models for optical character recognition (OCR) yields a significant performance improvement.`),jn.forEach(o),Ys=l(q),Lt=a(q,"P",{});var Hi=s(Lt);Js=n(Hi,`After such a Vision-Encoder-Text-Decoder model has been trained/fine-tuned, it can be saved/loaded just like any other models (see the examples for more information).`),Hi.forEach(o),Ks=l(q),po=a(q,"P",{});var Dn=s(po);Qs=n(Dn,"This model inherits from "),Wo=a(Dn,"A",{href:!0});var Zi=s(Wo);Xs=n(Zi,"FlaxPreTrainedModel"),Zi.forEach(o),ed=n(Dn,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Dn.forEach(o),od=l(q),ho=a(q,"P",{});var Mn=s(ho);td=n(Mn,`This model is also a Flax Linen `),mo=a(Mn,"A",{href:!0,rel:!0});var Yi=s(mo);nd=n(Yi,"flax.nn.Module"),Yi.forEach(o),rd=n(Mn,` subclass. 
Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.`),Mn.forEach(o),ad=l(q),H=a(q,"P",{});var wo=s(H);Uo=a(wo,"A",{href:!0});var Ji=s(Uo);sd=n(Ji,"FlaxVisionEncoderDecoderModel"),Ji.forEach(o),dd=n(wo,` is a generic model class that will be instantiated as a transformer architecture with the module (flax.nn.Module) of one of the base vision model classes of the library as encoder module and another one as decoder module when created with the :meth`),Nt=a(wo,"EM",{});var Ki=s(Nt);id=n(Ki,"~transformers.FlaxAutoModel.from_pretrained"),Ki.forEach(o),cd=n(wo,` class method for the encoder and :meth`),Ot=a(wo,"EM",{});var Qi=s(Ot);ld=n(Qi,"~transformers.FlaxAutoModelForCausalLM.from_pretrained"),Qi.forEach(o),pd=n(wo," class method for the decoder."),wo.forEach(o),hd=l(q),L=a(q,"DIV",{class:!0});var K=s(L);g(fo.$$.fragment,K),md=l(K),pe=a(K,"P",{});var nt=s(pe);fd=n(nt,"The "),Ho=a(nt,"A",{href:!0});var Xi=s(Ho);ud=n(Xi,"FlaxVisionEncoderDecoderModel"),Xi.forEach(o),gd=n(nt," forward method, overrides the "),Rt=a(nt,"CODE",{});var ec=s(Rt);_d=n(ec,"__call__"),ec.forEach(o),vd=n(nt," special method."),nt.forEach(o),bd=l(K),g(ye.$$.fragment,K),Td=l(K),Bt=a(K,"P",{});var oc=s(Bt);wd=n(oc,"Examples:"),oc.forEach(o),yd=l(K),g(uo.$$.fragment,K),K.forEach(o),Ed=l(q),R=a(q,"DIV",{class:!0});var ke=s(R);g(go.$$.fragment,ke),kd=l(ke),Gt=a(ke,"P",{});var tc=s(Gt);xd=n(tc,`Instantiate an encoder and a decoder from one or two base classes of the library from pretrained model checkpoints.`),tc.forEach(o),jd=l(ke),Wt=a(ke,"P",{});var nc=s(Wt);Dd=n(nc,"Example:"),nc.forEach(o),Md=l(ke),g(_o.$$.fragment,ke),ke.forEach(o),q.forEach(o),this.h()},h(){i(p,"name","hf:doc:metadata"),i(p,"content",JSON.stringify(hc)),i(M,"id","vision-encoder-decoder-models"),i(M,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),i(M,"href","#vision-encoder-decoder-models"),i(m,"class","relative group"),i(Eo,"href","/docs/transformers/pr_16143/en/model_doc/vision-encoder-decoder#transformers.VisionEncoderDecoderModel"),i(ko,"href","vit"),i(xo,"href","beit"),i(jo,"href","deit"),i(Do,"href","swin"),i(Mo,"href","roberta"),i(Vo,"href","gpt2"),i($o,"href","bert"),i(qo,"href","distilbert"),i(xe,"href","https://arxiv.org/abs/2109.10282"),i(xe,"rel","nofollow"),i(zo,"href","/docs/transformers/pr_16143/en/model_doc/vision-encoder-decoder#transformers.VisionEncoderDecoderModel"),i(Po,"href","trocr"),i(me,"id","transformers.VisionEncoderDecoderConfig"),i(me,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),i(me,"href","#transformers.VisionEncoderDecoderConfig"),i(X,"class","relative 
group"),i(Fo,"href","/docs/transformers/pr_16143/en/model_doc/vision-encoder-decoder#transformers.VisionEncoderDecoderConfig"),i(Co,"href","/docs/transformers/pr_16143/en/model_doc/vision-encoder-decoder#transformers.VisionEncoderDecoderModel"),i(Ao,"href","/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig"),i(So,"href","/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig"),i(Io,"href","/docs/transformers/pr_16143/en/model_doc/vision-encoder-decoder#transformers.VisionEncoderDecoderConfig"),i(ue,"class","docstring"),i(ge,"class","docstring"),i(z,"class","docstring"),i(_e,"id","transformers.VisionEncoderDecoderModel"),i(_e,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),i(_e,"href","#transformers.VisionEncoderDecoderModel"),i(te,"class","relative group"),i(Ce,"href","https://arxiv.org/abs/1907.12461"),i(Ce,"rel","nofollow"),i(Se,"href","https://arxiv.org/abs/2109.10282"),i(Se,"rel","nofollow"),i(Lo,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel"),i(Ne,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),i(Ne,"rel","nofollow"),i(No,"href","/docs/transformers/pr_16143/en/model_doc/vision-encoder-decoder#transformers.VisionEncoderDecoderModel"),i(Oo,"href","/docs/transformers/pr_16143/en/model_doc/vision-encoder-decoder#transformers.VisionEncoderDecoderModel"),i(A,"class","docstring"),i(S,"class","docstring"),i(E,"class","docstring"),i(be,"id","transformers.TFVisionEncoderDecoderModel"),i(be,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),i(be,"href","#transformers.TFVisionEncoderDecoderModel"),i(se,"class","relative group"),i(Ze,"href","https://arxiv.org/abs/1907.12461"),i(Ze,"rel","nofollow"),i(Je,"href","https://arxiv.org/abs/2109.10282"),i(Je,"rel","nofollow"),i(Ro,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel"),i(Xe,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),i(Xe,"rel","nofollow"),i(Bo,"href","/docs/transformers/pr_16143/en/model_doc/vision-encoder-decoder#transformers.TFVisionEncoderDecoderModel"),i(Go,"href","/docs/transformers/pr_16143/en/model_doc/vision-encoder-decoder#transformers.TFVisionEncoderDecoderModel"),i(I,"class","docstring"),i(O,"class","docstring"),i(k,"class","docstring"),i(we,"id","transformers.FlaxVisionEncoderDecoderModel"),i(we,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),i(we,"href","#transformers.FlaxVisionEncoderDecoderModel"),i(ce,"class","relative 
group"),i(io,"href","https://arxiv.org/abs/1907.12461"),i(io,"rel","nofollow"),i(lo,"href","https://arxiv.org/abs/2109.10282"),i(lo,"rel","nofollow"),i(Wo,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel"),i(mo,"href","https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html"),i(mo,"rel","nofollow"),i(Uo,"href","/docs/transformers/pr_16143/en/model_doc/vision-encoder-decoder#transformers.FlaxVisionEncoderDecoderModel"),i(Ho,"href","/docs/transformers/pr_16143/en/model_doc/vision-encoder-decoder#transformers.FlaxVisionEncoderDecoderModel"),i(L,"class","docstring"),i(R,"class","docstring"),i(x,"class","docstring")},m(d,h){e(document.head,p),w(d,P,h),w(d,m,h),e(m,M),e(M,F),_(y,F,null),e(m,j),e(m,C),e(C,Vn),w(d,Jt,h),w(d,f,h),e(f,$n),e(f,Eo),e(Eo,qn),e(f,zn),e(f,rt),e(rt,Pn),e(f,Fn),e(f,ko),e(ko,Cn),e(f,An),e(f,xo),e(xo,Sn),e(f,In),e(f,jo),e(jo,Ln),e(f,Nn),e(f,Do),e(Do,On),e(f,Rn),e(f,at),e(at,Bn),e(f,Gn),e(f,Mo),e(Mo,Wn),e(f,Un),e(f,Vo),e(Vo,Hn),e(f,Zn),e(f,$o),e($o,Yn),e(f,Jn),e(f,qo),e(qo,Kn),e(f,Qn),w(d,Kt,h),w(d,he,h),e(he,Xn),e(he,xe),e(xe,er),e(he,or),w(d,Qt,h),w(d,G,h),e(G,tr),e(G,zo),e(zo,nr),e(G,rr),e(G,Po),e(Po,ar),e(G,sr),w(d,Xt,h),w(d,X,h),e(X,me),e(me,st),_(je,st,null),e(X,dr),e(X,dt),e(dt,ir),w(d,en,h),w(d,z,h),_(De,z,null),e(z,cr),e(z,fe),e(fe,Fo),e(Fo,lr),e(fe,pr),e(fe,Co),e(Co,hr),e(fe,mr),e(z,fr),e(z,ee),e(ee,ur),e(ee,Ao),e(Ao,gr),e(ee,_r),e(ee,So),e(So,vr),e(ee,br),e(z,Tr),e(z,it),e(it,wr),e(z,yr),_(Me,z,null),e(z,Er),e(z,ue),_(Ve,ue,null),e(ue,kr),e(ue,$e),e($e,xr),e($e,Io),e(Io,jr),e($e,Dr),e(z,Mr),e(z,ge),_(qe,ge,null),e(ge,Vr),e(ge,oe),e(oe,$r),e(oe,ct),e(ct,qr),e(oe,zr),e(oe,lt),e(lt,Pr),e(oe,Fr),w(d,on,h),w(d,te,h),e(te,_e),e(_e,pt),_(ze,pt,null),e(te,Cr),e(te,ht),e(ht,Ar),w(d,tn,h),w(d,E,h),_(Pe,E,null),e(E,Sr),e(E,ne),e(ne,Ir),e(ne,mt),e(mt,Lr),e(ne,Nr),e(ne,ft),e(ft,Or),e(ne,Rr),e(E,Br),e(E,Fe),e(Fe,Gr),e(Fe,Ce),e(Ce,Wr),e(Fe,Ur),e(E,Hr),e(E,Ae),e(Ae,Zr),e(Ae,Se),e(Se,Yr),e(Ae,Jr),e(E,Kr),e(E,ut),e(ut,Qr),e(E,Xr),e(E,Ie),e(Ie,ea),e(Ie,Lo),e(Lo,oa),e(Ie,ta),e(E,na),e(E,Le),e(Le,ra),e(Le,Ne),e(Ne,aa),e(Le,sa),e(E,da),e(E,W),e(W,No),e(No,ia),e(W,ca),e(W,gt),e(gt,la),e(W,pa),e(W,_t),e(_t,ha),e(W,ma),e(E,fa),e(E,A),_(Oe,A,null),e(A,ua),e(A,re),e(re,ga),e(re,Oo),e(Oo,_a),e(re,va),e(re,vt),e(vt,ba),e(re,Ta),e(A,wa),_(ve,A,null),e(A,ya),e(A,bt),e(bt,Ea),e(A,ka),_(Re,A,null),e(E,xa),e(E,S),_(Be,S,null),e(S,ja),e(S,Tt),e(Tt,Da),e(S,Ma),e(S,ae),e(ae,Va),e(ae,wt),e(wt,$a),e(ae,qa),e(ae,yt),e(yt,za),e(ae,Pa),e(S,Fa),e(S,Et),e(Et,Ca),e(S,Aa),_(Ge,S,null),w(d,nn,h),w(d,se,h),e(se,be),e(be,kt),_(We,kt,null),e(se,Sa),e(se,xt),e(xt,Ia),w(d,rn,h),w(d,k,h),_(Ue,k,null),e(k,La),e(k,de),e(de,Na),e(de,jt),e(jt,Oa),e(de,Ra),e(de,Dt),e(Dt,Ba),e(de,Ga),e(k,Wa),e(k,He),e(He,Ua),e(He,Ze),e(Ze,Ha),e(He,Za),e(k,Ya),e(k,Ye),e(Ye,Ja),e(Ye,Je),e(Je,Ka),e(Ye,Qa),e(k,Xa),e(k,Mt),e(Mt,es),e(k,os),e(k,Ke),e(Ke,ts),e(Ke,Ro),e(Ro,ns),e(Ke,rs),e(k,as),e(k,Qe),e(Qe,ss),e(Qe,Xe),e(Xe,ds),e(Qe,is),e(k,cs),e(k,U),e(U,Bo),e(Bo,ls),e(U,ps),e(U,Vt),e(Vt,hs),e(U,ms),e(U,$t),e($t,fs),e(U,us),e(k,gs),e(k,I),_(eo,I,null),e(I,_s),e(I,ie),e(ie,vs),e(ie,Go),e(Go,bs),e(ie,Ts),e(ie,qt),e(qt,ws),e(ie,ys),e(I,Es),_(Te,I,null),e(I,ks),e(I,zt),e(zt,xs),e(I,js),_(oo,I,null),e(k,Ds),e(k,O),_(to,O,null),e(O,Ms),e(O,Pt),e(Pt,Vs),e(O,$s),e(O,Ft),e(Ft,qs),e(O,zs),_(no,O,null),w(d,an,h),w(d,ce,h),e(ce,we),e(we,Ct),_(ro,Ct,null),e(ce,Ps),e(ce,At),e(At,Fs),w(d,sn,h),w(d,x,h),_(ao,x,null),e(x,Cs),e(x,le),e(le,As),e(le,St),e(St,Ss),e(le,Is),e(le,It),e(It,Ls),e(le,Ns),e(x,Os),e(x,s
o),e(so,Rs),e(so,io),e(io,Bs),e(so,Gs),e(x,Ws),e(x,co),e(co,Us),e(co,lo),e(lo,Hs),e(co,Zs),e(x,Ys),e(x,Lt),e(Lt,Js),e(x,Ks),e(x,po),e(po,Qs),e(po,Wo),e(Wo,Xs),e(po,ed),e(x,od),e(x,ho),e(ho,td),e(ho,mo),e(mo,nd),e(ho,rd),e(x,ad),e(x,H),e(H,Uo),e(Uo,sd),e(H,dd),e(H,Nt),e(Nt,id),e(H,cd),e(H,Ot),e(Ot,ld),e(H,pd),e(x,hd),e(x,L),_(fo,L,null),e(L,md),e(L,pe),e(pe,fd),e(pe,Ho),e(Ho,ud),e(pe,gd),e(pe,Rt),e(Rt,_d),e(pe,vd),e(L,bd),_(ye,L,null),e(L,Td),e(L,Bt),e(Bt,wd),e(L,yd),_(uo,L,null),e(x,Ed),e(x,R),_(go,R,null),e(R,kd),e(R,Gt),e(Gt,xd),e(R,jd),e(R,Wt),e(Wt,Dd),e(R,Md),_(_o,R,null),dn=!0},p(d,[h]){const vo={};h&2&&(vo.$$scope={dirty:h,ctx:d}),ve.$set(vo);const Ut={};h&2&&(Ut.$$scope={dirty:h,ctx:d}),Te.$set(Ut);const Ht={};h&2&&(Ht.$$scope={dirty:h,ctx:d}),ye.$set(Ht)},i(d){dn||(v(y.$$.fragment,d),v(je.$$.fragment,d),v(De.$$.fragment,d),v(Me.$$.fragment,d),v(Ve.$$.fragment,d),v(qe.$$.fragment,d),v(ze.$$.fragment,d),v(Pe.$$.fragment,d),v(Oe.$$.fragment,d),v(ve.$$.fragment,d),v(Re.$$.fragment,d),v(Be.$$.fragment,d),v(Ge.$$.fragment,d),v(We.$$.fragment,d),v(Ue.$$.fragment,d),v(eo.$$.fragment,d),v(Te.$$.fragment,d),v(oo.$$.fragment,d),v(to.$$.fragment,d),v(no.$$.fragment,d),v(ro.$$.fragment,d),v(ao.$$.fragment,d),v(fo.$$.fragment,d),v(ye.$$.fragment,d),v(uo.$$.fragment,d),v(go.$$.fragment,d),v(_o.$$.fragment,d),dn=!0)},o(d){b(y.$$.fragment,d),b(je.$$.fragment,d),b(De.$$.fragment,d),b(Me.$$.fragment,d),b(Ve.$$.fragment,d),b(qe.$$.fragment,d),b(ze.$$.fragment,d),b(Pe.$$.fragment,d),b(Oe.$$.fragment,d),b(ve.$$.fragment,d),b(Re.$$.fragment,d),b(Be.$$.fragment,d),b(Ge.$$.fragment,d),b(We.$$.fragment,d),b(Ue.$$.fragment,d),b(eo.$$.fragment,d),b(Te.$$.fragment,d),b(oo.$$.fragment,d),b(to.$$.fragment,d),b(no.$$.fragment,d),b(ro.$$.fragment,d),b(ao.$$.fragment,d),b(fo.$$.fragment,d),b(ye.$$.fragment,d),b(uo.$$.fragment,d),b(go.$$.fragment,d),b(_o.$$.fragment,d),dn=!1},d(d){o(p),d&&o(P),d&&o(m),T(y),d&&o(Jt),d&&o(f),d&&o(Kt),d&&o(he),d&&o(Qt),d&&o(G),d&&o(Xt),d&&o(X),T(je),d&&o(en),d&&o(z),T(De),T(Me),T(Ve),T(qe),d&&o(on),d&&o(te),T(ze),d&&o(tn),d&&o(E),T(Pe),T(Oe),T(ve),T(Re),T(Be),T(Ge),d&&o(nn),d&&o(se),T(We),d&&o(rn),d&&o(k),T(Ue),T(eo),T(Te),T(oo),T(to),T(no),d&&o(an),d&&o(ce),T(ro),d&&o(sn),d&&o(x),T(ao),T(fo),T(ye),T(uo),T(go),T(_o)}}}const hc={local:"vision-encoder-decoder-models",sections:[{local:"transformers.VisionEncoderDecoderConfig",title:"VisionEncoderDecoderConfig"},{local:"transformers.VisionEncoderDecoderModel",title:"VisionEncoderDecoderModel"},{local:"transformers.TFVisionEncoderDecoderModel",title:"TFVisionEncoderDecoderModel"},{local:"transformers.FlaxVisionEncoderDecoderModel",title:"FlaxVisionEncoderDecoderModel"}],title:"Vision Encoder Decoder Models"};function mc(Q,p,P){let{fw:m}=p;return Q.$$set=M=>{"fw"in M&&P(0,m=M.fw)},[m]}class Tc extends rc{constructor(p){super();ac(this,p,mc,pc,sc,{fw:0})}}export{Tc as default,hc as metadata};
290
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages/model_doc/xlsr_wav2vec2.mdx-7f80d6d2.js
import{S as je,i as He,s as Je,e as r,k as f,w as ze,t as i,M as De,c as o,d as a,m as u,a as s,x as Be,h as l,b as h,F as t,g as c,y as Me,L as Ge,q as Ne,o as Ie,B as Fe}from"../../chunks/vendor-4833417e.js";import{I as Ue}from"../../chunks/IconCopyLink-4b81c553.js";function Ke(ae){let d,W,p,v,X,L,te,$,re,B,m,w,O,E,oe,P,se,M,g,ne,S,ie,le,N,k,ce,I,T,q,he,F,C,pe,U,_,z,fe,ue,A,ve,V,de,me,j,b,we,x,ge,_e,H,y,be,R,ye,Le,J;return L=new Ue({}),E=new Ue({}),{c(){d=r("meta"),W=f(),p=r("h1"),v=r("a"),X=r("span"),ze(L.$$.fragment),te=f(),$=r("span"),re=i("XLSR-Wav2Vec2"),B=f(),m=r("h2"),w=r("a"),O=r("span"),ze(E.$$.fragment),oe=f(),P=r("span"),se=i("Overview"),M=f(),g=r("p"),ne=i("The XLSR-Wav2Vec2 model was proposed in "),S=r("a"),ie=i("Unsupervised Cross-Lingual Representation Learning For Speech Recognition"),le=i(` by Alexis Conneau, Alexei Baevski, Ronan Collobert, Abdelrahman Mohamed, Michael Auli.`),N=f(),k=r("p"),ce=i("The abstract from the paper is the following:"),I=f(),T=r("p"),q=r("em"),he=i(`This paper presents XLSR which learns cross-lingual speech representations by pretraining a single model from the raw waveform of speech in multiple languages. We build on wav2vec 2.0 which is trained by solving a contrastive task over masked latent speech representations and jointly learns a quantization of the latents shared across languages. The resulting model is fine-tuned on labeled data and experiments show that cross-lingual pretraining significantly outperforms monolingual pretraining. On the CommonVoice benchmark, XLSR shows a relative phoneme error rate reduction of 72% compared to the best known results. On BABEL, our approach improves word error rate by 16% relative compared to a comparable system. Our approach enables a single multilingual speech recognition model which is competitive to strong individual models. Analysis shows that the latent discrete speech representations are shared across languages with increased sharing for related languages. 
We hope to catalyze research in low-resource speech understanding by releasing XLSR-53, a large model pretrained in 53 languages.`),F=f(),C=r("p"),pe=i("Tips:"),U=f(),_=r("ul"),z=r("li"),fe=i("XLSR-Wav2Vec2 is a speech model that accepts a float array corresponding to the raw waveform of the speech signal."),ue=f(),A=r("li"),ve=i(`XLSR-Wav2Vec2 model was trained using connectionist temporal classification (CTC) so the model output has to be decoded using `),V=r("a"),de=i("Wav2Vec2CTCTokenizer"),me=i("."),j=f(),b=r("p"),we=i("XLSR-Wav2Vec2\u2019s architecture is based on the Wav2Vec2 model, so one can refer to "),x=r("a"),ge=i("Wav2Vec2\u2019s documentation page"),_e=i("."),H=f(),y=r("p"),be=i("The original code can be found "),R=r("a"),ye=i("here"),Le=i("."),this.h()},l(e){const n=De('[data-svelte="svelte-1phssyn"]',document.head);d=o(n,"META",{name:!0,content:!0}),n.forEach(a),W=u(e),p=o(e,"H1",{class:!0});var D=s(p);v=o(D,"A",{id:!0,class:!0,href:!0});var Ee=s(v);X=o(Ee,"SPAN",{});var Se=s(X);Be(L.$$.fragment,Se),Se.forEach(a),Ee.forEach(a),te=u(D),$=o(D,"SPAN",{});var Ae=s($);re=l(Ae,"XLSR-Wav2Vec2"),Ae.forEach(a),D.forEach(a),B=u(e),m=o(e,"H2",{class:!0});var G=s(m);w=o(G,"A",{id:!0,class:!0,href:!0});var Re=s(w);O=o(Re,"SPAN",{});var We=s(O);Be(E.$$.fragment,We),We.forEach(a),Re.forEach(a),oe=u(G),P=o(G,"SPAN",{});var ke=s(P);se=l(ke,"Overview"),ke.forEach(a),G.forEach(a),M=u(e),g=o(e,"P",{});var K=s(g);ne=l(K,"The XLSR-Wav2Vec2 model was proposed in "),S=o(K,"A",{href:!0,rel:!0});var Te=s(S);ie=l(Te,"Unsupervised Cross-Lingual Representation Learning For Speech Recognition"),Te.forEach(a),le=l(K,` by Alexis Conneau, Alexei Baevski, Ronan Collobert, Abdelrahman Mohamed, Michael Auli.`),K.forEach(a),N=u(e),k=o(e,"P",{});var Ce=s(k);ce=l(Ce,"The abstract from the paper is the following:"),Ce.forEach(a),I=u(e),T=o(e,"P",{});var Ve=s(T);q=o(Ve,"EM",{});var xe=s(q);he=l(xe,`This paper presents XLSR which learns cross-lingual speech representations by pretraining a single model from the raw waveform of speech in multiple languages. We build on wav2vec 2.0 which is trained by solving a contrastive task over masked latent speech representations and jointly learns a quantization of the latents shared across languages. The resulting model is fine-tuned on labeled data and experiments show that cross-lingual pretraining significantly outperforms monolingual pretraining. On the CommonVoice benchmark, XLSR shows a relative phoneme error rate reduction of 72% compared to the best known results. On BABEL, our approach improves word error rate by 16% relative compared to a comparable system. Our approach enables a single multilingual speech recognition model which is competitive to strong individual models. Analysis shows that the latent discrete speech representations are shared across languages with increased sharing for related languages. 
We hope to catalyze research in low-resource speech understanding by releasing XLSR-53, a large model pretrained in 53 languages.`),xe.forEach(a),Ve.forEach(a),F=u(e),C=o(e,"P",{});var Xe=s(C);pe=l(Xe,"Tips:"),Xe.forEach(a),U=u(e),_=o(e,"UL",{});var Q=s(_);z=o(Q,"LI",{});var $e=s(z);fe=l($e,"XLSR-Wav2Vec2 is a speech model that accepts a float array corresponding to the raw waveform of the speech signal."),$e.forEach(a),ue=u(Q),A=o(Q,"LI",{});var Y=s(A);ve=l(Y,`XLSR-Wav2Vec2 model was trained using connectionist temporal classification (CTC) so the model output has to be decoded using `),V=o(Y,"A",{href:!0});var Oe=s(V);de=l(Oe,"Wav2Vec2CTCTokenizer"),Oe.forEach(a),me=l(Y,"."),Y.forEach(a),Q.forEach(a),j=u(e),b=o(e,"P",{});var Z=s(b);we=l(Z,"XLSR-Wav2Vec2\u2019s architecture is based on the Wav2Vec2 model, so one can refer to "),x=o(Z,"A",{href:!0});var Pe=s(x);ge=l(Pe,"Wav2Vec2\u2019s documentation page"),Pe.forEach(a),_e=l(Z,"."),Z.forEach(a),H=u(e),y=o(e,"P",{});var ee=s(y);be=l(ee,"The original code can be found "),R=o(ee,"A",{href:!0,rel:!0});var qe=s(R);ye=l(qe,"here"),qe.forEach(a),Le=l(ee,"."),ee.forEach(a),this.h()},h(){h(d,"name","hf:doc:metadata"),h(d,"content",JSON.stringify(Qe)),h(v,"id","xlsrwav2vec2"),h(v,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(v,"href","#xlsrwav2vec2"),h(p,"class","relative group"),h(w,"id","overview"),h(w,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(w,"href","#overview"),h(m,"class","relative group"),h(S,"href","https://arxiv.org/abs/2006.13979"),h(S,"rel","nofollow"),h(V,"href","/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2CTCTokenizer"),h(x,"href","wav2vec2"),h(R,"href","https://github.com/pytorch/fairseq/tree/master/fairseq/models/wav2vec"),h(R,"rel","nofollow")},m(e,n){t(document.head,d),c(e,W,n),c(e,p,n),t(p,v),t(v,X),Me(L,X,null),t(p,te),t(p,$),t($,re),c(e,B,n),c(e,m,n),t(m,w),t(w,O),Me(E,O,null),t(m,oe),t(m,P),t(P,se),c(e,M,n),c(e,g,n),t(g,ne),t(g,S),t(S,ie),t(g,le),c(e,N,n),c(e,k,n),t(k,ce),c(e,I,n),c(e,T,n),t(T,q),t(q,he),c(e,F,n),c(e,C,n),t(C,pe),c(e,U,n),c(e,_,n),t(_,z),t(z,fe),t(_,ue),t(_,A),t(A,ve),t(A,V),t(V,de),t(A,me),c(e,j,n),c(e,b,n),t(b,we),t(b,x),t(x,ge),t(b,_e),c(e,H,n),c(e,y,n),t(y,be),t(y,R),t(R,ye),t(y,Le),J=!0},p:Ge,i(e){J||(Ne(L.$$.fragment,e),Ne(E.$$.fragment,e),J=!0)},o(e){Ie(L.$$.fragment,e),Ie(E.$$.fragment,e),J=!1},d(e){a(d),e&&a(W),e&&a(p),Fe(L),e&&a(B),e&&a(m),Fe(E),e&&a(M),e&&a(g),e&&a(N),e&&a(k),e&&a(I),e&&a(T),e&&a(F),e&&a(C),e&&a(U),e&&a(_),e&&a(j),e&&a(b),e&&a(H),e&&a(y)}}}const Qe={local:"xlsrwav2vec2",sections:[{local:"overview",title:"Overview"}],title:"XLSR-Wav2Vec2"};function Ye(ae,d,W){let{fw:p}=d;return ae.$$set=v=>{"fw"in v&&W(0,p=v.fw)},[p]}class aa extends je{constructor(d){super();He(this,d,Ye,Ke,Je,{fw:0})}}export{aa as default,Qe as metadata};
291
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages/model_doc/roberta.mdx-002140a3.js
import{S as uE,i as fE,s as mE,e as n,k as l,w as T,t as a,M as gE,c as s,d as t,m as d,a as r,x as y,h as i,b as c,F as e,g as h,y as v,q as w,o as $,B as F}from"../../chunks/vendor-4833417e.js";import{T as fe}from"../../chunks/Tip-fffd6df1.js";import{D as j}from"../../chunks/Docstring-4f315ed9.js";import{C as we}from"../../chunks/CodeBlock-6a3d1b46.js";import{I as ye}from"../../chunks/IconCopyLink-4b81c553.js";import"../../chunks/CopyButton-dacfbfaf.js";function _E(q){let p,R,m,b,k;return{c(){p=n("p"),R=a("When used with "),m=n("code"),b=a("is_split_into_words=True"),k=a(", this tokenizer will add a space before each word (even the first one).")},l(_){p=s(_,"P",{});var f=r(p);R=i(f,"When used with "),m=s(f,"CODE",{});var M=r(m);b=i(M,"is_split_into_words=True"),M.forEach(t),k=i(f,", this tokenizer will add a space before each word (even the first one)."),f.forEach(t)},m(_,f){h(_,p,f),e(p,R),e(p,m),e(m,b),e(p,k)},d(_){_&&t(p)}}}function bE(q){let p,R,m,b,k,_,f,M;return{c(){p=n("p"),R=a("When used with "),m=n("code"),b=a("is_split_into_words=True"),k=a(", this tokenizer needs to be instantiated with "),_=n("code"),f=a("add_prefix_space=True"),M=a(".")},l(Y){p=s(Y,"P",{});var I=r(p);R=i(I,"When used with "),m=s(I,"CODE",{});var z=r(m);b=i(z,"is_split_into_words=True"),z.forEach(t),k=i(I,", this tokenizer needs to be instantiated with "),_=s(I,"CODE",{});var X=r(_);f=i(X,"add_prefix_space=True"),X.forEach(t),M=i(I,"."),I.forEach(t)},m(Y,I){h(Y,p,I),e(p,R),e(p,m),e(m,b),e(p,k),e(p,_),e(_,f),e(p,M)},d(Y){Y&&t(p)}}}function kE(q){let p,R,m,b,k;return{c(){p=n("p"),R=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=n("code"),b=a("Module"),k=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){p=s(_,"P",{});var f=r(p);R=i(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=s(f,"CODE",{});var M=r(m);b=i(M,"Module"),M.forEach(t),k=i(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(t)},m(_,f){h(_,p,f),e(p,R),e(p,m),e(m,b),e(p,k)},d(_){_&&t(p)}}}function TE(q){let p,R,m,b,k;return{c(){p=n("p"),R=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=n("code"),b=a("Module"),k=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){p=s(_,"P",{});var f=r(p);R=i(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=s(f,"CODE",{});var M=r(m);b=i(M,"Module"),M.forEach(t),k=i(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(t)},m(_,f){h(_,p,f),e(p,R),e(p,m),e(m,b),e(p,k)},d(_){_&&t(p)}}}function yE(q){let p,R,m,b,k;return{c(){p=n("p"),R=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=n("code"),b=a("Module"),k=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){p=s(_,"P",{});var f=r(p);R=i(f,"Although the recipe for forward pass needs to be defined within this function, one should call the 
"),m=s(f,"CODE",{});var M=r(m);b=i(M,"Module"),M.forEach(t),k=i(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(t)},m(_,f){h(_,p,f),e(p,R),e(p,m),e(m,b),e(p,k)},d(_){_&&t(p)}}}function vE(q){let p,R,m,b,k;return{c(){p=n("p"),R=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=n("code"),b=a("Module"),k=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){p=s(_,"P",{});var f=r(p);R=i(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=s(f,"CODE",{});var M=r(m);b=i(M,"Module"),M.forEach(t),k=i(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(t)},m(_,f){h(_,p,f),e(p,R),e(p,m),e(m,b),e(p,k)},d(_){_&&t(p)}}}function wE(q){let p,R,m,b,k;return{c(){p=n("p"),R=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=n("code"),b=a("Module"),k=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){p=s(_,"P",{});var f=r(p);R=i(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=s(f,"CODE",{});var M=r(m);b=i(M,"Module"),M.forEach(t),k=i(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(t)},m(_,f){h(_,p,f),e(p,R),e(p,m),e(m,b),e(p,k)},d(_){_&&t(p)}}}function $E(q){let p,R,m,b,k;return{c(){p=n("p"),R=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=n("code"),b=a("Module"),k=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){p=s(_,"P",{});var f=r(p);R=i(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=s(f,"CODE",{});var M=r(m);b=i(M,"Module"),M.forEach(t),k=i(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(t)},m(_,f){h(_,p,f),e(p,R),e(p,m),e(m,b),e(p,k)},d(_){_&&t(p)}}}function FE(q){let p,R,m,b,k;return{c(){p=n("p"),R=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=n("code"),b=a("Module"),k=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){p=s(_,"P",{});var f=r(p);R=i(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=s(f,"CODE",{});var M=r(m);b=i(M,"Module"),M.forEach(t),k=i(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(t)},m(_,f){h(_,p,f),e(p,R),e(p,m),e(m,b),e(p,k)},d(_){_&&t(p)}}}function RE(q){let p,R,m,b,k,_,f,M,Y,I,z,X,D,te,me,S,ge,pe,K,A,oe,Z,x,P,se,U,he,re,H,ue,ae,C,_e,W,ie,be,B,G,ne,O,le,Q,ke;return{c(){p=n("p"),R=a("TF 2.0 models accepts two formats as 
inputs:"),m=l(),b=n("ul"),k=n("li"),_=a("having all inputs as keyword arguments (like PyTorch models), or"),f=l(),M=n("li"),Y=a("having all inputs as a list, tuple or dict in the first positional arguments."),I=l(),z=n("p"),X=a("This second option is useful when using "),D=n("code"),te=a("tf.keras.Model.fit"),me=a(` method which currently requires having all the tensors in the first argument of the model call function: `),S=n("code"),ge=a("model(inputs)"),pe=a("."),K=l(),A=n("p"),oe=a(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Z=l(),x=n("ul"),P=n("li"),se=a("a single Tensor with "),U=n("code"),he=a("input_ids"),re=a(" only and nothing else: "),H=n("code"),ue=a("model(inputs_ids)"),ae=l(),C=n("li"),_e=a(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),W=n("code"),ie=a("model([input_ids, attention_mask])"),be=a(" or "),B=n("code"),G=a("model([input_ids, attention_mask, token_type_ids])"),ne=l(),O=n("li"),le=a(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),Q=n("code"),ke=a('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(u){p=s(u,"P",{});var E=r(p);R=i(E,"TF 2.0 models accepts two formats as inputs:"),E.forEach(t),m=d(u),b=s(u,"UL",{});var ee=r(b);k=s(ee,"LI",{});var Me=r(k);_=i(Me,"having all inputs as keyword arguments (like PyTorch models), or"),Me.forEach(t),f=d(ee),M=s(ee,"LI",{});var ce=r(M);Y=i(ce,"having all inputs as a list, tuple or dict in the first positional arguments."),ce.forEach(t),ee.forEach(t),I=d(u),z=s(u,"P",{});var N=r(z);X=i(N,"This second option is useful when using "),D=s(N,"CODE",{});var $e=r(D);te=i($e,"tf.keras.Model.fit"),$e.forEach(t),me=i(N,` method which currently requires having all the tensors in the first argument of the model call function: `),S=s(N,"CODE",{});var Ee=r(S);ge=i(Ee,"model(inputs)"),Ee.forEach(t),pe=i(N,"."),N.forEach(t),K=d(u),A=s(u,"P",{});var ze=r(A);oe=i(ze,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),ze.forEach(t),Z=d(u),x=s(u,"UL",{});var L=r(x);P=s(L,"LI",{});var V=r(P);se=i(V,"a single Tensor with "),U=s(V,"CODE",{});var xe=r(U);he=i(xe,"input_ids"),xe.forEach(t),re=i(V," only and nothing else: "),H=s(V,"CODE",{});var Fe=r(H);ue=i(Fe,"model(inputs_ids)"),Fe.forEach(t),V.forEach(t),ae=d(L),C=s(L,"LI",{});var J=r(C);_e=i(J,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),W=s(J,"CODE",{});var qe=r(W);ie=i(qe,"model([input_ids, attention_mask])"),qe.forEach(t),be=i(J," or "),B=s(J,"CODE",{});var Ce=r(B);G=i(Ce,"model([input_ids, attention_mask, token_type_ids])"),Ce.forEach(t),J.forEach(t),ne=d(L),O=s(L,"LI",{});var de=r(O);le=i(de,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),Q=s(de,"CODE",{});var ve=r(Q);ke=i(ve,'model({"input_ids": input_ids, "token_type_ids": 
token_type_ids})'),ve.forEach(t),de.forEach(t),L.forEach(t)},m(u,E){h(u,p,E),e(p,R),h(u,m,E),h(u,b,E),e(b,k),e(k,_),e(b,f),e(b,M),e(M,Y),h(u,I,E),h(u,z,E),e(z,X),e(z,D),e(D,te),e(z,me),e(z,S),e(S,ge),e(z,pe),h(u,K,E),h(u,A,E),e(A,oe),h(u,Z,E),h(u,x,E),e(x,P),e(P,se),e(P,U),e(U,he),e(P,re),e(P,H),e(H,ue),e(x,ae),e(x,C),e(C,_e),e(C,W),e(W,ie),e(C,be),e(C,B),e(B,G),e(x,ne),e(x,O),e(O,le),e(O,Q),e(Q,ke)},d(u){u&&t(p),u&&t(m),u&&t(b),u&&t(I),u&&t(z),u&&t(K),u&&t(A),u&&t(Z),u&&t(x)}}}function ME(q){let p,R,m,b,k;return{c(){p=n("p"),R=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=n("code"),b=a("Module"),k=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){p=s(_,"P",{});var f=r(p);R=i(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=s(f,"CODE",{});var M=r(m);b=i(M,"Module"),M.forEach(t),k=i(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(t)},m(_,f){h(_,p,f),e(p,R),e(p,m),e(m,b),e(p,k)},d(_){_&&t(p)}}}function EE(q){let p,R,m,b,k;return{c(){p=n("p"),R=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=n("code"),b=a("Module"),k=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){p=s(_,"P",{});var f=r(p);R=i(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=s(f,"CODE",{});var M=r(m);b=i(M,"Module"),M.forEach(t),k=i(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(t)},m(_,f){h(_,p,f),e(p,R),e(p,m),e(m,b),e(p,k)},d(_){_&&t(p)}}}function zE(q){let p,R,m,b,k,_,f,M,Y,I,z,X,D,te,me,S,ge,pe,K,A,oe,Z,x,P,se,U,he,re,H,ue,ae,C,_e,W,ie,be,B,G,ne,O,le,Q,ke;return{c(){p=n("p"),R=a("TF 2.0 models accepts two formats as inputs:"),m=l(),b=n("ul"),k=n("li"),_=a("having all inputs as keyword arguments (like PyTorch models), or"),f=l(),M=n("li"),Y=a("having all inputs as a list, tuple or dict in the first positional arguments."),I=l(),z=n("p"),X=a("This second option is useful when using "),D=n("code"),te=a("tf.keras.Model.fit"),me=a(` method which currently requires having all the tensors in the first argument of the model call function: `),S=n("code"),ge=a("model(inputs)"),pe=a("."),K=l(),A=n("p"),oe=a(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Z=l(),x=n("ul"),P=n("li"),se=a("a single Tensor with "),U=n("code"),he=a("input_ids"),re=a(" only and nothing else: "),H=n("code"),ue=a("model(inputs_ids)"),ae=l(),C=n("li"),_e=a(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),W=n("code"),ie=a("model([input_ids, attention_mask])"),be=a(" or "),B=n("code"),G=a("model([input_ids, attention_mask, token_type_ids])"),ne=l(),O=n("li"),le=a(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),Q=n("code"),ke=a('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(u){p=s(u,"P",{});var E=r(p);R=i(E,"TF 2.0 models accepts two formats as 
inputs:"),E.forEach(t),m=d(u),b=s(u,"UL",{});var ee=r(b);k=s(ee,"LI",{});var Me=r(k);_=i(Me,"having all inputs as keyword arguments (like PyTorch models), or"),Me.forEach(t),f=d(ee),M=s(ee,"LI",{});var ce=r(M);Y=i(ce,"having all inputs as a list, tuple or dict in the first positional arguments."),ce.forEach(t),ee.forEach(t),I=d(u),z=s(u,"P",{});var N=r(z);X=i(N,"This second option is useful when using "),D=s(N,"CODE",{});var $e=r(D);te=i($e,"tf.keras.Model.fit"),$e.forEach(t),me=i(N,` method which currently requires having all the tensors in the first argument of the model call function: `),S=s(N,"CODE",{});var Ee=r(S);ge=i(Ee,"model(inputs)"),Ee.forEach(t),pe=i(N,"."),N.forEach(t),K=d(u),A=s(u,"P",{});var ze=r(A);oe=i(ze,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),ze.forEach(t),Z=d(u),x=s(u,"UL",{});var L=r(x);P=s(L,"LI",{});var V=r(P);se=i(V,"a single Tensor with "),U=s(V,"CODE",{});var xe=r(U);he=i(xe,"input_ids"),xe.forEach(t),re=i(V," only and nothing else: "),H=s(V,"CODE",{});var Fe=r(H);ue=i(Fe,"model(inputs_ids)"),Fe.forEach(t),V.forEach(t),ae=d(L),C=s(L,"LI",{});var J=r(C);_e=i(J,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),W=s(J,"CODE",{});var qe=r(W);ie=i(qe,"model([input_ids, attention_mask])"),qe.forEach(t),be=i(J," or "),B=s(J,"CODE",{});var Ce=r(B);G=i(Ce,"model([input_ids, attention_mask, token_type_ids])"),Ce.forEach(t),J.forEach(t),ne=d(L),O=s(L,"LI",{});var de=r(O);le=i(de,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),Q=s(de,"CODE",{});var ve=r(Q);ke=i(ve,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),ve.forEach(t),de.forEach(t),L.forEach(t)},m(u,E){h(u,p,E),e(p,R),h(u,m,E),h(u,b,E),e(b,k),e(k,_),e(b,f),e(b,M),e(M,Y),h(u,I,E),h(u,z,E),e(z,X),e(z,D),e(D,te),e(z,me),e(z,S),e(S,ge),e(z,pe),h(u,K,E),h(u,A,E),e(A,oe),h(u,Z,E),h(u,x,E),e(x,P),e(P,se),e(P,U),e(U,he),e(P,re),e(P,H),e(H,ue),e(x,ae),e(x,C),e(C,_e),e(C,W),e(W,ie),e(C,be),e(C,B),e(B,G),e(x,ne),e(x,O),e(O,le),e(O,Q),e(Q,ke)},d(u){u&&t(p),u&&t(m),u&&t(b),u&&t(I),u&&t(z),u&&t(K),u&&t(A),u&&t(Z),u&&t(x)}}}function xE(q){let p,R,m,b,k;return{c(){p=n("p"),R=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=n("code"),b=a("Module"),k=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){p=s(_,"P",{});var f=r(p);R=i(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=s(f,"CODE",{});var M=r(m);b=i(M,"Module"),M.forEach(t),k=i(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(t)},m(_,f){h(_,p,f),e(p,R),e(p,m),e(m,b),e(p,k)},d(_){_&&t(p)}}}function qE(q){let p,R,m,b,k,_,f,M,Y,I,z,X,D,te,me,S,ge,pe,K,A,oe,Z,x,P,se,U,he,re,H,ue,ae,C,_e,W,ie,be,B,G,ne,O,le,Q,ke;return{c(){p=n("p"),R=a("TF 2.0 models accepts two formats as inputs:"),m=l(),b=n("ul"),k=n("li"),_=a("having all inputs as keyword arguments (like PyTorch models), or"),f=l(),M=n("li"),Y=a("having all inputs as a list, tuple or dict in the first positional arguments."),I=l(),z=n("p"),X=a("This second option is useful when using "),D=n("code"),te=a("tf.keras.Model.fit"),me=a(` method which currently requires 
having all the tensors in the first argument of the model call function: `),S=n("code"),ge=a("model(inputs)"),pe=a("."),K=l(),A=n("p"),oe=a(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Z=l(),x=n("ul"),P=n("li"),se=a("a single Tensor with "),U=n("code"),he=a("input_ids"),re=a(" only and nothing else: "),H=n("code"),ue=a("model(inputs_ids)"),ae=l(),C=n("li"),_e=a(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),W=n("code"),ie=a("model([input_ids, attention_mask])"),be=a(" or "),B=n("code"),G=a("model([input_ids, attention_mask, token_type_ids])"),ne=l(),O=n("li"),le=a(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),Q=n("code"),ke=a('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(u){p=s(u,"P",{});var E=r(p);R=i(E,"TF 2.0 models accepts two formats as inputs:"),E.forEach(t),m=d(u),b=s(u,"UL",{});var ee=r(b);k=s(ee,"LI",{});var Me=r(k);_=i(Me,"having all inputs as keyword arguments (like PyTorch models), or"),Me.forEach(t),f=d(ee),M=s(ee,"LI",{});var ce=r(M);Y=i(ce,"having all inputs as a list, tuple or dict in the first positional arguments."),ce.forEach(t),ee.forEach(t),I=d(u),z=s(u,"P",{});var N=r(z);X=i(N,"This second option is useful when using "),D=s(N,"CODE",{});var $e=r(D);te=i($e,"tf.keras.Model.fit"),$e.forEach(t),me=i(N,` method which currently requires having all the tensors in the first argument of the model call function: `),S=s(N,"CODE",{});var Ee=r(S);ge=i(Ee,"model(inputs)"),Ee.forEach(t),pe=i(N,"."),N.forEach(t),K=d(u),A=s(u,"P",{});var ze=r(A);oe=i(ze,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),ze.forEach(t),Z=d(u),x=s(u,"UL",{});var L=r(x);P=s(L,"LI",{});var V=r(P);se=i(V,"a single Tensor with "),U=s(V,"CODE",{});var xe=r(U);he=i(xe,"input_ids"),xe.forEach(t),re=i(V," only and nothing else: "),H=s(V,"CODE",{});var Fe=r(H);ue=i(Fe,"model(inputs_ids)"),Fe.forEach(t),V.forEach(t),ae=d(L),C=s(L,"LI",{});var J=r(C);_e=i(J,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),W=s(J,"CODE",{});var qe=r(W);ie=i(qe,"model([input_ids, attention_mask])"),qe.forEach(t),be=i(J," or "),B=s(J,"CODE",{});var Ce=r(B);G=i(Ce,"model([input_ids, attention_mask, token_type_ids])"),Ce.forEach(t),J.forEach(t),ne=d(L),O=s(L,"LI",{});var de=r(O);le=i(de,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),Q=s(de,"CODE",{});var ve=r(Q);ke=i(ve,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),ve.forEach(t),de.forEach(t),L.forEach(t)},m(u,E){h(u,p,E),e(p,R),h(u,m,E),h(u,b,E),e(b,k),e(k,_),e(b,f),e(b,M),e(M,Y),h(u,I,E),h(u,z,E),e(z,X),e(z,D),e(D,te),e(z,me),e(z,S),e(S,ge),e(z,pe),h(u,K,E),h(u,A,E),e(A,oe),h(u,Z,E),h(u,x,E),e(x,P),e(P,se),e(P,U),e(U,he),e(P,re),e(P,H),e(H,ue),e(x,ae),e(x,C),e(C,_e),e(C,W),e(W,ie),e(C,be),e(C,B),e(B,G),e(x,ne),e(x,O),e(O,le),e(O,Q),e(Q,ke)},d(u){u&&t(p),u&&t(m),u&&t(b),u&&t(I),u&&t(z),u&&t(K),u&&t(A),u&&t(Z),u&&t(x)}}}function CE(q){let p,R,m,b,k;return{c(){p=n("p"),R=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=n("code"),b=a("Module"),k=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter 
silently ignores them.`)},l(_){p=s(_,"P",{});var f=r(p);R=i(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=s(f,"CODE",{});var M=r(m);b=i(M,"Module"),M.forEach(t),k=i(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(t)},m(_,f){h(_,p,f),e(p,R),e(p,m),e(m,b),e(p,k)},d(_){_&&t(p)}}}function PE(q){let p,R,m,b,k,_,f,M,Y,I,z,X,D,te,me,S,ge,pe,K,A,oe,Z,x,P,se,U,he,re,H,ue,ae,C,_e,W,ie,be,B,G,ne,O,le,Q,ke;return{c(){p=n("p"),R=a("TF 2.0 models accepts two formats as inputs:"),m=l(),b=n("ul"),k=n("li"),_=a("having all inputs as keyword arguments (like PyTorch models), or"),f=l(),M=n("li"),Y=a("having all inputs as a list, tuple or dict in the first positional arguments."),I=l(),z=n("p"),X=a("This second option is useful when using "),D=n("code"),te=a("tf.keras.Model.fit"),me=a(` method which currently requires having all the tensors in the first argument of the model call function: `),S=n("code"),ge=a("model(inputs)"),pe=a("."),K=l(),A=n("p"),oe=a(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Z=l(),x=n("ul"),P=n("li"),se=a("a single Tensor with "),U=n("code"),he=a("input_ids"),re=a(" only and nothing else: "),H=n("code"),ue=a("model(inputs_ids)"),ae=l(),C=n("li"),_e=a(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),W=n("code"),ie=a("model([input_ids, attention_mask])"),be=a(" or "),B=n("code"),G=a("model([input_ids, attention_mask, token_type_ids])"),ne=l(),O=n("li"),le=a(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),Q=n("code"),ke=a('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(u){p=s(u,"P",{});var E=r(p);R=i(E,"TF 2.0 models accepts two formats as inputs:"),E.forEach(t),m=d(u),b=s(u,"UL",{});var ee=r(b);k=s(ee,"LI",{});var Me=r(k);_=i(Me,"having all inputs as keyword arguments (like PyTorch models), or"),Me.forEach(t),f=d(ee),M=s(ee,"LI",{});var ce=r(M);Y=i(ce,"having all inputs as a list, tuple or dict in the first positional arguments."),ce.forEach(t),ee.forEach(t),I=d(u),z=s(u,"P",{});var N=r(z);X=i(N,"This second option is useful when using "),D=s(N,"CODE",{});var $e=r(D);te=i($e,"tf.keras.Model.fit"),$e.forEach(t),me=i(N,` method which currently requires having all the tensors in the first argument of the model call function: `),S=s(N,"CODE",{});var Ee=r(S);ge=i(Ee,"model(inputs)"),Ee.forEach(t),pe=i(N,"."),N.forEach(t),K=d(u),A=s(u,"P",{});var ze=r(A);oe=i(ze,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),ze.forEach(t),Z=d(u),x=s(u,"UL",{});var L=r(x);P=s(L,"LI",{});var V=r(P);se=i(V,"a single Tensor with "),U=s(V,"CODE",{});var xe=r(U);he=i(xe,"input_ids"),xe.forEach(t),re=i(V," only and nothing else: "),H=s(V,"CODE",{});var Fe=r(H);ue=i(Fe,"model(inputs_ids)"),Fe.forEach(t),V.forEach(t),ae=d(L),C=s(L,"LI",{});var J=r(C);_e=i(J,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),W=s(J,"CODE",{});var qe=r(W);ie=i(qe,"model([input_ids, attention_mask])"),qe.forEach(t),be=i(J," or "),B=s(J,"CODE",{});var Ce=r(B);G=i(Ce,"model([input_ids, attention_mask, token_type_ids])"),Ce.forEach(t),J.forEach(t),ne=d(L),O=s(L,"LI",{});var de=r(O);le=i(de,`a 
dictionary with one or several input Tensors associated to the input names given in the docstring: `),Q=s(de,"CODE",{});var ve=r(Q);ke=i(ve,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),ve.forEach(t),de.forEach(t),L.forEach(t)},m(u,E){h(u,p,E),e(p,R),h(u,m,E),h(u,b,E),e(b,k),e(k,_),e(b,f),e(b,M),e(M,Y),h(u,I,E),h(u,z,E),e(z,X),e(z,D),e(D,te),e(z,me),e(z,S),e(S,ge),e(z,pe),h(u,K,E),h(u,A,E),e(A,oe),h(u,Z,E),h(u,x,E),e(x,P),e(P,se),e(P,U),e(U,he),e(P,re),e(P,H),e(H,ue),e(x,ae),e(x,C),e(C,_e),e(C,W),e(W,ie),e(C,be),e(C,B),e(B,G),e(x,ne),e(x,O),e(O,le),e(O,Q),e(Q,ke)},d(u){u&&t(p),u&&t(m),u&&t(b),u&&t(I),u&&t(z),u&&t(K),u&&t(A),u&&t(Z),u&&t(x)}}}function jE(q){let p,R,m,b,k;return{c(){p=n("p"),R=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=n("code"),b=a("Module"),k=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){p=s(_,"P",{});var f=r(p);R=i(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=s(f,"CODE",{});var M=r(m);b=i(M,"Module"),M.forEach(t),k=i(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(t)},m(_,f){h(_,p,f),e(p,R),e(p,m),e(m,b),e(p,k)},d(_){_&&t(p)}}}function LE(q){let p,R,m,b,k,_,f,M,Y,I,z,X,D,te,me,S,ge,pe,K,A,oe,Z,x,P,se,U,he,re,H,ue,ae,C,_e,W,ie,be,B,G,ne,O,le,Q,ke;return{c(){p=n("p"),R=a("TF 2.0 models accepts two formats as inputs:"),m=l(),b=n("ul"),k=n("li"),_=a("having all inputs as keyword arguments (like PyTorch models), or"),f=l(),M=n("li"),Y=a("having all inputs as a list, tuple or dict in the first positional arguments."),I=l(),z=n("p"),X=a("This second option is useful when using "),D=n("code"),te=a("tf.keras.Model.fit"),me=a(` method which currently requires having all the tensors in the first argument of the model call function: `),S=n("code"),ge=a("model(inputs)"),pe=a("."),K=l(),A=n("p"),oe=a(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Z=l(),x=n("ul"),P=n("li"),se=a("a single Tensor with "),U=n("code"),he=a("input_ids"),re=a(" only and nothing else: "),H=n("code"),ue=a("model(inputs_ids)"),ae=l(),C=n("li"),_e=a(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),W=n("code"),ie=a("model([input_ids, attention_mask])"),be=a(" or "),B=n("code"),G=a("model([input_ids, attention_mask, token_type_ids])"),ne=l(),O=n("li"),le=a(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),Q=n("code"),ke=a('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(u){p=s(u,"P",{});var E=r(p);R=i(E,"TF 2.0 models accepts two formats as inputs:"),E.forEach(t),m=d(u),b=s(u,"UL",{});var ee=r(b);k=s(ee,"LI",{});var Me=r(k);_=i(Me,"having all inputs as keyword arguments (like PyTorch models), or"),Me.forEach(t),f=d(ee),M=s(ee,"LI",{});var ce=r(M);Y=i(ce,"having all inputs as a list, tuple or dict in the first positional arguments."),ce.forEach(t),ee.forEach(t),I=d(u),z=s(u,"P",{});var N=r(z);X=i(N,"This second option is useful when using "),D=s(N,"CODE",{});var $e=r(D);te=i($e,"tf.keras.Model.fit"),$e.forEach(t),me=i(N,` method which currently requires having all the tensors in the first argument of the model call 
function: `),S=s(N,"CODE",{});var Ee=r(S);ge=i(Ee,"model(inputs)"),Ee.forEach(t),pe=i(N,"."),N.forEach(t),K=d(u),A=s(u,"P",{});var ze=r(A);oe=i(ze,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),ze.forEach(t),Z=d(u),x=s(u,"UL",{});var L=r(x);P=s(L,"LI",{});var V=r(P);se=i(V,"a single Tensor with "),U=s(V,"CODE",{});var xe=r(U);he=i(xe,"input_ids"),xe.forEach(t),re=i(V," only and nothing else: "),H=s(V,"CODE",{});var Fe=r(H);ue=i(Fe,"model(inputs_ids)"),Fe.forEach(t),V.forEach(t),ae=d(L),C=s(L,"LI",{});var J=r(C);_e=i(J,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),W=s(J,"CODE",{});var qe=r(W);ie=i(qe,"model([input_ids, attention_mask])"),qe.forEach(t),be=i(J," or "),B=s(J,"CODE",{});var Ce=r(B);G=i(Ce,"model([input_ids, attention_mask, token_type_ids])"),Ce.forEach(t),J.forEach(t),ne=d(L),O=s(L,"LI",{});var de=r(O);le=i(de,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),Q=s(de,"CODE",{});var ve=r(Q);ke=i(ve,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),ve.forEach(t),de.forEach(t),L.forEach(t)},m(u,E){h(u,p,E),e(p,R),h(u,m,E),h(u,b,E),e(b,k),e(k,_),e(b,f),e(b,M),e(M,Y),h(u,I,E),h(u,z,E),e(z,X),e(z,D),e(D,te),e(z,me),e(z,S),e(S,ge),e(z,pe),h(u,K,E),h(u,A,E),e(A,oe),h(u,Z,E),h(u,x,E),e(x,P),e(P,se),e(P,U),e(U,he),e(P,re),e(P,H),e(H,ue),e(x,ae),e(x,C),e(C,_e),e(C,W),e(W,ie),e(C,be),e(C,B),e(B,G),e(x,ne),e(x,O),e(O,le),e(O,Q),e(Q,ke)},d(u){u&&t(p),u&&t(m),u&&t(b),u&&t(I),u&&t(z),u&&t(K),u&&t(A),u&&t(Z),u&&t(x)}}}function AE(q){let p,R,m,b,k;return{c(){p=n("p"),R=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=n("code"),b=a("Module"),k=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){p=s(_,"P",{});var f=r(p);R=i(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=s(f,"CODE",{});var M=r(m);b=i(M,"Module"),M.forEach(t),k=i(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(t)},m(_,f){h(_,p,f),e(p,R),e(p,m),e(m,b),e(p,k)},d(_){_&&t(p)}}}function OE(q){let p,R,m,b,k,_,f,M,Y,I,z,X,D,te,me,S,ge,pe,K,A,oe,Z,x,P,se,U,he,re,H,ue,ae,C,_e,W,ie,be,B,G,ne,O,le,Q,ke;return{c(){p=n("p"),R=a("TF 2.0 models accepts two formats as inputs:"),m=l(),b=n("ul"),k=n("li"),_=a("having all inputs as keyword arguments (like PyTorch models), or"),f=l(),M=n("li"),Y=a("having all inputs as a list, tuple or dict in the first positional arguments."),I=l(),z=n("p"),X=a("This second option is useful when using "),D=n("code"),te=a("tf.keras.Model.fit"),me=a(` method which currently requires having all the tensors in the first argument of the model call function: `),S=n("code"),ge=a("model(inputs)"),pe=a("."),K=l(),A=n("p"),oe=a(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Z=l(),x=n("ul"),P=n("li"),se=a("a single Tensor with "),U=n("code"),he=a("input_ids"),re=a(" only and nothing else: "),H=n("code"),ue=a("model(inputs_ids)"),ae=l(),C=n("li"),_e=a(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: 
`),W=n("code"),ie=a("model([input_ids, attention_mask])"),be=a(" or "),B=n("code"),G=a("model([input_ids, attention_mask, token_type_ids])"),ne=l(),O=n("li"),le=a(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),Q=n("code"),ke=a('model({"input_ids": input_ids, "token_type_ids": token_type_ids})')},l(u){p=s(u,"P",{});var E=r(p);R=i(E,"TF 2.0 models accepts two formats as inputs:"),E.forEach(t),m=d(u),b=s(u,"UL",{});var ee=r(b);k=s(ee,"LI",{});var Me=r(k);_=i(Me,"having all inputs as keyword arguments (like PyTorch models), or"),Me.forEach(t),f=d(ee),M=s(ee,"LI",{});var ce=r(M);Y=i(ce,"having all inputs as a list, tuple or dict in the first positional arguments."),ce.forEach(t),ee.forEach(t),I=d(u),z=s(u,"P",{});var N=r(z);X=i(N,"This second option is useful when using "),D=s(N,"CODE",{});var $e=r(D);te=i($e,"tf.keras.Model.fit"),$e.forEach(t),me=i(N,` method which currently requires having all the tensors in the first argument of the model call function: `),S=s(N,"CODE",{});var Ee=r(S);ge=i(Ee,"model(inputs)"),Ee.forEach(t),pe=i(N,"."),N.forEach(t),K=d(u),A=s(u,"P",{});var ze=r(A);oe=i(ze,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),ze.forEach(t),Z=d(u),x=s(u,"UL",{});var L=r(x);P=s(L,"LI",{});var V=r(P);se=i(V,"a single Tensor with "),U=s(V,"CODE",{});var xe=r(U);he=i(xe,"input_ids"),xe.forEach(t),re=i(V," only and nothing else: "),H=s(V,"CODE",{});var Fe=r(H);ue=i(Fe,"model(inputs_ids)"),Fe.forEach(t),V.forEach(t),ae=d(L),C=s(L,"LI",{});var J=r(C);_e=i(J,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),W=s(J,"CODE",{});var qe=r(W);ie=i(qe,"model([input_ids, attention_mask])"),qe.forEach(t),be=i(J," or "),B=s(J,"CODE",{});var Ce=r(B);G=i(Ce,"model([input_ids, attention_mask, token_type_ids])"),Ce.forEach(t),J.forEach(t),ne=d(L),O=s(L,"LI",{});var de=r(O);le=i(de,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),Q=s(de,"CODE",{});var ve=r(Q);ke=i(ve,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),ve.forEach(t),de.forEach(t),L.forEach(t)},m(u,E){h(u,p,E),e(p,R),h(u,m,E),h(u,b,E),e(b,k),e(k,_),e(b,f),e(b,M),e(M,Y),h(u,I,E),h(u,z,E),e(z,X),e(z,D),e(D,te),e(z,me),e(z,S),e(S,ge),e(z,pe),h(u,K,E),h(u,A,E),e(A,oe),h(u,Z,E),h(u,x,E),e(x,P),e(P,se),e(P,U),e(U,he),e(P,re),e(P,H),e(H,ue),e(x,ae),e(x,C),e(C,_e),e(C,W),e(W,ie),e(C,be),e(C,B),e(B,G),e(x,ne),e(x,O),e(O,le),e(O,Q),e(Q,ke)},d(u){u&&t(p),u&&t(m),u&&t(b),u&&t(I),u&&t(z),u&&t(K),u&&t(A),u&&t(Z),u&&t(x)}}}function NE(q){let p,R,m,b,k;return{c(){p=n("p"),R=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=n("code"),b=a("Module"),k=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){p=s(_,"P",{});var f=r(p);R=i(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=s(f,"CODE",{});var M=r(m);b=i(M,"Module"),M.forEach(t),k=i(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(t)},m(_,f){h(_,p,f),e(p,R),e(p,m),e(m,b),e(p,k)},d(_){_&&t(p)}}}function IE(q){let p,R,m,b,k;return{c(){p=n("p"),R=a("Although the recipe for forward pass needs to be 
defined within this function, one should call the "),m=n("code"),b=a("Module"),k=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){p=s(_,"P",{});var f=r(p);R=i(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=s(f,"CODE",{});var M=r(m);b=i(M,"Module"),M.forEach(t),k=i(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(t)},m(_,f){h(_,p,f),e(p,R),e(p,m),e(m,b),e(p,k)},d(_){_&&t(p)}}}function DE(q){let p,R,m,b,k;return{c(){p=n("p"),R=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=n("code"),b=a("Module"),k=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){p=s(_,"P",{});var f=r(p);R=i(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=s(f,"CODE",{});var M=r(m);b=i(M,"Module"),M.forEach(t),k=i(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(t)},m(_,f){h(_,p,f),e(p,R),e(p,m),e(m,b),e(p,k)},d(_){_&&t(p)}}}function SE(q){let p,R,m,b,k;return{c(){p=n("p"),R=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=n("code"),b=a("Module"),k=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){p=s(_,"P",{});var f=r(p);R=i(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=s(f,"CODE",{});var M=r(m);b=i(M,"Module"),M.forEach(t),k=i(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(t)},m(_,f){h(_,p,f),e(p,R),e(p,m),e(m,b),e(p,k)},d(_){_&&t(p)}}}function WE(q){let p,R,m,b,k;return{c(){p=n("p"),R=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=n("code"),b=a("Module"),k=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){p=s(_,"P",{});var f=r(p);R=i(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=s(f,"CODE",{});var M=r(m);b=i(M,"Module"),M.forEach(t),k=i(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(t)},m(_,f){h(_,p,f),e(p,R),e(p,m),e(m,b),e(p,k)},d(_){_&&t(p)}}}function BE(q){let p,R,m,b,k;return{c(){p=n("p"),R=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=n("code"),b=a("Module"),k=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){p=s(_,"P",{});var f=r(p);R=i(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=s(f,"CODE",{});var M=r(m);b=i(M,"Module"),M.forEach(t),k=i(f,` instance afterwards instead of this 
since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(t)},m(_,f){h(_,p,f),e(p,R),e(p,m),e(m,b),e(p,k)},d(_){_&&t(p)}}}function UE(q){let p,R,m,b,k;return{c(){p=n("p"),R=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=n("code"),b=a("Module"),k=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(_){p=s(_,"P",{});var f=r(p);R=i(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=s(f,"CODE",{});var M=r(m);b=i(M,"Module"),M.forEach(t),k=i(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(t)},m(_,f){h(_,p,f),e(p,R),e(p,m),e(m,b),e(p,k)},d(_){_&&t(p)}}}function HE(q){let p,R,m,b,k,_,f,M,Y,I,z,X,D,te,me,S,ge,pe,K,A,oe,Z,x,P,se,U,he,re,H,ue,ae,C,_e,W,ie,be,B,G,ne,O,le,Q,ke,u,E,ee,Me,ce,N,$e,Ee,ze,L,V,xe,Fe,J,qe,Ce,de,ve,dm,cm,Qh,Qt,pm,$s,hm,um,Fs,fm,mm,Vh,To,bn,vd,Rs,gm,wd,_m,Jh,We,Ms,bm,yo,km,nl,Tm,ym,sl,vm,wm,$m,vo,Fm,rl,Rm,Mm,al,Em,zm,xm,wo,qm,il,Cm,Pm,ll,jm,Lm,Am,$d,Om,Nm,Es,Kh,$o,kn,Fd,zs,Im,Rd,Dm,Gh,Te,xs,Sm,Md,Wm,Bm,Ed,Um,Hm,qs,Qm,Cs,Vm,zd,Jm,Km,Gm,Tn,Xm,Ps,Ym,dl,Zm,eg,tg,Vt,js,og,xd,ng,sg,Ls,cl,rg,qd,ag,ig,pl,lg,Cd,dg,cg,yn,As,pg,Os,hg,Pd,ug,fg,mg,vn,Ns,gg,jd,_g,bg,hl,Is,Xh,Fo,wn,Ld,Ds,kg,Ad,Tg,Yh,Pe,Ss,yg,Ws,vg,Od,wg,$g,Fg,Nd,Rg,Mg,Bs,Eg,Us,zg,Id,xg,qg,Cg,$n,Pg,Hs,jg,ul,Lg,Ag,Og,fl,Qs,Zh,Ro,Fn,Dd,Vs,Ng,Sd,Ig,eu,je,Js,Dg,Wd,Sg,Wg,Ks,Bg,ml,Ug,Hg,Qg,Gs,Vg,Xs,Jg,Kg,Gg,Ys,Xg,Bd,Yg,Zg,e_,Be,t_,Ud,o_,n_,Hd,s_,r_,Qd,a_,i_,Vd,l_,d_,Jd,c_,p_,Kd,h_,u_,f_,Rn,m_,Gd,g_,__,Zs,b_,k_,st,er,T_,Mo,y_,gl,v_,w_,Xd,$_,F_,R_,Mn,M_,Yd,E_,z_,tr,tu,Eo,En,Zd,or,x_,ec,q_,ou,Ye,nr,C_,sr,P_,tc,j_,L_,A_,rr,O_,_l,N_,I_,D_,ar,S_,ir,W_,B_,U_,rt,lr,H_,zo,Q_,bl,V_,J_,oc,K_,G_,X_,zn,Y_,nc,Z_,eb,dr,nu,xo,xn,sc,cr,tb,rc,ob,su,Ze,pr,nb,hr,sb,ac,rb,ab,ib,ur,lb,kl,db,cb,pb,fr,hb,mr,ub,fb,mb,at,gr,gb,qo,_b,Tl,bb,kb,ic,Tb,yb,vb,qn,wb,lc,$b,Fb,_r,ru,Co,Cn,dc,br,Rb,cc,Mb,au,et,kr,Eb,pc,zb,xb,Tr,qb,yl,Cb,Pb,jb,yr,Lb,vr,Ab,Ob,Nb,Se,wr,Ib,Po,Db,vl,Sb,Wb,hc,Bb,Ub,Hb,Pn,Qb,uc,Vb,Jb,$r,Kb,fc,Gb,Xb,Fr,iu,jo,jn,mc,Rr,Yb,gc,Zb,lu,tt,Mr,ek,_c,tk,ok,Er,nk,wl,sk,rk,ak,zr,ik,xr,lk,dk,ck,it,qr,pk,Lo,hk,$l,uk,fk,bc,mk,gk,_k,Ln,bk,kc,kk,Tk,Cr,du,Ao,An,Tc,Pr,yk,yc,vk,cu,ot,jr,wk,vc,$k,Fk,Lr,Rk,Fl,Mk,Ek,zk,Ar,xk,Or,qk,Ck,Pk,lt,Nr,jk,Oo,Lk,Rl,Ak,Ok,wc,Nk,Ik,Dk,On,Sk,$c,Wk,Bk,Ir,pu,No,Nn,Fc,Dr,Uk,Rc,Hk,hu,nt,Sr,Qk,Io,Vk,Mc,Jk,Kk,Ec,Gk,Xk,Yk,Wr,Zk,Ml,e1,t1,o1,Br,n1,Ur,s1,r1,a1,dt,Hr,i1,Do,l1,El,d1,c1,zc,p1,h1,u1,In,f1,xc,m1,g1,Qr,uu,So,Dn,qc,Vr,_1,Cc,b1,fu,Ue,Jr,k1,Pc,T1,y1,Kr,v1,zl,w1,$1,F1,Gr,R1,Xr,M1,E1,z1,Sn,x1,ct,Yr,q1,Wo,C1,xl,P1,j1,jc,L1,A1,O1,Wn,N1,Lc,I1,D1,Zr,mu,Bo,Bn,Ac,ea,S1,Oc,W1,gu,Uo,ta,B1,pt,oa,U1,Ho,H1,ql,Q1,V1,Nc,J1,K1,G1,Un,X1,Ic,Y1,Z1,na,_u,Qo,Hn,Dc,sa,eT,Sc,tT,bu,He,ra,oT,aa,nT,Wc,sT,rT,aT,ia,iT,Cl,lT,dT,cT,la,pT,da,hT,uT,fT,Qn,mT,ht,ca,gT,Vo,_T,Pl,bT,kT,Bc,TT,yT,vT,Vn,wT,Uc,$T,FT,pa,ku,Jo,Jn,Hc,ha,RT,Qc,MT,Tu,Qe,ua,ET,Vc,zT,xT,fa,qT,jl,CT,PT,jT,ma,LT,ga,AT,OT,NT,Kn,IT,ut,_a,DT,Ko,ST,Ll,WT,BT,Jc,UT,HT,QT,Gn,VT,Kc,JT,KT,ba,yu,Go,Xn,Gc,ka,GT,Xc,XT,vu,Ve,Ta,YT,Yc,ZT,ey,ya,ty,Al,oy,ny,sy,va,ry,wa,ay,iy,ly,Yn,dy,ft,$a,cy,Xo,py,Ol,hy,uy,Zc,fy,my,gy,Zn,_y,ep,by,ky,Fa,wu,Yo,es,tp,Ra,Ty,op,yy,$u,Je,Ma,vy,np,wy,$y,Ea,Fy,Nl,Ry,My,Ey,za,zy,xa,xy,qy,Cy,ts,Py,mt,qa,jy,Zo,Ly,Il,Ay,Oy,sp,Ny,Iy,Dy,os,Sy,rp,Wy,By,Ca,Fu,en,ns,ap,Pa,Uy,ip,Hy,Ru,Ke,ja,Qy,tn,V
y,lp,Jy,Ky,dp,Gy,Xy,Yy,La,Zy,Dl,ev,tv,ov,Aa,nv,Oa,sv,rv,av,ss,iv,gt,Na,lv,on,dv,Sl,cv,pv,cp,hv,uv,fv,rs,mv,pp,gv,_v,Ia,Mu,nn,as,hp,Da,bv,up,kv,Eu,Le,Sa,Tv,fp,yv,vv,Wa,wv,Wl,$v,Fv,Rv,Ba,Mv,Ua,Ev,zv,xv,mp,qv,Cv,Nt,gp,Ha,Pv,jv,_p,Qa,Lv,Av,bp,Va,Ov,Nv,kp,Ja,Iv,Dv,_t,Ka,Sv,sn,Wv,Tp,Bv,Uv,yp,Hv,Qv,Vv,is,Jv,vp,Kv,Gv,Ga,zu,rn,ls,wp,Xa,Xv,$p,Yv,xu,Ae,Ya,Zv,Za,ew,Fp,tw,ow,nw,ei,sw,Bl,rw,aw,iw,ti,lw,oi,dw,cw,pw,Rp,hw,uw,It,Mp,ni,fw,mw,Ep,si,gw,_w,zp,ri,bw,kw,xp,ai,Tw,yw,bt,ii,vw,an,ww,qp,$w,Fw,Cp,Rw,Mw,Ew,ds,zw,Pp,xw,qw,li,qu,ln,cs,jp,di,Cw,Lp,Pw,Cu,Oe,ci,jw,Ap,Lw,Aw,pi,Ow,Ul,Nw,Iw,Dw,hi,Sw,ui,Ww,Bw,Uw,Op,Hw,Qw,Dt,Np,fi,Vw,Jw,Ip,mi,Kw,Gw,Dp,gi,Xw,Yw,Sp,_i,Zw,e$,kt,bi,t$,dn,o$,Wp,n$,s$,Bp,r$,a$,i$,ps,l$,Up,d$,c$,ki,Pu,cn,hs,Hp,Ti,p$,Qp,h$,ju,Ne,yi,u$,Vp,f$,m$,vi,g$,Hl,_$,b$,k$,wi,T$,$i,y$,v$,w$,Jp,$$,F$,St,Kp,Fi,R$,M$,Gp,Ri,E$,z$,Xp,Mi,x$,q$,Yp,Ei,C$,P$,Tt,zi,j$,pn,L$,Zp,A$,O$,eh,N$,I$,D$,us,S$,th,W$,B$,xi,Lu,hn,fs,oh,qi,U$,nh,H$,Au,Ie,Ci,Q$,sh,V$,J$,Pi,K$,Ql,G$,X$,Y$,ji,Z$,Li,eF,tF,oF,rh,nF,sF,Wt,ah,Ai,rF,aF,ih,Oi,iF,lF,lh,Ni,dF,cF,dh,Ii,pF,hF,yt,Di,uF,un,fF,ch,mF,gF,ph,_F,bF,kF,ms,TF,hh,yF,vF,Si,Ou,fn,gs,uh,Wi,wF,fh,$F,Nu,De,Bi,FF,mn,RF,mh,MF,EF,gh,zF,xF,qF,Ui,CF,Vl,PF,jF,LF,Hi,AF,Qi,OF,NF,IF,_h,DF,SF,Bt,bh,Vi,WF,BF,kh,Ji,UF,HF,Th,Ki,QF,VF,yh,Gi,JF,KF,vt,Xi,GF,gn,XF,vh,YF,ZF,wh,e2,t2,o2,_s,n2,$h,s2,r2,Yi,Iu;return _=new ye({}),te=new ye({}),Rs=new ye({}),Ms=new j({props:{name:"class transformers.RobertaConfig",anchor:"transformers.RobertaConfig",parameters:[{name:"pad_token_id",val:" = 1"},{name:"bos_token_id",val:" = 0"},{name:"eos_token_id",val:" = 2"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/configuration_roberta.py#L37"}}),Es=new we({props:{code:`from transformers import RobertaConfig, RobertaModel # Initializing a RoBERTa configuration configuration = RobertaConfig() # Initializing a model from the configuration model = RobertaModel(configuration) # Accessing the model configuration configuration = model.config`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaConfig, RobertaModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a RoBERTa configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = RobertaConfig() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a model from the configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = RobertaModel(configuration) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = model.config`}}),zs=new ye({}),xs=new j({props:{name:"class transformers.RobertaTokenizer",anchor:"transformers.RobertaTokenizer",parameters:[{name:"vocab_file",val:""},{name:"merges_file",val:""},{name:"errors",val:" = 'replace'"},{name:"bos_token",val:" = '<s>'"},{name:"eos_token",val:" = '</s>'"},{name:"sep_token",val:" = '</s>'"},{name:"cls_token",val:" = '<s>'"},{name:"unk_token",val:" = '<unk>'"},{name:"pad_token",val:" = '<pad>'"},{name:"mask_token",val:" = '<mask>'"},{name:"add_prefix_space",val:" = 
False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/tokenization_roberta.py#L103",parametersDescription:[{anchor:"transformers.RobertaTokenizer.vocab_file",description:`<strong>vocab_file</strong> (<code>str</code>) &#x2014; Path to the vocabulary file.`,name:"vocab_file"},{anchor:"transformers.RobertaTokenizer.merges_file",description:`<strong>merges_file</strong> (<code>str</code>) &#x2014; Path to the merges file.`,name:"merges_file"},{anchor:"transformers.RobertaTokenizer.errors",description:`<strong>errors</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;replace&quot;</code>) &#x2014; Paradigm to follow when decoding bytes to UTF-8. See <a href="https://docs.python.org/3/library/stdtypes.html#bytes.decode" rel="nofollow">bytes.decode</a> for more information.`,name:"errors"},{anchor:"transformers.RobertaTokenizer.bos_token",description:`<strong>bos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;s&gt;&quot;</code>) &#x2014; The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the <code>cls_token</code>.</p> </div>`,name:"bos_token"},{anchor:"transformers.RobertaTokenizer.eos_token",description:`<strong>eos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;/s&gt;&quot;</code>) &#x2014; The end of sequence token.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the <code>sep_token</code>.</p> </div>`,name:"eos_token"},{anchor:"transformers.RobertaTokenizer.sep_token",description:`<strong>sep_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;/s&gt;&quot;</code>) &#x2014; The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens.`,name:"sep_token"},{anchor:"transformers.RobertaTokenizer.cls_token",description:`<strong>cls_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;s&gt;&quot;</code>) &#x2014; The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens.`,name:"cls_token"},{anchor:"transformers.RobertaTokenizer.unk_token",description:`<strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;unk&gt;&quot;</code>) &#x2014; The unknown token. 
A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.`,name:"unk_token"},{anchor:"transformers.RobertaTokenizer.pad_token",description:`<strong>pad_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;pad&gt;&quot;</code>) &#x2014; The token used for padding, for example when batching sequences of different lengths.`,name:"pad_token"},{anchor:"transformers.RobertaTokenizer.mask_token",description:`<strong>mask_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;mask&gt;&quot;</code>) &#x2014; The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict.`,name:"mask_token"},{anchor:"transformers.RobertaTokenizer.add_prefix_space",description:`<strong>add_prefix_space</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to add an initial space to the input. This allows to treat the leading word just as any other word. (RoBERTa tokenizer detect beginning of words by the preceding space).`,name:"add_prefix_space"}]}}),qs=new we({props:{code:`from transformers import RobertaTokenizer tokenizer = RobertaTokenizer.from_pretrained("roberta-base") tokenizer("Hello world")['input_ids'] tokenizer(" Hello world")['input_ids']`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python"><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer</span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python">tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&quot;roberta-base&quot;</span>)</span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python">tokenizer(<span class="hljs-string">&quot;Hello world&quot;</span>)[<span class="hljs-string">&#x27;input_ids&#x27;</span>]</span> [0, 31414, 232, 328, 2] <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python">tokenizer(<span class="hljs-string">&quot; Hello world&quot;</span>)[<span class="hljs-string">&#x27;input_ids&#x27;</span>]</span> [0, 20920, 232, 2]`}}),Tn=new fe({props:{$$slots:{default:[_E]},$$scope:{ctx:q}}}),js=new j({props:{name:"build_inputs_with_special_tokens",anchor:"transformers.RobertaTokenizer.build_inputs_with_special_tokens",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/tokenization_roberta.py#L340",parametersDescription:[{anchor:"transformers.RobertaTokenizer.build_inputs_with_special_tokens.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs to which the special tokens will be added.`,name:"token_ids_0"},{anchor:"transformers.RobertaTokenizer.build_inputs_with_special_tokens.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of <a href="../glossary#input-ids">input IDs</a> with the appropriate special tokens.</p> `,returnType:` <p><code>List[int]</code></p> `}}),As=new j({props:{name:"get_special_tokens_mask",anchor:"transformers.RobertaTokenizer.get_special_tokens_mask",parameters:[{name:"token_ids_0",val:": 
typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"},{name:"already_has_special_tokens",val:": bool = False"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/tokenization_roberta.py#L365",parametersDescription:[{anchor:"transformers.RobertaTokenizer.get_special_tokens_mask.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.`,name:"token_ids_0"},{anchor:"transformers.RobertaTokenizer.get_special_tokens_mask.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"},{anchor:"transformers.RobertaTokenizer.get_special_tokens_mask.already_has_special_tokens",description:`<strong>already_has_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the token list is already formatted with special tokens for the model.`,name:"already_has_special_tokens"}],returnDescription:` <p>A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.</p> `,returnType:` <p><code>List[int]</code></p> `}}),Ns=new j({props:{name:"create_token_type_ids_from_sequences",anchor:"transformers.RobertaTokenizer.create_token_type_ids_from_sequences",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/tokenization_roberta.py#L392",parametersDescription:[{anchor:"transformers.RobertaTokenizer.create_token_type_ids_from_sequences.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.`,name:"token_ids_0"},{anchor:"transformers.RobertaTokenizer.create_token_type_ids_from_sequences.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of zeros.</p> `,returnType:` <p><code>List[int]</code></p> `}}),Is=new j({props:{name:"save_vocabulary",anchor:"transformers.RobertaTokenizer.save_vocabulary",parameters:[{name:"save_directory",val:": str"},{name:"filename_prefix",val:": typing.Optional[str] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/tokenization_roberta.py#L311"}}),Ds=new ye({}),Ss=new j({props:{name:"class transformers.RobertaTokenizerFast",anchor:"transformers.RobertaTokenizerFast",parameters:[{name:"vocab_file",val:" = None"},{name:"merges_file",val:" = None"},{name:"tokenizer_file",val:" = None"},{name:"errors",val:" = 'replace'"},{name:"bos_token",val:" = '<s>'"},{name:"eos_token",val:" = '</s>'"},{name:"sep_token",val:" = '</s>'"},{name:"cls_token",val:" = '<s>'"},{name:"unk_token",val:" = '<unk>'"},{name:"pad_token",val:" = '<pad>'"},{name:"mask_token",val:" = '<mask>'"},{name:"add_prefix_space",val:" = False"},{name:"trim_offsets",val:" = True"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/tokenization_roberta_fast.py#L68",parametersDescription:[{anchor:"transformers.RobertaTokenizerFast.vocab_file",description:`<strong>vocab_file</strong> (<code>str</code>) &#x2014; Path to the vocabulary 
file.`,name:"vocab_file"},{anchor:"transformers.RobertaTokenizerFast.merges_file",description:`<strong>merges_file</strong> (<code>str</code>) &#x2014; Path to the merges file.`,name:"merges_file"},{anchor:"transformers.RobertaTokenizerFast.errors",description:`<strong>errors</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;replace&quot;</code>) &#x2014; Paradigm to follow when decoding bytes to UTF-8. See <a href="https://docs.python.org/3/library/stdtypes.html#bytes.decode" rel="nofollow">bytes.decode</a> for more information.`,name:"errors"},{anchor:"transformers.RobertaTokenizerFast.bos_token",description:`<strong>bos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;s&gt;&quot;</code>) &#x2014; The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the <code>cls_token</code>.</p> </div>`,name:"bos_token"},{anchor:"transformers.RobertaTokenizerFast.eos_token",description:`<strong>eos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;/s&gt;&quot;</code>) &#x2014; The end of sequence token.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the <code>sep_token</code>.</p> </div>`,name:"eos_token"},{anchor:"transformers.RobertaTokenizerFast.sep_token",description:`<strong>sep_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;/s&gt;&quot;</code>) &#x2014; The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens.`,name:"sep_token"},{anchor:"transformers.RobertaTokenizerFast.cls_token",description:`<strong>cls_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;s&gt;&quot;</code>) &#x2014; The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens.`,name:"cls_token"},{anchor:"transformers.RobertaTokenizerFast.unk_token",description:`<strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;unk&gt;&quot;</code>) &#x2014; The unknown token. 
A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.`,name:"unk_token"},{anchor:"transformers.RobertaTokenizerFast.pad_token",description:`<strong>pad_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;pad&gt;&quot;</code>) &#x2014; The token used for padding, for example when batching sequences of different lengths.`,name:"pad_token"},{anchor:"transformers.RobertaTokenizerFast.mask_token",description:`<strong>mask_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;mask&gt;&quot;</code>) &#x2014; The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict.`,name:"mask_token"},{anchor:"transformers.RobertaTokenizerFast.add_prefix_space",description:`<strong>add_prefix_space</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to add an initial space to the input. This allows to treat the leading word just as any other word. (RoBERTa tokenizer detect beginning of words by the preceding space).`,name:"add_prefix_space"},{anchor:"transformers.RobertaTokenizerFast.trim_offsets",description:`<strong>trim_offsets</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether the post processing step should trim offsets to avoid including whitespaces.`,name:"trim_offsets"}]}}),Bs=new we({props:{code:`from transformers import RobertaTokenizerFast tokenizer = RobertaTokenizerFast.from_pretrained("roberta-base") tokenizer("Hello world")['input_ids'] tokenizer(" Hello world")['input_ids']`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python"><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizerFast</span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python">tokenizer = RobertaTokenizerFast.from_pretrained(<span class="hljs-string">&quot;roberta-base&quot;</span>)</span> <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python">tokenizer(<span class="hljs-string">&quot;Hello world&quot;</span>)[<span class="hljs-string">&#x27;input_ids&#x27;</span>]</span> [0, 31414, 232, 328, 2] <span class="hljs-meta">&gt;&gt;&gt;</span> <span class="language-python">tokenizer(<span class="hljs-string">&quot; Hello world&quot;</span>)[<span class="hljs-string">&#x27;input_ids&#x27;</span>]</span> [0, 20920, 232, 2]`}}),$n=new fe({props:{$$slots:{default:[bE]},$$scope:{ctx:q}}}),Qs=new j({props:{name:"build_inputs_with_special_tokens",anchor:"transformers.RobertaTokenizerFast.build_inputs_with_special_tokens",parameters:[{name:"token_ids_0",val:""},{name:"token_ids_1",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/tokenization_roberta_fast.py#L270"}}),Vs=new ye({}),Js=new j({props:{name:"class transformers.RobertaModel",anchor:"transformers.RobertaModel",parameters:[{name:"config",val:""},{name:"add_pooling_layer",val:" = True"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/modeling_roberta.py#L696",parametersDescription:[{anchor:"transformers.RobertaModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaConfig">RobertaConfig</a>) &#x2014; Model configuration class with all the parameters of the model. 
Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),er=new j({props:{name:"forward",anchor:"transformers.RobertaModel.forward",parameters:[{name:"input_ids",val:": typing.Optional[torch.Tensor] = None"},{name:"attention_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"token_type_ids",val:": typing.Optional[torch.Tensor] = None"},{name:"position_ids",val:": typing.Optional[torch.Tensor] = None"},{name:"head_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"inputs_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"encoder_hidden_states",val:": typing.Optional[torch.Tensor] = None"},{name:"encoder_attention_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"past_key_values",val:": typing.Optional[typing.List[torch.FloatTensor]] = None"},{name:"use_cache",val:": typing.Optional[bool] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/modeling_roberta.py#L741",parametersDescription:[{anchor:"transformers.RobertaModel.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaTokenizer">RobertaTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.RobertaModel.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.RobertaModel.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.RobertaModel.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.RobertaModel.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.RobertaModel.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.RobertaModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.RobertaModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.RobertaModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.RobertaModel.forward.encoder_hidden_states",description:`<strong>encoder_hidden_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder.`,name:"encoder_hidden_states"},{anchor:"transformers.RobertaModel.forward.encoder_attention_mask",description:`<strong>encoder_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul>`,name:"encoder_attention_mask"},{anchor:"transformers.RobertaModel.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code> of length <code>config.n_layers</code> with each tuple having 4 tensors of shape <code>(batch_size, num_heads, sequence_length - 1, embed_size_per_head)</code>) &#x2014; Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.`,name:"past_key_values"},{anchor:"transformers.RobertaModel.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions" >transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaConfig" >RobertaConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>pooler_output</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, hidden_size)</code>) \u2014 Last layer hidden-state of the first token of the sequence (classification token) after further processing through the layers used for the auxiliary pretraining task. E.g. for BERT-family of models, this returns the classification token after processing through a linear layer and a tanh activation function. 
The linear layer weights are trained from the next sentence prediction (classification) objective during pretraining.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> and <code>config.add_cross_attention=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and optionally if <code>config.is_encoder_decoder=True</code> 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if <code>config.is_encoder_decoder=True</code> in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions" >transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Mn=new fe({props:{$$slots:{default:[kE]},$$scope:{ctx:q}}}),tr=new we({props:{code:`from transformers import RobertaTokenizer, RobertaModel import torch tokenizer = RobertaTokenizer.from_pretrained("roberta-base") model = RobertaModel.from_pretrained("roberta-base") inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer, RobertaModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch 
<span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&quot;roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = RobertaModel.from_pretrained(<span class="hljs-string">&quot;roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),or=new ye({}),nr=new j({props:{name:"class transformers.RobertaForCausalLM",anchor:"transformers.RobertaForCausalLM",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/modeling_roberta.py#L882",parametersDescription:[{anchor:"transformers.RobertaForCausalLM.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaConfig">RobertaConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),lr=new j({props:{name:"forward",anchor:"transformers.RobertaForCausalLM.forward",parameters:[{name:"input_ids",val:": typing.Optional[torch.LongTensor] = None"},{name:"attention_mask",val:": typing.Optional[torch.FloatTensor] = None"},{name:"token_type_ids",val:": typing.Optional[torch.LongTensor] = None"},{name:"position_ids",val:": typing.Optional[torch.LongTensor] = None"},{name:"head_mask",val:": typing.Optional[torch.FloatTensor] = None"},{name:"inputs_embeds",val:": typing.Optional[torch.FloatTensor] = None"},{name:"encoder_hidden_states",val:": typing.Optional[torch.FloatTensor] = None"},{name:"encoder_attention_mask",val:": typing.Optional[torch.FloatTensor] = None"},{name:"labels",val:": typing.Optional[torch.LongTensor] = None"},{name:"past_key_values",val:": typing.Tuple[typing.Tuple[torch.FloatTensor]] = None"},{name:"use_cache",val:": typing.Optional[bool] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/modeling_roberta.py#L908",parametersDescription:[{anchor:"transformers.RobertaForCausalLM.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaTokenizer">RobertaTokenizer</a>. 
See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.RobertaForCausalLM.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.RobertaForCausalLM.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.RobertaForCausalLM.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.RobertaForCausalLM.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.RobertaForCausalLM.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.RobertaForCausalLM.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.RobertaForCausalLM.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.RobertaForCausalLM.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.RobertaForCausalLM.forward.encoder_hidden_states",description:`<strong>encoder_hidden_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder.`,name:"encoder_hidden_states"},{anchor:"transformers.RobertaForCausalLM.forward.encoder_attention_mask",description:`<strong>encoder_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul>`,name:"encoder_attention_mask"},{anchor:"transformers.RobertaForCausalLM.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>`,name:"labels"},{anchor:"transformers.RobertaForCausalLM.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code> of length <code>config.n_layers</code> with each tuple having 4 tensors of shape <code>(batch_size, num_heads, sequence_length - 1, embed_size_per_head)</code>) &#x2014; Contains precomputed key and value hidden states of the attention blocks. 
Can be used to speed up decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.`,name:"past_key_values"},{anchor:"transformers.RobertaForCausalLM.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.CausalLMOutputWithCrossAttentions" >transformers.modeling_outputs.CausalLMOutputWithCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaConfig" >RobertaConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Language modeling loss (for next-token prediction).</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Cross attentions weights after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> tuples of length <code>config.n_layers</code>, with each 
tuple containing the cached key, value states of the self-attention and the cross-attention layers if model is used in encoder-decoder setting. Only relevant if <code>config.is_decoder = True</code>.</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.CausalLMOutputWithCrossAttentions" >transformers.modeling_outputs.CausalLMOutputWithCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),zn=new fe({props:{$$slots:{default:[TE]},$$scope:{ctx:q}}}),dr=new we({props:{code:`from transformers import RobertaTokenizer, RobertaForCausalLM, RobertaConfig import torch tokenizer = RobertaTokenizer.from_pretrained("roberta-base") config = RobertaConfig.from_pretrained("roberta-base") config.is_decoder = True model = RobertaForCausalLM.from_pretrained("roberta-base", config=config) inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") outputs = model(**inputs) prediction_logits = outputs.logits`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer, RobertaForCausalLM, RobertaConfig <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&quot;roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>config = RobertaConfig.from_pretrained(<span class="hljs-string">&quot;roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>config.is_decoder = <span class="hljs-literal">True</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = RobertaForCausalLM.from_pretrained(<span class="hljs-string">&quot;roberta-base&quot;</span>, config=config) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>prediction_logits = outputs.logits`}}),cr=new ye({}),pr=new j({props:{name:"class transformers.RobertaForMaskedLM",anchor:"transformers.RobertaForMaskedLM",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/modeling_roberta.py#L1034",parametersDescription:[{anchor:"transformers.RobertaForMaskedLM.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaConfig">RobertaConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),gr=new j({props:{name:"forward",anchor:"transformers.RobertaForMaskedLM.forward",parameters:[{name:"input_ids",val:": typing.Optional[torch.LongTensor] = None"},{name:"attention_mask",val:": typing.Optional[torch.FloatTensor] = None"},{name:"token_type_ids",val:": typing.Optional[torch.LongTensor] = None"},{name:"position_ids",val:": typing.Optional[torch.LongTensor] = None"},{name:"head_mask",val:": typing.Optional[torch.FloatTensor] = None"},{name:"inputs_embeds",val:": typing.Optional[torch.FloatTensor] = None"},{name:"encoder_hidden_states",val:": typing.Optional[torch.FloatTensor] = None"},{name:"encoder_attention_mask",val:": typing.Optional[torch.FloatTensor] = None"},{name:"labels",val:": typing.Optional[torch.LongTensor] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/modeling_roberta.py#L1063",parametersDescription:[{anchor:"transformers.RobertaForMaskedLM.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaTokenizer">RobertaTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.RobertaForMaskedLM.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.RobertaForMaskedLM.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.RobertaForMaskedLM.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.RobertaForMaskedLM.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.RobertaForMaskedLM.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.RobertaForMaskedLM.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.RobertaForMaskedLM.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.RobertaForMaskedLM.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.RobertaForMaskedLM.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. 
Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>`,name:"labels"},{anchor:"transformers.RobertaForMaskedLM.forward.kwargs",description:`<strong>kwargs</strong> (<code>Dict[str, any]</code>, optional, defaults to <em>{}</em>) &#x2014; Used to hide legacy arguments that have been deprecated.`,name:"kwargs"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.MaskedLMOutput" >transformers.modeling_outputs.MaskedLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaConfig" >RobertaConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Masked language modeling (MLM) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.MaskedLMOutput" >transformers.modeling_outputs.MaskedLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),qn=new fe({props:{$$slots:{default:[yE]},$$scope:{ctx:q}}}),_r=new we({props:{code:`from transformers import RobertaTokenizer, RobertaForMaskedLM import torch tokenizer = RobertaTokenizer.from_pretrained("roberta-base") model = RobertaForMaskedLM.from_pretrained("roberta-base") inputs = tokenizer("The capital of France is <mask>.", return_tensors="pt") labels = tokenizer("The capital of France is Paris.", return_tensors="pt")["input_ids"] outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer, RobertaForMaskedLM <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span 
class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&quot;roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = RobertaForMaskedLM.from_pretrained(<span class="hljs-string">&quot;roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;The capital of France is &lt;mask&gt;.&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = tokenizer(<span class="hljs-string">&quot;The capital of France is Paris.&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>)[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),br=new ye({}),kr=new j({props:{name:"class transformers.RobertaForSequenceClassification",anchor:"transformers.RobertaForSequenceClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/modeling_roberta.py#L1163",parametersDescription:[{anchor:"transformers.RobertaForSequenceClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaConfig">RobertaConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),wr=new j({props:{name:"forward",anchor:"transformers.RobertaForSequenceClassification.forward",parameters:[{name:"input_ids",val:": typing.Optional[torch.LongTensor] = None"},{name:"attention_mask",val:": typing.Optional[torch.FloatTensor] = None"},{name:"token_type_ids",val:": typing.Optional[torch.LongTensor] = None"},{name:"position_ids",val:": typing.Optional[torch.LongTensor] = None"},{name:"head_mask",val:": typing.Optional[torch.FloatTensor] = None"},{name:"inputs_embeds",val:": typing.Optional[torch.FloatTensor] = None"},{name:"labels",val:": typing.Optional[torch.LongTensor] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/modeling_roberta.py#L1177",parametersDescription:[{anchor:"transformers.RobertaForSequenceClassification.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaTokenizer">RobertaTokenizer</a>. 
See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.RobertaForSequenceClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.RobertaForSequenceClassification.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.RobertaForSequenceClassification.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.RobertaForSequenceClassification.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.RobertaForSequenceClassification.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.RobertaForSequenceClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.RobertaForSequenceClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.RobertaForSequenceClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.RobertaForSequenceClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaConfig" >RobertaConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> 
`}}),Pn=new fe({props:{$$slots:{default:[vE]},$$scope:{ctx:q}}}),$r=new we({props:{code:`import torch from transformers import RobertaTokenizer, RobertaForSequenceClassification torch.manual_seed(0) tokenizer = RobertaTokenizer.from_pretrained("roberta-base") model = RobertaForSequenceClassification.from_pretrained("roberta-base", num_labels=2) inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([1]).unsqueeze(0) # Batch size 1 outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits list(logits.shape) `,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer, RobertaForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span>torch.manual_seed(<span class="hljs-number">0</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&quot;roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = RobertaForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;roberta-base&quot;</span>, num_labels=<span class="hljs-number">2</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([<span class="hljs-number">1</span>]).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">list</span>(logits.shape) `}}),Fr=new we({props:{code:`import torch from transformers import RobertaTokenizer, RobertaForSequenceClassification torch.manual_seed(0) tokenizer = RobertaTokenizer.from_pretrained("roberta-base") model = RobertaForSequenceClassification.from_pretrained("roberta-base", problem_type="multi_label_classification", num_labels=2) inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([[1, 1]], dtype=torch.float) # need dtype=float for BCEWithLogitsLoss outputs = model(**inputs, labels=labels) loss = outputs.loss list(logits.shape) `,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer, RobertaForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span>torch.manual_seed(<span class="hljs-number">0</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&quot;roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = RobertaForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;roberta-base&quot;</span>, problem_type=<span class="hljs-string">&quot;multi_label_classification&quot;</span>, num_labels=<span class="hljs-number">2</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is 
cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([[<span class="hljs-number">1</span>, <span class="hljs-number">1</span>]], dtype=torch.<span class="hljs-built_in">float</span>) <span class="hljs-comment"># need dtype=float for BCEWithLogitsLoss</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">list</span>(logits.shape) `}}),Rr=new ye({}),Mr=new j({props:{name:"class transformers.RobertaForMultipleChoice",anchor:"transformers.RobertaForMultipleChoice",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/modeling_roberta.py#L1261",parametersDescription:[{anchor:"transformers.RobertaForMultipleChoice.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaConfig">RobertaConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),qr=new j({props:{name:"forward",anchor:"transformers.RobertaForMultipleChoice.forward",parameters:[{name:"input_ids",val:": typing.Optional[torch.LongTensor] = None"},{name:"token_type_ids",val:": typing.Optional[torch.LongTensor] = None"},{name:"attention_mask",val:": typing.Optional[torch.FloatTensor] = None"},{name:"labels",val:": typing.Optional[torch.LongTensor] = None"},{name:"position_ids",val:": typing.Optional[torch.LongTensor] = None"},{name:"head_mask",val:": typing.Optional[torch.FloatTensor] = None"},{name:"inputs_embeds",val:": typing.Optional[torch.FloatTensor] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/modeling_roberta.py#L1274",parametersDescription:[{anchor:"transformers.RobertaForMultipleChoice.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaTokenizer">RobertaTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.RobertaForMultipleChoice.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.RobertaForMultipleChoice.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.RobertaForMultipleChoice.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.RobertaForMultipleChoice.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.RobertaForMultipleChoice.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.RobertaForMultipleChoice.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.RobertaForMultipleChoice.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.RobertaForMultipleChoice.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.RobertaForMultipleChoice.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the multiple choice classification loss. Indices should be in <code>[0, ..., num_choices-1]</code> where <code>num_choices</code> is the size of the second dimension of the input tensors. (See <code>input_ids</code> above)`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.MultipleChoiceModelOutput" >transformers.modeling_outputs.MultipleChoiceModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaConfig" >RobertaConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <em>(1,)</em>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices)</code>) \u2014 <em>num_choices</em> is the second dimension of the input tensors. (see <em>input_ids</em> above).</p> <p>Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.MultipleChoiceModelOutput" >transformers.modeling_outputs.MultipleChoiceModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Ln=new fe({props:{$$slots:{default:[wE]},$$scope:{ctx:q}}}),Cr=new we({props:{code:`from transformers import RobertaTokenizer, RobertaForMultipleChoice import torch tokenizer = RobertaTokenizer.from_pretrained("roberta-base") model = RobertaForMultipleChoice.from_pretrained("roberta-base") prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." 
choice0 = "It is eaten with a fork and a knife." choice1 = "It is eaten while held in the hand." labels = torch.tensor(0).unsqueeze(0) # choice0 is correct (according to Wikipedia ;)), batch size 1 encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors="pt", padding=True) outputs = model(**{k: v.unsqueeze(0) for k, v in encoding.items()}, labels=labels) # batch size is 1 # the linear classifier still needs to be trained loss = outputs.loss logits = outputs.logits`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer, RobertaForMultipleChoice <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&quot;roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = RobertaForMultipleChoice.from_pretrained(<span class="hljs-string">&quot;roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice0 = <span class="hljs-string">&quot;It is eaten with a fork and a knife.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice1 = <span class="hljs-string">&quot;It is eaten while held in the hand.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor(<span class="hljs-number">0</span>).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># choice0 is correct (according to Wikipedia ;)), batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors=<span class="hljs-string">&quot;pt&quot;</span>, padding=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**{k: v.unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-keyword">for</span> k, v <span class="hljs-keyword">in</span> encoding.items()}, labels=labels) <span class="hljs-comment"># batch size is 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># the linear classifier still needs to be trained</span> <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Pr=new ye({}),jr=new j({props:{name:"class transformers.RobertaForTokenClassification",anchor:"transformers.RobertaForTokenClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/modeling_roberta.py#L1354",parametersDescription:[{anchor:"transformers.RobertaForTokenClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaConfig">RobertaConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Nr=new j({props:{name:"forward",anchor:"transformers.RobertaForTokenClassification.forward",parameters:[{name:"input_ids",val:": typing.Optional[torch.LongTensor] = None"},{name:"attention_mask",val:": typing.Optional[torch.FloatTensor] = None"},{name:"token_type_ids",val:": typing.Optional[torch.LongTensor] = None"},{name:"position_ids",val:": typing.Optional[torch.LongTensor] = None"},{name:"head_mask",val:": typing.Optional[torch.FloatTensor] = None"},{name:"inputs_embeds",val:": typing.Optional[torch.FloatTensor] = None"},{name:"labels",val:": typing.Optional[torch.LongTensor] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/modeling_roberta.py#L1372",parametersDescription:[{anchor:"transformers.RobertaForTokenClassification.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaTokenizer">RobertaTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.RobertaForTokenClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.RobertaForTokenClassification.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.RobertaForTokenClassification.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.RobertaForTokenClassification.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.RobertaForTokenClassification.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.RobertaForTokenClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.RobertaForTokenClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.RobertaForTokenClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.RobertaForTokenClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the token classification loss. 
Indices should be in <code>[0, ..., config.num_labels - 1]</code>.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaConfig" >RobertaConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.num_labels)</code>) \u2014 Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),On=new fe({props:{$$slots:{default:[$E]},$$scope:{ctx:q}}}),Ir=new we({props:{code:`from transformers import RobertaTokenizer, RobertaForTokenClassification import torch tokenizer = RobertaTokenizer.from_pretrained("roberta-base") model = RobertaForTokenClassification.from_pretrained("roberta-base") inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([1] * inputs["input_ids"].size(1)).unsqueeze(0) # Batch size 1 outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer, RobertaForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&quot;roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = RobertaForTokenClassification.from_pretrained(<span class="hljs-string">&quot;roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span 
class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([<span class="hljs-number">1</span>] * inputs[<span class="hljs-string">&quot;input_ids&quot;</span>].size(<span class="hljs-number">1</span>)).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Dr=new ye({}),Sr=new j({props:{name:"class transformers.RobertaForQuestionAnswering",anchor:"transformers.RobertaForQuestionAnswering",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/modeling_roberta.py#L1461",parametersDescription:[{anchor:"transformers.RobertaForQuestionAnswering.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaConfig">RobertaConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Hr=new j({props:{name:"forward",anchor:"transformers.RobertaForQuestionAnswering.forward",parameters:[{name:"input_ids",val:": typing.Optional[torch.LongTensor] = None"},{name:"attention_mask",val:": typing.Optional[torch.FloatTensor] = None"},{name:"token_type_ids",val:": typing.Optional[torch.LongTensor] = None"},{name:"position_ids",val:": typing.Optional[torch.LongTensor] = None"},{name:"head_mask",val:": typing.Optional[torch.FloatTensor] = None"},{name:"inputs_embeds",val:": typing.Optional[torch.FloatTensor] = None"},{name:"start_positions",val:": typing.Optional[torch.LongTensor] = None"},{name:"end_positions",val:": typing.Optional[torch.LongTensor] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/modeling_roberta.py#L1475",parametersDescription:[{anchor:"transformers.RobertaForQuestionAnswering.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaTokenizer">RobertaTokenizer</a>. 
See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.RobertaForQuestionAnswering.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.RobertaForQuestionAnswering.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.RobertaForQuestionAnswering.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.RobertaForQuestionAnswering.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.RobertaForQuestionAnswering.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.RobertaForQuestionAnswering.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.RobertaForQuestionAnswering.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.RobertaForQuestionAnswering.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.RobertaForQuestionAnswering.forward.start_positions",description:`<strong>start_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). Position outside of the sequence are not taken into account for computing the loss.`,name:"start_positions"},{anchor:"transformers.RobertaForQuestionAnswering.forward.end_positions",description:`<strong>end_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). Position outside of the sequence are not taken into account for computing the loss.`,name:"end_positions"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.QuestionAnsweringModelOutput" >transformers.modeling_outputs.QuestionAnsweringModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaConfig" >RobertaConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.</p> </li> <li> <p><strong>start_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-start scores (before SoftMax).</p> </li> <li> <p><strong>end_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-end scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, 
<em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.QuestionAnsweringModelOutput" >transformers.modeling_outputs.QuestionAnsweringModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),In=new fe({props:{$$slots:{default:[FE]},$$scope:{ctx:q}}}),Qr=new we({props:{code:`from transformers import RobertaTokenizer, RobertaForQuestionAnswering import torch torch.manual_seed(0) tokenizer = RobertaTokenizer.from_pretrained("roberta-base") model = RobertaForQuestionAnswering.from_pretrained("roberta-base") question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" inputs = tokenizer(question, text, return_tensors="pt") start_positions = torch.tensor([1]) end_positions = torch.tensor([3]) outputs = model(**inputs, start_positions=start_positions, end_positions=end_positions) loss = outputs.loss round(loss.item(), 2) start_scores = outputs.start_logits list(start_scores.shape) end_scores = outputs.end_logits list(end_scores.shape) `,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer, RobertaForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>torch.manual_seed(<span class="hljs-number">0</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&quot;roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = RobertaForQuestionAnswering.from_pretrained(<span class="hljs-string">&quot;roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>question, text = <span class="hljs-string">&quot;Who was Jim Henson?&quot;</span>, <span class="hljs-string">&quot;Jim Henson was a nice puppet&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(question, text, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>start_positions = torch.tensor([<span class="hljs-number">1</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>end_positions = torch.tensor([<span class="hljs-number">3</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, start_positions=start_positions, end_positions=end_positions) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">round</span>(loss.item(), <span class="hljs-number">2</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>start_scores = outputs.start_logits <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">list</span>(start_scores.shape) <span class="hljs-meta">&gt;&gt;&gt; </span>end_scores = outputs.end_logits <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">list</span>(end_scores.shape) `}}),Vr=new ye({}),Jr=new j({props:{name:"class 
transformers.TFRobertaModel",anchor:"transformers.TFRobertaModel",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/modeling_tf_roberta.py#L910",parametersDescription:[{anchor:"transformers.TFRobertaModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaConfig">RobertaConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Sn=new fe({props:{$$slots:{default:[RE]},$$scope:{ctx:q}}}),Yr=new j({props:{name:"call",anchor:"transformers.TFRobertaModel.call",parameters:[{name:"input_ids",val:": typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None"},{name:"attention_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"token_type_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"position_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"head_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"inputs_embeds",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"encoder_hidden_states",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"encoder_attention_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"past_key_values",val:": typing.Union[typing.Tuple[typing.Tuple[typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor]]], NoneType] = None"},{name:"use_cache",val:": typing.Optional[bool] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"training",val:": typing.Optional[bool] = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/modeling_tf_roberta.py#L915",parametersDescription:[{anchor:"transformers.TFRobertaModel.call.input_ids",description:`<strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaTokenizer">RobertaTokenizer</a>. 
See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFRobertaModel.call.attention_mask",description:`<strong>attention_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFRobertaModel.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFRobertaModel.call.position_ids",description:`<strong>position_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFRobertaModel.call.head_mask",description:`<strong>head_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFRobertaModel.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFRobertaModel.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFRobertaModel.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFRobertaModel.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFRobertaModel.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFRobertaModel.call.encoder_hidden_states",description:`<strong>encoder_hidden_states</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder.`,name:"encoder_hidden_states"},{anchor:"transformers.TFRobertaModel.call.encoder_attention_mask",description:`<strong>encoder_attention_mask</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul>`,name:"encoder_attention_mask"},{anchor:"transformers.TFRobertaModel.call.past_key_values",description:`<strong>past_key_values</strong> (<code>Tuple[Tuple[tf.Tensor]]</code> of length <code>config.n_layers</code>) &#x2014; contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.`,name:"past_key_values"},{anchor:"transformers.TFRobertaModel.call.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>). 
Set to <code>False</code> during training, <code>True</code> during generation`,name:"use_cache"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFBaseModelOutputWithPoolingAndCrossAttentions" >transformers.modeling_tf_outputs.TFBaseModelOutputWithPoolingAndCrossAttentions</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaConfig" >RobertaConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>pooler_output</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, hidden_size)</code>) \u2014 Last layer hidden-state of the first token of the sequence (classification token) further processed by a Linear layer and a Tanh activation function. The Linear layer weights are trained from the next sentence prediction (classification) objective during pretraining.</p> <p>This output is usually <em>not</em> a good summary of the semantic content of the input, you\u2019re often better with averaging or pooling the sequence of hidden-states for the whole input sequence.</p> </li> <li> <p><strong>past_key_values</strong> (<code>List[tf.Tensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 List of <code>tf.Tensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> </ul> 
`,returnType:` <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFBaseModelOutputWithPoolingAndCrossAttentions" >transformers.modeling_tf_outputs.TFBaseModelOutputWithPoolingAndCrossAttentions</a> or <code>tuple(tf.Tensor)</code></p> `}}),Wn=new fe({props:{$$slots:{default:[ME]},$$scope:{ctx:q}}}),Zr=new we({props:{code:`from transformers import RobertaTokenizer, TFRobertaModel import tensorflow as tf tokenizer = RobertaTokenizer.from_pretrained("roberta-base") model = TFRobertaModel.from_pretrained("roberta-base") inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") outputs = model(inputs) last_hidden_states = outputs.last_hidden_state`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer, TFRobertaModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&quot;roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFRobertaModel.from_pretrained(<span class="hljs-string">&quot;roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),ea=new ye({}),ta=new j({props:{name:"class transformers.TFRobertaForCausalLM",anchor:"transformers.TFRobertaForCausalLM",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/modeling_tf_roberta.py#L1138"}}),oa=new j({props:{name:"call",anchor:"transformers.TFRobertaForCausalLM.call",parameters:[{name:"input_ids",val:": typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None"},{name:"attention_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"token_type_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"position_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"head_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"inputs_embeds",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"encoder_hidden_states",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"encoder_attention_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"past_key_values",val:": typing.Union[typing.Tuple[typing.Tuple[typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor]]], NoneType] = 
None"},{name:"use_cache",val:": typing.Optional[bool] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"labels",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"training",val:": typing.Optional[bool] = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/modeling_tf_roberta.py#L1171",parametersDescription:[{anchor:"transformers.TFRobertaForCausalLM.call.input_ids",description:`<strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaTokenizer">RobertaTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFRobertaForCausalLM.call.attention_mask",description:`<strong>attention_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFRobertaForCausalLM.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFRobertaForCausalLM.call.position_ids",description:`<strong>position_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFRobertaForCausalLM.call.head_mask",description:`<strong>head_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFRobertaForCausalLM.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFRobertaForCausalLM.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFRobertaForCausalLM.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFRobertaForCausalLM.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFRobertaForCausalLM.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFRobertaForCausalLM.call.encoder_hidden_states",description:`<strong>encoder_hidden_states</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder.`,name:"encoder_hidden_states"},{anchor:"transformers.TFRobertaForCausalLM.call.encoder_attention_mask",description:`<strong>encoder_attention_mask</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul>`,name:"encoder_attention_mask"},{anchor:"transformers.TFRobertaForCausalLM.call.past_key_values",description:`<strong>past_key_values</strong> (<code>Tuple[Tuple[tf.Tensor]]</code> of length <code>config.n_layers</code>) &#x2014; contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.`,name:"past_key_values"},{anchor:"transformers.TFRobertaForCausalLM.call.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>). Set to <code>False</code> during training, <code>True</code> during generation`,name:"use_cache"},{anchor:"transformers.TFRobertaForCausalLM.call.labels",description:`<strong>labels</strong> (<code>tf.Tensor</code> or <code>np.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the cross entropy classification loss. Indices should be in <code>[0, ..., config.vocab_size - 1]</code>.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFCausalLMOutputWithCrossAttentions" >transformers.modeling_tf_outputs.TFCausalLMOutputWithCrossAttentions</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaConfig" >RobertaConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(n,)</code>, <em>optional</em>, where n is the number of non-masked labels, returned when <code>labels</code> is provided) \u2014 Language modeling loss (for next-token prediction).</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, 
sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>past_key_values</strong> (<code>List[tf.Tensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 List of <code>tf.Tensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFCausalLMOutputWithCrossAttentions" >transformers.modeling_tf_outputs.TFCausalLMOutputWithCrossAttentions</a> or <code>tuple(tf.Tensor)</code></p> `}}),Un=new fe({props:{$$slots:{default:[EE]},$$scope:{ctx:q}}}),na=new we({props:{code:`from transformers import RobertaTokenizer, TFRobertaForCausalLM import tensorflow as tf tokenizer = RobertaTokenizer.from_pretrained("roberta-base") model = TFRobertaForCausalLM.from_pretrained("roberta-base") inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") outputs = model(inputs) logits = outputs.logits`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer, TFRobertaForCausalLM <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&quot;roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFRobertaForCausalLM.from_pretrained(<span class="hljs-string">&quot;roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),sa=new ye({}),ra=new j({props:{name:"class transformers.TFRobertaForMaskedLM",anchor:"transformers.TFRobertaForMaskedLM",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/modeling_tf_roberta.py#L1055",parametersDescription:[{anchor:"transformers.TFRobertaForMaskedLM.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaConfig">RobertaConfig</a>) &#x2014; Model configuration class with all the parameters of the model. 
Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Qn=new fe({props:{$$slots:{default:[zE]},$$scope:{ctx:q}}}),ca=new j({props:{name:"call",anchor:"transformers.TFRobertaForMaskedLM.call",parameters:[{name:"input_ids",val:": typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None"},{name:"attention_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"token_type_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"position_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"head_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"inputs_embeds",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"labels",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"training",val:": typing.Optional[bool] = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/modeling_tf_roberta.py#L1072",parametersDescription:[{anchor:"transformers.TFRobertaForMaskedLM.call.input_ids",description:`<strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaTokenizer">RobertaTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFRobertaForMaskedLM.call.attention_mask",description:`<strong>attention_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFRobertaForMaskedLM.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFRobertaForMaskedLM.call.position_ids",description:`<strong>position_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFRobertaForMaskedLM.call.head_mask",description:`<strong>head_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFRobertaForMaskedLM.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFRobertaForMaskedLM.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFRobertaForMaskedLM.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFRobertaForMaskedLM.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFRobertaForMaskedLM.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFRobertaForMaskedLM.call.labels",description:`<strong>labels</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFMaskedLMOutput" >transformers.modeling_tf_outputs.TFMaskedLMOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaConfig" >RobertaConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(n,)</code>, <em>optional</em>, where n is the number of non-masked labels, returned when <code>labels</code> is provided) \u2014 Masked language modeling (MLM) loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFMaskedLMOutput" 
>transformers.modeling_tf_outputs.TFMaskedLMOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),Vn=new fe({props:{$$slots:{default:[xE]},$$scope:{ctx:q}}}),pa=new we({props:{code:`from transformers import RobertaTokenizer, TFRobertaForMaskedLM import tensorflow as tf tokenizer = RobertaTokenizer.from_pretrained("roberta-base") model = TFRobertaForMaskedLM.from_pretrained("roberta-base") inputs = tokenizer("The capital of France is [MASK].", return_tensors="tf") inputs["labels"] = tokenizer("The capital of France is Paris.", return_tensors="tf")["input_ids"] outputs = model(inputs) loss = outputs.loss logits = outputs.logits`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer, TFRobertaForMaskedLM <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&quot;roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFRobertaForMaskedLM.from_pretrained(<span class="hljs-string">&quot;roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;The capital of France is [MASK].&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs[<span class="hljs-string">&quot;labels&quot;</span>] = tokenizer(<span class="hljs-string">&quot;The capital of France is Paris.&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>)[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),ha=new ye({}),ua=new j({props:{name:"class transformers.TFRobertaForSequenceClassification",anchor:"transformers.TFRobertaForSequenceClassification",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/modeling_tf_roberta.py#L1319",parametersDescription:[{anchor:"transformers.TFRobertaForSequenceClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaConfig">RobertaConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Kn=new fe({props:{$$slots:{default:[qE]},$$scope:{ctx:q}}}),_a=new j({props:{name:"call",anchor:"transformers.TFRobertaForSequenceClassification.call",parameters:[{name:"input_ids",val:": typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None"},{name:"attention_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"token_type_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"position_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"head_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"inputs_embeds",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"labels",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"training",val:": typing.Optional[bool] = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/modeling_tf_roberta.py#L1330",parametersDescription:[{anchor:"transformers.TFRobertaForSequenceClassification.call.input_ids",description:`<strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaTokenizer">RobertaTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFRobertaForSequenceClassification.call.attention_mask",description:`<strong>attention_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFRobertaForSequenceClassification.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFRobertaForSequenceClassification.call.position_ids",description:`<strong>position_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFRobertaForSequenceClassification.call.head_mask",description:`<strong>head_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFRobertaForSequenceClassification.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFRobertaForSequenceClassification.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFRobertaForSequenceClassification.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFRobertaForSequenceClassification.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFRobertaForSequenceClassification.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFRobertaForSequenceClassification.call.labels",description:`<strong>labels</strong> (<code>tf.Tensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFSequenceClassifierOutput" >transformers.modeling_tf_outputs.TFSequenceClassifierOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaConfig" >RobertaConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, )</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFSequenceClassifierOutput" 
>transformers.modeling_tf_outputs.TFSequenceClassifierOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),Gn=new fe({props:{$$slots:{default:[CE]},$$scope:{ctx:q}}}),ba=new we({props:{code:`from transformers import RobertaTokenizer, TFRobertaForSequenceClassification import tensorflow as tf tokenizer = RobertaTokenizer.from_pretrained("roberta-base") model = TFRobertaForSequenceClassification.from_pretrained("roberta-base") inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") inputs["labels"] = tf.reshape(tf.constant(1), (-1, 1)) # Batch size 1 outputs = model(inputs) loss = outputs.loss logits = outputs.logits`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer, TFRobertaForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&quot;roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFRobertaForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs[<span class="hljs-string">&quot;labels&quot;</span>] = tf.reshape(tf.constant(<span class="hljs-number">1</span>), (-<span class="hljs-number">1</span>, <span class="hljs-number">1</span>)) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),ka=new ye({}),Ta=new j({props:{name:"class transformers.TFRobertaForMultipleChoice",anchor:"transformers.TFRobertaForMultipleChoice",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/modeling_tf_roberta.py#L1402",parametersDescription:[{anchor:"transformers.TFRobertaForMultipleChoice.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaConfig">RobertaConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Yn=new fe({props:{$$slots:{default:[PE]},$$scope:{ctx:q}}}),$a=new j({props:{name:"call",anchor:"transformers.TFRobertaForMultipleChoice.call",parameters:[{name:"input_ids",val:": typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None"},{name:"attention_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"token_type_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"position_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"head_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"inputs_embeds",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"labels",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"training",val:": typing.Optional[bool] = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/modeling_tf_roberta.py#L1426",parametersDescription:[{anchor:"transformers.TFRobertaForMultipleChoice.call.input_ids",description:`<strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaTokenizer">RobertaTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFRobertaForMultipleChoice.call.attention_mask",description:`<strong>attention_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFRobertaForMultipleChoice.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFRobertaForMultipleChoice.call.position_ids",description:`<strong>position_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFRobertaForMultipleChoice.call.head_mask",description:`<strong>head_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFRobertaForMultipleChoice.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, num_choices, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFRobertaForMultipleChoice.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFRobertaForMultipleChoice.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFRobertaForMultipleChoice.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFRobertaForMultipleChoice.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFRobertaForMultipleChoice.call.labels",description:`<strong>labels</strong> (<code>tf.Tensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the multiple choice classification loss. Indices should be in <code>[0, ..., num_choices]</code> where <code>num_choices</code> is the size of the second dimension of the input tensors. (See <code>input_ids</code> above)`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput" >transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaConfig" >RobertaConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <em>(batch_size, )</em>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, num_choices)</code>) \u2014 <em>num_choices</em> is the second dimension of the input tensors. 
(see <em>input_ids</em> above).</p> <p>Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput" >transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),Zn=new fe({props:{$$slots:{default:[jE]},$$scope:{ctx:q}}}),Fa=new we({props:{code:`from transformers import RobertaTokenizer, TFRobertaForMultipleChoice import tensorflow as tf tokenizer = RobertaTokenizer.from_pretrained("roberta-base") model = TFRobertaForMultipleChoice.from_pretrained("roberta-base") prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." choice0 = "It is eaten with a fork and a knife." choice1 = "It is eaten while held in the hand." 
encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors="tf", padding=True) inputs = {k: tf.expand_dims(v, 0) for k, v in encoding.items()} outputs = model(inputs) # batch size is 1 # the linear classifier still needs to be trained logits = outputs.logits`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer, TFRobertaForMultipleChoice <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&quot;roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFRobertaForMultipleChoice.from_pretrained(<span class="hljs-string">&quot;roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice0 = <span class="hljs-string">&quot;It is eaten with a fork and a knife.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice1 = <span class="hljs-string">&quot;It is eaten while held in the hand.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors=<span class="hljs-string">&quot;tf&quot;</span>, padding=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = {k: tf.expand_dims(v, <span class="hljs-number">0</span>) <span class="hljs-keyword">for</span> k, v <span class="hljs-keyword">in</span> encoding.items()} <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-comment"># batch size is 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># the linear classifier still needs to be trained</span> <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Ra=new ye({}),Ma=new j({props:{name:"class transformers.TFRobertaForTokenClassification",anchor:"transformers.TFRobertaForTokenClassification",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/modeling_tf_roberta.py#L1524",parametersDescription:[{anchor:"transformers.TFRobertaForTokenClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaConfig">RobertaConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),ts=new fe({props:{$$slots:{default:[LE]},$$scope:{ctx:q}}}),qa=new j({props:{name:"call",anchor:"transformers.TFRobertaForTokenClassification.call",parameters:[{name:"input_ids",val:": typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None"},{name:"attention_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"token_type_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"position_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"head_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"inputs_embeds",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"labels",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"training",val:": typing.Optional[bool] = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/modeling_tf_roberta.py#L1542",parametersDescription:[{anchor:"transformers.TFRobertaForTokenClassification.call.input_ids",description:`<strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaTokenizer">RobertaTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFRobertaForTokenClassification.call.attention_mask",description:`<strong>attention_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFRobertaForTokenClassification.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFRobertaForTokenClassification.call.position_ids",description:`<strong>position_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFRobertaForTokenClassification.call.head_mask",description:`<strong>head_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFRobertaForTokenClassification.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFRobertaForTokenClassification.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFRobertaForTokenClassification.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFRobertaForTokenClassification.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFRobertaForTokenClassification.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFRobertaForTokenClassification.call.labels",description:`<strong>labels</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the token classification loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFTokenClassifierOutput" >transformers.modeling_tf_outputs.TFTokenClassifierOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaConfig" >RobertaConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(n,)</code>, <em>optional</em>, where n is the number of unmasked labels, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.num_labels)</code>) \u2014 Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFTokenClassifierOutput" >transformers.modeling_tf_outputs.TFTokenClassifierOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),os=new fe({props:{$$slots:{default:[AE]},$$scope:{ctx:q}}}),Ca=new we({props:{code:`from transformers import RobertaTokenizer, 
TFRobertaForTokenClassification import tensorflow as tf tokenizer = RobertaTokenizer.from_pretrained("roberta-base") model = TFRobertaForTokenClassification.from_pretrained("roberta-base") inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") input_ids = inputs["input_ids"] inputs["labels"] = tf.reshape( tf.constant([1] * tf.size(input_ids).numpy()), (-1, tf.size(input_ids)) ) # Batch size 1 outputs = model(inputs) loss = outputs.loss logits = outputs.logits`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer, TFRobertaForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&quot;roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFRobertaForTokenClassification.from_pretrained(<span class="hljs-string">&quot;roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = inputs[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>inputs[<span class="hljs-string">&quot;labels&quot;</span>] = tf.reshape( <span class="hljs-meta">... </span> tf.constant([<span class="hljs-number">1</span>] * tf.size(input_ids).numpy()), (-<span class="hljs-number">1</span>, tf.size(input_ids)) <span class="hljs-meta">&gt;&gt;&gt; </span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Pa=new ye({}),ja=new j({props:{name:"class transformers.TFRobertaForQuestionAnswering",anchor:"transformers.TFRobertaForQuestionAnswering",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/modeling_tf_roberta.py#L1614",parametersDescription:[{anchor:"transformers.TFRobertaForQuestionAnswering.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaConfig">RobertaConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),ss=new fe({props:{$$slots:{default:[OE]},$$scope:{ctx:q}}}),Na=new j({props:{name:"call",anchor:"transformers.TFRobertaForQuestionAnswering.call",parameters:[{name:"input_ids",val:": typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None"},{name:"attention_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"token_type_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"position_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"head_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"inputs_embeds",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"start_positions",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"end_positions",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"training",val:": typing.Optional[bool] = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/modeling_tf_roberta.py#L1627",parametersDescription:[{anchor:"transformers.TFRobertaForQuestionAnswering.call.input_ids",description:`<strong>input_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaTokenizer">RobertaTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFRobertaForQuestionAnswering.call.attention_mask",description:`<strong>attention_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFRobertaForQuestionAnswering.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFRobertaForQuestionAnswering.call.position_ids",description:`<strong>position_ids</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFRobertaForQuestionAnswering.call.head_mask",description:`<strong>head_mask</strong> (<code>Numpy array</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFRobertaForQuestionAnswering.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFRobertaForQuestionAnswering.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFRobertaForQuestionAnswering.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFRobertaForQuestionAnswering.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFRobertaForQuestionAnswering.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFRobertaForQuestionAnswering.call.start_positions",description:`<strong>start_positions</strong> (<code>tf.Tensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). Position outside of the sequence are not taken into account for computing the loss.`,name:"start_positions"},{anchor:"transformers.TFRobertaForQuestionAnswering.call.end_positions",description:`<strong>end_positions</strong> (<code>tf.Tensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). 
Position outside of the sequence are not taken into account for computing the loss.`,name:"end_positions"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput" >transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaConfig" >RobertaConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, )</code>, <em>optional</em>, returned when <code>start_positions</code> and <code>end_positions</code> are provided) \u2014 Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.</p> </li> <li> <p><strong>start_logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-start scores (before SoftMax).</p> </li> <li> <p><strong>end_logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-end scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput" >transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),rs=new fe({props:{$$slots:{default:[NE]},$$scope:{ctx:q}}}),Ia=new we({props:{code:`from transformers import RobertaTokenizer, TFRobertaForQuestionAnswering import tensorflow as tf tokenizer = RobertaTokenizer.from_pretrained("roberta-base") model = TFRobertaForQuestionAnswering.from_pretrained("roberta-base") question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" input_dict = tokenizer(question, text, return_tensors="tf") outputs = model(input_dict) start_logits = outputs.start_logits end_logits = outputs.end_logits all_tokens = tokenizer.convert_ids_to_tokens(input_dict["input_ids"].numpy()[0]) answer = " ".join(all_tokens[tf.math.argmax(start_logits, 1)[0] : tf.math.argmax(end_logits, 1)[0] + 1])`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer, TFRobertaForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span 
class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&quot;roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFRobertaForQuestionAnswering.from_pretrained(<span class="hljs-string">&quot;roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>question, text = <span class="hljs-string">&quot;Who was Jim Henson?&quot;</span>, <span class="hljs-string">&quot;Jim Henson was a nice puppet&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>input_dict = tokenizer(question, text, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_dict) <span class="hljs-meta">&gt;&gt;&gt; </span>start_logits = outputs.start_logits <span class="hljs-meta">&gt;&gt;&gt; </span>end_logits = outputs.end_logits <span class="hljs-meta">&gt;&gt;&gt; </span>all_tokens = tokenizer.convert_ids_to_tokens(input_dict[<span class="hljs-string">&quot;input_ids&quot;</span>].numpy()[<span class="hljs-number">0</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>answer = <span class="hljs-string">&quot; &quot;</span>.join(all_tokens[tf.math.argmax(start_logits, <span class="hljs-number">1</span>)[<span class="hljs-number">0</span>] : tf.math.argmax(end_logits, <span class="hljs-number">1</span>)[<span class="hljs-number">0</span>] + <span class="hljs-number">1</span>])`}}),Da=new ye({}),Sa=new j({props:{name:"class transformers.FlaxRobertaModel",anchor:"transformers.FlaxRobertaModel",parameters:[{name:"config",val:": RobertaConfig"},{name:"input_shape",val:": typing.Tuple = (1, 1)"},{name:"seed",val:": int = 0"},{name:"dtype",val:": dtype = <class 'jax._src.numpy.lax_numpy.float32'>"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/modeling_flax_roberta.py#L727",parametersDescription:[{anchor:"transformers.FlaxRobertaModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaConfig">RobertaConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Ka=new j({props:{name:"__call__",anchor:"transformers.FlaxRobertaPreTrainedModel.__call__",parameters:[{name:"input_ids",val:""},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"},{name:"train",val:": bool = False"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/modeling_flax_roberta.py#L609",parametersDescription:[{anchor:"transformers.FlaxRobertaPreTrainedModel.__call__.input_ids",description:`<strong>input_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FlaxRobertaPreTrainedModel.__call__.attention_mask",description:`<strong>attention_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FlaxRobertaPreTrainedModel.__call__.token_type_ids",description:`<strong>token_type_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.FlaxRobertaPreTrainedModel.__call__.position_ids",description:`<strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"position_ids"},{anchor:"transformers.FlaxRobertaPreTrainedModel.__call__.head_mask",description:`<strong>head_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <code>optional) -- Mask to nullify selected heads of the attention modules. 
Mask values selected in </code>[0, 1]\`:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.FlaxRobertaPreTrainedModel.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPooling" >transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPooling</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaConfig" >RobertaConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>pooler_output</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, hidden_size)</code>) \u2014 Last layer hidden-state of the first token of the sequence (classification token) further processed by a Linear layer and a Tanh activation function. The Linear layer weights are trained from the next sentence prediction (classification) objective during pretraining.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPooling" >transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPooling</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),is=new fe({props:{$$slots:{default:[IE]},$$scope:{ctx:q}}}),Ga=new we({props:{code:`from transformers import RobertaTokenizer, FlaxRobertaModel tokenizer = RobertaTokenizer.from_pretrained("roberta-base") model = FlaxRobertaModel.from_pretrained("roberta-base") inputs = tokenizer("Hello, my dog is cute", return_tensors="jax") outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer, FlaxRobertaModel <span 
class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&quot;roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxRobertaModel.from_pretrained(<span class="hljs-string">&quot;roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;jax&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),Xa=new ye({}),Ya=new j({props:{name:"class transformers.FlaxRobertaForMaskedLM",anchor:"transformers.FlaxRobertaForMaskedLM",parameters:[{name:"config",val:": RobertaConfig"},{name:"input_shape",val:": typing.Tuple = (1, 1)"},{name:"seed",val:": int = 0"},{name:"dtype",val:": dtype = <class 'jax._src.numpy.lax_numpy.float32'>"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/modeling_flax_roberta.py#L789",parametersDescription:[{anchor:"transformers.FlaxRobertaForMaskedLM.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaConfig">RobertaConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),ii=new j({props:{name:"__call__",anchor:"transformers.FlaxRobertaPreTrainedModel.__call__",parameters:[{name:"input_ids",val:""},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"},{name:"train",val:": bool = False"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/modeling_flax_roberta.py#L609",parametersDescription:[{anchor:"transformers.FlaxRobertaPreTrainedModel.__call__.input_ids",description:`<strong>input_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FlaxRobertaPreTrainedModel.__call__.attention_mask",description:`<strong>attention_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FlaxRobertaPreTrainedModel.__call__.token_type_ids",description:`<strong>token_type_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.FlaxRobertaPreTrainedModel.__call__.position_ids",description:`<strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"position_ids"},{anchor:"transformers.FlaxRobertaPreTrainedModel.__call__.head_mask",description:`<strong>head_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <code>optional) -- Mask to nullify selected heads of the attention modules. Mask values selected in </code>[0, 1]\`:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.FlaxRobertaPreTrainedModel.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPooling" >transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPooling</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaConfig" >RobertaConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>pooler_output</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, hidden_size)</code>) \u2014 Last layer hidden-state of the first token of the sequence (classification token) further processed by a Linear layer and a Tanh activation function. 
The Linear layer weights are trained from the next sentence prediction (classification) objective during pretraining.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPooling" >transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPooling</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),ds=new fe({props:{$$slots:{default:[DE]},$$scope:{ctx:q}}}),li=new we({props:{code:`from transformers import RobertaTokenizer, FlaxRobertaForMaskedLM tokenizer = RobertaTokenizer.from_pretrained("roberta-base") model = FlaxRobertaForMaskedLM.from_pretrained("roberta-base") inputs = tokenizer("The capital of France is [MASK].", return_tensors="jax") outputs = model(**inputs) logits = outputs.logits`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer, FlaxRobertaForMaskedLM <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&quot;roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxRobertaForMaskedLM.from_pretrained(<span class="hljs-string">&quot;roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;The capital of France is [MASK].&quot;</span>, return_tensors=<span class="hljs-string">&quot;jax&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),di=new ye({}),ci=new j({props:{name:"class transformers.FlaxRobertaForSequenceClassification",anchor:"transformers.FlaxRobertaForSequenceClassification",parameters:[{name:"config",val:": RobertaConfig"},{name:"input_shape",val:": typing.Tuple = (1, 1)"},{name:"seed",val:": int = 0"},{name:"dtype",val:": dtype = <class 'jax._src.numpy.lax_numpy.float32'>"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/modeling_flax_roberta.py#L856",parametersDescription:[{anchor:"transformers.FlaxRobertaForSequenceClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaConfig">RobertaConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),bi=new j({props:{name:"__call__",anchor:"transformers.FlaxRobertaPreTrainedModel.__call__",parameters:[{name:"input_ids",val:""},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"},{name:"train",val:": bool = False"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/modeling_flax_roberta.py#L609",parametersDescription:[{anchor:"transformers.FlaxRobertaPreTrainedModel.__call__.input_ids",description:`<strong>input_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FlaxRobertaPreTrainedModel.__call__.attention_mask",description:`<strong>attention_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FlaxRobertaPreTrainedModel.__call__.token_type_ids",description:`<strong>token_type_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.FlaxRobertaPreTrainedModel.__call__.position_ids",description:`<strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"position_ids"},{anchor:"transformers.FlaxRobertaPreTrainedModel.__call__.head_mask",description:`<strong>head_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <code>optional) -- Mask to nullify selected heads of the attention modules. 
Mask values selected in </code>[0, 1]\`:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.FlaxRobertaPreTrainedModel.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSequenceClassifierOutput" >transformers.modeling_flax_outputs.FlaxSequenceClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaConfig" >RobertaConfig</a>) and inputs.</p> <ul> <li> <p><strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSequenceClassifierOutput" >transformers.modeling_flax_outputs.FlaxSequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),ps=new fe({props:{$$slots:{default:[SE]},$$scope:{ctx:q}}}),ki=new we({props:{code:`from transformers import RobertaTokenizer, FlaxRobertaForSequenceClassification tokenizer = RobertaTokenizer.from_pretrained("roberta-base") model = FlaxRobertaForSequenceClassification.from_pretrained("roberta-base") inputs = tokenizer("Hello, my dog is cute", return_tensors="jax") outputs = model(**inputs) logits = outputs.logits`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer, FlaxRobertaForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&quot;roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxRobertaForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span 
class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;jax&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Ti=new ye({}),yi=new j({props:{name:"class transformers.FlaxRobertaForMultipleChoice",anchor:"transformers.FlaxRobertaForMultipleChoice",parameters:[{name:"config",val:": RobertaConfig"},{name:"input_shape",val:": typing.Tuple = (1, 1)"},{name:"seed",val:": int = 0"},{name:"dtype",val:": dtype = <class 'jax._src.numpy.lax_numpy.float32'>"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/modeling_flax_roberta.py#L933",parametersDescription:[{anchor:"transformers.FlaxRobertaForMultipleChoice.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaConfig">RobertaConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),zi=new j({props:{name:"__call__",anchor:"transformers.FlaxRobertaPreTrainedModel.__call__",parameters:[{name:"input_ids",val:""},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"},{name:"train",val:": bool = False"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/modeling_flax_roberta.py#L609",parametersDescription:[{anchor:"transformers.FlaxRobertaPreTrainedModel.__call__.input_ids",description:`<strong>input_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, num_choices, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FlaxRobertaPreTrainedModel.__call__.attention_mask",description:`<strong>attention_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FlaxRobertaPreTrainedModel.__call__.token_type_ids",description:`<strong>token_type_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.FlaxRobertaPreTrainedModel.__call__.position_ids",description:`<strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"position_ids"},{anchor:"transformers.FlaxRobertaPreTrainedModel.__call__.head_mask",description:`<strong>head_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <code>optional) -- Mask to nullify selected heads of the attention modules. Mask values selected in </code>[0, 1]\`:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.FlaxRobertaPreTrainedModel.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxMultipleChoiceModelOutput" >transformers.modeling_flax_outputs.FlaxMultipleChoiceModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaConfig" >RobertaConfig</a>) and inputs.</p> <ul> <li> <p><strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, num_choices)</code>) \u2014 <em>num_choices</em> is the second dimension of the input tensors. 
(see <em>input_ids</em> above).</p> <p>Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxMultipleChoiceModelOutput" >transformers.modeling_flax_outputs.FlaxMultipleChoiceModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),us=new fe({props:{$$slots:{default:[WE]},$$scope:{ctx:q}}}),xi=new we({props:{code:`from transformers import RobertaTokenizer, FlaxRobertaForMultipleChoice tokenizer = RobertaTokenizer.from_pretrained("roberta-base") model = FlaxRobertaForMultipleChoice.from_pretrained("roberta-base") prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." choice0 = "It is eaten with a fork and a knife." choice1 = "It is eaten while held in the hand." 
encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors="jax", padding=True) outputs = model(**{k: v[None, :] for k, v in encoding.items()}) logits = outputs.logits`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer, FlaxRobertaForMultipleChoice <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&quot;roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxRobertaForMultipleChoice.from_pretrained(<span class="hljs-string">&quot;roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice0 = <span class="hljs-string">&quot;It is eaten with a fork and a knife.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice1 = <span class="hljs-string">&quot;It is eaten while held in the hand.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors=<span class="hljs-string">&quot;jax&quot;</span>, padding=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**{k: v[<span class="hljs-literal">None</span>, :] <span class="hljs-keyword">for</span> k, v <span class="hljs-keyword">in</span> encoding.items()}) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),qi=new ye({}),Ci=new j({props:{name:"class transformers.FlaxRobertaForTokenClassification",anchor:"transformers.FlaxRobertaForTokenClassification",parameters:[{name:"config",val:": RobertaConfig"},{name:"input_shape",val:": typing.Tuple = (1, 1)"},{name:"seed",val:": int = 0"},{name:"dtype",val:": dtype = <class 'jax._src.numpy.lax_numpy.float32'>"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/modeling_flax_roberta.py#L1010",parametersDescription:[{anchor:"transformers.FlaxRobertaForTokenClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaConfig">RobertaConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Di=new j({props:{name:"__call__",anchor:"transformers.FlaxRobertaPreTrainedModel.__call__",parameters:[{name:"input_ids",val:""},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"},{name:"train",val:": bool = False"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/modeling_flax_roberta.py#L609",parametersDescription:[{anchor:"transformers.FlaxRobertaPreTrainedModel.__call__.input_ids",description:`<strong>input_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FlaxRobertaPreTrainedModel.__call__.attention_mask",description:`<strong>attention_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FlaxRobertaPreTrainedModel.__call__.token_type_ids",description:`<strong>token_type_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.FlaxRobertaPreTrainedModel.__call__.position_ids",description:`<strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"position_ids"},{anchor:"transformers.FlaxRobertaPreTrainedModel.__call__.head_mask",description:`<strong>head_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <code>optional) -- Mask to nullify selected heads of the attention modules. 
Mask values selected in </code>[0, 1]\`:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.FlaxRobertaPreTrainedModel.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxTokenClassifierOutput" >transformers.modeling_flax_outputs.FlaxTokenClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaConfig" >RobertaConfig</a>) and inputs.</p> <ul> <li> <p><strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, config.num_labels)</code>) \u2014 Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxTokenClassifierOutput" >transformers.modeling_flax_outputs.FlaxTokenClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),ms=new fe({props:{$$slots:{default:[BE]},$$scope:{ctx:q}}}),Si=new we({props:{code:`from transformers import RobertaTokenizer, FlaxRobertaForTokenClassification tokenizer = RobertaTokenizer.from_pretrained("roberta-base") model = FlaxRobertaForTokenClassification.from_pretrained("roberta-base") inputs = tokenizer("Hello, my dog is cute", return_tensors="jax") outputs = model(**inputs) logits = outputs.logits`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer, FlaxRobertaForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&quot;roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxRobertaForTokenClassification.from_pretrained(<span class="hljs-string">&quot;roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is 
cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;jax&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Wi=new ye({}),Bi=new j({props:{name:"class transformers.FlaxRobertaForQuestionAnswering",anchor:"transformers.FlaxRobertaForQuestionAnswering",parameters:[{name:"config",val:": RobertaConfig"},{name:"input_shape",val:": typing.Tuple = (1, 1)"},{name:"seed",val:": int = 0"},{name:"dtype",val:": dtype = <class 'jax._src.numpy.lax_numpy.float32'>"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/modeling_flax_roberta.py#L1082",parametersDescription:[{anchor:"transformers.FlaxRobertaForQuestionAnswering.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaConfig">RobertaConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Xi=new j({props:{name:"__call__",anchor:"transformers.FlaxRobertaPreTrainedModel.__call__",parameters:[{name:"input_ids",val:""},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"},{name:"train",val:": bool = False"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/roberta/modeling_flax_roberta.py#L609",parametersDescription:[{anchor:"transformers.FlaxRobertaPreTrainedModel.__call__.input_ids",description:`<strong>input_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FlaxRobertaPreTrainedModel.__call__.attention_mask",description:`<strong>attention_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FlaxRobertaPreTrainedModel.__call__.token_type_ids",description:`<strong>token_type_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.FlaxRobertaPreTrainedModel.__call__.position_ids",description:`<strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"position_ids"},{anchor:"transformers.FlaxRobertaPreTrainedModel.__call__.head_mask",description:`<strong>head_mask</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <code>optional) -- Mask to nullify selected heads of the attention modules. Mask values selected in </code>[0, 1]\`:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.FlaxRobertaPreTrainedModel.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxQuestionAnsweringModelOutput" >transformers.modeling_flax_outputs.FlaxQuestionAnsweringModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaConfig" >RobertaConfig</a>) and inputs.</p> <ul> <li> <p><strong>start_logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-start scores (before SoftMax).</p> </li> <li> <p><strong>end_logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-end scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when 
<code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxQuestionAnsweringModelOutput" >transformers.modeling_flax_outputs.FlaxQuestionAnsweringModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),_s=new fe({props:{$$slots:{default:[UE]},$$scope:{ctx:q}}}),Yi=new we({props:{code:`from transformers import RobertaTokenizer, FlaxRobertaForQuestionAnswering tokenizer = RobertaTokenizer.from_pretrained("roberta-base") model = FlaxRobertaForQuestionAnswering.from_pretrained("roberta-base") question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" inputs = tokenizer(question, text, return_tensors="jax") outputs = model(**inputs) start_scores = outputs.start_logits end_scores = outputs.end_logits`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer, FlaxRobertaForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&quot;roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxRobertaForQuestionAnswering.from_pretrained(<span class="hljs-string">&quot;roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>question, text = <span class="hljs-string">&quot;Who was Jim Henson?&quot;</span>, <span class="hljs-string">&quot;Jim Henson was a nice puppet&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(question, text, return_tensors=<span class="hljs-string">&quot;jax&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>start_scores = outputs.start_logits <span class="hljs-meta">&gt;&gt;&gt; </span>end_scores = outputs.end_logits`}}),{c(){p=n("meta"),R=l(),m=n("h1"),b=n("a"),k=n("span"),T(_.$$.fragment),f=l(),M=n("span"),Y=a("RoBERTa"),I=l(),z=n("h2"),X=n("a"),D=n("span"),T(te.$$.fragment),me=l(),S=n("span"),ge=a("Overview"),pe=l(),K=n("p"),A=a("The RoBERTa model was proposed in "),oe=n("a"),Z=a("RoBERTa: A Robustly Optimized BERT Pretraining Approach"),x=a(` by Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, Veselin Stoyanov. It is based on Google\u2019s BERT model released in 2018.`),P=l(),se=n("p"),U=a(`It builds on BERT and modifies key hyperparameters, removing the next-sentence pretraining objective and training with much larger mini-batches and learning rates.`),he=l(),re=n("p"),H=a("The abstract from the paper is the following:"),ue=l(),ae=n("p"),C=n("em"),_e=a(`Language model pretraining has led to significant performance gains but careful comparison between different approaches is challenging. Training is computationally expensive, often done on private datasets of different sizes, and, as we will show, hyperparameter choices have significant impact on the final results. We present a replication study of BERT pretraining (Devlin et al., 2019) that carefully measures the impact of many key hyperparameters and training data size. 
We find that BERT was significantly undertrained, and can match or exceed the performance of every model published after it. Our best model achieves state-of-the-art results on GLUE, RACE and SQuAD. These results highlight the importance of previously overlooked design choices, and raise questions about the source of recently reported improvements. We release our models and code.`),W=l(),ie=n("p"),be=a("Tips:"),B=l(),G=n("ul"),ne=n("li"),O=a("This implementation is the same as "),le=n("a"),Q=a("BertModel"),ke=a(` with a tiny embeddings tweak as well as a setup for Roberta pretrained models.`),u=l(),E=n("li"),ee=a(`RoBERTa has the same architecture as BERT, but uses a byte-level BPE as a tokenizer (same as GPT-2) and uses a different pretraining scheme.`),Me=l(),ce=n("li"),N=a("RoBERTa doesn\u2019t have "),$e=n("code"),Ee=a("token_type_ids"),ze=a(`, you don\u2019t need to indicate which token belongs to which segment. Just separate your segments with the separation token `),L=n("code"),V=a("tokenizer.sep_token"),xe=a(" (or "),Fe=n("code"),J=a("</s>"),qe=a(")"),Ce=l(),de=n("li"),ve=n("a"),dm=a("CamemBERT"),cm=a(" is a wrapper around RoBERTa. Refer to this page for usage examples."),Qh=l(),Qt=n("p"),pm=a("This model was contributed by "),$s=n("a"),hm=a("julien-c"),um=a(". The original code can be found "),Fs=n("a"),fm=a("here"),mm=a("."),Vh=l(),To=n("h2"),bn=n("a"),vd=n("span"),T(Rs.$$.fragment),gm=l(),wd=n("span"),_m=a("RobertaConfig"),Jh=l(),We=n("div"),T(Ms.$$.fragment),bm=l(),yo=n("p"),km=a("This is the configuration class to store the configuration of a "),nl=n("a"),Tm=a("RobertaModel"),ym=a(" or a "),sl=n("a"),vm=a("TFRobertaModel"),wm=a(`. It is used to instantiate a RoBERTa model according to the specified arguments, defining the model architecture.`),$m=l(),vo=n("p"),Fm=a("Configuration objects inherit from "),rl=n("a"),Rm=a("PretrainedConfig"),Mm=a(` and can be used to control the model outputs. Read the documentation from `),al=n("a"),Em=a("PretrainedConfig"),zm=a(" for more information."),xm=l(),wo=n("p"),qm=a("The "),il=n("a"),Cm=a("RobertaConfig"),Pm=a(" class directly inherits "),ll=n("a"),jm=a("BertConfig"),Lm=a(`. It reuses the same defaults. Please check the parent class for more information.`),Am=l(),$d=n("p"),Om=a("Examples:"),Nm=l(),T(Es.$$.fragment),Kh=l(),$o=n("h2"),kn=n("a"),Fd=n("span"),T(zs.$$.fragment),Im=l(),Rd=n("span"),Dm=a("RobertaTokenizer"),Gh=l(),Te=n("div"),T(xs.$$.fragment),Sm=l(),Md=n("p"),Wm=a("Constructs a RoBERTa tokenizer, derived from the GPT-2 tokenizer, using byte-level Byte-Pair-Encoding."),Bm=l(),Ed=n("p"),Um=a(`This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will be encoded differently whether it is at the beginning of the sentence (without space) or not:`),Hm=l(),T(qs.$$.fragment),Qm=l(),Cs=n("p"),Vm=a("You can get around that behavior by passing "),zd=n("code"),Jm=a("add_prefix_space=True"),Km=a(` when instantiating this tokenizer or when you call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance.`),Gm=l(),T(Tn.$$.fragment),Xm=l(),Ps=n("p"),Ym=a("This tokenizer inherits from "),dl=n("a"),Zm=a("PreTrainedTokenizer"),eg=a(` which contains most of the main methods. 
Users should refer to this superclass for more information regarding those methods.`),tg=l(),Vt=n("div"),T(js.$$.fragment),og=l(),xd=n("p"),ng=a(`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A RoBERTa sequence has the following format:`),sg=l(),Ls=n("ul"),cl=n("li"),rg=a("single sequence: "),qd=n("code"),ag=a("<s> X </s>"),ig=l(),pl=n("li"),lg=a("pair of sequences: "),Cd=n("code"),dg=a("<s> A </s></s> B </s>"),cg=l(),yn=n("div"),T(As.$$.fragment),pg=l(),Os=n("p"),hg=a(`Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `),Pd=n("code"),ug=a("prepare_for_model"),fg=a(" method."),mg=l(),vn=n("div"),T(Ns.$$.fragment),gg=l(),jd=n("p"),_g=a(`Create a mask from the two sequences passed to be used in a sequence-pair classification task. RoBERTa does not make use of token type ids, therefore a list of zeros is returned.`),bg=l(),hl=n("div"),T(Is.$$.fragment),Xh=l(),Fo=n("h2"),wn=n("a"),Ld=n("span"),T(Ds.$$.fragment),kg=l(),Ad=n("span"),Tg=a("RobertaTokenizerFast"),Yh=l(),Pe=n("div"),T(Ss.$$.fragment),yg=l(),Ws=n("p"),vg=a("Construct a \u201Cfast\u201D RoBERTa tokenizer (backed by HuggingFace\u2019s "),Od=n("em"),wg=a("tokenizers"),$g=a(` library), derived from the GPT-2 tokenizer, using byte-level Byte-Pair-Encoding.`),Fg=l(),Nd=n("p"),Rg=a(`This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will be encoded differently whether it is at the beginning of the sentence (without space) or not:`),Mg=l(),T(Bs.$$.fragment),Eg=l(),Us=n("p"),zg=a("You can get around that behavior by passing "),Id=n("code"),xg=a("add_prefix_space=True"),qg=a(` when instantiating this tokenizer or when you call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance.`),Cg=l(),T($n.$$.fragment),Pg=l(),Hs=n("p"),jg=a("This tokenizer inherits from "),ul=n("a"),Lg=a("PreTrainedTokenizerFast"),Ag=a(` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),Og=l(),fl=n("div"),T(Qs.$$.fragment),Zh=l(),Ro=n("h2"),Fn=n("a"),Dd=n("span"),T(Vs.$$.fragment),Ng=l(),Sd=n("span"),Ig=a("RobertaModel"),eu=l(),je=n("div"),T(Js.$$.fragment),Dg=l(),Wd=n("p"),Sg=a("The bare RoBERTa Model transformer outputting raw hidden-states without any specific head on top."),Wg=l(),Ks=n("p"),Bg=a("This model inherits from "),ml=n("a"),Ug=a("PreTrainedModel"),Hg=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Qg=l(),Gs=n("p"),Vg=a("This model is also a PyTorch "),Xs=n("a"),Jg=a("torch.nn.Module"),Kg=a(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Gg=l(),Ys=n("p"),Xg=a(`The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of cross-attention is added between the self-attention layers, following the architecture described in `),Bd=n("em"),Yg=a(`Attention is all you need`),Zg=a(`_ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. 
Gomez, Lukasz Kaiser and Illia Polosukhin.`),e_=l(),Be=n("p"),t_=a("To behave as an decoder the model needs to be initialized with the "),Ud=n("code"),o_=a("is_decoder"),n_=a(` argument of the configuration set to `),Hd=n("code"),s_=a("True"),r_=a(". To be used in a Seq2Seq model, the model needs to initialized with both "),Qd=n("code"),a_=a("is_decoder"),i_=a(` argument and `),Vd=n("code"),l_=a("add_cross_attention"),d_=a(" set to "),Jd=n("code"),c_=a("True"),p_=a("; an "),Kd=n("code"),h_=a("encoder_hidden_states"),u_=a(" is then expected as an input to the forward pass."),f_=l(),Rn=n("p"),m_=a(".. _"),Gd=n("em"),g_=a("Attention is all you need"),__=a(": "),Zs=n("a"),b_=a("https://arxiv.org/abs/1706.03762"),k_=l(),st=n("div"),T(er.$$.fragment),T_=l(),Mo=n("p"),y_=a("The "),gl=n("a"),v_=a("RobertaModel"),w_=a(" forward method, overrides the "),Xd=n("code"),$_=a("__call__"),F_=a(" special method."),R_=l(),T(Mn.$$.fragment),M_=l(),Yd=n("p"),E_=a("Example:"),z_=l(),T(tr.$$.fragment),tu=l(),Eo=n("h2"),En=n("a"),Zd=n("span"),T(or.$$.fragment),x_=l(),ec=n("span"),q_=a("RobertaForCausalLM"),ou=l(),Ye=n("div"),T(nr.$$.fragment),C_=l(),sr=n("p"),P_=a("RoBERTa Model with a "),tc=n("code"),j_=a("language modeling"),L_=a(" head on top for CLM fine-tuning."),A_=l(),rr=n("p"),O_=a("This model inherits from "),_l=n("a"),N_=a("PreTrainedModel"),I_=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),D_=l(),ar=n("p"),S_=a("This model is also a PyTorch "),ir=n("a"),W_=a("torch.nn.Module"),B_=a(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),U_=l(),rt=n("div"),T(lr.$$.fragment),H_=l(),zo=n("p"),Q_=a("The "),bl=n("a"),V_=a("RobertaForCausalLM"),J_=a(" forward method, overrides the "),oc=n("code"),K_=a("__call__"),G_=a(" special method."),X_=l(),T(zn.$$.fragment),Y_=l(),nc=n("p"),Z_=a("Example:"),eb=l(),T(dr.$$.fragment),nu=l(),xo=n("h2"),xn=n("a"),sc=n("span"),T(cr.$$.fragment),tb=l(),rc=n("span"),ob=a("RobertaForMaskedLM"),su=l(),Ze=n("div"),T(pr.$$.fragment),nb=l(),hr=n("p"),sb=a("RoBERTa Model with a "),ac=n("code"),rb=a("language modeling"),ab=a(" head on top."),ib=l(),ur=n("p"),lb=a("This model inherits from "),kl=n("a"),db=a("PreTrainedModel"),cb=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),pb=l(),fr=n("p"),hb=a("This model is also a PyTorch "),mr=n("a"),ub=a("torch.nn.Module"),fb=a(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),mb=l(),at=n("div"),T(gr.$$.fragment),gb=l(),qo=n("p"),_b=a("The "),Tl=n("a"),bb=a("RobertaForMaskedLM"),kb=a(" forward method, overrides the "),ic=n("code"),Tb=a("__call__"),yb=a(" special method."),vb=l(),T(qn.$$.fragment),wb=l(),lc=n("p"),$b=a("Example:"),Fb=l(),T(_r.$$.fragment),ru=l(),Co=n("h2"),Cn=n("a"),dc=n("span"),T(br.$$.fragment),Rb=l(),cc=n("span"),Mb=a("RobertaForSequenceClassification"),au=l(),et=n("div"),T(kr.$$.fragment),Eb=l(),pc=n("p"),zb=a(`RoBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),xb=l(),Tr=n("p"),qb=a("This model inherits from "),yl=n("a"),Cb=a("PreTrainedModel"),Pb=a(`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),jb=l(),yr=n("p"),Lb=a("This model is also a PyTorch "),vr=n("a"),Ab=a("torch.nn.Module"),Ob=a(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Nb=l(),Se=n("div"),T(wr.$$.fragment),Ib=l(),Po=n("p"),Db=a("The "),vl=n("a"),Sb=a("RobertaForSequenceClassification"),Wb=a(" forward method, overrides the "),hc=n("code"),Bb=a("__call__"),Ub=a(" special method."),Hb=l(),T(Pn.$$.fragment),Qb=l(),uc=n("p"),Vb=a("Example of single-label classification:"),Jb=l(),T($r.$$.fragment),Kb=l(),fc=n("p"),Gb=a("Example of multi-label classification:"),Xb=l(),T(Fr.$$.fragment),iu=l(),jo=n("h2"),jn=n("a"),mc=n("span"),T(Rr.$$.fragment),Yb=l(),gc=n("span"),Zb=a("RobertaForMultipleChoice"),lu=l(),tt=n("div"),T(Mr.$$.fragment),ek=l(),_c=n("p"),tk=a(`Roberta Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.`),ok=l(),Er=n("p"),nk=a("This model inherits from "),wl=n("a"),sk=a("PreTrainedModel"),rk=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),ak=l(),zr=n("p"),ik=a("This model is also a PyTorch "),xr=n("a"),lk=a("torch.nn.Module"),dk=a(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),ck=l(),it=n("div"),T(qr.$$.fragment),pk=l(),Lo=n("p"),hk=a("The "),$l=n("a"),uk=a("RobertaForMultipleChoice"),fk=a(" forward method, overrides the "),bc=n("code"),mk=a("__call__"),gk=a(" special method."),_k=l(),T(Ln.$$.fragment),bk=l(),kc=n("p"),kk=a("Example:"),Tk=l(),T(Cr.$$.fragment),du=l(),Ao=n("h2"),An=n("a"),Tc=n("span"),T(Pr.$$.fragment),yk=l(),yc=n("span"),vk=a("RobertaForTokenClassification"),cu=l(),ot=n("div"),T(jr.$$.fragment),wk=l(),vc=n("p"),$k=a(`Roberta Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.`),Fk=l(),Lr=n("p"),Rk=a("This model inherits from "),Fl=n("a"),Mk=a("PreTrainedModel"),Ek=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),zk=l(),Ar=n("p"),xk=a("This model is also a PyTorch "),Or=n("a"),qk=a("torch.nn.Module"),Ck=a(` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Pk=l(),lt=n("div"),T(Nr.$$.fragment),jk=l(),Oo=n("p"),Lk=a("The "),Rl=n("a"),Ak=a("RobertaForTokenClassification"),Ok=a(" forward method, overrides the "),wc=n("code"),Nk=a("__call__"),Ik=a(" special method."),Dk=l(),T(On.$$.fragment),Sk=l(),$c=n("p"),Wk=a("Example:"),Bk=l(),T(Ir.$$.fragment),pu=l(),No=n("h2"),Nn=n("a"),Fc=n("span"),T(Dr.$$.fragment),Uk=l(),Rc=n("span"),Hk=a("RobertaForQuestionAnswering"),hu=l(),nt=n("div"),T(Sr.$$.fragment),Qk=l(),Io=n("p"),Vk=a(`Roberta Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),Mc=n("code"),Jk=a("span start logits"),Kk=a(" and "),Ec=n("code"),Gk=a("span end logits"),Xk=a(")."),Yk=l(),Wr=n("p"),Zk=a("This model inherits from "),Ml=n("a"),e1=a("PreTrainedModel"),t1=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),o1=l(),Br=n("p"),n1=a("This model is also a PyTorch "),Ur=n("a"),s1=a("torch.nn.Module"),r1=a(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),a1=l(),dt=n("div"),T(Hr.$$.fragment),i1=l(),Do=n("p"),l1=a("The "),El=n("a"),d1=a("RobertaForQuestionAnswering"),c1=a(" forward method, overrides the "),zc=n("code"),p1=a("__call__"),h1=a(" special method."),u1=l(),T(In.$$.fragment),f1=l(),xc=n("p"),m1=a("Example:"),g1=l(),T(Qr.$$.fragment),uu=l(),So=n("h2"),Dn=n("a"),qc=n("span"),T(Vr.$$.fragment),_1=l(),Cc=n("span"),b1=a("TFRobertaModel"),fu=l(),Ue=n("div"),T(Jr.$$.fragment),k1=l(),Pc=n("p"),T1=a("The bare RoBERTa Model transformer outputting raw hidden-states without any specific head on top."),y1=l(),Kr=n("p"),v1=a("This model inherits from "),zl=n("a"),w1=a("TFPreTrainedModel"),$1=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),F1=l(),Gr=n("p"),R1=a("This model is also a "),Xr=n("a"),M1=a("tf.keras.Model"),E1=a(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),z1=l(),T(Sn.$$.fragment),x1=l(),ct=n("div"),T(Yr.$$.fragment),q1=l(),Wo=n("p"),C1=a("The "),xl=n("a"),P1=a("TFRobertaModel"),j1=a(" forward method, overrides the "),jc=n("code"),L1=a("__call__"),A1=a(" special method."),O1=l(),T(Wn.$$.fragment),N1=l(),Lc=n("p"),I1=a("Example:"),D1=l(),T(Zr.$$.fragment),mu=l(),Bo=n("h2"),Bn=n("a"),Ac=n("span"),T(ea.$$.fragment),S1=l(),Oc=n("span"),W1=a("TFRobertaForCausalLM"),gu=l(),Uo=n("div"),T(ta.$$.fragment),B1=l(),pt=n("div"),T(oa.$$.fragment),U1=l(),Ho=n("p"),H1=a("The "),ql=n("a"),Q1=a("TFRobertaForCausalLM"),V1=a(" forward method, overrides the "),Nc=n("code"),J1=a("__call__"),K1=a(" special method."),G1=l(),T(Un.$$.fragment),X1=l(),Ic=n("p"),Y1=a("Example:"),Z1=l(),T(na.$$.fragment),_u=l(),Qo=n("h2"),Hn=n("a"),Dc=n("span"),T(sa.$$.fragment),eT=l(),Sc=n("span"),tT=a("TFRobertaForMaskedLM"),bu=l(),He=n("div"),T(ra.$$.fragment),oT=l(),aa=n("p"),nT=a("RoBERTa Model with a "),Wc=n("code"),sT=a("language modeling"),rT=a(" head on top."),aT=l(),ia=n("p"),iT=a("This model inherits from "),Cl=n("a"),lT=a("TFPreTrainedModel"),dT=a(`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),cT=l(),la=n("p"),pT=a("This model is also a "),da=n("a"),hT=a("tf.keras.Model"),uT=a(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),fT=l(),T(Qn.$$.fragment),mT=l(),ht=n("div"),T(ca.$$.fragment),gT=l(),Vo=n("p"),_T=a("The "),Pl=n("a"),bT=a("TFRobertaForMaskedLM"),kT=a(" forward method, overrides the "),Bc=n("code"),TT=a("__call__"),yT=a(" special method."),vT=l(),T(Vn.$$.fragment),wT=l(),Uc=n("p"),$T=a("Example:"),FT=l(),T(pa.$$.fragment),ku=l(),Jo=n("h2"),Jn=n("a"),Hc=n("span"),T(ha.$$.fragment),RT=l(),Qc=n("span"),MT=a("TFRobertaForSequenceClassification"),Tu=l(),Qe=n("div"),T(ua.$$.fragment),ET=l(),Vc=n("p"),zT=a(`RoBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),xT=l(),fa=n("p"),qT=a("This model inherits from "),jl=n("a"),CT=a("TFPreTrainedModel"),PT=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),jT=l(),ma=n("p"),LT=a("This model is also a "),ga=n("a"),AT=a("tf.keras.Model"),OT=a(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),NT=l(),T(Kn.$$.fragment),IT=l(),ut=n("div"),T(_a.$$.fragment),DT=l(),Ko=n("p"),ST=a("The "),Ll=n("a"),WT=a("TFRobertaForSequenceClassification"),BT=a(" forward method, overrides the "),Jc=n("code"),UT=a("__call__"),HT=a(" special method."),QT=l(),T(Gn.$$.fragment),VT=l(),Kc=n("p"),JT=a("Example:"),KT=l(),T(ba.$$.fragment),yu=l(),Go=n("h2"),Xn=n("a"),Gc=n("span"),T(ka.$$.fragment),GT=l(),Xc=n("span"),XT=a("TFRobertaForMultipleChoice"),vu=l(),Ve=n("div"),T(Ta.$$.fragment),YT=l(),Yc=n("p"),ZT=a(`Roberta Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.`),ey=l(),ya=n("p"),ty=a("This model inherits from "),Al=n("a"),oy=a("TFPreTrainedModel"),ny=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),sy=l(),va=n("p"),ry=a("This model is also a "),wa=n("a"),ay=a("tf.keras.Model"),iy=a(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),ly=l(),T(Yn.$$.fragment),dy=l(),ft=n("div"),T($a.$$.fragment),cy=l(),Xo=n("p"),py=a("The "),Ol=n("a"),hy=a("TFRobertaForMultipleChoice"),uy=a(" forward method, overrides the "),Zc=n("code"),fy=a("__call__"),my=a(" special method."),gy=l(),T(Zn.$$.fragment),_y=l(),ep=n("p"),by=a("Example:"),ky=l(),T(Fa.$$.fragment),wu=l(),Yo=n("h2"),es=n("a"),tp=n("span"),T(Ra.$$.fragment),Ty=l(),op=n("span"),yy=a("TFRobertaForTokenClassification"),$u=l(),Je=n("div"),T(Ma.$$.fragment),vy=l(),np=n("p"),wy=a(`RoBERTa Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.`),$y=l(),Ea=n("p"),Fy=a("This model inherits from "),Nl=n("a"),Ry=a("TFPreTrainedModel"),My=a(`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Ey=l(),za=n("p"),zy=a("This model is also a "),xa=n("a"),xy=a("tf.keras.Model"),qy=a(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Cy=l(),T(ts.$$.fragment),Py=l(),mt=n("div"),T(qa.$$.fragment),jy=l(),Zo=n("p"),Ly=a("The "),Il=n("a"),Ay=a("TFRobertaForTokenClassification"),Oy=a(" forward method, overrides the "),sp=n("code"),Ny=a("__call__"),Iy=a(" special method."),Dy=l(),T(os.$$.fragment),Sy=l(),rp=n("p"),Wy=a("Example:"),By=l(),T(Ca.$$.fragment),Fu=l(),en=n("h2"),ns=n("a"),ap=n("span"),T(Pa.$$.fragment),Uy=l(),ip=n("span"),Hy=a("TFRobertaForQuestionAnswering"),Ru=l(),Ke=n("div"),T(ja.$$.fragment),Qy=l(),tn=n("p"),Vy=a(`RoBERTa Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),lp=n("code"),Jy=a("span start logits"),Ky=a(" and "),dp=n("code"),Gy=a("span end logits"),Xy=a(")."),Yy=l(),La=n("p"),Zy=a("This model inherits from "),Dl=n("a"),ev=a("TFPreTrainedModel"),tv=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),ov=l(),Aa=n("p"),nv=a("This model is also a "),Oa=n("a"),sv=a("tf.keras.Model"),rv=a(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),av=l(),T(ss.$$.fragment),iv=l(),gt=n("div"),T(Na.$$.fragment),lv=l(),on=n("p"),dv=a("The "),Sl=n("a"),cv=a("TFRobertaForQuestionAnswering"),pv=a(" forward method, overrides the "),cp=n("code"),hv=a("__call__"),uv=a(" special method."),fv=l(),T(rs.$$.fragment),mv=l(),pp=n("p"),gv=a("Example:"),_v=l(),T(Ia.$$.fragment),Mu=l(),nn=n("h2"),as=n("a"),hp=n("span"),T(Da.$$.fragment),bv=l(),up=n("span"),kv=a("FlaxRobertaModel"),Eu=l(),Le=n("div"),T(Sa.$$.fragment),Tv=l(),fp=n("p"),yv=a("The bare RoBERTa Model transformer outputting raw hidden-states without any specific head on top."),vv=l(),Wa=n("p"),wv=a("This model inherits from "),Wl=n("a"),$v=a("FlaxPreTrainedModel"),Fv=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models)`),Rv=l(),Ba=n("p"),Mv=a("This model is also a Flax Linen "),Ua=n("a"),Ev=a("flax.linen.Module"),zv=a(` subclass. 
Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior.`),xv=l(),mp=n("p"),qv=a("Finally, this model supports inherent JAX features such as:"),Cv=l(),Nt=n("ul"),gp=n("li"),Ha=n("a"),Pv=a("Just-In-Time (JIT) compilation"),jv=l(),_p=n("li"),Qa=n("a"),Lv=a("Automatic Differentiation"),Av=l(),bp=n("li"),Va=n("a"),Ov=a("Vectorization"),Nv=l(),kp=n("li"),Ja=n("a"),Iv=a("Parallelization"),Dv=l(),_t=n("div"),T(Ka.$$.fragment),Sv=l(),sn=n("p"),Wv=a("The "),Tp=n("code"),Bv=a("FlaxRobertaPreTrainedModel"),Uv=a("forward method, overrides the "),yp=n("code"),Hv=a("__call__"),Qv=a(" special method."),Vv=l(),T(is.$$.fragment),Jv=l(),vp=n("p"),Kv=a("Example:"),Gv=l(),T(Ga.$$.fragment),zu=l(),rn=n("h2"),ls=n("a"),wp=n("span"),T(Xa.$$.fragment),Xv=l(),$p=n("span"),Yv=a("FlaxRobertaForMaskedLM"),xu=l(),Ae=n("div"),T(Ya.$$.fragment),Zv=l(),Za=n("p"),ew=a("RoBERTa Model with a "),Fp=n("code"),tw=a("language modeling"),ow=a(" head on top."),nw=l(),ei=n("p"),sw=a("This model inherits from "),Bl=n("a"),rw=a("FlaxPreTrainedModel"),aw=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models)`),iw=l(),ti=n("p"),lw=a("This model is also a Flax Linen "),oi=n("a"),dw=a("flax.linen.Module"),cw=a(` subclass. Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior.`),pw=l(),Rp=n("p"),hw=a("Finally, this model supports inherent JAX features such as:"),uw=l(),It=n("ul"),Mp=n("li"),ni=n("a"),fw=a("Just-In-Time (JIT) compilation"),mw=l(),Ep=n("li"),si=n("a"),gw=a("Automatic Differentiation"),_w=l(),zp=n("li"),ri=n("a"),bw=a("Vectorization"),kw=l(),xp=n("li"),ai=n("a"),Tw=a("Parallelization"),yw=l(),bt=n("div"),T(ii.$$.fragment),vw=l(),an=n("p"),ww=a("The "),qp=n("code"),$w=a("FlaxRobertaPreTrainedModel"),Fw=a("forward method, overrides the "),Cp=n("code"),Rw=a("__call__"),Mw=a(" special method."),Ew=l(),T(ds.$$.fragment),zw=l(),Pp=n("p"),xw=a("Example:"),qw=l(),T(li.$$.fragment),qu=l(),ln=n("h2"),cs=n("a"),jp=n("span"),T(di.$$.fragment),Cw=l(),Lp=n("span"),Pw=a("FlaxRobertaForSequenceClassification"),Cu=l(),Oe=n("div"),T(ci.$$.fragment),jw=l(),Ap=n("p"),Lw=a(`Roberta Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),Aw=l(),pi=n("p"),Ow=a("This model inherits from "),Ul=n("a"),Nw=a("FlaxPreTrainedModel"),Iw=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models)`),Dw=l(),hi=n("p"),Sw=a("This model is also a Flax Linen "),ui=n("a"),Ww=a("flax.linen.Module"),Bw=a(` subclass. 
Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior.`),Uw=l(),Op=n("p"),Hw=a("Finally, this model supports inherent JAX features such as:"),Qw=l(),Dt=n("ul"),Np=n("li"),fi=n("a"),Vw=a("Just-In-Time (JIT) compilation"),Jw=l(),Ip=n("li"),mi=n("a"),Kw=a("Automatic Differentiation"),Gw=l(),Dp=n("li"),gi=n("a"),Xw=a("Vectorization"),Yw=l(),Sp=n("li"),_i=n("a"),Zw=a("Parallelization"),e$=l(),kt=n("div"),T(bi.$$.fragment),t$=l(),dn=n("p"),o$=a("The "),Wp=n("code"),n$=a("FlaxRobertaPreTrainedModel"),s$=a("forward method, overrides the "),Bp=n("code"),r$=a("__call__"),a$=a(" special method."),i$=l(),T(ps.$$.fragment),l$=l(),Up=n("p"),d$=a("Example:"),c$=l(),T(ki.$$.fragment),Pu=l(),cn=n("h2"),hs=n("a"),Hp=n("span"),T(Ti.$$.fragment),p$=l(),Qp=n("span"),h$=a("FlaxRobertaForMultipleChoice"),ju=l(),Ne=n("div"),T(yi.$$.fragment),u$=l(),Vp=n("p"),f$=a(`Roberta Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.`),m$=l(),vi=n("p"),g$=a("This model inherits from "),Hl=n("a"),_$=a("FlaxPreTrainedModel"),b$=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models)`),k$=l(),wi=n("p"),T$=a("This model is also a Flax Linen "),$i=n("a"),y$=a("flax.linen.Module"),v$=a(` subclass. Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior.`),w$=l(),Jp=n("p"),$$=a("Finally, this model supports inherent JAX features such as:"),F$=l(),St=n("ul"),Kp=n("li"),Fi=n("a"),R$=a("Just-In-Time (JIT) compilation"),M$=l(),Gp=n("li"),Ri=n("a"),E$=a("Automatic Differentiation"),z$=l(),Xp=n("li"),Mi=n("a"),x$=a("Vectorization"),q$=l(),Yp=n("li"),Ei=n("a"),C$=a("Parallelization"),P$=l(),Tt=n("div"),T(zi.$$.fragment),j$=l(),pn=n("p"),L$=a("The "),Zp=n("code"),A$=a("FlaxRobertaPreTrainedModel"),O$=a("forward method, overrides the "),eh=n("code"),N$=a("__call__"),I$=a(" special method."),D$=l(),T(us.$$.fragment),S$=l(),th=n("p"),W$=a("Example:"),B$=l(),T(xi.$$.fragment),Lu=l(),hn=n("h2"),fs=n("a"),oh=n("span"),T(qi.$$.fragment),U$=l(),nh=n("span"),H$=a("FlaxRobertaForTokenClassification"),Au=l(),Ie=n("div"),T(Ci.$$.fragment),Q$=l(),sh=n("p"),V$=a(`Roberta Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.`),J$=l(),Pi=n("p"),K$=a("This model inherits from "),Ql=n("a"),G$=a("FlaxPreTrainedModel"),X$=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models)`),Y$=l(),ji=n("p"),Z$=a("This model is also a Flax Linen "),Li=n("a"),eF=a("flax.linen.Module"),tF=a(` subclass. 
Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior.`),oF=l(),rh=n("p"),nF=a("Finally, this model supports inherent JAX features such as:"),sF=l(),Wt=n("ul"),ah=n("li"),Ai=n("a"),rF=a("Just-In-Time (JIT) compilation"),aF=l(),ih=n("li"),Oi=n("a"),iF=a("Automatic Differentiation"),lF=l(),lh=n("li"),Ni=n("a"),dF=a("Vectorization"),cF=l(),dh=n("li"),Ii=n("a"),pF=a("Parallelization"),hF=l(),yt=n("div"),T(Di.$$.fragment),uF=l(),un=n("p"),fF=a("The "),ch=n("code"),mF=a("FlaxRobertaPreTrainedModel"),gF=a("forward method, overrides the "),ph=n("code"),_F=a("__call__"),bF=a(" special method."),kF=l(),T(ms.$$.fragment),TF=l(),hh=n("p"),yF=a("Example:"),vF=l(),T(Si.$$.fragment),Ou=l(),fn=n("h2"),gs=n("a"),uh=n("span"),T(Wi.$$.fragment),wF=l(),fh=n("span"),$F=a("FlaxRobertaForQuestionAnswering"),Nu=l(),De=n("div"),T(Bi.$$.fragment),FF=l(),mn=n("p"),RF=a(`Roberta Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),mh=n("code"),MF=a("span start logits"),EF=a(" and "),gh=n("code"),zF=a("span end logits"),xF=a(")."),qF=l(),Ui=n("p"),CF=a("This model inherits from "),Vl=n("a"),PF=a("FlaxPreTrainedModel"),jF=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models)`),LF=l(),Hi=n("p"),AF=a("This model is also a Flax Linen "),Qi=n("a"),OF=a("flax.linen.Module"),NF=a(` subclass. Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior.`),IF=l(),_h=n("p"),DF=a("Finally, this model supports inherent JAX features such as:"),SF=l(),Bt=n("ul"),bh=n("li"),Vi=n("a"),WF=a("Just-In-Time (JIT) compilation"),BF=l(),kh=n("li"),Ji=n("a"),UF=a("Automatic Differentiation"),HF=l(),Th=n("li"),Ki=n("a"),QF=a("Vectorization"),VF=l(),yh=n("li"),Gi=n("a"),JF=a("Parallelization"),KF=l(),vt=n("div"),T(Xi.$$.fragment),GF=l(),gn=n("p"),XF=a("The "),vh=n("code"),YF=a("FlaxRobertaPreTrainedModel"),ZF=a("forward method, overrides the "),wh=n("code"),e2=a("__call__"),t2=a(" special method."),o2=l(),T(_s.$$.fragment),n2=l(),$h=n("p"),s2=a("Example:"),r2=l(),T(Yi.$$.fragment),this.h()},l(o){const g=gE('[data-svelte="svelte-1phssyn"]',document.head);p=s(g,"META",{name:!0,content:!0}),g.forEach(t),R=d(o),m=s(o,"H1",{class:!0});var Zi=r(m);b=s(Zi,"A",{id:!0,class:!0,href:!0});var Fh=r(b);k=s(Fh,"SPAN",{});var Rh=r(k);y(_.$$.fragment,Rh),Rh.forEach(t),Fh.forEach(t),f=d(Zi),M=s(Zi,"SPAN",{});var Mh=r(M);Y=i(Mh,"RoBERTa"),Mh.forEach(t),Zi.forEach(t),I=d(o),z=s(o,"H2",{class:!0});var el=r(z);X=s(el,"A",{id:!0,class:!0,href:!0});var Eh=r(X);D=s(Eh,"SPAN",{});var zh=r(D);y(te.$$.fragment,zh),zh.forEach(t),Eh.forEach(t),me=d(el),S=s(el,"SPAN",{});var xh=r(S);ge=i(xh,"Overview"),xh.forEach(t),el.forEach(t),pe=d(o),K=s(o,"P",{});var tl=r(K);A=i(tl,"The RoBERTa model was proposed in "),oe=s(tl,"A",{href:!0,rel:!0});var qh=r(oe);Z=i(qh,"RoBERTa: A Robustly Optimized BERT Pretraining Approach"),qh.forEach(t),x=i(tl,` by Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, Veselin Stoyanov. 
It is based on Google\u2019s BERT model released in 2018.`),tl.forEach(t),P=d(o),se=s(o,"P",{});var Ch=r(se);U=i(Ch,`It builds on BERT and modifies key hyperparameters, removing the next-sentence pretraining objective and training with much larger mini-batches and learning rates.`),Ch.forEach(t),he=d(o),re=s(o,"P",{});var Ph=r(re);H=i(Ph,"The abstract from the paper is the following:"),Ph.forEach(t),ue=d(o),ae=s(o,"P",{});var jh=r(ae);C=s(jh,"EM",{});var Lh=r(C);_e=i(Lh,`Language model pretraining has led to significant performance gains but careful comparison between different approaches is challenging. Training is computationally expensive, often done on private datasets of different sizes, and, as we will show, hyperparameter choices have significant impact on the final results. We present a replication study of BERT pretraining (Devlin et al., 2019) that carefully measures the impact of many key hyperparameters and training data size. We find that BERT was significantly undertrained, and can match or exceed the performance of every model published after it. Our best model achieves state-of-the-art results on GLUE, RACE and SQuAD. These results highlight the importance of previously overlooked design choices, and raise questions about the source of recently reported improvements. We release our models and code.`),Lh.forEach(t),jh.forEach(t),W=d(o),ie=s(o,"P",{});var Ah=r(ie);be=i(Ah,"Tips:"),Ah.forEach(t),B=d(o),G=s(o,"UL",{});var Ut=r(G);ne=s(Ut,"LI",{});var ol=r(ne);O=i(ol,"This implementation is the same as "),le=s(ol,"A",{href:!0});var Oh=r(le);Q=i(Oh,"BertModel"),Oh.forEach(t),ke=i(ol,` with a tiny embeddings tweak as well as a setup for Roberta pretrained models.`),ol.forEach(t),u=d(Ut),E=s(Ut,"LI",{});var Nh=r(E);ee=i(Nh,`RoBERTa has the same architecture as BERT, but uses a byte-level BPE as a tokenizer (same as GPT-2) and uses a different pretraining scheme.`),Nh.forEach(t),Me=d(Ut),ce=s(Ut,"LI",{});var Ht=r(ce);N=i(Ht,"RoBERTa doesn\u2019t have "),$e=s(Ht,"CODE",{});var Ih=r($e);Ee=i(Ih,"token_type_ids"),Ih.forEach(t),ze=i(Ht,`, you don\u2019t need to indicate which token belongs to which segment. Just separate your segments with the separation token `),L=s(Ht,"CODE",{});var Dh=r(L);V=i(Dh,"tokenizer.sep_token"),Dh.forEach(t),xe=i(Ht," (or "),Fe=s(Ht,"CODE",{});var Sh=r(Fe);J=i(Sh,"</s>"),Sh.forEach(t),qe=i(Ht,")"),Ht.forEach(t),Ce=d(Ut),de=s(Ut,"LI",{});var Jl=r(de);ve=s(Jl,"A",{href:!0});var Wh=r(ve);dm=i(Wh,"CamemBERT"),Wh.forEach(t),cm=i(Jl," is a wrapper around RoBERTa. Refer to this page for usage examples."),Jl.forEach(t),Ut.forEach(t),Qh=d(o),Qt=s(o,"P",{});var _n=r(Qt);pm=i(_n,"This model was contributed by "),$s=s(_n,"A",{href:!0,rel:!0});var Bh=r($s);hm=i(Bh,"julien-c"),Bh.forEach(t),um=i(_n,". The original code can be found "),Fs=s(_n,"A",{href:!0,rel:!0});var Uh=r(Fs);fm=i(Uh,"here"),Uh.forEach(t),mm=i(_n,"."),_n.forEach(t),Vh=d(o),To=s(o,"H2",{class:!0});var Du=r(To);bn=s(Du,"A",{id:!0,class:!0,href:!0});var l2=r(bn);vd=s(l2,"SPAN",{});var d2=r(vd);y(Rs.$$.fragment,d2),d2.forEach(t),l2.forEach(t),gm=d(Du),wd=s(Du,"SPAN",{});var c2=r(wd);_m=i(c2,"RobertaConfig"),c2.forEach(t),Du.forEach(t),Jh=d(o),We=s(o,"DIV",{class:!0});var qt=r(We);y(Ms.$$.fragment,qt),bm=d(qt),yo=s(qt,"P",{});var Kl=r(yo);km=i(Kl,"This is the configuration class to store the configuration of a "),nl=s(Kl,"A",{href:!0});var p2=r(nl);Tm=i(p2,"RobertaModel"),p2.forEach(t),ym=i(Kl," or a "),sl=s(Kl,"A",{href:!0});var h2=r(sl);vm=i(h2,"TFRobertaModel"),h2.forEach(t),wm=i(Kl,`. 
It is used to instantiate a RoBERTa model according to the specified arguments, defining the model architecture.`),Kl.forEach(t),$m=d(qt),vo=s(qt,"P",{});var Gl=r(vo);Fm=i(Gl,"Configuration objects inherit from "),rl=s(Gl,"A",{href:!0});var u2=r(rl);Rm=i(u2,"PretrainedConfig"),u2.forEach(t),Mm=i(Gl,` and can be used to control the model outputs. Read the documentation from `),al=s(Gl,"A",{href:!0});var f2=r(al);Em=i(f2,"PretrainedConfig"),f2.forEach(t),zm=i(Gl," for more information."),Gl.forEach(t),xm=d(qt),wo=s(qt,"P",{});var Xl=r(wo);qm=i(Xl,"The "),il=s(Xl,"A",{href:!0});var m2=r(il);Cm=i(m2,"RobertaConfig"),m2.forEach(t),Pm=i(Xl," class directly inherits "),ll=s(Xl,"A",{href:!0});var g2=r(ll);jm=i(g2,"BertConfig"),g2.forEach(t),Lm=i(Xl,`. It reuses the same defaults. Please check the parent class for more information.`),Xl.forEach(t),Am=d(qt),$d=s(qt,"P",{});var _2=r($d);Om=i(_2,"Examples:"),_2.forEach(t),Nm=d(qt),y(Es.$$.fragment,qt),qt.forEach(t),Kh=d(o),$o=s(o,"H2",{class:!0});var Su=r($o);kn=s(Su,"A",{id:!0,class:!0,href:!0});var b2=r(kn);Fd=s(b2,"SPAN",{});var k2=r(Fd);y(zs.$$.fragment,k2),k2.forEach(t),b2.forEach(t),Im=d(Su),Rd=s(Su,"SPAN",{});var T2=r(Rd);Dm=i(T2,"RobertaTokenizer"),T2.forEach(t),Su.forEach(t),Gh=d(o),Te=s(o,"DIV",{class:!0});var Re=r(Te);y(xs.$$.fragment,Re),Sm=d(Re),Md=s(Re,"P",{});var y2=r(Md);Wm=i(y2,"Constructs a RoBERTa tokenizer, derived from the GPT-2 tokenizer, using byte-level Byte-Pair-Encoding."),y2.forEach(t),Bm=d(Re),Ed=s(Re,"P",{});var v2=r(Ed);Um=i(v2,`This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will be encoded differently whether it is at the beginning of the sentence (without space) or not:`),v2.forEach(t),Hm=d(Re),y(qs.$$.fragment,Re),Qm=d(Re),Cs=s(Re,"P",{});var Wu=r(Cs);Vm=i(Wu,"You can get around that behavior by passing "),zd=s(Wu,"CODE",{});var w2=r(zd);Jm=i(w2,"add_prefix_space=True"),w2.forEach(t),Km=i(Wu,` when instantiating this tokenizer or when you call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance.`),Wu.forEach(t),Gm=d(Re),y(Tn.$$.fragment,Re),Xm=d(Re),Ps=s(Re,"P",{});var Bu=r(Ps);Ym=i(Bu,"This tokenizer inherits from "),dl=s(Bu,"A",{href:!0});var $2=r(dl);Zm=i($2,"PreTrainedTokenizer"),$2.forEach(t),eg=i(Bu,` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),Bu.forEach(t),tg=d(Re),Vt=s(Re,"DIV",{class:!0});var Yl=r(Vt);y(js.$$.fragment,Yl),og=d(Yl),xd=s(Yl,"P",{});var F2=r(xd);ng=i(F2,`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A RoBERTa sequence has the following format:`),F2.forEach(t),sg=d(Yl),Ls=s(Yl,"UL",{});var Uu=r(Ls);cl=s(Uu,"LI",{});var a2=r(cl);rg=i(a2,"single sequence: "),qd=s(a2,"CODE",{});var R2=r(qd);ag=i(R2,"<s> X </s>"),R2.forEach(t),a2.forEach(t),ig=d(Uu),pl=s(Uu,"LI",{});var i2=r(pl);lg=i(i2,"pair of sequences: "),Cd=s(i2,"CODE",{});var M2=r(Cd);dg=i(M2,"<s> A </s></s> B </s>"),M2.forEach(t),i2.forEach(t),Uu.forEach(t),Yl.forEach(t),cg=d(Re),yn=s(Re,"DIV",{class:!0});var Hu=r(yn);y(As.$$.fragment,Hu),pg=d(Hu),Os=s(Hu,"P",{});var Qu=r(Os);hg=i(Qu,`Retrieve sequence ids from a token list that has no special tokens added. 
This method is called when adding special tokens using the tokenizer `),Pd=s(Qu,"CODE",{});var E2=r(Pd);ug=i(E2,"prepare_for_model"),E2.forEach(t),fg=i(Qu," method."),Qu.forEach(t),Hu.forEach(t),mg=d(Re),vn=s(Re,"DIV",{class:!0});var Vu=r(vn);y(Ns.$$.fragment,Vu),gg=d(Vu),jd=s(Vu,"P",{});var z2=r(jd);_g=i(z2,`Create a mask from the two sequences passed to be used in a sequence-pair classification task. RoBERTa does not make use of token type ids, therefore a list of zeros is returned.`),z2.forEach(t),Vu.forEach(t),bg=d(Re),hl=s(Re,"DIV",{class:!0});var x2=r(hl);y(Is.$$.fragment,x2),x2.forEach(t),Re.forEach(t),Xh=d(o),Fo=s(o,"H2",{class:!0});var Ju=r(Fo);wn=s(Ju,"A",{id:!0,class:!0,href:!0});var q2=r(wn);Ld=s(q2,"SPAN",{});var C2=r(Ld);y(Ds.$$.fragment,C2),C2.forEach(t),q2.forEach(t),kg=d(Ju),Ad=s(Ju,"SPAN",{});var P2=r(Ad);Tg=i(P2,"RobertaTokenizerFast"),P2.forEach(t),Ju.forEach(t),Yh=d(o),Pe=s(o,"DIV",{class:!0});var Ge=r(Pe);y(Ss.$$.fragment,Ge),yg=d(Ge),Ws=s(Ge,"P",{});var Ku=r(Ws);vg=i(Ku,"Construct a \u201Cfast\u201D RoBERTa tokenizer (backed by HuggingFace\u2019s "),Od=s(Ku,"EM",{});var j2=r(Od);wg=i(j2,"tokenizers"),j2.forEach(t),$g=i(Ku,` library), derived from the GPT-2 tokenizer, using byte-level Byte-Pair-Encoding.`),Ku.forEach(t),Fg=d(Ge),Nd=s(Ge,"P",{});var L2=r(Nd);Rg=i(L2,`This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will be encoded differently whether it is at the beginning of the sentence (without space) or not:`),L2.forEach(t),Mg=d(Ge),y(Bs.$$.fragment,Ge),Eg=d(Ge),Us=s(Ge,"P",{});var Gu=r(Us);zg=i(Gu,"You can get around that behavior by passing "),Id=s(Gu,"CODE",{});var A2=r(Id);xg=i(A2,"add_prefix_space=True"),A2.forEach(t),qg=i(Gu,` when instantiating this tokenizer or when you call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance.`),Gu.forEach(t),Cg=d(Ge),y($n.$$.fragment,Ge),Pg=d(Ge),Hs=s(Ge,"P",{});var Xu=r(Hs);jg=i(Xu,"This tokenizer inherits from "),ul=s(Xu,"A",{href:!0});var O2=r(ul);Lg=i(O2,"PreTrainedTokenizerFast"),O2.forEach(t),Ag=i(Xu,` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),Xu.forEach(t),Og=d(Ge),fl=s(Ge,"DIV",{class:!0});var N2=r(fl);y(Qs.$$.fragment,N2),N2.forEach(t),Ge.forEach(t),Zh=d(o),Ro=s(o,"H2",{class:!0});var Yu=r(Ro);Fn=s(Yu,"A",{id:!0,class:!0,href:!0});var I2=r(Fn);Dd=s(I2,"SPAN",{});var D2=r(Dd);y(Vs.$$.fragment,D2),D2.forEach(t),I2.forEach(t),Ng=d(Yu),Sd=s(Yu,"SPAN",{});var S2=r(Sd);Ig=i(S2,"RobertaModel"),S2.forEach(t),Yu.forEach(t),eu=d(o),je=s(o,"DIV",{class:!0});var Xe=r(je);y(Js.$$.fragment,Xe),Dg=d(Xe),Wd=s(Xe,"P",{});var W2=r(Wd);Sg=i(W2,"The bare RoBERTa Model transformer outputting raw hidden-states without any specific head on top."),W2.forEach(t),Wg=d(Xe),Ks=s(Xe,"P",{});var Zu=r(Ks);Bg=i(Zu,"This model inherits from "),ml=s(Zu,"A",{href:!0});var B2=r(ml);Ug=i(B2,"PreTrainedModel"),B2.forEach(t),Hg=i(Zu,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Zu.forEach(t),Qg=d(Xe),Gs=s(Xe,"P",{});var ef=r(Gs);Vg=i(ef,"This model is also a PyTorch "),Xs=s(ef,"A",{href:!0,rel:!0});var U2=r(Xs);Jg=i(U2,"torch.nn.Module"),U2.forEach(t),Kg=i(ef,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),ef.forEach(t),Gg=d(Xe),Ys=s(Xe,"P",{});var tf=r(Ys);Xg=i(tf,`The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of cross-attention is added between the self-attention layers, following the architecture described in `),Bd=s(tf,"EM",{});var H2=r(Bd);Yg=i(H2,`Attention is all you need`),H2.forEach(t),Zg=i(tf,`_ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.`),tf.forEach(t),e_=d(Xe),Be=s(Xe,"P",{});var wt=r(Be);t_=i(wt,"To behave as an decoder the model needs to be initialized with the "),Ud=s(wt,"CODE",{});var Q2=r(Ud);o_=i(Q2,"is_decoder"),Q2.forEach(t),n_=i(wt,` argument of the configuration set to `),Hd=s(wt,"CODE",{});var V2=r(Hd);s_=i(V2,"True"),V2.forEach(t),r_=i(wt,". To be used in a Seq2Seq model, the model needs to initialized with both "),Qd=s(wt,"CODE",{});var J2=r(Qd);a_=i(J2,"is_decoder"),J2.forEach(t),i_=i(wt,` argument and `),Vd=s(wt,"CODE",{});var K2=r(Vd);l_=i(K2,"add_cross_attention"),K2.forEach(t),d_=i(wt," set to "),Jd=s(wt,"CODE",{});var G2=r(Jd);c_=i(G2,"True"),G2.forEach(t),p_=i(wt,"; an "),Kd=s(wt,"CODE",{});var X2=r(Kd);h_=i(X2,"encoder_hidden_states"),X2.forEach(t),u_=i(wt," is then expected as an input to the forward pass."),wt.forEach(t),f_=d(Xe),Rn=s(Xe,"P",{});var Hh=r(Rn);m_=i(Hh,".. _"),Gd=s(Hh,"EM",{});var Y2=r(Gd);g_=i(Y2,"Attention is all you need"),Y2.forEach(t),__=i(Hh,": "),Zs=s(Hh,"A",{href:!0,rel:!0});var Z2=r(Zs);b_=i(Z2,"https://arxiv.org/abs/1706.03762"),Z2.forEach(t),Hh.forEach(t),k_=d(Xe),st=s(Xe,"DIV",{class:!0});var Jt=r(st);y(er.$$.fragment,Jt),T_=d(Jt),Mo=s(Jt,"P",{});var Zl=r(Mo);y_=i(Zl,"The "),gl=s(Zl,"A",{href:!0});var e0=r(gl);v_=i(e0,"RobertaModel"),e0.forEach(t),w_=i(Zl," forward method, overrides the "),Xd=s(Zl,"CODE",{});var t0=r(Xd);$_=i(t0,"__call__"),t0.forEach(t),F_=i(Zl," special method."),Zl.forEach(t),R_=d(Jt),y(Mn.$$.fragment,Jt),M_=d(Jt),Yd=s(Jt,"P",{});var o0=r(Yd);E_=i(o0,"Example:"),o0.forEach(t),z_=d(Jt),y(tr.$$.fragment,Jt),Jt.forEach(t),Xe.forEach(t),tu=d(o),Eo=s(o,"H2",{class:!0});var of=r(Eo);En=s(of,"A",{id:!0,class:!0,href:!0});var n0=r(En);Zd=s(n0,"SPAN",{});var s0=r(Zd);y(or.$$.fragment,s0),s0.forEach(t),n0.forEach(t),x_=d(of),ec=s(of,"SPAN",{});var r0=r(ec);q_=i(r0,"RobertaForCausalLM"),r0.forEach(t),of.forEach(t),ou=d(o),Ye=s(o,"DIV",{class:!0});var Kt=r(Ye);y(nr.$$.fragment,Kt),C_=d(Kt),sr=s(Kt,"P",{});var nf=r(sr);P_=i(nf,"RoBERTa Model with a "),tc=s(nf,"CODE",{});var a0=r(tc);j_=i(a0,"language modeling"),a0.forEach(t),L_=i(nf," head on top for CLM fine-tuning."),nf.forEach(t),A_=d(Kt),rr=s(Kt,"P",{});var sf=r(rr);O_=i(sf,"This model inherits from "),_l=s(sf,"A",{href:!0});var i0=r(_l);N_=i(i0,"PreTrainedModel"),i0.forEach(t),I_=i(sf,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),sf.forEach(t),D_=d(Kt),ar=s(Kt,"P",{});var rf=r(ar);S_=i(rf,"This model is also a PyTorch "),ir=s(rf,"A",{href:!0,rel:!0});var l0=r(ir);W_=i(l0,"torch.nn.Module"),l0.forEach(t),B_=i(rf,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),rf.forEach(t),U_=d(Kt),rt=s(Kt,"DIV",{class:!0});var Gt=r(rt);y(lr.$$.fragment,Gt),H_=d(Gt),zo=s(Gt,"P",{});var ed=r(zo);Q_=i(ed,"The "),bl=s(ed,"A",{href:!0});var d0=r(bl);V_=i(d0,"RobertaForCausalLM"),d0.forEach(t),J_=i(ed," forward method, overrides the "),oc=s(ed,"CODE",{});var c0=r(oc);K_=i(c0,"__call__"),c0.forEach(t),G_=i(ed," special method."),ed.forEach(t),X_=d(Gt),y(zn.$$.fragment,Gt),Y_=d(Gt),nc=s(Gt,"P",{});var p0=r(nc);Z_=i(p0,"Example:"),p0.forEach(t),eb=d(Gt),y(dr.$$.fragment,Gt),Gt.forEach(t),Kt.forEach(t),nu=d(o),xo=s(o,"H2",{class:!0});var af=r(xo);xn=s(af,"A",{id:!0,class:!0,href:!0});var h0=r(xn);sc=s(h0,"SPAN",{});var u0=r(sc);y(cr.$$.fragment,u0),u0.forEach(t),h0.forEach(t),tb=d(af),rc=s(af,"SPAN",{});var f0=r(rc);ob=i(f0,"RobertaForMaskedLM"),f0.forEach(t),af.forEach(t),su=d(o),Ze=s(o,"DIV",{class:!0});var Xt=r(Ze);y(pr.$$.fragment,Xt),nb=d(Xt),hr=s(Xt,"P",{});var lf=r(hr);sb=i(lf,"RoBERTa Model with a "),ac=s(lf,"CODE",{});var m0=r(ac);rb=i(m0,"language modeling"),m0.forEach(t),ab=i(lf," head on top."),lf.forEach(t),ib=d(Xt),ur=s(Xt,"P",{});var df=r(ur);lb=i(df,"This model inherits from "),kl=s(df,"A",{href:!0});var g0=r(kl);db=i(g0,"PreTrainedModel"),g0.forEach(t),cb=i(df,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),df.forEach(t),pb=d(Xt),fr=s(Xt,"P",{});var cf=r(fr);hb=i(cf,"This model is also a PyTorch "),mr=s(cf,"A",{href:!0,rel:!0});var _0=r(mr);ub=i(_0,"torch.nn.Module"),_0.forEach(t),fb=i(cf,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),cf.forEach(t),mb=d(Xt),at=s(Xt,"DIV",{class:!0});var Yt=r(at);y(gr.$$.fragment,Yt),gb=d(Yt),qo=s(Yt,"P",{});var td=r(qo);_b=i(td,"The "),Tl=s(td,"A",{href:!0});var b0=r(Tl);bb=i(b0,"RobertaForMaskedLM"),b0.forEach(t),kb=i(td," forward method, overrides the "),ic=s(td,"CODE",{});var k0=r(ic);Tb=i(k0,"__call__"),k0.forEach(t),yb=i(td," special method."),td.forEach(t),vb=d(Yt),y(qn.$$.fragment,Yt),wb=d(Yt),lc=s(Yt,"P",{});var T0=r(lc);$b=i(T0,"Example:"),T0.forEach(t),Fb=d(Yt),y(_r.$$.fragment,Yt),Yt.forEach(t),Xt.forEach(t),ru=d(o),Co=s(o,"H2",{class:!0});var pf=r(Co);Cn=s(pf,"A",{id:!0,class:!0,href:!0});var y0=r(Cn);dc=s(y0,"SPAN",{});var v0=r(dc);y(br.$$.fragment,v0),v0.forEach(t),y0.forEach(t),Rb=d(pf),cc=s(pf,"SPAN",{});var w0=r(cc);Mb=i(w0,"RobertaForSequenceClassification"),w0.forEach(t),pf.forEach(t),au=d(o),et=s(o,"DIV",{class:!0});var Zt=r(et);y(kr.$$.fragment,Zt),Eb=d(Zt),pc=s(Zt,"P",{});var $0=r(pc);zb=i($0,`RoBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),$0.forEach(t),xb=d(Zt),Tr=s(Zt,"P",{});var hf=r(Tr);qb=i(hf,"This model inherits from "),yl=s(hf,"A",{href:!0});var F0=r(yl);Cb=i(F0,"PreTrainedModel"),F0.forEach(t),Pb=i(hf,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),hf.forEach(t),jb=d(Zt),yr=s(Zt,"P",{});var uf=r(yr);Lb=i(uf,"This model is also a PyTorch "),vr=s(uf,"A",{href:!0,rel:!0});var R0=r(vr);Ab=i(R0,"torch.nn.Module"),R0.forEach(t),Ob=i(uf,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),uf.forEach(t),Nb=d(Zt),Se=s(Zt,"DIV",{class:!0});var $t=r(Se);y(wr.$$.fragment,$t),Ib=d($t),Po=s($t,"P",{});var od=r(Po);Db=i(od,"The "),vl=s(od,"A",{href:!0});var M0=r(vl);Sb=i(M0,"RobertaForSequenceClassification"),M0.forEach(t),Wb=i(od," forward method, overrides the "),hc=s(od,"CODE",{});var E0=r(hc);Bb=i(E0,"__call__"),E0.forEach(t),Ub=i(od," special method."),od.forEach(t),Hb=d($t),y(Pn.$$.fragment,$t),Qb=d($t),uc=s($t,"P",{});var z0=r(uc);Vb=i(z0,"Example of single-label classification:"),z0.forEach(t),Jb=d($t),y($r.$$.fragment,$t),Kb=d($t),fc=s($t,"P",{});var x0=r(fc);Gb=i(x0,"Example of multi-label classification:"),x0.forEach(t),Xb=d($t),y(Fr.$$.fragment,$t),$t.forEach(t),Zt.forEach(t),iu=d(o),jo=s(o,"H2",{class:!0});var ff=r(jo);jn=s(ff,"A",{id:!0,class:!0,href:!0});var q0=r(jn);mc=s(q0,"SPAN",{});var C0=r(mc);y(Rr.$$.fragment,C0),C0.forEach(t),q0.forEach(t),Yb=d(ff),gc=s(ff,"SPAN",{});var P0=r(gc);Zb=i(P0,"RobertaForMultipleChoice"),P0.forEach(t),ff.forEach(t),lu=d(o),tt=s(o,"DIV",{class:!0});var eo=r(tt);y(Mr.$$.fragment,eo),ek=d(eo),_c=s(eo,"P",{});var j0=r(_c);tk=i(j0,`Roberta Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.`),j0.forEach(t),ok=d(eo),Er=s(eo,"P",{});var mf=r(Er);nk=i(mf,"This model inherits from "),wl=s(mf,"A",{href:!0});var L0=r(wl);sk=i(L0,"PreTrainedModel"),L0.forEach(t),rk=i(mf,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),mf.forEach(t),ak=d(eo),zr=s(eo,"P",{});var gf=r(zr);ik=i(gf,"This model is also a PyTorch "),xr=s(gf,"A",{href:!0,rel:!0});var A0=r(xr);lk=i(A0,"torch.nn.Module"),A0.forEach(t),dk=i(gf,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),gf.forEach(t),ck=d(eo),it=s(eo,"DIV",{class:!0});var to=r(it);y(qr.$$.fragment,to),pk=d(to),Lo=s(to,"P",{});var nd=r(Lo);hk=i(nd,"The "),$l=s(nd,"A",{href:!0});var O0=r($l);uk=i(O0,"RobertaForMultipleChoice"),O0.forEach(t),fk=i(nd," forward method, overrides the "),bc=s(nd,"CODE",{});var N0=r(bc);mk=i(N0,"__call__"),N0.forEach(t),gk=i(nd," special method."),nd.forEach(t),_k=d(to),y(Ln.$$.fragment,to),bk=d(to),kc=s(to,"P",{});var I0=r(kc);kk=i(I0,"Example:"),I0.forEach(t),Tk=d(to),y(Cr.$$.fragment,to),to.forEach(t),eo.forEach(t),du=d(o),Ao=s(o,"H2",{class:!0});var _f=r(Ao);An=s(_f,"A",{id:!0,class:!0,href:!0});var D0=r(An);Tc=s(D0,"SPAN",{});var S0=r(Tc);y(Pr.$$.fragment,S0),S0.forEach(t),D0.forEach(t),yk=d(_f),yc=s(_f,"SPAN",{});var W0=r(yc);vk=i(W0,"RobertaForTokenClassification"),W0.forEach(t),_f.forEach(t),cu=d(o),ot=s(o,"DIV",{class:!0});var oo=r(ot);y(jr.$$.fragment,oo),wk=d(oo),vc=s(oo,"P",{});var B0=r(vc);$k=i(B0,`Roberta Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.`),B0.forEach(t),Fk=d(oo),Lr=s(oo,"P",{});var bf=r(Lr);Rk=i(bf,"This model inherits from "),Fl=s(bf,"A",{href:!0});var U0=r(Fl);Mk=i(U0,"PreTrainedModel"),U0.forEach(t),Ek=i(bf,`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),bf.forEach(t),zk=d(oo),Ar=s(oo,"P",{});var kf=r(Ar);xk=i(kf,"This model is also a PyTorch "),Or=s(kf,"A",{href:!0,rel:!0});var H0=r(Or);qk=i(H0,"torch.nn.Module"),H0.forEach(t),Ck=i(kf,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),kf.forEach(t),Pk=d(oo),lt=s(oo,"DIV",{class:!0});var no=r(lt);y(Nr.$$.fragment,no),jk=d(no),Oo=s(no,"P",{});var sd=r(Oo);Lk=i(sd,"The "),Rl=s(sd,"A",{href:!0});var Q0=r(Rl);Ak=i(Q0,"RobertaForTokenClassification"),Q0.forEach(t),Ok=i(sd," forward method, overrides the "),wc=s(sd,"CODE",{});var V0=r(wc);Nk=i(V0,"__call__"),V0.forEach(t),Ik=i(sd," special method."),sd.forEach(t),Dk=d(no),y(On.$$.fragment,no),Sk=d(no),$c=s(no,"P",{});var J0=r($c);Wk=i(J0,"Example:"),J0.forEach(t),Bk=d(no),y(Ir.$$.fragment,no),no.forEach(t),oo.forEach(t),pu=d(o),No=s(o,"H2",{class:!0});var Tf=r(No);Nn=s(Tf,"A",{id:!0,class:!0,href:!0});var K0=r(Nn);Fc=s(K0,"SPAN",{});var G0=r(Fc);y(Dr.$$.fragment,G0),G0.forEach(t),K0.forEach(t),Uk=d(Tf),Rc=s(Tf,"SPAN",{});var X0=r(Rc);Hk=i(X0,"RobertaForQuestionAnswering"),X0.forEach(t),Tf.forEach(t),hu=d(o),nt=s(o,"DIV",{class:!0});var so=r(nt);y(Sr.$$.fragment,so),Qk=d(so),Io=s(so,"P",{});var rd=r(Io);Vk=i(rd,`Roberta Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),Mc=s(rd,"CODE",{});var Y0=r(Mc);Jk=i(Y0,"span start logits"),Y0.forEach(t),Kk=i(rd," and "),Ec=s(rd,"CODE",{});var Z0=r(Ec);Gk=i(Z0,"span end logits"),Z0.forEach(t),Xk=i(rd,")."),rd.forEach(t),Yk=d(so),Wr=s(so,"P",{});var yf=r(Wr);Zk=i(yf,"This model inherits from "),Ml=s(yf,"A",{href:!0});var e4=r(Ml);e1=i(e4,"PreTrainedModel"),e4.forEach(t),t1=i(yf,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),yf.forEach(t),o1=d(so),Br=s(so,"P",{});var vf=r(Br);n1=i(vf,"This model is also a PyTorch "),Ur=s(vf,"A",{href:!0,rel:!0});var t4=r(Ur);s1=i(t4,"torch.nn.Module"),t4.forEach(t),r1=i(vf,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),vf.forEach(t),a1=d(so),dt=s(so,"DIV",{class:!0});var ro=r(dt);y(Hr.$$.fragment,ro),i1=d(ro),Do=s(ro,"P",{});var ad=r(Do);l1=i(ad,"The "),El=s(ad,"A",{href:!0});var o4=r(El);d1=i(o4,"RobertaForQuestionAnswering"),o4.forEach(t),c1=i(ad," forward method, overrides the "),zc=s(ad,"CODE",{});var n4=r(zc);p1=i(n4,"__call__"),n4.forEach(t),h1=i(ad," special method."),ad.forEach(t),u1=d(ro),y(In.$$.fragment,ro),f1=d(ro),xc=s(ro,"P",{});var s4=r(xc);m1=i(s4,"Example:"),s4.forEach(t),g1=d(ro),y(Qr.$$.fragment,ro),ro.forEach(t),so.forEach(t),uu=d(o),So=s(o,"H2",{class:!0});var wf=r(So);Dn=s(wf,"A",{id:!0,class:!0,href:!0});var r4=r(Dn);qc=s(r4,"SPAN",{});var a4=r(qc);y(Vr.$$.fragment,a4),a4.forEach(t),r4.forEach(t),_1=d(wf),Cc=s(wf,"SPAN",{});var i4=r(Cc);b1=i(i4,"TFRobertaModel"),i4.forEach(t),wf.forEach(t),fu=d(o),Ue=s(o,"DIV",{class:!0});var Ct=r(Ue);y(Jr.$$.fragment,Ct),k1=d(Ct),Pc=s(Ct,"P",{});var l4=r(Pc);T1=i(l4,"The bare RoBERTa Model transformer outputting raw hidden-states without any specific head on top."),l4.forEach(t),y1=d(Ct),Kr=s(Ct,"P",{});var $f=r(Kr);v1=i($f,"This model inherits from "),zl=s($f,"A",{href:!0});var d4=r(zl);w1=i(d4,"TFPreTrainedModel"),d4.forEach(t),$1=i($f,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),$f.forEach(t),F1=d(Ct),Gr=s(Ct,"P",{});var Ff=r(Gr);R1=i(Ff,"This model is also a "),Xr=s(Ff,"A",{href:!0,rel:!0});var c4=r(Xr);M1=i(c4,"tf.keras.Model"),c4.forEach(t),E1=i(Ff,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Ff.forEach(t),z1=d(Ct),y(Sn.$$.fragment,Ct),x1=d(Ct),ct=s(Ct,"DIV",{class:!0});var ao=r(ct);y(Yr.$$.fragment,ao),q1=d(ao),Wo=s(ao,"P",{});var id=r(Wo);C1=i(id,"The "),xl=s(id,"A",{href:!0});var p4=r(xl);P1=i(p4,"TFRobertaModel"),p4.forEach(t),j1=i(id," forward method, overrides the "),jc=s(id,"CODE",{});var h4=r(jc);L1=i(h4,"__call__"),h4.forEach(t),A1=i(id," special method."),id.forEach(t),O1=d(ao),y(Wn.$$.fragment,ao),N1=d(ao),Lc=s(ao,"P",{});var u4=r(Lc);I1=i(u4,"Example:"),u4.forEach(t),D1=d(ao),y(Zr.$$.fragment,ao),ao.forEach(t),Ct.forEach(t),mu=d(o),Bo=s(o,"H2",{class:!0});var Rf=r(Bo);Bn=s(Rf,"A",{id:!0,class:!0,href:!0});var f4=r(Bn);Ac=s(f4,"SPAN",{});var m4=r(Ac);y(ea.$$.fragment,m4),m4.forEach(t),f4.forEach(t),S1=d(Rf),Oc=s(Rf,"SPAN",{});var g4=r(Oc);W1=i(g4,"TFRobertaForCausalLM"),g4.forEach(t),Rf.forEach(t),gu=d(o),Uo=s(o,"DIV",{class:!0});var Mf=r(Uo);y(ta.$$.fragment,Mf),B1=d(Mf),pt=s(Mf,"DIV",{class:!0});var io=r(pt);y(oa.$$.fragment,io),U1=d(io),Ho=s(io,"P",{});var ld=r(Ho);H1=i(ld,"The "),ql=s(ld,"A",{href:!0});var _4=r(ql);Q1=i(_4,"TFRobertaForCausalLM"),_4.forEach(t),V1=i(ld," forward method, overrides the "),Nc=s(ld,"CODE",{});var b4=r(Nc);J1=i(b4,"__call__"),b4.forEach(t),K1=i(ld," special method."),ld.forEach(t),G1=d(io),y(Un.$$.fragment,io),X1=d(io),Ic=s(io,"P",{});var k4=r(Ic);Y1=i(k4,"Example:"),k4.forEach(t),Z1=d(io),y(na.$$.fragment,io),io.forEach(t),Mf.forEach(t),_u=d(o),Qo=s(o,"H2",{class:!0});var Ef=r(Qo);Hn=s(Ef,"A",{id:!0,class:!0,href:!0});var T4=r(Hn);Dc=s(T4,"SPAN",{});var y4=r(Dc);y(sa.$$.fragment,y4),y4.forEach(t),T4.forEach(t),eT=d(Ef),Sc=s(Ef,"SPAN",{});var 
v4=r(Sc);tT=i(v4,"TFRobertaForMaskedLM"),v4.forEach(t),Ef.forEach(t),bu=d(o),He=s(o,"DIV",{class:!0});var Pt=r(He);y(ra.$$.fragment,Pt),oT=d(Pt),aa=s(Pt,"P",{});var zf=r(aa);nT=i(zf,"RoBERTa Model with a "),Wc=s(zf,"CODE",{});var w4=r(Wc);sT=i(w4,"language modeling"),w4.forEach(t),rT=i(zf," head on top."),zf.forEach(t),aT=d(Pt),ia=s(Pt,"P",{});var xf=r(ia);iT=i(xf,"This model inherits from "),Cl=s(xf,"A",{href:!0});var $4=r(Cl);lT=i($4,"TFPreTrainedModel"),$4.forEach(t),dT=i(xf,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),xf.forEach(t),cT=d(Pt),la=s(Pt,"P",{});var qf=r(la);pT=i(qf,"This model is also a "),da=s(qf,"A",{href:!0,rel:!0});var F4=r(da);hT=i(F4,"tf.keras.Model"),F4.forEach(t),uT=i(qf,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),qf.forEach(t),fT=d(Pt),y(Qn.$$.fragment,Pt),mT=d(Pt),ht=s(Pt,"DIV",{class:!0});var lo=r(ht);y(ca.$$.fragment,lo),gT=d(lo),Vo=s(lo,"P",{});var dd=r(Vo);_T=i(dd,"The "),Pl=s(dd,"A",{href:!0});var R4=r(Pl);bT=i(R4,"TFRobertaForMaskedLM"),R4.forEach(t),kT=i(dd," forward method, overrides the "),Bc=s(dd,"CODE",{});var M4=r(Bc);TT=i(M4,"__call__"),M4.forEach(t),yT=i(dd," special method."),dd.forEach(t),vT=d(lo),y(Vn.$$.fragment,lo),wT=d(lo),Uc=s(lo,"P",{});var E4=r(Uc);$T=i(E4,"Example:"),E4.forEach(t),FT=d(lo),y(pa.$$.fragment,lo),lo.forEach(t),Pt.forEach(t),ku=d(o),Jo=s(o,"H2",{class:!0});var Cf=r(Jo);Jn=s(Cf,"A",{id:!0,class:!0,href:!0});var z4=r(Jn);Hc=s(z4,"SPAN",{});var x4=r(Hc);y(ha.$$.fragment,x4),x4.forEach(t),z4.forEach(t),RT=d(Cf),Qc=s(Cf,"SPAN",{});var q4=r(Qc);MT=i(q4,"TFRobertaForSequenceClassification"),q4.forEach(t),Cf.forEach(t),Tu=d(o),Qe=s(o,"DIV",{class:!0});var jt=r(Qe);y(ua.$$.fragment,jt),ET=d(jt),Vc=s(jt,"P",{});var C4=r(Vc);zT=i(C4,`RoBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),C4.forEach(t),xT=d(jt),fa=s(jt,"P",{});var Pf=r(fa);qT=i(Pf,"This model inherits from "),jl=s(Pf,"A",{href:!0});var P4=r(jl);CT=i(P4,"TFPreTrainedModel"),P4.forEach(t),PT=i(Pf,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Pf.forEach(t),jT=d(jt),ma=s(jt,"P",{});var jf=r(ma);LT=i(jf,"This model is also a "),ga=s(jf,"A",{href:!0,rel:!0});var j4=r(ga);AT=i(j4,"tf.keras.Model"),j4.forEach(t),OT=i(jf,` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),jf.forEach(t),NT=d(jt),y(Kn.$$.fragment,jt),IT=d(jt),ut=s(jt,"DIV",{class:!0});var co=r(ut);y(_a.$$.fragment,co),DT=d(co),Ko=s(co,"P",{});var cd=r(Ko);ST=i(cd,"The "),Ll=s(cd,"A",{href:!0});var L4=r(Ll);WT=i(L4,"TFRobertaForSequenceClassification"),L4.forEach(t),BT=i(cd," forward method, overrides the "),Jc=s(cd,"CODE",{});var A4=r(Jc);UT=i(A4,"__call__"),A4.forEach(t),HT=i(cd," special method."),cd.forEach(t),QT=d(co),y(Gn.$$.fragment,co),VT=d(co),Kc=s(co,"P",{});var O4=r(Kc);JT=i(O4,"Example:"),O4.forEach(t),KT=d(co),y(ba.$$.fragment,co),co.forEach(t),jt.forEach(t),yu=d(o),Go=s(o,"H2",{class:!0});var Lf=r(Go);Xn=s(Lf,"A",{id:!0,class:!0,href:!0});var N4=r(Xn);Gc=s(N4,"SPAN",{});var I4=r(Gc);y(ka.$$.fragment,I4),I4.forEach(t),N4.forEach(t),GT=d(Lf),Xc=s(Lf,"SPAN",{});var D4=r(Xc);XT=i(D4,"TFRobertaForMultipleChoice"),D4.forEach(t),Lf.forEach(t),vu=d(o),Ve=s(o,"DIV",{class:!0});var Lt=r(Ve);y(Ta.$$.fragment,Lt),YT=d(Lt),Yc=s(Lt,"P",{});var S4=r(Yc);ZT=i(S4,`Roberta Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.`),S4.forEach(t),ey=d(Lt),ya=s(Lt,"P",{});var Af=r(ya);ty=i(Af,"This model inherits from "),Al=s(Af,"A",{href:!0});var W4=r(Al);oy=i(W4,"TFPreTrainedModel"),W4.forEach(t),ny=i(Af,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Af.forEach(t),sy=d(Lt),va=s(Lt,"P",{});var Of=r(va);ry=i(Of,"This model is also a "),wa=s(Of,"A",{href:!0,rel:!0});var B4=r(wa);ay=i(B4,"tf.keras.Model"),B4.forEach(t),iy=i(Of,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Of.forEach(t),ly=d(Lt),y(Yn.$$.fragment,Lt),dy=d(Lt),ft=s(Lt,"DIV",{class:!0});var po=r(ft);y($a.$$.fragment,po),cy=d(po),Xo=s(po,"P",{});var pd=r(Xo);py=i(pd,"The "),Ol=s(pd,"A",{href:!0});var U4=r(Ol);hy=i(U4,"TFRobertaForMultipleChoice"),U4.forEach(t),uy=i(pd," forward method, overrides the "),Zc=s(pd,"CODE",{});var H4=r(Zc);fy=i(H4,"__call__"),H4.forEach(t),my=i(pd," special method."),pd.forEach(t),gy=d(po),y(Zn.$$.fragment,po),_y=d(po),ep=s(po,"P",{});var Q4=r(ep);by=i(Q4,"Example:"),Q4.forEach(t),ky=d(po),y(Fa.$$.fragment,po),po.forEach(t),Lt.forEach(t),wu=d(o),Yo=s(o,"H2",{class:!0});var Nf=r(Yo);es=s(Nf,"A",{id:!0,class:!0,href:!0});var V4=r(es);tp=s(V4,"SPAN",{});var J4=r(tp);y(Ra.$$.fragment,J4),J4.forEach(t),V4.forEach(t),Ty=d(Nf),op=s(Nf,"SPAN",{});var K4=r(op);yy=i(K4,"TFRobertaForTokenClassification"),K4.forEach(t),Nf.forEach(t),$u=d(o),Je=s(o,"DIV",{class:!0});var At=r(Je);y(Ma.$$.fragment,At),vy=d(At),np=s(At,"P",{});var G4=r(np);wy=i(G4,`RoBERTa Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.`),G4.forEach(t),$y=d(At),Ea=s(At,"P",{});var If=r(Ea);Fy=i(If,"This model inherits from "),Nl=s(If,"A",{href:!0});var X4=r(Nl);Ry=i(X4,"TFPreTrainedModel"),X4.forEach(t),My=i(If,`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),If.forEach(t),Ey=d(At),za=s(At,"P",{});var Df=r(za);zy=i(Df,"This model is also a "),xa=s(Df,"A",{href:!0,rel:!0});var Y4=r(xa);xy=i(Y4,"tf.keras.Model"),Y4.forEach(t),qy=i(Df,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Df.forEach(t),Cy=d(At),y(ts.$$.fragment,At),Py=d(At),mt=s(At,"DIV",{class:!0});var ho=r(mt);y(qa.$$.fragment,ho),jy=d(ho),Zo=s(ho,"P",{});var hd=r(Zo);Ly=i(hd,"The "),Il=s(hd,"A",{href:!0});var Z4=r(Il);Ay=i(Z4,"TFRobertaForTokenClassification"),Z4.forEach(t),Oy=i(hd," forward method, overrides the "),sp=s(hd,"CODE",{});var eR=r(sp);Ny=i(eR,"__call__"),eR.forEach(t),Iy=i(hd," special method."),hd.forEach(t),Dy=d(ho),y(os.$$.fragment,ho),Sy=d(ho),rp=s(ho,"P",{});var tR=r(rp);Wy=i(tR,"Example:"),tR.forEach(t),By=d(ho),y(Ca.$$.fragment,ho),ho.forEach(t),At.forEach(t),Fu=d(o),en=s(o,"H2",{class:!0});var Sf=r(en);ns=s(Sf,"A",{id:!0,class:!0,href:!0});var oR=r(ns);ap=s(oR,"SPAN",{});var nR=r(ap);y(Pa.$$.fragment,nR),nR.forEach(t),oR.forEach(t),Uy=d(Sf),ip=s(Sf,"SPAN",{});var sR=r(ip);Hy=i(sR,"TFRobertaForQuestionAnswering"),sR.forEach(t),Sf.forEach(t),Ru=d(o),Ke=s(o,"DIV",{class:!0});var Ot=r(Ke);y(ja.$$.fragment,Ot),Qy=d(Ot),tn=s(Ot,"P",{});var ud=r(tn);Vy=i(ud,`RoBERTa Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),lp=s(ud,"CODE",{});var rR=r(lp);Jy=i(rR,"span start logits"),rR.forEach(t),Ky=i(ud," and "),dp=s(ud,"CODE",{});var aR=r(dp);Gy=i(aR,"span end logits"),aR.forEach(t),Xy=i(ud,")."),ud.forEach(t),Yy=d(Ot),La=s(Ot,"P",{});var Wf=r(La);Zy=i(Wf,"This model inherits from "),Dl=s(Wf,"A",{href:!0});var iR=r(Dl);ev=i(iR,"TFPreTrainedModel"),iR.forEach(t),tv=i(Wf,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Wf.forEach(t),ov=d(Ot),Aa=s(Ot,"P",{});var Bf=r(Aa);nv=i(Bf,"This model is also a "),Oa=s(Bf,"A",{href:!0,rel:!0});var lR=r(Oa);sv=i(lR,"tf.keras.Model"),lR.forEach(t),rv=i(Bf,` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Bf.forEach(t),av=d(Ot),y(ss.$$.fragment,Ot),iv=d(Ot),gt=s(Ot,"DIV",{class:!0});var uo=r(gt);y(Na.$$.fragment,uo),lv=d(uo),on=s(uo,"P",{});var fd=r(on);dv=i(fd,"The "),Sl=s(fd,"A",{href:!0});var dR=r(Sl);cv=i(dR,"TFRobertaForQuestionAnswering"),dR.forEach(t),pv=i(fd," forward method, overrides the "),cp=s(fd,"CODE",{});var cR=r(cp);hv=i(cR,"__call__"),cR.forEach(t),uv=i(fd," special method."),fd.forEach(t),fv=d(uo),y(rs.$$.fragment,uo),mv=d(uo),pp=s(uo,"P",{});var pR=r(pp);gv=i(pR,"Example:"),pR.forEach(t),_v=d(uo),y(Ia.$$.fragment,uo),uo.forEach(t),Ot.forEach(t),Mu=d(o),nn=s(o,"H2",{class:!0});var Uf=r(nn);as=s(Uf,"A",{id:!0,class:!0,href:!0});var hR=r(as);hp=s(hR,"SPAN",{});var uR=r(hp);y(Da.$$.fragment,uR),uR.forEach(t),hR.forEach(t),bv=d(Uf),up=s(Uf,"SPAN",{});var fR=r(up);kv=i(fR,"FlaxRobertaModel"),fR.forEach(t),Uf.forEach(t),Eu=d(o),Le=s(o,"DIV",{class:!0});var Ft=r(Le);y(Sa.$$.fragment,Ft),Tv=d(Ft),fp=s(Ft,"P",{});var mR=r(fp);yv=i(mR,"The bare RoBERTa Model transformer outputting raw hidden-states without any specific head on top."),mR.forEach(t),vv=d(Ft),Wa=s(Ft,"P",{});var Hf=r(Wa);wv=i(Hf,"This model inherits from "),Wl=s(Hf,"A",{href:!0});var gR=r(Wl);$v=i(gR,"FlaxPreTrainedModel"),gR.forEach(t),Fv=i(Hf,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models)`),Hf.forEach(t),Rv=d(Ft),Ba=s(Ft,"P",{});var Qf=r(Ba);Mv=i(Qf,"This model is also a Flax Linen "),Ua=s(Qf,"A",{href:!0,rel:!0});var _R=r(Ua);Ev=i(_R,"flax.linen.Module"),_R.forEach(t),zv=i(Qf,` subclass. Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior.`),Qf.forEach(t),xv=d(Ft),mp=s(Ft,"P",{});var bR=r(mp);qv=i(bR,"Finally, this model supports inherent JAX features such as:"),bR.forEach(t),Cv=d(Ft),Nt=s(Ft,"UL",{});var bs=r(Nt);gp=s(bs,"LI",{});var kR=r(gp);Ha=s(kR,"A",{href:!0,rel:!0});var TR=r(Ha);Pv=i(TR,"Just-In-Time (JIT) compilation"),TR.forEach(t),kR.forEach(t),jv=d(bs),_p=s(bs,"LI",{});var yR=r(_p);Qa=s(yR,"A",{href:!0,rel:!0});var vR=r(Qa);Lv=i(vR,"Automatic Differentiation"),vR.forEach(t),yR.forEach(t),Av=d(bs),bp=s(bs,"LI",{});var wR=r(bp);Va=s(wR,"A",{href:!0,rel:!0});var $R=r(Va);Ov=i($R,"Vectorization"),$R.forEach(t),wR.forEach(t),Nv=d(bs),kp=s(bs,"LI",{});var FR=r(kp);Ja=s(FR,"A",{href:!0,rel:!0});var RR=r(Ja);Iv=i(RR,"Parallelization"),RR.forEach(t),FR.forEach(t),bs.forEach(t),Dv=d(Ft),_t=s(Ft,"DIV",{class:!0});var fo=r(_t);y(Ka.$$.fragment,fo),Sv=d(fo),sn=s(fo,"P",{});var md=r(sn);Wv=i(md,"The "),Tp=s(md,"CODE",{});var MR=r(Tp);Bv=i(MR,"FlaxRobertaPreTrainedModel"),MR.forEach(t),Uv=i(md,"forward method, overrides the "),yp=s(md,"CODE",{});var ER=r(yp);Hv=i(ER,"__call__"),ER.forEach(t),Qv=i(md," special method."),md.forEach(t),Vv=d(fo),y(is.$$.fragment,fo),Jv=d(fo),vp=s(fo,"P",{});var zR=r(vp);Kv=i(zR,"Example:"),zR.forEach(t),Gv=d(fo),y(Ga.$$.fragment,fo),fo.forEach(t),Ft.forEach(t),zu=d(o),rn=s(o,"H2",{class:!0});var Vf=r(rn);ls=s(Vf,"A",{id:!0,class:!0,href:!0});var xR=r(ls);wp=s(xR,"SPAN",{});var qR=r(wp);y(Xa.$$.fragment,qR),qR.forEach(t),xR.forEach(t),Xv=d(Vf),$p=s(Vf,"SPAN",{});var CR=r($p);Yv=i(CR,"FlaxRobertaForMaskedLM"),CR.forEach(t),Vf.forEach(t),xu=d(o),Ae=s(o,"DIV",{class:!0});var Rt=r(Ae);y(Ya.$$.fragment,Rt),Zv=d(Rt),Za=s(Rt,"P",{});var Jf=r(Za);ew=i(Jf,"RoBERTa Model 
with a "),Fp=s(Jf,"CODE",{});var PR=r(Fp);tw=i(PR,"language modeling"),PR.forEach(t),ow=i(Jf," head on top."),Jf.forEach(t),nw=d(Rt),ei=s(Rt,"P",{});var Kf=r(ei);sw=i(Kf,"This model inherits from "),Bl=s(Kf,"A",{href:!0});var jR=r(Bl);rw=i(jR,"FlaxPreTrainedModel"),jR.forEach(t),aw=i(Kf,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models)`),Kf.forEach(t),iw=d(Rt),ti=s(Rt,"P",{});var Gf=r(ti);lw=i(Gf,"This model is also a Flax Linen "),oi=s(Gf,"A",{href:!0,rel:!0});var LR=r(oi);dw=i(LR,"flax.linen.Module"),LR.forEach(t),cw=i(Gf,` subclass. Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior.`),Gf.forEach(t),pw=d(Rt),Rp=s(Rt,"P",{});var AR=r(Rp);hw=i(AR,"Finally, this model supports inherent JAX features such as:"),AR.forEach(t),uw=d(Rt),It=s(Rt,"UL",{});var ks=r(It);Mp=s(ks,"LI",{});var OR=r(Mp);ni=s(OR,"A",{href:!0,rel:!0});var NR=r(ni);fw=i(NR,"Just-In-Time (JIT) compilation"),NR.forEach(t),OR.forEach(t),mw=d(ks),Ep=s(ks,"LI",{});var IR=r(Ep);si=s(IR,"A",{href:!0,rel:!0});var DR=r(si);gw=i(DR,"Automatic Differentiation"),DR.forEach(t),IR.forEach(t),_w=d(ks),zp=s(ks,"LI",{});var SR=r(zp);ri=s(SR,"A",{href:!0,rel:!0});var WR=r(ri);bw=i(WR,"Vectorization"),WR.forEach(t),SR.forEach(t),kw=d(ks),xp=s(ks,"LI",{});var BR=r(xp);ai=s(BR,"A",{href:!0,rel:!0});var UR=r(ai);Tw=i(UR,"Parallelization"),UR.forEach(t),BR.forEach(t),ks.forEach(t),yw=d(Rt),bt=s(Rt,"DIV",{class:!0});var mo=r(bt);y(ii.$$.fragment,mo),vw=d(mo),an=s(mo,"P",{});var gd=r(an);ww=i(gd,"The "),qp=s(gd,"CODE",{});var HR=r(qp);$w=i(HR,"FlaxRobertaPreTrainedModel"),HR.forEach(t),Fw=i(gd,"forward method, overrides the "),Cp=s(gd,"CODE",{});var QR=r(Cp);Rw=i(QR,"__call__"),QR.forEach(t),Mw=i(gd," special method."),gd.forEach(t),Ew=d(mo),y(ds.$$.fragment,mo),zw=d(mo),Pp=s(mo,"P",{});var VR=r(Pp);xw=i(VR,"Example:"),VR.forEach(t),qw=d(mo),y(li.$$.fragment,mo),mo.forEach(t),Rt.forEach(t),qu=d(o),ln=s(o,"H2",{class:!0});var Xf=r(ln);cs=s(Xf,"A",{id:!0,class:!0,href:!0});var JR=r(cs);jp=s(JR,"SPAN",{});var KR=r(jp);y(di.$$.fragment,KR),KR.forEach(t),JR.forEach(t),Cw=d(Xf),Lp=s(Xf,"SPAN",{});var GR=r(Lp);Pw=i(GR,"FlaxRobertaForSequenceClassification"),GR.forEach(t),Xf.forEach(t),Cu=d(o),Oe=s(o,"DIV",{class:!0});var Mt=r(Oe);y(ci.$$.fragment,Mt),jw=d(Mt),Ap=s(Mt,"P",{});var XR=r(Ap);Lw=i(XR,`Roberta Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),XR.forEach(t),Aw=d(Mt),pi=s(Mt,"P",{});var Yf=r(pi);Ow=i(Yf,"This model inherits from "),Ul=s(Yf,"A",{href:!0});var YR=r(Ul);Nw=i(YR,"FlaxPreTrainedModel"),YR.forEach(t),Iw=i(Yf,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models)`),Yf.forEach(t),Dw=d(Mt),hi=s(Mt,"P",{});var Zf=r(hi);Sw=i(Zf,"This model is also a Flax Linen "),ui=s(Zf,"A",{href:!0,rel:!0});var ZR=r(ui);Ww=i(ZR,"flax.linen.Module"),ZR.forEach(t),Bw=i(Zf,` subclass. 
Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior.`),Zf.forEach(t),Uw=d(Mt),Op=s(Mt,"P",{});var eM=r(Op);Hw=i(eM,"Finally, this model supports inherent JAX features such as:"),eM.forEach(t),Qw=d(Mt),Dt=s(Mt,"UL",{});var Ts=r(Dt);Np=s(Ts,"LI",{});var tM=r(Np);fi=s(tM,"A",{href:!0,rel:!0});var oM=r(fi);Vw=i(oM,"Just-In-Time (JIT) compilation"),oM.forEach(t),tM.forEach(t),Jw=d(Ts),Ip=s(Ts,"LI",{});var nM=r(Ip);mi=s(nM,"A",{href:!0,rel:!0});var sM=r(mi);Kw=i(sM,"Automatic Differentiation"),sM.forEach(t),nM.forEach(t),Gw=d(Ts),Dp=s(Ts,"LI",{});var rM=r(Dp);gi=s(rM,"A",{href:!0,rel:!0});var aM=r(gi);Xw=i(aM,"Vectorization"),aM.forEach(t),rM.forEach(t),Yw=d(Ts),Sp=s(Ts,"LI",{});var iM=r(Sp);_i=s(iM,"A",{href:!0,rel:!0});var lM=r(_i);Zw=i(lM,"Parallelization"),lM.forEach(t),iM.forEach(t),Ts.forEach(t),e$=d(Mt),kt=s(Mt,"DIV",{class:!0});var go=r(kt);y(bi.$$.fragment,go),t$=d(go),dn=s(go,"P",{});var _d=r(dn);o$=i(_d,"The "),Wp=s(_d,"CODE",{});var dM=r(Wp);n$=i(dM,"FlaxRobertaPreTrainedModel"),dM.forEach(t),s$=i(_d,"forward method, overrides the "),Bp=s(_d,"CODE",{});var cM=r(Bp);r$=i(cM,"__call__"),cM.forEach(t),a$=i(_d," special method."),_d.forEach(t),i$=d(go),y(ps.$$.fragment,go),l$=d(go),Up=s(go,"P",{});var pM=r(Up);d$=i(pM,"Example:"),pM.forEach(t),c$=d(go),y(ki.$$.fragment,go),go.forEach(t),Mt.forEach(t),Pu=d(o),cn=s(o,"H2",{class:!0});var em=r(cn);hs=s(em,"A",{id:!0,class:!0,href:!0});var hM=r(hs);Hp=s(hM,"SPAN",{});var uM=r(Hp);y(Ti.$$.fragment,uM),uM.forEach(t),hM.forEach(t),p$=d(em),Qp=s(em,"SPAN",{});var fM=r(Qp);h$=i(fM,"FlaxRobertaForMultipleChoice"),fM.forEach(t),em.forEach(t),ju=d(o),Ne=s(o,"DIV",{class:!0});var Et=r(Ne);y(yi.$$.fragment,Et),u$=d(Et),Vp=s(Et,"P",{});var mM=r(Vp);f$=i(mM,`Roberta Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.`),mM.forEach(t),m$=d(Et),vi=s(Et,"P",{});var tm=r(vi);g$=i(tm,"This model inherits from "),Hl=s(tm,"A",{href:!0});var gM=r(Hl);_$=i(gM,"FlaxPreTrainedModel"),gM.forEach(t),b$=i(tm,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models)`),tm.forEach(t),k$=d(Et),wi=s(Et,"P",{});var om=r(wi);T$=i(om,"This model is also a Flax Linen "),$i=s(om,"A",{href:!0,rel:!0});var _M=r($i);y$=i(_M,"flax.linen.Module"),_M.forEach(t),v$=i(om,` subclass. 
Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior.`),om.forEach(t),w$=d(Et),Jp=s(Et,"P",{});var bM=r(Jp);$$=i(bM,"Finally, this model supports inherent JAX features such as:"),bM.forEach(t),F$=d(Et),St=s(Et,"UL",{});var ys=r(St);Kp=s(ys,"LI",{});var kM=r(Kp);Fi=s(kM,"A",{href:!0,rel:!0});var TM=r(Fi);R$=i(TM,"Just-In-Time (JIT) compilation"),TM.forEach(t),kM.forEach(t),M$=d(ys),Gp=s(ys,"LI",{});var yM=r(Gp);Ri=s(yM,"A",{href:!0,rel:!0});var vM=r(Ri);E$=i(vM,"Automatic Differentiation"),vM.forEach(t),yM.forEach(t),z$=d(ys),Xp=s(ys,"LI",{});var wM=r(Xp);Mi=s(wM,"A",{href:!0,rel:!0});var $M=r(Mi);x$=i($M,"Vectorization"),$M.forEach(t),wM.forEach(t),q$=d(ys),Yp=s(ys,"LI",{});var FM=r(Yp);Ei=s(FM,"A",{href:!0,rel:!0});var RM=r(Ei);C$=i(RM,"Parallelization"),RM.forEach(t),FM.forEach(t),ys.forEach(t),P$=d(Et),Tt=s(Et,"DIV",{class:!0});var _o=r(Tt);y(zi.$$.fragment,_o),j$=d(_o),pn=s(_o,"P",{});var bd=r(pn);L$=i(bd,"The "),Zp=s(bd,"CODE",{});var MM=r(Zp);A$=i(MM,"FlaxRobertaPreTrainedModel"),MM.forEach(t),O$=i(bd,"forward method, overrides the "),eh=s(bd,"CODE",{});var EM=r(eh);N$=i(EM,"__call__"),EM.forEach(t),I$=i(bd," special method."),bd.forEach(t),D$=d(_o),y(us.$$.fragment,_o),S$=d(_o),th=s(_o,"P",{});var zM=r(th);W$=i(zM,"Example:"),zM.forEach(t),B$=d(_o),y(xi.$$.fragment,_o),_o.forEach(t),Et.forEach(t),Lu=d(o),hn=s(o,"H2",{class:!0});var nm=r(hn);fs=s(nm,"A",{id:!0,class:!0,href:!0});var xM=r(fs);oh=s(xM,"SPAN",{});var qM=r(oh);y(qi.$$.fragment,qM),qM.forEach(t),xM.forEach(t),U$=d(nm),nh=s(nm,"SPAN",{});var CM=r(nh);H$=i(CM,"FlaxRobertaForTokenClassification"),CM.forEach(t),nm.forEach(t),Au=d(o),Ie=s(o,"DIV",{class:!0});var zt=r(Ie);y(Ci.$$.fragment,zt),Q$=d(zt),sh=s(zt,"P",{});var PM=r(sh);V$=i(PM,`Roberta Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.`),PM.forEach(t),J$=d(zt),Pi=s(zt,"P",{});var sm=r(Pi);K$=i(sm,"This model inherits from "),Ql=s(sm,"A",{href:!0});var jM=r(Ql);G$=i(jM,"FlaxPreTrainedModel"),jM.forEach(t),X$=i(sm,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models)`),sm.forEach(t),Y$=d(zt),ji=s(zt,"P",{});var rm=r(ji);Z$=i(rm,"This model is also a Flax Linen "),Li=s(rm,"A",{href:!0,rel:!0});var LM=r(Li);eF=i(LM,"flax.linen.Module"),LM.forEach(t),tF=i(rm,` subclass. 
Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior.`),rm.forEach(t),oF=d(zt),rh=s(zt,"P",{});var AM=r(rh);nF=i(AM,"Finally, this model supports inherent JAX features such as:"),AM.forEach(t),sF=d(zt),Wt=s(zt,"UL",{});var vs=r(Wt);ah=s(vs,"LI",{});var OM=r(ah);Ai=s(OM,"A",{href:!0,rel:!0});var NM=r(Ai);rF=i(NM,"Just-In-Time (JIT) compilation"),NM.forEach(t),OM.forEach(t),aF=d(vs),ih=s(vs,"LI",{});var IM=r(ih);Oi=s(IM,"A",{href:!0,rel:!0});var DM=r(Oi);iF=i(DM,"Automatic Differentiation"),DM.forEach(t),IM.forEach(t),lF=d(vs),lh=s(vs,"LI",{});var SM=r(lh);Ni=s(SM,"A",{href:!0,rel:!0});var WM=r(Ni);dF=i(WM,"Vectorization"),WM.forEach(t),SM.forEach(t),cF=d(vs),dh=s(vs,"LI",{});var BM=r(dh);Ii=s(BM,"A",{href:!0,rel:!0});var UM=r(Ii);pF=i(UM,"Parallelization"),UM.forEach(t),BM.forEach(t),vs.forEach(t),hF=d(zt),yt=s(zt,"DIV",{class:!0});var bo=r(yt);y(Di.$$.fragment,bo),uF=d(bo),un=s(bo,"P",{});var kd=r(un);fF=i(kd,"The "),ch=s(kd,"CODE",{});var HM=r(ch);mF=i(HM,"FlaxRobertaPreTrainedModel"),HM.forEach(t),gF=i(kd,"forward method, overrides the "),ph=s(kd,"CODE",{});var QM=r(ph);_F=i(QM,"__call__"),QM.forEach(t),bF=i(kd," special method."),kd.forEach(t),kF=d(bo),y(ms.$$.fragment,bo),TF=d(bo),hh=s(bo,"P",{});var VM=r(hh);yF=i(VM,"Example:"),VM.forEach(t),vF=d(bo),y(Si.$$.fragment,bo),bo.forEach(t),zt.forEach(t),Ou=d(o),fn=s(o,"H2",{class:!0});var am=r(fn);gs=s(am,"A",{id:!0,class:!0,href:!0});var JM=r(gs);uh=s(JM,"SPAN",{});var KM=r(uh);y(Wi.$$.fragment,KM),KM.forEach(t),JM.forEach(t),wF=d(am),fh=s(am,"SPAN",{});var GM=r(fh);$F=i(GM,"FlaxRobertaForQuestionAnswering"),GM.forEach(t),am.forEach(t),Nu=d(o),De=s(o,"DIV",{class:!0});var xt=r(De);y(Bi.$$.fragment,xt),FF=d(xt),mn=s(xt,"P",{});var Td=r(mn);RF=i(Td,`Roberta Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),mh=s(Td,"CODE",{});var XM=r(mh);MF=i(XM,"span start logits"),XM.forEach(t),EF=i(Td," and "),gh=s(Td,"CODE",{});var YM=r(gh);zF=i(YM,"span end logits"),YM.forEach(t),xF=i(Td,")."),Td.forEach(t),qF=d(xt),Ui=s(xt,"P",{});var im=r(Ui);CF=i(im,"This model inherits from "),Vl=s(im,"A",{href:!0});var ZM=r(Vl);PF=i(ZM,"FlaxPreTrainedModel"),ZM.forEach(t),jF=i(im,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models)`),im.forEach(t),LF=d(xt),Hi=s(xt,"P",{});var lm=r(Hi);AF=i(lm,"This model is also a Flax Linen "),Qi=s(lm,"A",{href:!0,rel:!0});var eE=r(Qi);OF=i(eE,"flax.linen.Module"),eE.forEach(t),NF=i(lm,` subclass. 
Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior.`),lm.forEach(t),IF=d(xt),_h=s(xt,"P",{});var tE=r(_h);DF=i(tE,"Finally, this model supports inherent JAX features such as:"),tE.forEach(t),SF=d(xt),Bt=s(xt,"UL",{});var ws=r(Bt);bh=s(ws,"LI",{});var oE=r(bh);Vi=s(oE,"A",{href:!0,rel:!0});var nE=r(Vi);WF=i(nE,"Just-In-Time (JIT) compilation"),nE.forEach(t),oE.forEach(t),BF=d(ws),kh=s(ws,"LI",{});var sE=r(kh);Ji=s(sE,"A",{href:!0,rel:!0});var rE=r(Ji);UF=i(rE,"Automatic Differentiation"),rE.forEach(t),sE.forEach(t),HF=d(ws),Th=s(ws,"LI",{});var aE=r(Th);Ki=s(aE,"A",{href:!0,rel:!0});var iE=r(Ki);QF=i(iE,"Vectorization"),iE.forEach(t),aE.forEach(t),VF=d(ws),yh=s(ws,"LI",{});var lE=r(yh);Gi=s(lE,"A",{href:!0,rel:!0});var dE=r(Gi);JF=i(dE,"Parallelization"),dE.forEach(t),lE.forEach(t),ws.forEach(t),KF=d(xt),vt=s(xt,"DIV",{class:!0});var ko=r(vt);y(Xi.$$.fragment,ko),GF=d(ko),gn=s(ko,"P",{});var yd=r(gn);XF=i(yd,"The "),vh=s(yd,"CODE",{});var cE=r(vh);YF=i(cE,"FlaxRobertaPreTrainedModel"),cE.forEach(t),ZF=i(yd,"forward method, overrides the "),wh=s(yd,"CODE",{});var pE=r(wh);e2=i(pE,"__call__"),pE.forEach(t),t2=i(yd," special method."),yd.forEach(t),o2=d(ko),y(_s.$$.fragment,ko),n2=d(ko),$h=s(ko,"P",{});var hE=r($h);s2=i(hE,"Example:"),hE.forEach(t),r2=d(ko),y(Yi.$$.fragment,ko),ko.forEach(t),xt.forEach(t),this.h()},h(){c(p,"name","hf:doc:metadata"),c(p,"content",JSON.stringify(QE)),c(b,"id","roberta"),c(b,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(b,"href","#roberta"),c(m,"class","relative group"),c(X,"id","overview"),c(X,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(X,"href","#overview"),c(z,"class","relative group"),c(oe,"href","https://arxiv.org/abs/1907.11692"),c(oe,"rel","nofollow"),c(le,"href","/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertModel"),c(ve,"href","camembert"),c($s,"href","https://huggingface.co/julien-c"),c($s,"rel","nofollow"),c(Fs,"href","https://github.com/pytorch/fairseq/tree/master/examples/roberta"),c(Fs,"rel","nofollow"),c(bn,"id","transformers.RobertaConfig"),c(bn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(bn,"href","#transformers.RobertaConfig"),c(To,"class","relative group"),c(nl,"href","/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaModel"),c(sl,"href","/docs/transformers/pr_16143/en/model_doc/roberta#transformers.TFRobertaModel"),c(rl,"href","/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig"),c(al,"href","/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig"),c(il,"href","/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaConfig"),c(ll,"href","/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertConfig"),c(We,"class","docstring"),c(kn,"id","transformers.RobertaTokenizer"),c(kn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(kn,"href","#transformers.RobertaTokenizer"),c($o,"class","relative 
group"),c(dl,"href","/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer"),c(Vt,"class","docstring"),c(yn,"class","docstring"),c(vn,"class","docstring"),c(hl,"class","docstring"),c(Te,"class","docstring"),c(wn,"id","transformers.RobertaTokenizerFast"),c(wn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(wn,"href","#transformers.RobertaTokenizerFast"),c(Fo,"class","relative group"),c(ul,"href","/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast"),c(fl,"class","docstring"),c(Pe,"class","docstring"),c(Fn,"id","transformers.RobertaModel"),c(Fn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Fn,"href","#transformers.RobertaModel"),c(Ro,"class","relative group"),c(ml,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel"),c(Xs,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(Xs,"rel","nofollow"),c(Zs,"href","https://arxiv.org/abs/1706.03762"),c(Zs,"rel","nofollow"),c(gl,"href","/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaModel"),c(st,"class","docstring"),c(je,"class","docstring"),c(En,"id","transformers.RobertaForCausalLM"),c(En,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(En,"href","#transformers.RobertaForCausalLM"),c(Eo,"class","relative group"),c(_l,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel"),c(ir,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(ir,"rel","nofollow"),c(bl,"href","/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaForCausalLM"),c(rt,"class","docstring"),c(Ye,"class","docstring"),c(xn,"id","transformers.RobertaForMaskedLM"),c(xn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(xn,"href","#transformers.RobertaForMaskedLM"),c(xo,"class","relative group"),c(kl,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel"),c(mr,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(mr,"rel","nofollow"),c(Tl,"href","/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaForMaskedLM"),c(at,"class","docstring"),c(Ze,"class","docstring"),c(Cn,"id","transformers.RobertaForSequenceClassification"),c(Cn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Cn,"href","#transformers.RobertaForSequenceClassification"),c(Co,"class","relative group"),c(yl,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel"),c(vr,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(vr,"rel","nofollow"),c(vl,"href","/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaForSequenceClassification"),c(Se,"class","docstring"),c(et,"class","docstring"),c(jn,"id","transformers.RobertaForMultipleChoice"),c(jn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 
with-hover:group-hover:opacity-100 with-hover:right-full"),c(jn,"href","#transformers.RobertaForMultipleChoice"),c(jo,"class","relative group"),c(wl,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel"),c(xr,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(xr,"rel","nofollow"),c($l,"href","/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaForMultipleChoice"),c(it,"class","docstring"),c(tt,"class","docstring"),c(An,"id","transformers.RobertaForTokenClassification"),c(An,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(An,"href","#transformers.RobertaForTokenClassification"),c(Ao,"class","relative group"),c(Fl,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel"),c(Or,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(Or,"rel","nofollow"),c(Rl,"href","/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaForTokenClassification"),c(lt,"class","docstring"),c(ot,"class","docstring"),c(Nn,"id","transformers.RobertaForQuestionAnswering"),c(Nn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Nn,"href","#transformers.RobertaForQuestionAnswering"),c(No,"class","relative group"),c(Ml,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel"),c(Ur,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(Ur,"rel","nofollow"),c(El,"href","/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaForQuestionAnswering"),c(dt,"class","docstring"),c(nt,"class","docstring"),c(Dn,"id","transformers.TFRobertaModel"),c(Dn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Dn,"href","#transformers.TFRobertaModel"),c(So,"class","relative group"),c(zl,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel"),c(Xr,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(Xr,"rel","nofollow"),c(xl,"href","/docs/transformers/pr_16143/en/model_doc/roberta#transformers.TFRobertaModel"),c(ct,"class","docstring"),c(Ue,"class","docstring"),c(Bn,"id","transformers.TFRobertaForCausalLM"),c(Bn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Bn,"href","#transformers.TFRobertaForCausalLM"),c(Bo,"class","relative group"),c(ql,"href","/docs/transformers/pr_16143/en/model_doc/roberta#transformers.TFRobertaForCausalLM"),c(pt,"class","docstring"),c(Uo,"class","docstring"),c(Hn,"id","transformers.TFRobertaForMaskedLM"),c(Hn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Hn,"href","#transformers.TFRobertaForMaskedLM"),c(Qo,"class","relative 
group"),c(Cl,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel"),c(da,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(da,"rel","nofollow"),c(Pl,"href","/docs/transformers/pr_16143/en/model_doc/roberta#transformers.TFRobertaForMaskedLM"),c(ht,"class","docstring"),c(He,"class","docstring"),c(Jn,"id","transformers.TFRobertaForSequenceClassification"),c(Jn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Jn,"href","#transformers.TFRobertaForSequenceClassification"),c(Jo,"class","relative group"),c(jl,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel"),c(ga,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(ga,"rel","nofollow"),c(Ll,"href","/docs/transformers/pr_16143/en/model_doc/roberta#transformers.TFRobertaForSequenceClassification"),c(ut,"class","docstring"),c(Qe,"class","docstring"),c(Xn,"id","transformers.TFRobertaForMultipleChoice"),c(Xn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Xn,"href","#transformers.TFRobertaForMultipleChoice"),c(Go,"class","relative group"),c(Al,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel"),c(wa,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(wa,"rel","nofollow"),c(Ol,"href","/docs/transformers/pr_16143/en/model_doc/roberta#transformers.TFRobertaForMultipleChoice"),c(ft,"class","docstring"),c(Ve,"class","docstring"),c(es,"id","transformers.TFRobertaForTokenClassification"),c(es,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(es,"href","#transformers.TFRobertaForTokenClassification"),c(Yo,"class","relative group"),c(Nl,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel"),c(xa,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(xa,"rel","nofollow"),c(Il,"href","/docs/transformers/pr_16143/en/model_doc/roberta#transformers.TFRobertaForTokenClassification"),c(mt,"class","docstring"),c(Je,"class","docstring"),c(ns,"id","transformers.TFRobertaForQuestionAnswering"),c(ns,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(ns,"href","#transformers.TFRobertaForQuestionAnswering"),c(en,"class","relative group"),c(Dl,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel"),c(Oa,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),c(Oa,"rel","nofollow"),c(Sl,"href","/docs/transformers/pr_16143/en/model_doc/roberta#transformers.TFRobertaForQuestionAnswering"),c(gt,"class","docstring"),c(Ke,"class","docstring"),c(as,"id","transformers.FlaxRobertaModel"),c(as,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(as,"href","#transformers.FlaxRobertaModel"),c(nn,"class","relative 
group"),c(Wl,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel"),c(Ua,"href","https://flax.readthedocs.io/en/latest/flax.linen.html#module"),c(Ua,"rel","nofollow"),c(Ha,"href","https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit"),c(Ha,"rel","nofollow"),c(Qa,"href","https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation"),c(Qa,"rel","nofollow"),c(Va,"href","https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap"),c(Va,"rel","nofollow"),c(Ja,"href","https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap"),c(Ja,"rel","nofollow"),c(_t,"class","docstring"),c(Le,"class","docstring"),c(ls,"id","transformers.FlaxRobertaForMaskedLM"),c(ls,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(ls,"href","#transformers.FlaxRobertaForMaskedLM"),c(rn,"class","relative group"),c(Bl,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel"),c(oi,"href","https://flax.readthedocs.io/en/latest/flax.linen.html#module"),c(oi,"rel","nofollow"),c(ni,"href","https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit"),c(ni,"rel","nofollow"),c(si,"href","https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation"),c(si,"rel","nofollow"),c(ri,"href","https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap"),c(ri,"rel","nofollow"),c(ai,"href","https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap"),c(ai,"rel","nofollow"),c(bt,"class","docstring"),c(Ae,"class","docstring"),c(cs,"id","transformers.FlaxRobertaForSequenceClassification"),c(cs,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(cs,"href","#transformers.FlaxRobertaForSequenceClassification"),c(ln,"class","relative group"),c(Ul,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel"),c(ui,"href","https://flax.readthedocs.io/en/latest/flax.linen.html#module"),c(ui,"rel","nofollow"),c(fi,"href","https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit"),c(fi,"rel","nofollow"),c(mi,"href","https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation"),c(mi,"rel","nofollow"),c(gi,"href","https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap"),c(gi,"rel","nofollow"),c(_i,"href","https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap"),c(_i,"rel","nofollow"),c(kt,"class","docstring"),c(Oe,"class","docstring"),c(hs,"id","transformers.FlaxRobertaForMultipleChoice"),c(hs,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(hs,"href","#transformers.FlaxRobertaForMultipleChoice"),c(cn,"class","relative 
group"),c(Hl,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel"),c($i,"href","https://flax.readthedocs.io/en/latest/flax.linen.html#module"),c($i,"rel","nofollow"),c(Fi,"href","https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit"),c(Fi,"rel","nofollow"),c(Ri,"href","https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation"),c(Ri,"rel","nofollow"),c(Mi,"href","https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap"),c(Mi,"rel","nofollow"),c(Ei,"href","https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap"),c(Ei,"rel","nofollow"),c(Tt,"class","docstring"),c(Ne,"class","docstring"),c(fs,"id","transformers.FlaxRobertaForTokenClassification"),c(fs,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(fs,"href","#transformers.FlaxRobertaForTokenClassification"),c(hn,"class","relative group"),c(Ql,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel"),c(Li,"href","https://flax.readthedocs.io/en/latest/flax.linen.html#module"),c(Li,"rel","nofollow"),c(Ai,"href","https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit"),c(Ai,"rel","nofollow"),c(Oi,"href","https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation"),c(Oi,"rel","nofollow"),c(Ni,"href","https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap"),c(Ni,"rel","nofollow"),c(Ii,"href","https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap"),c(Ii,"rel","nofollow"),c(yt,"class","docstring"),c(Ie,"class","docstring"),c(gs,"id","transformers.FlaxRobertaForQuestionAnswering"),c(gs,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(gs,"href","#transformers.FlaxRobertaForQuestionAnswering"),c(fn,"class","relative 
group"),c(Vl,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel"),c(Qi,"href","https://flax.readthedocs.io/en/latest/flax.linen.html#module"),c(Qi,"rel","nofollow"),c(Vi,"href","https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit"),c(Vi,"rel","nofollow"),c(Ji,"href","https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation"),c(Ji,"rel","nofollow"),c(Ki,"href","https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap"),c(Ki,"rel","nofollow"),c(Gi,"href","https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap"),c(Gi,"rel","nofollow"),c(vt,"class","docstring"),c(De,"class","docstring")},m(o,g){e(document.head,p),h(o,R,g),h(o,m,g),e(m,b),e(b,k),v(_,k,null),e(m,f),e(m,M),e(M,Y),h(o,I,g),h(o,z,g),e(z,X),e(X,D),v(te,D,null),e(z,me),e(z,S),e(S,ge),h(o,pe,g),h(o,K,g),e(K,A),e(K,oe),e(oe,Z),e(K,x),h(o,P,g),h(o,se,g),e(se,U),h(o,he,g),h(o,re,g),e(re,H),h(o,ue,g),h(o,ae,g),e(ae,C),e(C,_e),h(o,W,g),h(o,ie,g),e(ie,be),h(o,B,g),h(o,G,g),e(G,ne),e(ne,O),e(ne,le),e(le,Q),e(ne,ke),e(G,u),e(G,E),e(E,ee),e(G,Me),e(G,ce),e(ce,N),e(ce,$e),e($e,Ee),e(ce,ze),e(ce,L),e(L,V),e(ce,xe),e(ce,Fe),e(Fe,J),e(ce,qe),e(G,Ce),e(G,de),e(de,ve),e(ve,dm),e(de,cm),h(o,Qh,g),h(o,Qt,g),e(Qt,pm),e(Qt,$s),e($s,hm),e(Qt,um),e(Qt,Fs),e(Fs,fm),e(Qt,mm),h(o,Vh,g),h(o,To,g),e(To,bn),e(bn,vd),v(Rs,vd,null),e(To,gm),e(To,wd),e(wd,_m),h(o,Jh,g),h(o,We,g),v(Ms,We,null),e(We,bm),e(We,yo),e(yo,km),e(yo,nl),e(nl,Tm),e(yo,ym),e(yo,sl),e(sl,vm),e(yo,wm),e(We,$m),e(We,vo),e(vo,Fm),e(vo,rl),e(rl,Rm),e(vo,Mm),e(vo,al),e(al,Em),e(vo,zm),e(We,xm),e(We,wo),e(wo,qm),e(wo,il),e(il,Cm),e(wo,Pm),e(wo,ll),e(ll,jm),e(wo,Lm),e(We,Am),e(We,$d),e($d,Om),e(We,Nm),v(Es,We,null),h(o,Kh,g),h(o,$o,g),e($o,kn),e(kn,Fd),v(zs,Fd,null),e($o,Im),e($o,Rd),e(Rd,Dm),h(o,Gh,g),h(o,Te,g),v(xs,Te,null),e(Te,Sm),e(Te,Md),e(Md,Wm),e(Te,Bm),e(Te,Ed),e(Ed,Um),e(Te,Hm),v(qs,Te,null),e(Te,Qm),e(Te,Cs),e(Cs,Vm),e(Cs,zd),e(zd,Jm),e(Cs,Km),e(Te,Gm),v(Tn,Te,null),e(Te,Xm),e(Te,Ps),e(Ps,Ym),e(Ps,dl),e(dl,Zm),e(Ps,eg),e(Te,tg),e(Te,Vt),v(js,Vt,null),e(Vt,og),e(Vt,xd),e(xd,ng),e(Vt,sg),e(Vt,Ls),e(Ls,cl),e(cl,rg),e(cl,qd),e(qd,ag),e(Ls,ig),e(Ls,pl),e(pl,lg),e(pl,Cd),e(Cd,dg),e(Te,cg),e(Te,yn),v(As,yn,null),e(yn,pg),e(yn,Os),e(Os,hg),e(Os,Pd),e(Pd,ug),e(Os,fg),e(Te,mg),e(Te,vn),v(Ns,vn,null),e(vn,gg),e(vn,jd),e(jd,_g),e(Te,bg),e(Te,hl),v(Is,hl,null),h(o,Xh,g),h(o,Fo,g),e(Fo,wn),e(wn,Ld),v(Ds,Ld,null),e(Fo,kg),e(Fo,Ad),e(Ad,Tg),h(o,Yh,g),h(o,Pe,g),v(Ss,Pe,null),e(Pe,yg),e(Pe,Ws),e(Ws,vg),e(Ws,Od),e(Od,wg),e(Ws,$g),e(Pe,Fg),e(Pe,Nd),e(Nd,Rg),e(Pe,Mg),v(Bs,Pe,null),e(Pe,Eg),e(Pe,Us),e(Us,zg),e(Us,Id),e(Id,xg),e(Us,qg),e(Pe,Cg),v($n,Pe,null),e(Pe,Pg),e(Pe,Hs),e(Hs,jg),e(Hs,ul),e(ul,Lg),e(Hs,Ag),e(Pe,Og),e(Pe,fl),v(Qs,fl,null),h(o,Zh,g),h(o,Ro,g),e(Ro,Fn),e(Fn,Dd),v(Vs,Dd,null),e(Ro,Ng),e(Ro,Sd),e(Sd,Ig),h(o,eu,g),h(o,je,g),v(Js,je,null),e(je,Dg),e(je,Wd),e(Wd,Sg),e(je,Wg),e(je,Ks),e(Ks,Bg),e(Ks,ml),e(ml,Ug),e(Ks,Hg),e(je,Qg),e(je,Gs),e(Gs,Vg),e(Gs,Xs),e(Xs,Jg),e(Gs,Kg),e(je,Gg),e(je,Ys),e(Ys,Xg),e(Ys,Bd),e(Bd,Yg),e(Ys,Zg),e(je,e_),e(je,Be),e(Be,t_),e(Be,Ud),e(Ud,o_),e(Be,n_),e(Be,Hd),e(Hd,s_),e(Be,r_),e(Be,Qd),e(Qd,a_),e(Be,i_),e(Be,Vd),e(Vd,l_),e(Be,d_),e(Be,Jd),e(Jd,c_),e(Be,p_),e(Be,Kd),e(Kd,h_),e(Be,u_),e(je,f_),e(je,Rn),e(Rn,m_),e(Rn,Gd),e(Gd,g_),e(Rn,__),e(Rn,Zs),e(Zs,b_),e(je,k_),e(je,st),v(er,st,null),e(st,T_),e(st,Mo),e(Mo,y_),e(Mo,gl),e(gl,v_),e(Mo,w_),e(Mo,Xd),e(Xd,$_),e(Mo,F_),e(st,R_),v(Mn,st,null),e(st,M_),e(st,Yd),e(Yd,E_),e(st,z_),v(tr,st,null),h(o,tu,g),h(o,Eo,g),e(Eo,En),e(En,Zd),v
(or,Zd,null),e(Eo,x_),e(Eo,ec),e(ec,q_),h(o,ou,g),h(o,Ye,g),v(nr,Ye,null),e(Ye,C_),e(Ye,sr),e(sr,P_),e(sr,tc),e(tc,j_),e(sr,L_),e(Ye,A_),e(Ye,rr),e(rr,O_),e(rr,_l),e(_l,N_),e(rr,I_),e(Ye,D_),e(Ye,ar),e(ar,S_),e(ar,ir),e(ir,W_),e(ar,B_),e(Ye,U_),e(Ye,rt),v(lr,rt,null),e(rt,H_),e(rt,zo),e(zo,Q_),e(zo,bl),e(bl,V_),e(zo,J_),e(zo,oc),e(oc,K_),e(zo,G_),e(rt,X_),v(zn,rt,null),e(rt,Y_),e(rt,nc),e(nc,Z_),e(rt,eb),v(dr,rt,null),h(o,nu,g),h(o,xo,g),e(xo,xn),e(xn,sc),v(cr,sc,null),e(xo,tb),e(xo,rc),e(rc,ob),h(o,su,g),h(o,Ze,g),v(pr,Ze,null),e(Ze,nb),e(Ze,hr),e(hr,sb),e(hr,ac),e(ac,rb),e(hr,ab),e(Ze,ib),e(Ze,ur),e(ur,lb),e(ur,kl),e(kl,db),e(ur,cb),e(Ze,pb),e(Ze,fr),e(fr,hb),e(fr,mr),e(mr,ub),e(fr,fb),e(Ze,mb),e(Ze,at),v(gr,at,null),e(at,gb),e(at,qo),e(qo,_b),e(qo,Tl),e(Tl,bb),e(qo,kb),e(qo,ic),e(ic,Tb),e(qo,yb),e(at,vb),v(qn,at,null),e(at,wb),e(at,lc),e(lc,$b),e(at,Fb),v(_r,at,null),h(o,ru,g),h(o,Co,g),e(Co,Cn),e(Cn,dc),v(br,dc,null),e(Co,Rb),e(Co,cc),e(cc,Mb),h(o,au,g),h(o,et,g),v(kr,et,null),e(et,Eb),e(et,pc),e(pc,zb),e(et,xb),e(et,Tr),e(Tr,qb),e(Tr,yl),e(yl,Cb),e(Tr,Pb),e(et,jb),e(et,yr),e(yr,Lb),e(yr,vr),e(vr,Ab),e(yr,Ob),e(et,Nb),e(et,Se),v(wr,Se,null),e(Se,Ib),e(Se,Po),e(Po,Db),e(Po,vl),e(vl,Sb),e(Po,Wb),e(Po,hc),e(hc,Bb),e(Po,Ub),e(Se,Hb),v(Pn,Se,null),e(Se,Qb),e(Se,uc),e(uc,Vb),e(Se,Jb),v($r,Se,null),e(Se,Kb),e(Se,fc),e(fc,Gb),e(Se,Xb),v(Fr,Se,null),h(o,iu,g),h(o,jo,g),e(jo,jn),e(jn,mc),v(Rr,mc,null),e(jo,Yb),e(jo,gc),e(gc,Zb),h(o,lu,g),h(o,tt,g),v(Mr,tt,null),e(tt,ek),e(tt,_c),e(_c,tk),e(tt,ok),e(tt,Er),e(Er,nk),e(Er,wl),e(wl,sk),e(Er,rk),e(tt,ak),e(tt,zr),e(zr,ik),e(zr,xr),e(xr,lk),e(zr,dk),e(tt,ck),e(tt,it),v(qr,it,null),e(it,pk),e(it,Lo),e(Lo,hk),e(Lo,$l),e($l,uk),e(Lo,fk),e(Lo,bc),e(bc,mk),e(Lo,gk),e(it,_k),v(Ln,it,null),e(it,bk),e(it,kc),e(kc,kk),e(it,Tk),v(Cr,it,null),h(o,du,g),h(o,Ao,g),e(Ao,An),e(An,Tc),v(Pr,Tc,null),e(Ao,yk),e(Ao,yc),e(yc,vk),h(o,cu,g),h(o,ot,g),v(jr,ot,null),e(ot,wk),e(ot,vc),e(vc,$k),e(ot,Fk),e(ot,Lr),e(Lr,Rk),e(Lr,Fl),e(Fl,Mk),e(Lr,Ek),e(ot,zk),e(ot,Ar),e(Ar,xk),e(Ar,Or),e(Or,qk),e(Ar,Ck),e(ot,Pk),e(ot,lt),v(Nr,lt,null),e(lt,jk),e(lt,Oo),e(Oo,Lk),e(Oo,Rl),e(Rl,Ak),e(Oo,Ok),e(Oo,wc),e(wc,Nk),e(Oo,Ik),e(lt,Dk),v(On,lt,null),e(lt,Sk),e(lt,$c),e($c,Wk),e(lt,Bk),v(Ir,lt,null),h(o,pu,g),h(o,No,g),e(No,Nn),e(Nn,Fc),v(Dr,Fc,null),e(No,Uk),e(No,Rc),e(Rc,Hk),h(o,hu,g),h(o,nt,g),v(Sr,nt,null),e(nt,Qk),e(nt,Io),e(Io,Vk),e(Io,Mc),e(Mc,Jk),e(Io,Kk),e(Io,Ec),e(Ec,Gk),e(Io,Xk),e(nt,Yk),e(nt,Wr),e(Wr,Zk),e(Wr,Ml),e(Ml,e1),e(Wr,t1),e(nt,o1),e(nt,Br),e(Br,n1),e(Br,Ur),e(Ur,s1),e(Br,r1),e(nt,a1),e(nt,dt),v(Hr,dt,null),e(dt,i1),e(dt,Do),e(Do,l1),e(Do,El),e(El,d1),e(Do,c1),e(Do,zc),e(zc,p1),e(Do,h1),e(dt,u1),v(In,dt,null),e(dt,f1),e(dt,xc),e(xc,m1),e(dt,g1),v(Qr,dt,null),h(o,uu,g),h(o,So,g),e(So,Dn),e(Dn,qc),v(Vr,qc,null),e(So,_1),e(So,Cc),e(Cc,b1),h(o,fu,g),h(o,Ue,g),v(Jr,Ue,null),e(Ue,k1),e(Ue,Pc),e(Pc,T1),e(Ue,y1),e(Ue,Kr),e(Kr,v1),e(Kr,zl),e(zl,w1),e(Kr,$1),e(Ue,F1),e(Ue,Gr),e(Gr,R1),e(Gr,Xr),e(Xr,M1),e(Gr,E1),e(Ue,z1),v(Sn,Ue,null),e(Ue,x1),e(Ue,ct),v(Yr,ct,null),e(ct,q1),e(ct,Wo),e(Wo,C1),e(Wo,xl),e(xl,P1),e(Wo,j1),e(Wo,jc),e(jc,L1),e(Wo,A1),e(ct,O1),v(Wn,ct,null),e(ct,N1),e(ct,Lc),e(Lc,I1),e(ct,D1),v(Zr,ct,null),h(o,mu,g),h(o,Bo,g),e(Bo,Bn),e(Bn,Ac),v(ea,Ac,null),e(Bo,S1),e(Bo,Oc),e(Oc,W1),h(o,gu,g),h(o,Uo,g),v(ta,Uo,null),e(Uo,B1),e(Uo,pt),v(oa,pt,null),e(pt,U1),e(pt,Ho),e(Ho,H1),e(Ho,ql),e(ql,Q1),e(Ho,V1),e(Ho,Nc),e(Nc,J1),e(Ho,K1),e(pt,G1),v(Un,pt,null),e(pt,X1),e(pt,Ic),e(Ic,Y1),e(pt,Z1),v(na,pt,null),h(o,_u,g),h(o,Qo,g),e(Qo,Hn),e(Hn,Dc),v(sa,Dc,null),e(Qo,eT),e(Qo,Sc),e(Sc,t
T),h(o,bu,g),h(o,He,g),v(ra,He,null),e(He,oT),e(He,aa),e(aa,nT),e(aa,Wc),e(Wc,sT),e(aa,rT),e(He,aT),e(He,ia),e(ia,iT),e(ia,Cl),e(Cl,lT),e(ia,dT),e(He,cT),e(He,la),e(la,pT),e(la,da),e(da,hT),e(la,uT),e(He,fT),v(Qn,He,null),e(He,mT),e(He,ht),v(ca,ht,null),e(ht,gT),e(ht,Vo),e(Vo,_T),e(Vo,Pl),e(Pl,bT),e(Vo,kT),e(Vo,Bc),e(Bc,TT),e(Vo,yT),e(ht,vT),v(Vn,ht,null),e(ht,wT),e(ht,Uc),e(Uc,$T),e(ht,FT),v(pa,ht,null),h(o,ku,g),h(o,Jo,g),e(Jo,Jn),e(Jn,Hc),v(ha,Hc,null),e(Jo,RT),e(Jo,Qc),e(Qc,MT),h(o,Tu,g),h(o,Qe,g),v(ua,Qe,null),e(Qe,ET),e(Qe,Vc),e(Vc,zT),e(Qe,xT),e(Qe,fa),e(fa,qT),e(fa,jl),e(jl,CT),e(fa,PT),e(Qe,jT),e(Qe,ma),e(ma,LT),e(ma,ga),e(ga,AT),e(ma,OT),e(Qe,NT),v(Kn,Qe,null),e(Qe,IT),e(Qe,ut),v(_a,ut,null),e(ut,DT),e(ut,Ko),e(Ko,ST),e(Ko,Ll),e(Ll,WT),e(Ko,BT),e(Ko,Jc),e(Jc,UT),e(Ko,HT),e(ut,QT),v(Gn,ut,null),e(ut,VT),e(ut,Kc),e(Kc,JT),e(ut,KT),v(ba,ut,null),h(o,yu,g),h(o,Go,g),e(Go,Xn),e(Xn,Gc),v(ka,Gc,null),e(Go,GT),e(Go,Xc),e(Xc,XT),h(o,vu,g),h(o,Ve,g),v(Ta,Ve,null),e(Ve,YT),e(Ve,Yc),e(Yc,ZT),e(Ve,ey),e(Ve,ya),e(ya,ty),e(ya,Al),e(Al,oy),e(ya,ny),e(Ve,sy),e(Ve,va),e(va,ry),e(va,wa),e(wa,ay),e(va,iy),e(Ve,ly),v(Yn,Ve,null),e(Ve,dy),e(Ve,ft),v($a,ft,null),e(ft,cy),e(ft,Xo),e(Xo,py),e(Xo,Ol),e(Ol,hy),e(Xo,uy),e(Xo,Zc),e(Zc,fy),e(Xo,my),e(ft,gy),v(Zn,ft,null),e(ft,_y),e(ft,ep),e(ep,by),e(ft,ky),v(Fa,ft,null),h(o,wu,g),h(o,Yo,g),e(Yo,es),e(es,tp),v(Ra,tp,null),e(Yo,Ty),e(Yo,op),e(op,yy),h(o,$u,g),h(o,Je,g),v(Ma,Je,null),e(Je,vy),e(Je,np),e(np,wy),e(Je,$y),e(Je,Ea),e(Ea,Fy),e(Ea,Nl),e(Nl,Ry),e(Ea,My),e(Je,Ey),e(Je,za),e(za,zy),e(za,xa),e(xa,xy),e(za,qy),e(Je,Cy),v(ts,Je,null),e(Je,Py),e(Je,mt),v(qa,mt,null),e(mt,jy),e(mt,Zo),e(Zo,Ly),e(Zo,Il),e(Il,Ay),e(Zo,Oy),e(Zo,sp),e(sp,Ny),e(Zo,Iy),e(mt,Dy),v(os,mt,null),e(mt,Sy),e(mt,rp),e(rp,Wy),e(mt,By),v(Ca,mt,null),h(o,Fu,g),h(o,en,g),e(en,ns),e(ns,ap),v(Pa,ap,null),e(en,Uy),e(en,ip),e(ip,Hy),h(o,Ru,g),h(o,Ke,g),v(ja,Ke,null),e(Ke,Qy),e(Ke,tn),e(tn,Vy),e(tn,lp),e(lp,Jy),e(tn,Ky),e(tn,dp),e(dp,Gy),e(tn,Xy),e(Ke,Yy),e(Ke,La),e(La,Zy),e(La,Dl),e(Dl,ev),e(La,tv),e(Ke,ov),e(Ke,Aa),e(Aa,nv),e(Aa,Oa),e(Oa,sv),e(Aa,rv),e(Ke,av),v(ss,Ke,null),e(Ke,iv),e(Ke,gt),v(Na,gt,null),e(gt,lv),e(gt,on),e(on,dv),e(on,Sl),e(Sl,cv),e(on,pv),e(on,cp),e(cp,hv),e(on,uv),e(gt,fv),v(rs,gt,null),e(gt,mv),e(gt,pp),e(pp,gv),e(gt,_v),v(Ia,gt,null),h(o,Mu,g),h(o,nn,g),e(nn,as),e(as,hp),v(Da,hp,null),e(nn,bv),e(nn,up),e(up,kv),h(o,Eu,g),h(o,Le,g),v(Sa,Le,null),e(Le,Tv),e(Le,fp),e(fp,yv),e(Le,vv),e(Le,Wa),e(Wa,wv),e(Wa,Wl),e(Wl,$v),e(Wa,Fv),e(Le,Rv),e(Le,Ba),e(Ba,Mv),e(Ba,Ua),e(Ua,Ev),e(Ba,zv),e(Le,xv),e(Le,mp),e(mp,qv),e(Le,Cv),e(Le,Nt),e(Nt,gp),e(gp,Ha),e(Ha,Pv),e(Nt,jv),e(Nt,_p),e(_p,Qa),e(Qa,Lv),e(Nt,Av),e(Nt,bp),e(bp,Va),e(Va,Ov),e(Nt,Nv),e(Nt,kp),e(kp,Ja),e(Ja,Iv),e(Le,Dv),e(Le,_t),v(Ka,_t,null),e(_t,Sv),e(_t,sn),e(sn,Wv),e(sn,Tp),e(Tp,Bv),e(sn,Uv),e(sn,yp),e(yp,Hv),e(sn,Qv),e(_t,Vv),v(is,_t,null),e(_t,Jv),e(_t,vp),e(vp,Kv),e(_t,Gv),v(Ga,_t,null),h(o,zu,g),h(o,rn,g),e(rn,ls),e(ls,wp),v(Xa,wp,null),e(rn,Xv),e(rn,$p),e($p,Yv),h(o,xu,g),h(o,Ae,g),v(Ya,Ae,null),e(Ae,Zv),e(Ae,Za),e(Za,ew),e(Za,Fp),e(Fp,tw),e(Za,ow),e(Ae,nw),e(Ae,ei),e(ei,sw),e(ei,Bl),e(Bl,rw),e(ei,aw),e(Ae,iw),e(Ae,ti),e(ti,lw),e(ti,oi),e(oi,dw),e(ti,cw),e(Ae,pw),e(Ae,Rp),e(Rp,hw),e(Ae,uw),e(Ae,It),e(It,Mp),e(Mp,ni),e(ni,fw),e(It,mw),e(It,Ep),e(Ep,si),e(si,gw),e(It,_w),e(It,zp),e(zp,ri),e(ri,bw),e(It,kw),e(It,xp),e(xp,ai),e(ai,Tw),e(Ae,yw),e(Ae,bt),v(ii,bt,null),e(bt,vw),e(bt,an),e(an,ww),e(an,qp),e(qp,$w),e(an,Fw),e(an,Cp),e(Cp,Rw),e(an,Mw),e(bt,Ew),v(ds,bt,null),e(bt,zw),e(bt,Pp),e(Pp,xw),e(bt,qw),v(li,bt,null),h(o,qu,g),
h(o,ln,g),e(ln,cs),e(cs,jp),v(di,jp,null),e(ln,Cw),e(ln,Lp),e(Lp,Pw),h(o,Cu,g),h(o,Oe,g),v(ci,Oe,null),e(Oe,jw),e(Oe,Ap),e(Ap,Lw),e(Oe,Aw),e(Oe,pi),e(pi,Ow),e(pi,Ul),e(Ul,Nw),e(pi,Iw),e(Oe,Dw),e(Oe,hi),e(hi,Sw),e(hi,ui),e(ui,Ww),e(hi,Bw),e(Oe,Uw),e(Oe,Op),e(Op,Hw),e(Oe,Qw),e(Oe,Dt),e(Dt,Np),e(Np,fi),e(fi,Vw),e(Dt,Jw),e(Dt,Ip),e(Ip,mi),e(mi,Kw),e(Dt,Gw),e(Dt,Dp),e(Dp,gi),e(gi,Xw),e(Dt,Yw),e(Dt,Sp),e(Sp,_i),e(_i,Zw),e(Oe,e$),e(Oe,kt),v(bi,kt,null),e(kt,t$),e(kt,dn),e(dn,o$),e(dn,Wp),e(Wp,n$),e(dn,s$),e(dn,Bp),e(Bp,r$),e(dn,a$),e(kt,i$),v(ps,kt,null),e(kt,l$),e(kt,Up),e(Up,d$),e(kt,c$),v(ki,kt,null),h(o,Pu,g),h(o,cn,g),e(cn,hs),e(hs,Hp),v(Ti,Hp,null),e(cn,p$),e(cn,Qp),e(Qp,h$),h(o,ju,g),h(o,Ne,g),v(yi,Ne,null),e(Ne,u$),e(Ne,Vp),e(Vp,f$),e(Ne,m$),e(Ne,vi),e(vi,g$),e(vi,Hl),e(Hl,_$),e(vi,b$),e(Ne,k$),e(Ne,wi),e(wi,T$),e(wi,$i),e($i,y$),e(wi,v$),e(Ne,w$),e(Ne,Jp),e(Jp,$$),e(Ne,F$),e(Ne,St),e(St,Kp),e(Kp,Fi),e(Fi,R$),e(St,M$),e(St,Gp),e(Gp,Ri),e(Ri,E$),e(St,z$),e(St,Xp),e(Xp,Mi),e(Mi,x$),e(St,q$),e(St,Yp),e(Yp,Ei),e(Ei,C$),e(Ne,P$),e(Ne,Tt),v(zi,Tt,null),e(Tt,j$),e(Tt,pn),e(pn,L$),e(pn,Zp),e(Zp,A$),e(pn,O$),e(pn,eh),e(eh,N$),e(pn,I$),e(Tt,D$),v(us,Tt,null),e(Tt,S$),e(Tt,th),e(th,W$),e(Tt,B$),v(xi,Tt,null),h(o,Lu,g),h(o,hn,g),e(hn,fs),e(fs,oh),v(qi,oh,null),e(hn,U$),e(hn,nh),e(nh,H$),h(o,Au,g),h(o,Ie,g),v(Ci,Ie,null),e(Ie,Q$),e(Ie,sh),e(sh,V$),e(Ie,J$),e(Ie,Pi),e(Pi,K$),e(Pi,Ql),e(Ql,G$),e(Pi,X$),e(Ie,Y$),e(Ie,ji),e(ji,Z$),e(ji,Li),e(Li,eF),e(ji,tF),e(Ie,oF),e(Ie,rh),e(rh,nF),e(Ie,sF),e(Ie,Wt),e(Wt,ah),e(ah,Ai),e(Ai,rF),e(Wt,aF),e(Wt,ih),e(ih,Oi),e(Oi,iF),e(Wt,lF),e(Wt,lh),e(lh,Ni),e(Ni,dF),e(Wt,cF),e(Wt,dh),e(dh,Ii),e(Ii,pF),e(Ie,hF),e(Ie,yt),v(Di,yt,null),e(yt,uF),e(yt,un),e(un,fF),e(un,ch),e(ch,mF),e(un,gF),e(un,ph),e(ph,_F),e(un,bF),e(yt,kF),v(ms,yt,null),e(yt,TF),e(yt,hh),e(hh,yF),e(yt,vF),v(Si,yt,null),h(o,Ou,g),h(o,fn,g),e(fn,gs),e(gs,uh),v(Wi,uh,null),e(fn,wF),e(fn,fh),e(fh,$F),h(o,Nu,g),h(o,De,g),v(Bi,De,null),e(De,FF),e(De,mn),e(mn,RF),e(mn,mh),e(mh,MF),e(mn,EF),e(mn,gh),e(gh,zF),e(mn,xF),e(De,qF),e(De,Ui),e(Ui,CF),e(Ui,Vl),e(Vl,PF),e(Ui,jF),e(De,LF),e(De,Hi),e(Hi,AF),e(Hi,Qi),e(Qi,OF),e(Hi,NF),e(De,IF),e(De,_h),e(_h,DF),e(De,SF),e(De,Bt),e(Bt,bh),e(bh,Vi),e(Vi,WF),e(Bt,BF),e(Bt,kh),e(kh,Ji),e(Ji,UF),e(Bt,HF),e(Bt,Th),e(Th,Ki),e(Ki,QF),e(Bt,VF),e(Bt,yh),e(yh,Gi),e(Gi,JF),e(De,KF),e(De,vt),v(Xi,vt,null),e(vt,GF),e(vt,gn),e(gn,XF),e(gn,vh),e(vh,YF),e(gn,ZF),e(gn,wh),e(wh,e2),e(gn,t2),e(vt,o2),v(_s,vt,null),e(vt,n2),e(vt,$h),e($h,s2),e(vt,r2),v(Yi,vt,null),Iu=!0},p(o,[g]){const Zi={};g&2&&(Zi.$$scope={dirty:g,ctx:o}),Tn.$set(Zi);const Fh={};g&2&&(Fh.$$scope={dirty:g,ctx:o}),$n.$set(Fh);const Rh={};g&2&&(Rh.$$scope={dirty:g,ctx:o}),Mn.$set(Rh);const Mh={};g&2&&(Mh.$$scope={dirty:g,ctx:o}),zn.$set(Mh);const el={};g&2&&(el.$$scope={dirty:g,ctx:o}),qn.$set(el);const Eh={};g&2&&(Eh.$$scope={dirty:g,ctx:o}),Pn.$set(Eh);const zh={};g&2&&(zh.$$scope={dirty:g,ctx:o}),Ln.$set(zh);const xh={};g&2&&(xh.$$scope={dirty:g,ctx:o}),On.$set(xh);const tl={};g&2&&(tl.$$scope={dirty:g,ctx:o}),In.$set(tl);const qh={};g&2&&(qh.$$scope={dirty:g,ctx:o}),Sn.$set(qh);const Ch={};g&2&&(Ch.$$scope={dirty:g,ctx:o}),Wn.$set(Ch);const Ph={};g&2&&(Ph.$$scope={dirty:g,ctx:o}),Un.$set(Ph);const jh={};g&2&&(jh.$$scope={dirty:g,ctx:o}),Qn.$set(jh);const Lh={};g&2&&(Lh.$$scope={dirty:g,ctx:o}),Vn.$set(Lh);const Ah={};g&2&&(Ah.$$scope={dirty:g,ctx:o}),Kn.$set(Ah);const Ut={};g&2&&(Ut.$$scope={dirty:g,ctx:o}),Gn.$set(Ut);const ol={};g&2&&(ol.$$scope={dirty:g,ctx:o}),Yn.$set(ol);const 
Oh={};g&2&&(Oh.$$scope={dirty:g,ctx:o}),Zn.$set(Oh);const Nh={};g&2&&(Nh.$$scope={dirty:g,ctx:o}),ts.$set(Nh);const Ht={};g&2&&(Ht.$$scope={dirty:g,ctx:o}),os.$set(Ht);const Ih={};g&2&&(Ih.$$scope={dirty:g,ctx:o}),ss.$set(Ih);const Dh={};g&2&&(Dh.$$scope={dirty:g,ctx:o}),rs.$set(Dh);const Sh={};g&2&&(Sh.$$scope={dirty:g,ctx:o}),is.$set(Sh);const Jl={};g&2&&(Jl.$$scope={dirty:g,ctx:o}),ds.$set(Jl);const Wh={};g&2&&(Wh.$$scope={dirty:g,ctx:o}),ps.$set(Wh);const _n={};g&2&&(_n.$$scope={dirty:g,ctx:o}),us.$set(_n);const Bh={};g&2&&(Bh.$$scope={dirty:g,ctx:o}),ms.$set(Bh);const Uh={};g&2&&(Uh.$$scope={dirty:g,ctx:o}),_s.$set(Uh)},i(o){Iu||(w(_.$$.fragment,o),w(te.$$.fragment,o),w(Rs.$$.fragment,o),w(Ms.$$.fragment,o),w(Es.$$.fragment,o),w(zs.$$.fragment,o),w(xs.$$.fragment,o),w(qs.$$.fragment,o),w(Tn.$$.fragment,o),w(js.$$.fragment,o),w(As.$$.fragment,o),w(Ns.$$.fragment,o),w(Is.$$.fragment,o),w(Ds.$$.fragment,o),w(Ss.$$.fragment,o),w(Bs.$$.fragment,o),w($n.$$.fragment,o),w(Qs.$$.fragment,o),w(Vs.$$.fragment,o),w(Js.$$.fragment,o),w(er.$$.fragment,o),w(Mn.$$.fragment,o),w(tr.$$.fragment,o),w(or.$$.fragment,o),w(nr.$$.fragment,o),w(lr.$$.fragment,o),w(zn.$$.fragment,o),w(dr.$$.fragment,o),w(cr.$$.fragment,o),w(pr.$$.fragment,o),w(gr.$$.fragment,o),w(qn.$$.fragment,o),w(_r.$$.fragment,o),w(br.$$.fragment,o),w(kr.$$.fragment,o),w(wr.$$.fragment,o),w(Pn.$$.fragment,o),w($r.$$.fragment,o),w(Fr.$$.fragment,o),w(Rr.$$.fragment,o),w(Mr.$$.fragment,o),w(qr.$$.fragment,o),w(Ln.$$.fragment,o),w(Cr.$$.fragment,o),w(Pr.$$.fragment,o),w(jr.$$.fragment,o),w(Nr.$$.fragment,o),w(On.$$.fragment,o),w(Ir.$$.fragment,o),w(Dr.$$.fragment,o),w(Sr.$$.fragment,o),w(Hr.$$.fragment,o),w(In.$$.fragment,o),w(Qr.$$.fragment,o),w(Vr.$$.fragment,o),w(Jr.$$.fragment,o),w(Sn.$$.fragment,o),w(Yr.$$.fragment,o),w(Wn.$$.fragment,o),w(Zr.$$.fragment,o),w(ea.$$.fragment,o),w(ta.$$.fragment,o),w(oa.$$.fragment,o),w(Un.$$.fragment,o),w(na.$$.fragment,o),w(sa.$$.fragment,o),w(ra.$$.fragment,o),w(Qn.$$.fragment,o),w(ca.$$.fragment,o),w(Vn.$$.fragment,o),w(pa.$$.fragment,o),w(ha.$$.fragment,o),w(ua.$$.fragment,o),w(Kn.$$.fragment,o),w(_a.$$.fragment,o),w(Gn.$$.fragment,o),w(ba.$$.fragment,o),w(ka.$$.fragment,o),w(Ta.$$.fragment,o),w(Yn.$$.fragment,o),w($a.$$.fragment,o),w(Zn.$$.fragment,o),w(Fa.$$.fragment,o),w(Ra.$$.fragment,o),w(Ma.$$.fragment,o),w(ts.$$.fragment,o),w(qa.$$.fragment,o),w(os.$$.fragment,o),w(Ca.$$.fragment,o),w(Pa.$$.fragment,o),w(ja.$$.fragment,o),w(ss.$$.fragment,o),w(Na.$$.fragment,o),w(rs.$$.fragment,o),w(Ia.$$.fragment,o),w(Da.$$.fragment,o),w(Sa.$$.fragment,o),w(Ka.$$.fragment,o),w(is.$$.fragment,o),w(Ga.$$.fragment,o),w(Xa.$$.fragment,o),w(Ya.$$.fragment,o),w(ii.$$.fragment,o),w(ds.$$.fragment,o),w(li.$$.fragment,o),w(di.$$.fragment,o),w(ci.$$.fragment,o),w(bi.$$.fragment,o),w(ps.$$.fragment,o),w(ki.$$.fragment,o),w(Ti.$$.fragment,o),w(yi.$$.fragment,o),w(zi.$$.fragment,o),w(us.$$.fragment,o),w(xi.$$.fragment,o),w(qi.$$.fragment,o),w(Ci.$$.fragment,o),w(Di.$$.fragment,o),w(ms.$$.fragment,o),w(Si.$$.fragment,o),w(Wi.$$.fragment,o),w(Bi.$$.fragment,o),w(Xi.$$.fragment,o),w(_s.$$.fragment,o),w(Yi.$$.fragment,o),Iu=!0)},o(o){$(_.$$.fragment,o),$(te.$$.fragment,o),$(Rs.$$.fragment,o),$(Ms.$$.fragment,o),$(Es.$$.fragment,o),$(zs.$$.fragment,o),$(xs.$$.fragment,o),$(qs.$$.fragment,o),$(Tn.$$.fragment,o),$(js.$$.fragment,o),$(As.$$.fragment,o),$(Ns.$$.fragment,o),$(Is.$$.fragment,o),$(Ds.$$.fragment,o),$(Ss.$$.fragment,o),$(Bs.$$.fragment,o),$($n.$$.fragment,o),$(Qs.$$.fragment,o),$(Vs.$$.fragment,o),$(Js.$$.fragment,o),$
(er.$$.fragment,o),$(Mn.$$.fragment,o),$(tr.$$.fragment,o),$(or.$$.fragment,o),$(nr.$$.fragment,o),$(lr.$$.fragment,o),$(zn.$$.fragment,o),$(dr.$$.fragment,o),$(cr.$$.fragment,o),$(pr.$$.fragment,o),$(gr.$$.fragment,o),$(qn.$$.fragment,o),$(_r.$$.fragment,o),$(br.$$.fragment,o),$(kr.$$.fragment,o),$(wr.$$.fragment,o),$(Pn.$$.fragment,o),$($r.$$.fragment,o),$(Fr.$$.fragment,o),$(Rr.$$.fragment,o),$(Mr.$$.fragment,o),$(qr.$$.fragment,o),$(Ln.$$.fragment,o),$(Cr.$$.fragment,o),$(Pr.$$.fragment,o),$(jr.$$.fragment,o),$(Nr.$$.fragment,o),$(On.$$.fragment,o),$(Ir.$$.fragment,o),$(Dr.$$.fragment,o),$(Sr.$$.fragment,o),$(Hr.$$.fragment,o),$(In.$$.fragment,o),$(Qr.$$.fragment,o),$(Vr.$$.fragment,o),$(Jr.$$.fragment,o),$(Sn.$$.fragment,o),$(Yr.$$.fragment,o),$(Wn.$$.fragment,o),$(Zr.$$.fragment,o),$(ea.$$.fragment,o),$(ta.$$.fragment,o),$(oa.$$.fragment,o),$(Un.$$.fragment,o),$(na.$$.fragment,o),$(sa.$$.fragment,o),$(ra.$$.fragment,o),$(Qn.$$.fragment,o),$(ca.$$.fragment,o),$(Vn.$$.fragment,o),$(pa.$$.fragment,o),$(ha.$$.fragment,o),$(ua.$$.fragment,o),$(Kn.$$.fragment,o),$(_a.$$.fragment,o),$(Gn.$$.fragment,o),$(ba.$$.fragment,o),$(ka.$$.fragment,o),$(Ta.$$.fragment,o),$(Yn.$$.fragment,o),$($a.$$.fragment,o),$(Zn.$$.fragment,o),$(Fa.$$.fragment,o),$(Ra.$$.fragment,o),$(Ma.$$.fragment,o),$(ts.$$.fragment,o),$(qa.$$.fragment,o),$(os.$$.fragment,o),$(Ca.$$.fragment,o),$(Pa.$$.fragment,o),$(ja.$$.fragment,o),$(ss.$$.fragment,o),$(Na.$$.fragment,o),$(rs.$$.fragment,o),$(Ia.$$.fragment,o),$(Da.$$.fragment,o),$(Sa.$$.fragment,o),$(Ka.$$.fragment,o),$(is.$$.fragment,o),$(Ga.$$.fragment,o),$(Xa.$$.fragment,o),$(Ya.$$.fragment,o),$(ii.$$.fragment,o),$(ds.$$.fragment,o),$(li.$$.fragment,o),$(di.$$.fragment,o),$(ci.$$.fragment,o),$(bi.$$.fragment,o),$(ps.$$.fragment,o),$(ki.$$.fragment,o),$(Ti.$$.fragment,o),$(yi.$$.fragment,o),$(zi.$$.fragment,o),$(us.$$.fragment,o),$(xi.$$.fragment,o),$(qi.$$.fragment,o),$(Ci.$$.fragment,o),$(Di.$$.fragment,o),$(ms.$$.fragment,o),$(Si.$$.fragment,o),$(Wi.$$.fragment,o),$(Bi.$$.fragment,o),$(Xi.$$.fragment,o),$(_s.$$.fragment,o),$(Yi.$$.fragment,o),Iu=!1},d(o){t(p),o&&t(R),o&&t(m),F(_),o&&t(I),o&&t(z),F(te),o&&t(pe),o&&t(K),o&&t(P),o&&t(se),o&&t(he),o&&t(re),o&&t(ue),o&&t(ae),o&&t(W),o&&t(ie),o&&t(B),o&&t(G),o&&t(Qh),o&&t(Qt),o&&t(Vh),o&&t(To),F(Rs),o&&t(Jh),o&&t(We),F(Ms),F(Es),o&&t(Kh),o&&t($o),F(zs),o&&t(Gh),o&&t(Te),F(xs),F(qs),F(Tn),F(js),F(As),F(Ns),F(Is),o&&t(Xh),o&&t(Fo),F(Ds),o&&t(Yh),o&&t(Pe),F(Ss),F(Bs),F($n),F(Qs),o&&t(Zh),o&&t(Ro),F(Vs),o&&t(eu),o&&t(je),F(Js),F(er),F(Mn),F(tr),o&&t(tu),o&&t(Eo),F(or),o&&t(ou),o&&t(Ye),F(nr),F(lr),F(zn),F(dr),o&&t(nu),o&&t(xo),F(cr),o&&t(su),o&&t(Ze),F(pr),F(gr),F(qn),F(_r),o&&t(ru),o&&t(Co),F(br),o&&t(au),o&&t(et),F(kr),F(wr),F(Pn),F($r),F(Fr),o&&t(iu),o&&t(jo),F(Rr),o&&t(lu),o&&t(tt),F(Mr),F(qr),F(Ln),F(Cr),o&&t(du),o&&t(Ao),F(Pr),o&&t(cu),o&&t(ot),F(jr),F(Nr),F(On),F(Ir),o&&t(pu),o&&t(No),F(Dr),o&&t(hu),o&&t(nt),F(Sr),F(Hr),F(In),F(Qr),o&&t(uu),o&&t(So),F(Vr),o&&t(fu),o&&t(Ue),F(Jr),F(Sn),F(Yr),F(Wn),F(Zr),o&&t(mu),o&&t(Bo),F(ea),o&&t(gu),o&&t(Uo),F(ta),F(oa),F(Un),F(na),o&&t(_u),o&&t(Qo),F(sa),o&&t(bu),o&&t(He),F(ra),F(Qn),F(ca),F(Vn),F(pa),o&&t(ku),o&&t(Jo),F(ha),o&&t(Tu),o&&t(Qe),F(ua),F(Kn),F(_a),F(Gn),F(ba),o&&t(yu),o&&t(Go),F(ka),o&&t(vu),o&&t(Ve),F(Ta),F(Yn),F($a),F(Zn),F(Fa),o&&t(wu),o&&t(Yo),F(Ra),o&&t($u),o&&t(Je),F(Ma),F(ts),F(qa),F(os),F(Ca),o&&t(Fu),o&&t(en),F(Pa),o&&t(Ru),o&&t(Ke),F(ja),F(ss),F(Na),F(rs),F(Ia),o&&t(Mu),o&&t(nn),F(Da),o&&t(Eu),o&&t(Le),F(Sa),F(Ka),F(is),F(Ga),o&&t(zu),o&&t(rn),F(Xa),o&&t(xu),o&&t(A
e),F(Ya),F(ii),F(ds),F(li),o&&t(qu),o&&t(ln),F(di),o&&t(Cu),o&&t(Oe),F(ci),F(bi),F(ps),F(ki),o&&t(Pu),o&&t(cn),F(Ti),o&&t(ju),o&&t(Ne),F(yi),F(zi),F(us),F(xi),o&&t(Lu),o&&t(hn),F(qi),o&&t(Au),o&&t(Ie),F(Ci),F(Di),F(ms),F(Si),o&&t(Ou),o&&t(fn),F(Wi),o&&t(Nu),o&&t(De),F(Bi),F(Xi),F(_s),F(Yi)}}}const QE={local:"roberta",sections:[{local:"overview",title:"Overview"},{local:"transformers.RobertaConfig",title:"RobertaConfig"},{local:"transformers.RobertaTokenizer",title:"RobertaTokenizer"},{local:"transformers.RobertaTokenizerFast",title:"RobertaTokenizerFast"},{local:"transformers.RobertaModel",title:"RobertaModel"},{local:"transformers.RobertaForCausalLM",title:"RobertaForCausalLM"},{local:"transformers.RobertaForMaskedLM",title:"RobertaForMaskedLM"},{local:"transformers.RobertaForSequenceClassification",title:"RobertaForSequenceClassification"},{local:"transformers.RobertaForMultipleChoice",title:"RobertaForMultipleChoice"},{local:"transformers.RobertaForTokenClassification",title:"RobertaForTokenClassification"},{local:"transformers.RobertaForQuestionAnswering",title:"RobertaForQuestionAnswering"},{local:"transformers.TFRobertaModel",title:"TFRobertaModel"},{local:"transformers.TFRobertaForCausalLM",title:"TFRobertaForCausalLM"},{local:"transformers.TFRobertaForMaskedLM",title:"TFRobertaForMaskedLM"},{local:"transformers.TFRobertaForSequenceClassification",title:"TFRobertaForSequenceClassification"},{local:"transformers.TFRobertaForMultipleChoice",title:"TFRobertaForMultipleChoice"},{local:"transformers.TFRobertaForTokenClassification",title:"TFRobertaForTokenClassification"},{local:"transformers.TFRobertaForQuestionAnswering",title:"TFRobertaForQuestionAnswering"},{local:"transformers.FlaxRobertaModel",title:"FlaxRobertaModel"},{local:"transformers.FlaxRobertaForMaskedLM",title:"FlaxRobertaForMaskedLM"},{local:"transformers.FlaxRobertaForSequenceClassification",title:"FlaxRobertaForSequenceClassification"},{local:"transformers.FlaxRobertaForMultipleChoice",title:"FlaxRobertaForMultipleChoice"},{local:"transformers.FlaxRobertaForTokenClassification",title:"FlaxRobertaForTokenClassification"},{local:"transformers.FlaxRobertaForQuestionAnswering",title:"FlaxRobertaForQuestionAnswering"}],title:"RoBERTa"};function VE(q,p,R){let{fw:m}=p;return q.$$set=b=>{"fw"in b&&R(0,m=b.fw)},[m]}class ez extends uE{constructor(p){super();fE(this,p,VE,HE,mE,{fw:0})}}export{ez as default,QE as metadata};
292
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages/model_doc/data2vec.mdx-72db0e89.js
import{S as H2,i as R2,s as Q2,e as s,k as l,w as _,t as a,M as G2,c as r,d as o,m as c,a as i,x as v,h as n,b as d,F as e,g as f,y as b,q as T,o as k,B as w}from"../../chunks/vendor-4833417e.js";import{T as ge}from"../../chunks/Tip-fffd6df1.js";import{D as A}from"../../chunks/Docstring-4f315ed9.js";import{C as N}from"../../chunks/CodeBlock-6a3d1b46.js";import{I as B}from"../../chunks/IconCopyLink-4b81c553.js";import"../../chunks/CopyButton-dacfbfaf.js";function X2($){let p,x,m,y,D;return{c(){p=s("p"),x=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=s("code"),y=a("Module"),D=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){p=r(g,"P",{});var u=i(p);x=n(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(u,"CODE",{});var V=i(m);y=n(V,"Module"),V.forEach(o),D=n(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(o)},m(g,u){f(g,p,u),e(p,x),e(p,m),e(m,y),e(p,D)},d(g){g&&o(p)}}}function U2($){let p,x,m,y,D;return{c(){p=s("p"),x=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=s("code"),y=a("Module"),D=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){p=r(g,"P",{});var u=i(p);x=n(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(u,"CODE",{});var V=i(m);y=n(V,"Module"),V.forEach(o),D=n(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(o)},m(g,u){f(g,p,u),e(p,x),e(p,m),e(m,y),e(p,D)},d(g){g&&o(p)}}}function J2($){let p,x,m,y,D;return{c(){p=s("p"),x=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=s("code"),y=a("Module"),D=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){p=r(g,"P",{});var u=i(p);x=n(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(u,"CODE",{});var V=i(m);y=n(V,"Module"),V.forEach(o),D=n(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(o)},m(g,u){f(g,p,u),e(p,x),e(p,m),e(m,y),e(p,D)},d(g){g&&o(p)}}}function K2($){let p,x,m,y,D;return{c(){p=s("p"),x=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=s("code"),y=a("Module"),D=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){p=r(g,"P",{});var u=i(p);x=n(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(u,"CODE",{});var V=i(m);y=n(V,"Module"),V.forEach(o),D=n(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(o)},m(g,u){f(g,p,u),e(p,x),e(p,m),e(m,y),e(p,D)},d(g){g&&o(p)}}}function Y2($){let 
p,x,m,y,D;return{c(){p=s("p"),x=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=s("code"),y=a("Module"),D=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){p=r(g,"P",{});var u=i(p);x=n(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(u,"CODE",{});var V=i(m);y=n(V,"Module"),V.forEach(o),D=n(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(o)},m(g,u){f(g,p,u),e(p,x),e(p,m),e(m,y),e(p,D)},d(g){g&&o(p)}}}function Z2($){let p,x,m,y,D;return{c(){p=s("p"),x=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=s("code"),y=a("Module"),D=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){p=r(g,"P",{});var u=i(p);x=n(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(u,"CODE",{});var V=i(m);y=n(V,"Module"),V.forEach(o),D=n(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(o)},m(g,u){f(g,p,u),e(p,x),e(p,m),e(m,y),e(p,D)},d(g){g&&o(p)}}}function ev($){let p,x,m,y,D;return{c(){p=s("p"),x=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=s("code"),y=a("Module"),D=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){p=r(g,"P",{});var u=i(p);x=n(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(u,"CODE",{});var V=i(m);y=n(V,"Module"),V.forEach(o),D=n(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(o)},m(g,u){f(g,p,u),e(p,x),e(p,m),e(m,y),e(p,D)},d(g){g&&o(p)}}}function tv($){let p,x,m,y,D;return{c(){p=s("p"),x=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=s("code"),y=a("Module"),D=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){p=r(g,"P",{});var u=i(p);x=n(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(u,"CODE",{});var V=i(m);y=n(V,"Module"),V.forEach(o),D=n(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(o)},m(g,u){f(g,p,u),e(p,x),e(p,m),e(m,y),e(p,D)},d(g){g&&o(p)}}}function ov($){let p,x,m,y,D;return{c(){p=s("p"),x=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=s("code"),y=a("Module"),D=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){p=r(g,"P",{});var u=i(p);x=n(u,"Although the recipe for forward pass needs to be defined within this function, one should call the 
"),m=r(u,"CODE",{});var V=i(m);y=n(V,"Module"),V.forEach(o),D=n(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(o)},m(g,u){f(g,p,u),e(p,x),e(p,m),e(m,y),e(p,D)},d(g){g&&o(p)}}}function av($){let p,x,m,y,D;return{c(){p=s("p"),x=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=s("code"),y=a("Module"),D=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){p=r(g,"P",{});var u=i(p);x=n(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(u,"CODE",{});var V=i(m);y=n(V,"Module"),V.forEach(o),D=n(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(o)},m(g,u){f(g,p,u),e(p,x),e(p,m),e(m,y),e(p,D)},d(g){g&&o(p)}}}function nv($){let p,x,m,y,D;return{c(){p=s("p"),x=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=s("code"),y=a("Module"),D=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){p=r(g,"P",{});var u=i(p);x=n(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(u,"CODE",{});var V=i(m);y=n(V,"Module"),V.forEach(o),D=n(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(o)},m(g,u){f(g,p,u),e(p,x),e(p,m),e(m,y),e(p,D)},d(g){g&&o(p)}}}function sv($){let p,x,m,y,D;return{c(){p=s("p"),x=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=s("code"),y=a("Module"),D=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){p=r(g,"P",{});var u=i(p);x=n(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(u,"CODE",{});var V=i(m);y=n(V,"Module"),V.forEach(o),D=n(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(o)},m(g,u){f(g,p,u),e(p,x),e(p,m),e(m,y),e(p,D)},d(g){g&&o(p)}}}function rv($){let 
p,x,m,y,D,g,u,V,xd,Qr,ze,mt,ms,Ht,Dd,fs,Vd,Gr,ft,$d,Rt,Ad,Fd,Xr,hn,Cd,Ur,un,Qt,Md,Gt,qd,jd,Jr,mn,zd,Kr,fn,Xt,Ed,gn,Pd,Sd,Yr,ve,Ld,Ut,Nd,Wd,Jt,Id,Od,Zr,Ee,gt,gs,Kt,Bd,_s,Hd,ei,H,Yt,Rd,_e,Qd,_n,Gd,Xd,vn,Ud,Jd,Zt,Kd,Yd,Zd,Pe,el,bn,tl,ol,Tn,al,nl,sl,vs,rl,il,eo,ti,Se,_t,bs,to,dl,Ts,ll,oi,R,oo,cl,Le,pl,kn,hl,ul,ao,ml,fl,gl,Ne,_l,wn,vl,bl,yn,Tl,kl,wl,ks,yl,xl,no,ai,We,vt,ws,so,Dl,ys,Vl,ni,Q,ro,$l,io,Al,lo,Fl,Cl,Ml,co,ql,xn,jl,zl,El,po,Pl,ho,Sl,Ll,Nl,J,uo,Wl,Ie,Il,Dn,Ol,Bl,xs,Hl,Rl,Ql,bt,Gl,Ds,Xl,Ul,mo,si,Oe,Tt,Vs,fo,Jl,$s,Kl,ri,M,go,Yl,As,Zl,ec,_o,tc,vo,oc,ac,nc,bo,sc,Vn,rc,ic,dc,To,lc,ko,cc,pc,hc,K,wo,uc,Be,mc,$n,fc,gc,Fs,_c,vc,bc,kt,Tc,Cs,kc,wc,yo,ii,He,wt,Ms,xo,yc,qs,xc,di,G,Do,Dc,Re,Vc,js,$c,Ac,Vo,Fc,Cc,Mc,$o,qc,An,jc,zc,Ec,Ao,Pc,Fo,Sc,Lc,Nc,W,Co,Wc,Qe,Ic,Fn,Oc,Bc,zs,Hc,Rc,Qc,yt,Gc,Es,Xc,Uc,Mo,Jc,qo,li,Ge,xt,Ps,jo,Kc,Ss,Yc,ci,q,zo,Zc,Ls,ep,tp,Eo,op,Po,ap,np,sp,So,rp,Cn,ip,dp,lp,Lo,cp,No,pp,hp,up,I,Wo,mp,Xe,fp,Mn,gp,_p,Ns,vp,bp,Tp,Dt,kp,Ws,wp,yp,Io,xp,Oo,pi,Ue,Vt,Is,Bo,Dp,Os,Vp,hi,j,Ho,$p,Bs,Ap,Fp,Ro,Cp,Qo,Mp,qp,jp,Go,zp,qn,Ep,Pp,Sp,Xo,Lp,Uo,Np,Wp,Ip,Y,Jo,Op,Je,Bp,jn,Hp,Rp,Hs,Qp,Gp,Xp,$t,Up,Rs,Jp,Kp,Ko,ui,Ke,At,Qs,Yo,Yp,Gs,Zp,mi,F,Zo,eh,ea,th,ta,oh,ah,nh,oa,sh,zn,rh,ih,dh,aa,lh,na,ch,ph,hh,sa,uh,Xs,mh,fh,gh,z,_h,Us,vh,bh,Js,Th,kh,Ks,wh,yh,Ys,xh,Dh,Zs,Vh,$h,er,Ah,Fh,Ch,Ft,Mh,tr,qh,jh,ra,zh,Eh,Z,ia,Ph,Ye,Sh,En,Lh,Nh,or,Wh,Ih,Oh,Ct,Bh,ar,Hh,Rh,da,fi,Ze,Mt,nr,la,Qh,sr,Gh,gi,X,ca,Xh,et,Uh,rr,Jh,Kh,pa,Yh,Zh,eu,ha,tu,Pn,ou,au,nu,ua,su,ma,ru,iu,du,ee,fa,lu,tt,cu,Sn,pu,hu,ir,uu,mu,fu,qt,gu,dr,_u,vu,ga,_i,ot,jt,lr,_a,bu,cr,Tu,vi,U,va,ku,at,wu,pr,yu,xu,ba,Du,Vu,$u,Ta,Au,Ln,Fu,Cu,Mu,ka,qu,wa,ju,zu,Eu,te,ya,Pu,nt,Su,Nn,Lu,Nu,hr,Wu,Iu,Ou,zt,Bu,ur,Hu,Ru,xa,bi,st,Et,mr,Da,Qu,fr,Gu,Ti,E,Va,Xu,gr,Uu,Ju,$a,Ku,Aa,Yu,Zu,em,Fa,tm,Wn,om,am,nm,Ca,sm,Ma,rm,im,dm,C,qa,lm,rt,cm,In,pm,hm,_r,um,mm,fm,Pt,gm,vr,_m,vm,ja,bm,br,Tm,km,za,ki,it,St,Tr,Ea,wm,kr,ym,wi,P,Pa,xm,wr,Dm,Vm,Sa,$m,La,Am,Fm,Cm,Na,Mm,On,qm,jm,zm,Wa,Em,Ia,Pm,Sm,Lm,oe,Oa,Nm,dt,Wm,Bn,Im,Om,yr,Bm,Hm,Rm,Lt,Qm,xr,Gm,Xm,Ba,yi,lt,Nt,Dr,Ha,Um,Vr,Jm,xi,S,Ra,Km,$r,Ym,Zm,Qa,ef,Ga,tf,of,af,Xa,nf,Hn,sf,rf,df,Ua,lf,Ja,cf,pf,hf,ae,Ka,uf,ct,mf,Rn,ff,gf,Ar,_f,vf,bf,Wt,Tf,Fr,kf,wf,Ya,Di,pt,It,Cr,Za,yf,Mr,xf,Vi,L,en,Df,ht,Vf,qr,$f,Af,jr,Ff,Cf,Mf,tn,qf,on,jf,zf,Ef,an,Pf,Qn,Sf,Lf,Nf,nn,Wf,sn,If,Of,Bf,ne,rn,Hf,ut,Rf,Gn,Qf,Gf,zr,Xf,Uf,Jf,Ot,Kf,Er,Yf,Zf,dn,$i;return g=new B({}),Ht=new B({}),Kt=new B({}),Yt=new A({props:{name:"class transformers.Data2VecTextConfig",anchor:"transformers.Data2VecTextConfig",parameters:[{name:"vocab_size",val:" = 30522"},{name:"hidden_size",val:" = 768"},{name:"num_hidden_layers",val:" = 12"},{name:"num_attention_heads",val:" = 12"},{name:"intermediate_size",val:" = 3072"},{name:"hidden_act",val:" = 'gelu'"},{name:"hidden_dropout_prob",val:" = 0.1"},{name:"attention_probs_dropout_prob",val:" = 0.1"},{name:"max_position_embeddings",val:" = 512"},{name:"type_vocab_size",val:" = 2"},{name:"initializer_range",val:" = 0.02"},{name:"layer_norm_eps",val:" = 1e-12"},{name:"pad_token_id",val:" = 1"},{name:"bos_token_id",val:" = 0"},{name:"eos_token_id",val:" = 2"},{name:"position_embedding_type",val:" = 'absolute'"},{name:"use_cache",val:" = True"},{name:"classifier_dropout",val:" = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/data2vec/configuration_data2vec_text.py#L31",parametersDescription:[{anchor:"transformers.Data2VecTextConfig.vocab_size",description:`<strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 30522) &#x2014; Vocabulary size of the DATA2VEC 
model. Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed when calling <code>Data2VecModel</code>.`,name:"vocab_size"},{anchor:"transformers.Data2VecTextConfig.hidden_size",description:`<strong>hidden_size</strong> (<code>int</code>, <em>optional</em>, defaults to 768) &#x2014; Dimensionality of the encoder layers and the pooler layer.`,name:"hidden_size"},{anchor:"transformers.Data2VecTextConfig.num_hidden_layers",description:`<strong>num_hidden_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of hidden layers in the Transformer encoder.`,name:"num_hidden_layers"},{anchor:"transformers.Data2VecTextConfig.num_attention_heads",description:`<strong>num_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.`,name:"num_attention_heads"},{anchor:"transformers.Data2VecTextConfig.intermediate_size",description:`<strong>intermediate_size</strong> (<code>int</code>, <em>optional</em>, defaults to 3072) &#x2014; Dimensionality of the &#x201C;intermediate&#x201D; (often named feed-forward) layer in the Transformer encoder.`,name:"intermediate_size"},{anchor:"transformers.Data2VecTextConfig.hidden_act",description:`<strong>hidden_act</strong> (<code>str</code> or <code>Callable</code>, <em>optional</em>, defaults to <code>&quot;gelu&quot;</code>) &#x2014; The non-linear activation function (function or string) in the encoder and pooler. If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;silu&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.`,name:"hidden_act"},{anchor:"transformers.Data2VecTextConfig.hidden_dropout_prob",description:`<strong>hidden_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.`,name:"hidden_dropout_prob"},{anchor:"transformers.Data2VecTextConfig.attention_probs_dropout_prob",description:`<strong>attention_probs_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout ratio for the attention probabilities.`,name:"attention_probs_dropout_prob"},{anchor:"transformers.Data2VecTextConfig.max_position_embeddings",description:`<strong>max_position_embeddings</strong> (<code>int</code>, <em>optional</em>, defaults to 512) &#x2014; The maximum sequence length that this model might ever be used with. 
Typically set this to something large just in case (e.g., 512 or 1024 or 2048).`,name:"max_position_embeddings"},{anchor:"transformers.Data2VecTextConfig.type_vocab_size",description:`<strong>type_vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 2) &#x2014; The vocabulary size of the <code>token_type_ids</code> passed when calling <code>Data2VecModel</code>.`,name:"type_vocab_size"},{anchor:"transformers.Data2VecTextConfig.initializer_range",description:`<strong>initializer_range</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices.`,name:"initializer_range"},{anchor:"transformers.Data2VecTextConfig.layer_norm_eps",description:`<strong>layer_norm_eps</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-12) &#x2014; The epsilon used by the layer normalization layers.`,name:"layer_norm_eps"},{anchor:"transformers.Data2VecTextConfig.position_embedding_type",description:`<strong>position_embedding_type</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;absolute&quot;</code>) &#x2014; Type of position embedding. Choose one of <code>&quot;absolute&quot;</code>, <code>&quot;relative_key&quot;</code>, <code>&quot;relative_key_query&quot;</code>. For positional embeddings use <code>&quot;absolute&quot;</code>. For more information on <code>&quot;relative_key&quot;</code>, please refer to <a href="https://arxiv.org/abs/1803.02155" rel="nofollow">Self-Attention with Relative Position Representations (Shaw et al.)</a>. For more information on <code>&quot;relative_key_query&quot;</code>, please refer to <em>Method 4</em> in <a href="https://arxiv.org/abs/2009.13658" rel="nofollow">Improve Transformer Models with Better Relative Position Embeddings (Huang et al.)</a>.`,name:"position_embedding_type"},{anchor:"transformers.Data2VecTextConfig.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not the model should return the last key/values attentions (not used by all models). 
Only relevant if <code>config.is_decoder=True</code>.`,name:"use_cache"},{anchor:"transformers.Data2VecTextConfig.classifier_dropout",description:`<strong>classifier_dropout</strong> (<code>float</code>, <em>optional</em>) &#x2014; The dropout ratio for the classification head.`,name:"classifier_dropout"}]}}),eo=new N({props:{code:`from transformers import Data2VecTextModel, Data2VecTextConfig # Initializing a Data2VecText facebook/data2vec-text-base style configuration configuration = Data2VecTextConfig() # Initializing a model from the facebook/data2vec-text-base style configuration model = Data2VecTextModel(configuration) # Accessing the model configuration configuration = model.config`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Data2VecTextModel, Data2VecTextConfig <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a Data2VecText facebook/data2vec-text-base style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = Data2VecTextConfig() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a model from the facebook/data2vec-text-base style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = Data2VecTextModel(configuration) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = model.config`}}),to=new B({}),oo=new A({props:{name:"class transformers.Data2VecAudioConfig",anchor:"transformers.Data2VecAudioConfig",parameters:[{name:"vocab_size",val:" = 32"},{name:"hidden_size",val:" = 768"},{name:"num_hidden_layers",val:" = 12"},{name:"num_attention_heads",val:" = 12"},{name:"intermediate_size",val:" = 3072"},{name:"hidden_act",val:" = 'gelu'"},{name:"hidden_dropout",val:" = 0.1"},{name:"activation_dropout",val:" = 0.1"},{name:"attention_dropout",val:" = 0.1"},{name:"feat_proj_dropout",val:" = 0.0"},{name:"final_dropout",val:" = 0.1"},{name:"layerdrop",val:" = 0.1"},{name:"initializer_range",val:" = 0.02"},{name:"layer_norm_eps",val:" = 1e-05"},{name:"feat_extract_activation",val:" = 'gelu'"},{name:"conv_dim",val:" = (512, 512, 512, 512, 512, 512, 512)"},{name:"conv_stride",val:" = (5, 2, 2, 2, 2, 2, 2)"},{name:"conv_kernel",val:" = (10, 3, 3, 3, 3, 2, 2)"},{name:"conv_bias",val:" = False"},{name:"num_conv_pos_embedding_groups",val:" = 16"},{name:"conv_pos_kernel_size",val:" = 19"},{name:"num_conv_pos_embeddings",val:" = 5"},{name:"mask_time_prob",val:" = 0.05"},{name:"mask_time_length",val:" = 10"},{name:"mask_time_min_masks",val:" = 2"},{name:"mask_feature_prob",val:" = 0.0"},{name:"mask_feature_length",val:" = 10"},{name:"mask_feature_min_masks",val:" = 0"},{name:"ctc_loss_reduction",val:" = 'sum'"},{name:"ctc_zero_infinity",val:" = False"},{name:"use_weighted_layer_sum",val:" = False"},{name:"classifier_proj_size",val:" = 256"},{name:"tdnn_dim",val:" = (512, 512, 512, 512, 1500)"},{name:"tdnn_kernel",val:" = (5, 3, 3, 1, 1)"},{name:"tdnn_dilation",val:" = (1, 2, 3, 1, 1)"},{name:"xvector_output_dim",val:" = 512"},{name:"pad_token_id",val:" = 0"},{name:"bos_token_id",val:" = 1"},{name:"eos_token_id",val:" = 2"},{name:"add_adapter",val:" = False"},{name:"adapter_kernel_size",val:" = 3"},{name:"adapter_stride",val:" = 2"},{name:"num_adapter_layers",val:" = 3"},{name:"output_hidden_size",val:" = 
None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/data2vec/configuration_data2vec_audio.py#L31",parametersDescription:[{anchor:"transformers.Data2VecAudioConfig.vocab_size",description:`<strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 32) &#x2014; Vocabulary size of the Data2VecAudio model. Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed when calling <a href="/docs/transformers/pr_16143/en/model_doc/data2vec#transformers.Data2VecAudioModel">Data2VecAudioModel</a> or <code>TFData2VecAudioModel</code>. Vocabulary size of the model. Defines the different tokens that can be represented by the <em>inputs_ids</em> passed to the forward method of <a href="/docs/transformers/pr_16143/en/model_doc/data2vec#transformers.Data2VecAudioModel">Data2VecAudioModel</a>.`,name:"vocab_size"},{anchor:"transformers.Data2VecAudioConfig.hidden_size",description:`<strong>hidden_size</strong> (<code>int</code>, <em>optional</em>, defaults to 768) &#x2014; Dimensionality of the encoder layers and the pooler layer.`,name:"hidden_size"},{anchor:"transformers.Data2VecAudioConfig.num_hidden_layers",description:`<strong>num_hidden_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of hidden layers in the Transformer encoder.`,name:"num_hidden_layers"},{anchor:"transformers.Data2VecAudioConfig.num_attention_heads",description:`<strong>num_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.`,name:"num_attention_heads"},{anchor:"transformers.Data2VecAudioConfig.intermediate_size",description:`<strong>intermediate_size</strong> (<code>int</code>, <em>optional</em>, defaults to 3072) &#x2014; Dimensionality of the &#x201C;intermediate&#x201D; (i.e., feed-forward) layer in the Transformer encoder.`,name:"intermediate_size"},{anchor:"transformers.Data2VecAudioConfig.hidden_act",description:`<strong>hidden_act</strong> (<code>str</code> or <code>function</code>, <em>optional</em>, defaults to <code>&quot;gelu&quot;</code>) &#x2014; The non-linear activation function (function or string) in the encoder and pooler. 
If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;selu&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.`,name:"hidden_act"},{anchor:"transformers.Data2VecAudioConfig.hidden_dropout",description:`<strong>hidden_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.`,name:"hidden_dropout"},{anchor:"transformers.Data2VecAudioConfig.attention_dropout",description:`<strong>attention_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout ratio for the attention probabilities.`,name:"attention_dropout"},{anchor:"transformers.Data2VecAudioConfig.final_dropout",description:`<strong>final_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability for the final projection layer of <a href="/docs/transformers/pr_16143/en/model_doc/data2vec#transformers.Data2VecAudioForCTC">Data2VecAudioForCTC</a>.`,name:"final_dropout"},{anchor:"transformers.Data2VecAudioConfig.initializer_range",description:`<strong>initializer_range</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices.`,name:"initializer_range"},{anchor:"transformers.Data2VecAudioConfig.layer_norm_eps",description:`<strong>layer_norm_eps</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-12) &#x2014; The epsilon used by the layer normalization layers.`,name:"layer_norm_eps"},{anchor:"transformers.Data2VecAudioConfig.feat_proj_dropout",description:`<strong>feat_proj_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout probability for output of the feature encoder.`,name:"feat_proj_dropout"},{anchor:"transformers.Data2VecAudioConfig.feat_extract_activation",description:`<strong>feat_extract_activation</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;gelu&quot;</code>) &#x2014; The non-linear activation function (function or string) in the 1D convolutional layers of the feature extractor. If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;selu&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.`,name:"feat_extract_activation"},{anchor:"transformers.Data2VecAudioConfig.conv_dim",description:`<strong>conv_dim</strong> (<code>Tuple[int]</code>, <em>optional</em>, defaults to <code>(512, 512, 512, 512, 512, 512, 512)</code>) &#x2014; A tuple of integers defining the number of input and output channels of each 1D convolutional layer in the feature encoder. The length of <em>conv_dim</em> defines the number of 1D convolutional layers.`,name:"conv_dim"},{anchor:"transformers.Data2VecAudioConfig.conv_stride",description:`<strong>conv_stride</strong> (<code>Tuple[int]</code>, <em>optional</em>, defaults to <code>(5, 2, 2, 2, 2, 2, 2)</code>) &#x2014; A tuple of integers defining the stride of each 1D convolutional layer in the feature encoder. 
The length of <em>conv_stride</em> defines the number of convolutional layers and has to match the length of <em>conv_dim</em>.`,name:"conv_stride"},{anchor:"transformers.Data2VecAudioConfig.conv_kernel",description:`<strong>conv_kernel</strong> (<code>Tuple[int]</code>, <em>optional</em>, defaults to <code>(10, 3, 3, 3, 3, 3, 3)</code>) &#x2014; A tuple of integers defining the kernel size of each 1D convolutional layer in the feature encoder. The length of <em>conv_kernel</em> defines the number of convolutional layers and has to match the length of <em>conv_dim</em>.`,name:"conv_kernel"},{anchor:"transformers.Data2VecAudioConfig.conv_bias",description:`<strong>conv_bias</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether the 1D convolutional layers have a bias.`,name:"conv_bias"},{anchor:"transformers.Data2VecAudioConfig.num_conv_pos_embeddings",description:`<strong>num_conv_pos_embeddings</strong> (<code>int</code>, <em>optional</em>, defaults to 128) &#x2014; Number of convolutional positional embeddings. Defines the kernel size of 1D convolutional positional embeddings layer.`,name:"num_conv_pos_embeddings"},{anchor:"transformers.Data2VecAudioConfig.num_conv_pos_embedding_groups",description:`<strong>num_conv_pos_embedding_groups</strong> (<code>int</code>, <em>optional</em>, defaults to 16) &#x2014; Number of groups of 1D convolutional positional embeddings layer.`,name:"num_conv_pos_embedding_groups"},{anchor:"transformers.Data2VecAudioConfig.mask_time_prob",description:`<strong>mask_time_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.05) &#x2014; Percentage (between 0 and 1) of all feature vectors along the time axis which will be masked. The masking procedure generates &#x201D;mask_time_prob<em>len(time_axis)/mask_time_length&#x201D; independent masks over the axis. If reasoning from the probability of each feature vector to be chosen as the start of the vector span to be masked, </em>mask_time_prob<em> should be \`prob_vector_start</em>mask_time_length\`. Note that overlap may decrease the`,name:"mask_time_prob"},{anchor:"transformers.Data2VecAudioConfig.mask_time_length",description:`<strong>mask_time_length</strong> (<code>int</code>, <em>optional</em>, defaults to 10) &#x2014; Length of vector span along the time axis.`,name:"mask_time_length"},{anchor:"transformers.Data2VecAudioConfig.mask_time_min_masks",description:`<strong>mask_time_min_masks</strong> (<code>int</code>, <em>optional</em>, defaults to 2), &#x2014; The minimum number of masks of length <code>mask_feature_length</code> generated along the time axis, each time step, irrespectively of <code>mask_feature_prob</code>. Only relevant if &#x201D;mask_time_prob*len(time_axis)/mask_time_length &lt; mask_time_min_masks&#x201D;`,name:"mask_time_min_masks"},{anchor:"transformers.Data2VecAudioConfig.mask_feature_prob",description:`<strong>mask_feature_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; Percentage (between 0 and 1) of all feature vectors along the feature axis which will be masked. The masking procedure generates &#x201D;mask_feature_prob<em>len(feature_axis)/mask_time_length&#x201D; independent masks over the axis. If reasoning from the probability of each feature vector to be chosen as the start of the vector span to be masked, </em>mask_feature_prob<em> should be \`prob_vector_start</em>mask_feature_length<code>. Note that overlap may decrease the actual percentage of masked vectors. 
This is only relevant if </code>apply_spec_augment is True\`.`,name:"mask_feature_prob"},{anchor:"transformers.Data2VecAudioConfig.mask_feature_length",description:`<strong>mask_feature_length</strong> (<code>int</code>, <em>optional</em>, defaults to 10) &#x2014; Length of vector span along the feature axis.`,name:"mask_feature_length"},{anchor:"transformers.Data2VecAudioConfig.mask_feature_min_masks",description:`<strong>mask_feature_min_masks</strong> (<code>int</code>, <em>optional</em>, defaults to 0), &#x2014; The minimum number of masks of length <code>mask_feature_length</code> generated along the feature axis, each time step, irrespectively of <code>mask_feature_prob</code>. Only relevant if &#x201D;mask_feature_prob*len(feature_axis)/mask_feature_length &lt; mask_feature_min_masks&#x201D;`,name:"mask_feature_min_masks"},{anchor:"transformers.Data2VecAudioConfig.ctc_loss_reduction",description:`<strong>ctc_loss_reduction</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;sum&quot;</code>) &#x2014; Specifies the reduction to apply to the output of <code>torch.nn.CTCLoss</code>. Only relevant when training an instance of <a href="/docs/transformers/pr_16143/en/model_doc/data2vec#transformers.Data2VecAudioForCTC">Data2VecAudioForCTC</a>.`,name:"ctc_loss_reduction"},{anchor:"transformers.Data2VecAudioConfig.ctc_zero_infinity",description:`<strong>ctc_zero_infinity</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to zero infinite losses and the associated gradients of <code>torch.nn.CTCLoss</code>. Infinite losses mainly occur when the inputs are too short to be aligned to the targets. Only relevant when training an instance of <a href="/docs/transformers/pr_16143/en/model_doc/data2vec#transformers.Data2VecAudioForCTC">Data2VecAudioForCTC</a>.`,name:"ctc_zero_infinity"},{anchor:"transformers.Data2VecAudioConfig.use_weighted_layer_sum",description:`<strong>use_weighted_layer_sum</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to use a weighted average of layer outputs with learned weights. Only relevant when using an instance of <a href="/docs/transformers/pr_16143/en/model_doc/data2vec#transformers.Data2VecAudioForSequenceClassification">Data2VecAudioForSequenceClassification</a>.`,name:"use_weighted_layer_sum"},{anchor:"transformers.Data2VecAudioConfig.classifier_proj_size",description:`<strong>classifier_proj_size</strong> (<code>int</code>, <em>optional</em>, defaults to 256) &#x2014; Dimensionality of the projection before token mean-pooling for classification.`,name:"classifier_proj_size"},{anchor:"transformers.Data2VecAudioConfig.tdnn_dim",description:`<strong>tdnn_dim</strong> (<code>Tuple[int]</code>, <em>optional</em>, defaults to <code>(512, 512, 512, 512, 1500)</code>) &#x2014; A tuple of integers defining the number of output channels of each 1D convolutional layer in the <em>TDNN</em> module of the <em>XVector</em> model. The length of <em>tdnn_dim</em> defines the number of <em>TDNN</em> layers.`,name:"tdnn_dim"},{anchor:"transformers.Data2VecAudioConfig.tdnn_kernel",description:`<strong>tdnn_kernel</strong> (<code>Tuple[int]</code>, <em>optional</em>, defaults to <code>(5, 3, 3, 1, 1)</code>) &#x2014; A tuple of integers defining the kernel size of each 1D convolutional layer in the <em>TDNN</em> module of the <em>XVector</em> model. 
The length of <em>tdnn_kernel</em> has to match the length of <em>tdnn_dim</em>.`,name:"tdnn_kernel"},{anchor:"transformers.Data2VecAudioConfig.tdnn_dilation",description:`<strong>tdnn_dilation</strong> (<code>Tuple[int]</code>, <em>optional</em>, defaults to <code>(1, 2, 3, 1, 1)</code>) &#x2014; A tuple of integers defining the dilation factor of each 1D convolutional layer in <em>TDNN</em> module of the <em>XVector</em> model. The length of <em>tdnn_dilation</em> has to match the length of <em>tdnn_dim</em>.`,name:"tdnn_dilation"},{anchor:"transformers.Data2VecAudioConfig.xvector_output_dim",description:`<strong>xvector_output_dim</strong> (<code>int</code>, <em>optional</em>, defaults to 512) &#x2014; Dimensionality of the <em>XVector</em> embedding vectors.`,name:"xvector_output_dim"},{anchor:"transformers.Data2VecAudioConfig.add_adapter",description:`<strong>add_adapter</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether a convolutional network should be stacked on top of the Data2VecAudio Encoder. Can be very useful for warm-starting Data2VecAudio for SpeechEncoderDecoder models.`,name:"add_adapter"},{anchor:"transformers.Data2VecAudioConfig.adapter_kernel_size",description:`<strong>adapter_kernel_size</strong> (<code>int</code>, <em>optional</em>, defaults to 3) &#x2014; Kernel size of the convolutional layers in the adapter network. Only relevant if <code>add_adapter is True</code>.`,name:"adapter_kernel_size"},{anchor:"transformers.Data2VecAudioConfig.adapter_stride",description:`<strong>adapter_stride</strong> (<code>int</code>, <em>optional</em>, defaults to 2) &#x2014; Stride of the convolutional layers in the adapter network. Only relevant if <code>add_adapter is True</code>.`,name:"adapter_stride"},{anchor:"transformers.Data2VecAudioConfig.num_adapter_layers",description:`<strong>num_adapter_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 3) &#x2014; Number of convolutional layers that should be used in the adapter network. Only relevant if <code>add_adapter is True</code>.`,name:"num_adapter_layers"},{anchor:"transformers.Data2VecAudioConfig.output_hidden_size",description:`<strong>output_hidden_size</strong> (<code>int</code>, <em>optional</em>) &#x2014; Dimensionality of the encoder output layer. If not defined, this defaults to <em>hidden-size</em>. 
Only relevant if <code>add_adapter is True</code>.`,name:"output_hidden_size"}]}}),no=new N({props:{code:`from transformers import Data2VecAudioModel, Data2VecAudioConfig # Initializing a Data2VecAudio facebook/data2vec-audio-base-960h style configuration configuration = Data2VecAudioConfig() # Initializing a model from the facebook/data2vec-audio-base-960h style configuration model = Data2VecAudioModel(configuration) # Accessing the model configuration configuration = model.config`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Data2VecAudioModel, Data2VecAudioConfig <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a Data2VecAudio facebook/data2vec-audio-base-960h style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = Data2VecAudioConfig() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a model from the facebook/data2vec-audio-base-960h style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = Data2VecAudioModel(configuration) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = model.config`}}),so=new B({}),ro=new A({props:{name:"class transformers.Data2VecAudioModel",anchor:"transformers.Data2VecAudioModel",parameters:[{name:"config",val:": Data2VecAudioConfig"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/data2vec/modeling_data2vec_audio.py#L898",parametersDescription:[{anchor:"transformers.Data2VecAudioModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/data2vec#transformers.Data2VecAudioConfig">Data2VecAudioConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),uo=new A({props:{name:"forward",anchor:"transformers.Data2VecAudioModel.forward",parameters:[{name:"input_values",val:""},{name:"attention_mask",val:" = None"},{name:"mask_time_indices",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/data2vec/modeling_data2vec_audio.py#L969",parametersDescription:[{anchor:"transformers.Data2VecAudioModel.forward.input_values",description:`<strong>input_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Float values of input raw speech waveform. Values can be obtained by loading a <em>.flac</em> or <em>.wav</em> audio file into an array of type <em>List[float]</em> or a <em>numpy.ndarray</em>, <em>e.g.</em> via the soundfile library (<em>pip install soundfile</em>). To prepare the array into <em>input_values</em>, the <a href="/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor">Wav2Vec2Processor</a> should be used for padding and conversion into a tensor of type <em>torch.FloatTensor</em>. 
See <a href="/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor.__call__">Wav2Vec2Processor.<strong>call</strong>()</a> for details.`,name:"input_values"},{anchor:"transformers.Data2VecAudioModel.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing convolution and attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a></p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"> <p><code>attention_mask</code> should only be passed if the corresponding processor has <code>config.return_attention_mask == True</code>. For all models whose processor has <code>config.return_attention_mask == False</code>, such as <a href="https://huggingface.co/facebook/data2vec-audio-base-960h" rel="nofollow">data2vec-audio-base</a>, <code>attention_mask</code> should <strong>not</strong> be passed to avoid degraded performance when doing batched inference. For such models <code>input_values</code> should simply be padded with 0 and passed without <code>attention_mask</code>. Be aware that these models also yield slightly different results depending on whether <code>input_values</code> is padded or not.</p> </div>`,name:"attention_mask"},{anchor:"transformers.Data2VecAudioModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.Data2VecAudioModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.Data2VecAudioModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <code>transformers.models.data2vec.modeling_data2vec_audio.Data2VecAudioBaseModelOutput</code>or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/data2vec#transformers.Data2VecAudioConfig" >Data2VecAudioConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>extract_features</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, conv_dim[-1])</code>) \u2014 Sequence of extracted feature vectors of the last convolutional layer of the model.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><code>transformers.models.data2vec.modeling_data2vec_audio.Data2VecAudioBaseModelOutput</code>or <code>tuple(torch.FloatTensor)</code></p> `}}),bt=new ge({props:{$$slots:{default:[X2]},$$scope:{ctx:$}}}),mo=new N({props:{code:`from transformers import Wav2Vec2Processor, Data2VecAudioModel import torch from datasets import load_dataset dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") dataset = dataset.sort("id") sampling_rate = dataset.features["audio"].sampling_rate processor = Wav2Vec2Processor.from_pretrained("facebook/data2vec-audio-base-960h") model = Data2VecAudioModel.from_pretrained("facebook/data2vec-audio-base-960h") # audio file is decoded on the fly inputs = processor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="pt") with torch.no_grad(): outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state list(last_hidden_states.shape)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Wav2Vec2Processor, Data2VecAudioModel <span 
class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span>dataset = load_dataset(<span class="hljs-string">&quot;hf-internal-testing/librispeech_asr_demo&quot;</span>, <span class="hljs-string">&quot;clean&quot;</span>, split=<span class="hljs-string">&quot;validation&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>dataset = dataset.sort(<span class="hljs-string">&quot;id&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>sampling_rate = dataset.features[<span class="hljs-string">&quot;audio&quot;</span>].sampling_rate <span class="hljs-meta">&gt;&gt;&gt; </span>processor = Wav2Vec2Processor.from_pretrained(<span class="hljs-string">&quot;facebook/data2vec-audio-base-960h&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = Data2VecAudioModel.from_pretrained(<span class="hljs-string">&quot;facebook/data2vec-audio-base-960h&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># audio file is decoded on the fly</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = processor(dataset[<span class="hljs-number">0</span>][<span class="hljs-string">&quot;audio&quot;</span>][<span class="hljs-string">&quot;array&quot;</span>], sampling_rate=sampling_rate, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">with</span> torch.no_grad(): <span class="hljs-meta">... </span> outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">list</span>(last_hidden_states.shape) [<span class="hljs-number">1</span>, <span class="hljs-number">292</span>, <span class="hljs-number">768</span>]`}}),fo=new B({}),go=new A({props:{name:"class transformers.Data2VecAudioForAudioFrameClassification",anchor:"transformers.Data2VecAudioForAudioFrameClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/data2vec/modeling_data2vec_audio.py#L1291",parametersDescription:[{anchor:"transformers.Data2VecAudioForAudioFrameClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/data2vec#transformers.Data2VecAudioConfig">Data2VecAudioConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),wo=new A({props:{name:"forward",anchor:"transformers.Data2VecAudioForAudioFrameClassification.forward",parameters:[{name:"input_values",val:""},{name:"attention_mask",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/data2vec/modeling_data2vec_audio.py#L1334",parametersDescription:[{anchor:"transformers.Data2VecAudioForAudioFrameClassification.forward.input_values",description:`<strong>input_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Float values of input raw speech waveform. Values can be obtained by loading a <em>.flac</em> or <em>.wav</em> audio file into an array of type <em>List[float]</em> or a <em>numpy.ndarray</em>, <em>e.g.</em> via the soundfile library (<em>pip install soundfile</em>). To prepare the array into <em>input_values</em>, the <a href="/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor">Wav2Vec2Processor</a> should be used for padding and conversion into a tensor of type <em>torch.FloatTensor</em>. See <a href="/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor.__call__">Wav2Vec2Processor.<strong>call</strong>()</a> for details.`,name:"input_values"},{anchor:"transformers.Data2VecAudioForAudioFrameClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing convolution and attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a></p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"> <p><code>attention_mask</code> should only be passed if the corresponding processor has <code>config.return_attention_mask == True</code>. For all models whose processor has <code>config.return_attention_mask == False</code>, such as <a href="https://huggingface.co/facebook/data2vec-audio-base-960h" rel="nofollow">data2vec-audio-base</a>, <code>attention_mask</code> should <strong>not</strong> be passed to avoid degraded performance when doing batched inference. For such models <code>input_values</code> should simply be padded with 0 and passed without <code>attention_mask</code>. Be aware that these models also yield slightly different results depending on whether <code>input_values</code> is padded or not.</p> </div>`,name:"attention_mask"},{anchor:"transformers.Data2VecAudioForAudioFrameClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.Data2VecAudioForAudioFrameClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.Data2VecAudioForAudioFrameClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.Data2VecAudioForAudioFrameClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/data2vec#transformers.Data2VecAudioConfig" >Data2VecAudioConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.num_labels)</code>) \u2014 Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),kt=new 
ge({props:{$$slots:{default:[U2]},$$scope:{ctx:$}}}),yo=new N({props:{code:`from transformers import Wav2Vec2FeatureExtractor, Data2VecAudioForAudioFrameClassification from datasets import load_dataset import torch dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") dataset = dataset.sort("id") sampling_rate = dataset.features["audio"].sampling_rate feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-data2vec-audio-frame") model = Data2VecAudioForAudioFrameClassification.from_pretrained("hf-internal-testing/tiny-random-data2vec-audio-frame") # audio file is decoded on the fly inputs = feature_extractor(dataset[0]["audio"]["array"], return_tensors="pt", sampling_rate=sampling_rate) with torch.no_grad(): logits = model(**inputs).logits probabilities = torch.sigmoid(logits[0]) # labels is a one-hot array of shape (num_frames, num_speakers) labels = (probabilities > 0.5).long() labels[0].tolist()`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Wav2Vec2FeatureExtractor, Data2VecAudioForAudioFrameClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>dataset = load_dataset(<span class="hljs-string">&quot;hf-internal-testing/librispeech_asr_demo&quot;</span>, <span class="hljs-string">&quot;clean&quot;</span>, split=<span class="hljs-string">&quot;validation&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>dataset = dataset.sort(<span class="hljs-string">&quot;id&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>sampling_rate = dataset.features[<span class="hljs-string">&quot;audio&quot;</span>].sampling_rate <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(<span class="hljs-string">&quot;hf-internal-testing/tiny-random-data2vec-audio-frame&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = Data2VecAudioForAudioFrameClassification.from_pretrained(<span class="hljs-string">&quot;hf-internal-testing/tiny-random-data2vec-audio-frame&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># audio file is decoded on the fly</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = feature_extractor(dataset[<span class="hljs-number">0</span>][<span class="hljs-string">&quot;audio&quot;</span>][<span class="hljs-string">&quot;array&quot;</span>], return_tensors=<span class="hljs-string">&quot;pt&quot;</span>, sampling_rate=sampling_rate) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">with</span> torch.no_grad(): <span class="hljs-meta">... 
</span> logits = model(**inputs).logits <span class="hljs-meta">&gt;&gt;&gt; </span>probabilities = torch.sigmoid(logits[<span class="hljs-number">0</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># labels is a one-hot array of shape (num_frames, num_speakers)</span> <span class="hljs-meta">&gt;&gt;&gt; </span>labels = (probabilities &gt; <span class="hljs-number">0.5</span>).long() <span class="hljs-meta">&gt;&gt;&gt; </span>labels[<span class="hljs-number">0</span>].tolist() [<span class="hljs-number">1</span>, <span class="hljs-number">1</span>]`}}),xo=new B({}),Do=new A({props:{name:"class transformers.Data2VecAudioForCTC",anchor:"transformers.Data2VecAudioForCTC",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/data2vec/modeling_data2vec_audio.py#L1036",parametersDescription:[{anchor:"transformers.Data2VecAudioForCTC.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/data2vec#transformers.Data2VecAudioConfig">Data2VecAudioConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Co=new A({props:{name:"forward",anchor:"transformers.Data2VecAudioForCTC.forward",parameters:[{name:"input_values",val:""},{name:"attention_mask",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"labels",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/data2vec/modeling_data2vec_audio.py#L1077",parametersDescription:[{anchor:"transformers.Data2VecAudioForCTC.forward.input_values",description:`<strong>input_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Float values of input raw speech waveform. Values can be obtained by loading a <em>.flac</em> or <em>.wav</em> audio file into an array of type <em>List[float]</em> or a <em>numpy.ndarray</em>, <em>e.g.</em> via the soundfile library (<em>pip install soundfile</em>). To prepare the array into <em>input_values</em>, the <a href="/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor">Wav2Vec2Processor</a> should be used for padding and conversion into a tensor of type <em>torch.FloatTensor</em>. See <a href="/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor.__call__">Wav2Vec2Processor.<strong>call</strong>()</a> for details.`,name:"input_values"},{anchor:"transformers.Data2VecAudioForCTC.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing convolution and attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a></p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"> <p><code>attention_mask</code> should only be passed if the corresponding processor has <code>config.return_attention_mask == True</code>. For all models whose processor has <code>config.return_attention_mask == False</code>, such as <a href="https://huggingface.co/facebook/data2vec-audio-base-960h" rel="nofollow">data2vec-audio-base</a>, <code>attention_mask</code> should <strong>not</strong> be passed to avoid degraded performance when doing batched inference. For such models <code>input_values</code> should simply be padded with 0 and passed without <code>attention_mask</code>. Be aware that these models also yield slightly different results depending on whether <code>input_values</code> is padded or not.</p> </div>`,name:"attention_mask"},{anchor:"transformers.Data2VecAudioForCTC.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.Data2VecAudioForCTC.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.Data2VecAudioForCTC.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.Data2VecAudioForCTC.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_length)</code>, <em>optional</em>) &#x2014; Labels for connectionist temporal classification. Note that <code>target_length</code> has to be smaller or equal to the sequence length of the output logits. Indices are selected in <code>[-100, 0, ..., config.vocab_size - 1]</code>. 
All labels set to <code>-100</code> are ignored (masked), the loss is only computed for labels in <code>[0, ..., config.vocab_size - 1]</code>.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.CausalLMOutput" >transformers.modeling_outputs.CausalLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/data2vec#transformers.Data2VecAudioConfig" >Data2VecAudioConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Language modeling loss (for next-token prediction).</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.CausalLMOutput" >transformers.modeling_outputs.CausalLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),yt=new ge({props:{$$slots:{default:[J2]},$$scope:{ctx:$}}}),Mo=new N({props:{code:`from transformers import Wav2Vec2Processor, Data2VecAudioForCTC from datasets import load_dataset import torch dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") dataset = dataset.sort("id") sampling_rate = dataset.features["audio"].sampling_rate processor = Wav2Vec2Processor.from_pretrained("facebook/data2vec-audio-base-960h") model = Data2VecAudioForCTC.from_pretrained("facebook/data2vec-audio-base-960h") # audio file is decoded on the fly inputs = processor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="pt") with torch.no_grad(): logits = model(**inputs).logits predicted_ids = torch.argmax(logits, dim=-1) # transcribe speech transcription = processor.batch_decode(predicted_ids) transcription[0]`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Wav2Vec2Processor, Data2VecAudioForCTC <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span 
class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>dataset = load_dataset(<span class="hljs-string">&quot;hf-internal-testing/librispeech_asr_demo&quot;</span>, <span class="hljs-string">&quot;clean&quot;</span>, split=<span class="hljs-string">&quot;validation&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>dataset = dataset.sort(<span class="hljs-string">&quot;id&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>sampling_rate = dataset.features[<span class="hljs-string">&quot;audio&quot;</span>].sampling_rate <span class="hljs-meta">&gt;&gt;&gt; </span>processor = Wav2Vec2Processor.from_pretrained(<span class="hljs-string">&quot;facebook/data2vec-audio-base-960h&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = Data2VecAudioForCTC.from_pretrained(<span class="hljs-string">&quot;facebook/data2vec-audio-base-960h&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># audio file is decoded on the fly</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = processor(dataset[<span class="hljs-number">0</span>][<span class="hljs-string">&quot;audio&quot;</span>][<span class="hljs-string">&quot;array&quot;</span>], sampling_rate=sampling_rate, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">with</span> torch.no_grad(): <span class="hljs-meta">... </span> logits = model(**inputs).logits <span class="hljs-meta">&gt;&gt;&gt; </span>predicted_ids = torch.argmax(logits, dim=-<span class="hljs-number">1</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># transcribe speech</span> <span class="hljs-meta">&gt;&gt;&gt; </span>transcription = processor.batch_decode(predicted_ids) <span class="hljs-meta">&gt;&gt;&gt; </span>transcription[<span class="hljs-number">0</span>] <span class="hljs-string">&#x27;MISTER QUILTER IS THE APOSTLE OF THE MIDDLE CLASSES AND WE ARE GLAD TO WELCOME HIS GOSPEL&#x27;</span>`}}),qo=new N({props:{code:`with processor.as_target_processor(): inputs["labels"] = processor(dataset[0]["text"], return_tensors="pt").input_ids # compute loss loss = model(**inputs).loss round(loss.item(), 2)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">with</span> processor.as_target_processor(): <span class="hljs-meta">... 
</span> inputs[<span class="hljs-string">&quot;labels&quot;</span>] = processor(dataset[<span class="hljs-number">0</span>][<span class="hljs-string">&quot;text&quot;</span>], return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># compute loss</span> <span class="hljs-meta">&gt;&gt;&gt; </span>loss = model(**inputs).loss <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">round</span>(loss.item(), <span class="hljs-number">2</span>) <span class="hljs-number">66.95</span>`}}),jo=new B({}),zo=new A({props:{name:"class transformers.Data2VecAudioForSequenceClassification",anchor:"transformers.Data2VecAudioForSequenceClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/data2vec/modeling_data2vec_audio.py#L1167",parametersDescription:[{anchor:"transformers.Data2VecAudioForSequenceClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/data2vec#transformers.Data2VecAudioConfig">Data2VecAudioConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Wo=new A({props:{name:"forward",anchor:"transformers.Data2VecAudioForSequenceClassification.forward",parameters:[{name:"input_values",val:""},{name:"attention_mask",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"labels",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/data2vec/modeling_data2vec_audio.py#L1212",parametersDescription:[{anchor:"transformers.Data2VecAudioForSequenceClassification.forward.input_values",description:`<strong>input_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Float values of input raw speech waveform. Values can be obtained by loading a <em>.flac</em> or <em>.wav</em> audio file into an array of type <em>List[float]</em> or a <em>numpy.ndarray</em>, <em>e.g.</em> via the soundfile library (<em>pip install soundfile</em>). To prepare the array into <em>input_values</em>, the <a href="/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor">Wav2Vec2Processor</a> should be used for padding and conversion into a tensor of type <em>torch.FloatTensor</em>. See <a href="/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor.__call__">Wav2Vec2Processor.<strong>call</strong>()</a> for details.`,name:"input_values"},{anchor:"transformers.Data2VecAudioForSequenceClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing convolution and attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a></p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"> <p><code>attention_mask</code> should only be passed if the corresponding processor has <code>config.return_attention_mask == True</code>. For all models whose processor has <code>config.return_attention_mask == False</code>, such as <a href="https://huggingface.co/facebook/data2vec-audio-base-960h" rel="nofollow">data2vec-audio-base</a>, <code>attention_mask</code> should <strong>not</strong> be passed to avoid degraded performance when doing batched inference. For such models <code>input_values</code> should simply be padded with 0 and passed without <code>attention_mask</code>. Be aware that these models also yield slightly different results depending on whether <code>input_values</code> is padded or not.</p> </div>`,name:"attention_mask"},{anchor:"transformers.Data2VecAudioForSequenceClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.Data2VecAudioForSequenceClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.Data2VecAudioForSequenceClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.Data2VecAudioForSequenceClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. 
If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/data2vec#transformers.Data2VecAudioConfig" >Data2VecAudioConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Dt=new ge({props:{$$slots:{default:[K2]},$$scope:{ctx:$}}}),Io=new N({props:{code:`from transformers import Wav2Vec2FeatureExtractor, Data2VecAudioForSequenceClassification from datasets import load_dataset import torch dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") dataset = dataset.sort("id") sampling_rate = dataset.features["audio"].sampling_rate feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-data2vec-seq-class") model = Data2VecAudioForSequenceClassification.from_pretrained("hf-internal-testing/tiny-random-data2vec-seq-class") # audio file is decoded on the fly inputs = feature_extractor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="pt") with torch.no_grad(): logits = model(**inputs).logits predicted_class_ids = torch.argmax(logits, dim=-1).item() predicted_label = model.config.id2label[predicted_class_ids] predicted_label`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> 
Wav2Vec2FeatureExtractor, Data2VecAudioForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>dataset = load_dataset(<span class="hljs-string">&quot;hf-internal-testing/librispeech_asr_demo&quot;</span>, <span class="hljs-string">&quot;clean&quot;</span>, split=<span class="hljs-string">&quot;validation&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>dataset = dataset.sort(<span class="hljs-string">&quot;id&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>sampling_rate = dataset.features[<span class="hljs-string">&quot;audio&quot;</span>].sampling_rate <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(<span class="hljs-string">&quot;hf-internal-testing/tiny-random-data2vec-seq-class&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = Data2VecAudioForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;hf-internal-testing/tiny-random-data2vec-seq-class&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># audio file is decoded on the fly</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = feature_extractor(dataset[<span class="hljs-number">0</span>][<span class="hljs-string">&quot;audio&quot;</span>][<span class="hljs-string">&quot;array&quot;</span>], sampling_rate=sampling_rate, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">with</span> torch.no_grad(): <span class="hljs-meta">... </span> logits = model(**inputs).logits <span class="hljs-meta">&gt;&gt;&gt; </span>predicted_class_ids = torch.argmax(logits, dim=-<span class="hljs-number">1</span>).item() <span class="hljs-meta">&gt;&gt;&gt; </span>predicted_label = model.config.id2label[predicted_class_ids] <span class="hljs-meta">&gt;&gt;&gt; </span>predicted_label <span class="hljs-string">&#x27;LABEL_1&#x27;</span>`}}),Oo=new N({props:{code:`# compute loss - target_label is e.g. "down" target_label = model.config.id2label[0] inputs["labels"] = torch.tensor([model.config.label2id[target_label]]) loss = model(**inputs).loss round(loss.item(), 2)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># compute loss - target_label is e.g. 
&quot;down&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>target_label = model.config.id2label[<span class="hljs-number">0</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>inputs[<span class="hljs-string">&quot;labels&quot;</span>] = torch.tensor([model.config.label2id[target_label]]) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = model(**inputs).loss <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">round</span>(loss.item(), <span class="hljs-number">2</span>) <span class="hljs-number">0.69</span>`}}),Bo=new B({}),Ho=new A({props:{name:"class transformers.Data2VecAudioForXVector",anchor:"transformers.Data2VecAudioForXVector",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/data2vec/modeling_data2vec_audio.py#L1449",parametersDescription:[{anchor:"transformers.Data2VecAudioForXVector.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/data2vec#transformers.Data2VecAudioConfig">Data2VecAudioConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Jo=new A({props:{name:"forward",anchor:"transformers.Data2VecAudioForXVector.forward",parameters:[{name:"input_values",val:""},{name:"attention_mask",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"labels",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/data2vec/modeling_data2vec_audio.py#L1511",parametersDescription:[{anchor:"transformers.Data2VecAudioForXVector.forward.input_values",description:`<strong>input_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Float values of input raw speech waveform. Values can be obtained by loading a <em>.flac</em> or <em>.wav</em> audio file into an array of type <em>List[float]</em> or a <em>numpy.ndarray</em>, <em>e.g.</em> via the soundfile library (<em>pip install soundfile</em>). To prepare the array into <em>input_values</em>, the <a href="/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor">Wav2Vec2Processor</a> should be used for padding and conversion into a tensor of type <em>torch.FloatTensor</em>. See <a href="/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor.__call__">Wav2Vec2Processor.<strong>call</strong>()</a> for details.`,name:"input_values"},{anchor:"transformers.Data2VecAudioForXVector.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing convolution and attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a></p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"> <p><code>attention_mask</code> should only be passed if the corresponding processor has <code>config.return_attention_mask == True</code>. For all models whose processor has <code>config.return_attention_mask == False</code>, such as <a href="https://huggingface.co/facebook/data2vec-audio-base-960h" rel="nofollow">data2vec-audio-base</a>, <code>attention_mask</code> should <strong>not</strong> be passed to avoid degraded performance when doing batched inference. For such models <code>input_values</code> should simply be padded with 0 and passed without <code>attention_mask</code>. Be aware that these models also yield slightly different results depending on whether <code>input_values</code> is padded or not.</p> </div>`,name:"attention_mask"},{anchor:"transformers.Data2VecAudioForXVector.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.Data2VecAudioForXVector.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.Data2VecAudioForXVector.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.Data2VecAudioForXVector.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. 
If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).`,name:"labels"}],returnDescription:` <p>A <code>transformers.models.data2vec.modeling_data2vec_audio.XVectorOutput</code> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/data2vec#transformers.Data2VecAudioConfig" >Data2VecAudioConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.xvector_output_dim)</code>) \u2014 Classification hidden states before AMSoftmax.</p> </li> <li> <p><strong>embeddings</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.xvector_output_dim)</code>) \u2014 Utterance embeddings used for vector similarity-based retrieval.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><code>transformers.models.data2vec.modeling_data2vec_audio.XVectorOutput</code> or <code>tuple(torch.FloatTensor)</code></p> `}}),$t=new ge({props:{$$slots:{default:[Y2]},$$scope:{ctx:$}}}),Ko=new N({props:{code:`from transformers import Wav2Vec2FeatureExtractor, Data2VecAudioForXVector from datasets import load_dataset import torch dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") dataset = dataset.sort("id") sampling_rate = dataset.features["audio"].sampling_rate feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-data2vec-xvector") model = Data2VecAudioForXVector.from_pretrained("hf-internal-testing/tiny-random-data2vec-xvector") # audio file is decoded on the fly inputs = feature_extractor( [d["array"] for d in dataset[:2]["audio"]], sampling_rate=sampling_rate, return_tensors="pt", padding=True ) with torch.no_grad(): embeddings = model(**inputs).embeddings embeddings = torch.nn.functional.normalize(embeddings, dim=-1).cpu() # the resulting embeddings can be used for cosine similarity-based retrieval cosine_sim = torch.nn.CosineSimilarity(dim=-1) similarity = cosine_sim(embeddings[0], embeddings[1]) threshold = 0.7 # the optimal threshold is dataset-dependent if similarity < 
threshold: print("Speakers are not the same!") round(similarity.item(), 2)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Wav2Vec2FeatureExtractor, Data2VecAudioForXVector <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>dataset = load_dataset(<span class="hljs-string">&quot;hf-internal-testing/librispeech_asr_demo&quot;</span>, <span class="hljs-string">&quot;clean&quot;</span>, split=<span class="hljs-string">&quot;validation&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>dataset = dataset.sort(<span class="hljs-string">&quot;id&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>sampling_rate = dataset.features[<span class="hljs-string">&quot;audio&quot;</span>].sampling_rate <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(<span class="hljs-string">&quot;hf-internal-testing/tiny-random-data2vec-xvector&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = Data2VecAudioForXVector.from_pretrained(<span class="hljs-string">&quot;hf-internal-testing/tiny-random-data2vec-xvector&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># audio file is decoded on the fly</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = feature_extractor( <span class="hljs-meta">... </span> [d[<span class="hljs-string">&quot;array&quot;</span>] <span class="hljs-keyword">for</span> d <span class="hljs-keyword">in</span> dataset[:<span class="hljs-number">2</span>][<span class="hljs-string">&quot;audio&quot;</span>]], sampling_rate=sampling_rate, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>, padding=<span class="hljs-literal">True</span> <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">with</span> torch.no_grad(): <span class="hljs-meta">... </span> embeddings = model(**inputs).embeddings <span class="hljs-meta">&gt;&gt;&gt; </span>embeddings = torch.nn.functional.normalize(embeddings, dim=-<span class="hljs-number">1</span>).cpu() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># the resulting embeddings can be used for cosine similarity-based retrieval</span> <span class="hljs-meta">&gt;&gt;&gt; </span>cosine_sim = torch.nn.CosineSimilarity(dim=-<span class="hljs-number">1</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>similarity = cosine_sim(embeddings[<span class="hljs-number">0</span>], embeddings[<span class="hljs-number">1</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>threshold = <span class="hljs-number">0.7</span> <span class="hljs-comment"># the optimal threshold is dataset-dependent</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">if</span> similarity &lt; threshold: <span class="hljs-meta">... 
</span> <span class="hljs-built_in">print</span>(<span class="hljs-string">&quot;Speakers are not the same!&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">round</span>(similarity.item(), <span class="hljs-number">2</span>) <span class="hljs-number">1.0</span>`}}),Yo=new B({}),Zo=new A({props:{name:"class transformers.Data2VecTextModel",anchor:"transformers.Data2VecTextModel",parameters:[{name:"config",val:""},{name:"add_pooling_layer",val:" = True"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/data2vec/modeling_data2vec_text.py#L700",parametersDescription:[{anchor:"transformers.Data2VecTextModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/data2vec#transformers.Data2VecTextConfig">Data2VecTextConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),ia=new A({props:{name:"forward",anchor:"transformers.Data2VecTextModel.forward",parameters:[{name:"input_ids",val:": typing.Optional[torch.Tensor] = None"},{name:"attention_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"token_type_ids",val:": typing.Optional[torch.Tensor] = None"},{name:"position_ids",val:": typing.Optional[torch.Tensor] = None"},{name:"head_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"inputs_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"encoder_hidden_states",val:": typing.Optional[torch.Tensor] = None"},{name:"encoder_attention_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"past_key_values",val:": typing.Optional[typing.List[torch.FloatTensor]] = None"},{name:"use_cache",val:": typing.Optional[bool] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/data2vec/modeling_data2vec_text.py#L744",parametersDescription:[{anchor:"transformers.Data2VecTextModel.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaTokenizer">RobertaTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.Data2VecTextModel.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.Data2VecTextModel.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.Data2VecTextModel.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.Data2VecTextModel.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.Data2VecTextModel.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.Data2VecTextModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.Data2VecTextModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.Data2VecTextModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.Data2VecTextModel.forward.encoder_hidden_states",description:`<strong>encoder_hidden_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder.`,name:"encoder_hidden_states"},{anchor:"transformers.Data2VecTextModel.forward.encoder_attention_mask",description:`<strong>encoder_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul>`,name:"encoder_attention_mask"},{anchor:"transformers.Data2VecTextModel.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code> of length <code>config.n_layers</code> with each tuple having 4 tensors of shape <code>(batch_size, num_heads, sequence_length - 1, embed_size_per_head)</code>) &#x2014; Contains precomputed key and value hidden states of the attention blocks. 
Can be used to speed up decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.`,name:"past_key_values"},{anchor:"transformers.Data2VecTextModel.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions" >transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/data2vec#transformers.Data2VecTextConfig" >Data2VecTextConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>pooler_output</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, hidden_size)</code>) \u2014 Last layer hidden-state of the first token of the sequence (classification token) after further processing through the layers used for the auxiliary pretraining task. E.g. for BERT-family of models, this returns the classification token after processing through a linear layer and a tanh activation function. 
The linear layer weights are trained from the next sentence prediction (classification) objective during pretraining.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> and <code>config.add_cross_attention=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and optionally if <code>config.is_encoder_decoder=True</code> 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if <code>config.is_encoder_decoder=True</code> in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions" >transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Ct=new ge({props:{$$slots:{default:[Z2]},$$scope:{ctx:$}}}),da=new N({props:{code:`from transformers import RobertaTokenizer, Data2VecTextModel import torch tokenizer = RobertaTokenizer.from_pretrained("facebook/data2vec-text-base") model = Data2VecTextModel.from_pretrained("facebook/data2vec-text-base") inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer, Data2VecTextModel <span class="hljs-meta">&gt;&gt;&gt; 
</span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/data2vec-text-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = Data2VecTextModel.from_pretrained(<span class="hljs-string">&quot;facebook/data2vec-text-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),la=new B({}),ca=new A({props:{name:"class transformers.Data2VecTextForCausalLM",anchor:"transformers.Data2VecTextForCausalLM",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/data2vec/modeling_data2vec_text.py#L885",parametersDescription:[{anchor:"transformers.Data2VecTextForCausalLM.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/data2vec#transformers.Data2VecTextConfig">Data2VecTextConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),fa=new A({props:{name:"forward",anchor:"transformers.Data2VecTextForCausalLM.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"encoder_hidden_states",val:" = None"},{name:"encoder_attention_mask",val:" = None"},{name:"labels",val:" = None"},{name:"past_key_values",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/data2vec/modeling_data2vec_text.py#L911",parametersDescription:[{anchor:"transformers.Data2VecTextForCausalLM.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaTokenizer">RobertaTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.Data2VecTextForCausalLM.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.Data2VecTextForCausalLM.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.Data2VecTextForCausalLM.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.Data2VecTextForCausalLM.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.Data2VecTextForCausalLM.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.Data2VecTextForCausalLM.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.Data2VecTextForCausalLM.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.Data2VecTextForCausalLM.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.Data2VecTextForCausalLM.forward.encoder_hidden_states",description:`<strong>encoder_hidden_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder.`,name:"encoder_hidden_states"},{anchor:"transformers.Data2VecTextForCausalLM.forward.encoder_attention_mask",description:`<strong>encoder_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul>`,name:"encoder_attention_mask"},{anchor:"transformers.Data2VecTextForCausalLM.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>`,name:"labels"},{anchor:"transformers.Data2VecTextForCausalLM.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code> of length <code>config.n_layers</code> with each tuple having 4 tensors of shape <code>(batch_size, num_heads, sequence_length - 1, embed_size_per_head)</code>) &#x2014; Contains precomputed key and value hidden states of the attention blocks. 
Can be used to speed up decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.`,name:"past_key_values"},{anchor:"transformers.Data2VecTextForCausalLM.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.CausalLMOutputWithCrossAttentions" >transformers.modeling_outputs.CausalLMOutputWithCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/data2vec#transformers.Data2VecTextConfig" >Data2VecTextConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Language modeling loss (for next-token prediction).</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Cross attentions weights after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> tuples of length 
<code>config.n_layers</code>, with each tuple containing the cached key, value states of the self-attention and the cross-attention layers if model is used in encoder-decoder setting. Only relevant if <code>config.is_decoder = True</code>.</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.CausalLMOutputWithCrossAttentions" >transformers.modeling_outputs.CausalLMOutputWithCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),qt=new ge({props:{$$slots:{default:[ev]},$$scope:{ctx:$}}}),ga=new N({props:{code:`from transformers import Data2VecTextTokenizer, Data2VecTextForCausalLM, Data2VecTextConfig import torch tokenizer = Data2VecTextTokenizer.from_pretrained("facebook/data2vec-text-base") config = Data2VecTextConfig.from_pretrained("data2vec-base") config.is_decoder = True model = Data2VecTextForCausalLM.from_pretrained("data2vec-base", config=config) inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") outputs = model(**inputs) prediction_logits = outputs.logits`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Data2VecTextTokenizer, Data2VecTextForCausalLM, Data2VecTextConfig <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = Data2VecTextTokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/data2vec-text-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>config = Data2VecTextConfig.from_pretrained(<span class="hljs-string">&quot;data2vec-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>config.is_decoder = <span class="hljs-literal">True</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = Data2VecTextForCausalLM.from_pretrained(<span class="hljs-string">&quot;data2vec-base&quot;</span>, config=config) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>prediction_logits = outputs.logits`}}),_a=new B({}),va=new A({props:{name:"class transformers.Data2VecTextForMaskedLM",anchor:"transformers.Data2VecTextForMaskedLM",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/data2vec/modeling_data2vec_text.py#L1037",parametersDescription:[{anchor:"transformers.Data2VecTextForMaskedLM.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/data2vec#transformers.Data2VecTextConfig">Data2VecTextConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),ya=new A({props:{name:"forward",anchor:"transformers.Data2VecTextForMaskedLM.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"encoder_hidden_states",val:" = None"},{name:"encoder_attention_mask",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/data2vec/modeling_data2vec_text.py#L1066",parametersDescription:[{anchor:"transformers.Data2VecTextForMaskedLM.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaTokenizer">RobertaTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.Data2VecTextForMaskedLM.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.Data2VecTextForMaskedLM.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.Data2VecTextForMaskedLM.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.Data2VecTextForMaskedLM.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.Data2VecTextForMaskedLM.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.Data2VecTextForMaskedLM.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.Data2VecTextForMaskedLM.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.Data2VecTextForMaskedLM.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.Data2VecTextForMaskedLM.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. 
Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>`,name:"labels"},{anchor:"transformers.Data2VecTextForMaskedLM.forward.kwargs",description:`<strong>kwargs</strong> (<code>Dict[str, any]</code>, optional, defaults to <em>{}</em>) &#x2014; Used to hide legacy arguments that have been deprecated.`,name:"kwargs"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.MaskedLMOutput" >transformers.modeling_outputs.MaskedLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/data2vec#transformers.Data2VecTextConfig" >Data2VecTextConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Masked language modeling (MLM) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.MaskedLMOutput" >transformers.modeling_outputs.MaskedLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),zt=new ge({props:{$$slots:{default:[tv]},$$scope:{ctx:$}}}),xa=new N({props:{code:`from transformers import RobertaTokenizer, Data2VecTextForMaskedLM import torch tokenizer = RobertaTokenizer.from_pretrained("facebook/data2vec-text-base") model = Data2VecTextForMaskedLM.from_pretrained("facebook/data2vec-text-base") inputs = tokenizer("The capital of France is <mask>.", return_tensors="pt") labels = tokenizer("The capital of France is Paris.", return_tensors="pt")["input_ids"] outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer, Data2VecTextForMaskedLM <span class="hljs-meta">&gt;&gt;&gt; </span><span 
class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/data2vec-text-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = Data2VecTextForMaskedLM.from_pretrained(<span class="hljs-string">&quot;facebook/data2vec-text-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;The capital of France is &lt;mask&gt;.&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = tokenizer(<span class="hljs-string">&quot;The capital of France is Paris.&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>)[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Da=new B({}),Va=new A({props:{name:"class transformers.Data2VecTextForSequenceClassification",anchor:"transformers.Data2VecTextForSequenceClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/data2vec/modeling_data2vec_text.py#L1167",parametersDescription:[{anchor:"transformers.Data2VecTextForSequenceClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/data2vec#transformers.Data2VecTextConfig">Data2VecTextConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),qa=new A({props:{name:"forward",anchor:"transformers.Data2VecTextForSequenceClassification.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/data2vec/modeling_data2vec_text.py#L1181",parametersDescription:[{anchor:"transformers.Data2VecTextForSequenceClassification.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaTokenizer">RobertaTokenizer</a>. 
See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.Data2VecTextForSequenceClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.Data2VecTextForSequenceClassification.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.Data2VecTextForSequenceClassification.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.Data2VecTextForSequenceClassification.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.Data2VecTextForSequenceClassification.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.Data2VecTextForSequenceClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.Data2VecTextForSequenceClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.Data2VecTextForSequenceClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.Data2VecTextForSequenceClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/data2vec#transformers.Data2VecTextConfig" >Data2VecTextConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or 
<code>tuple(torch.FloatTensor)</code></p> `}}),Pt=new ge({props:{$$slots:{default:[ov]},$$scope:{ctx:$}}}),ja=new N({props:{code:`import torch from transformers import RobertaTokenizer, Data2VecTextForSequenceClassification torch.manual_seed(0) tokenizer = RobertaTokenizer.from_pretrained("facebook/data2vec-text-base") model = Data2VecTextForSequenceClassification.from_pretrained("facebook/data2vec-text-base", num_labels=2) inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([1]).unsqueeze(0) # Batch size 1 outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits list(logits.shape) `,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer, Data2VecTextForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span>torch.manual_seed(<span class="hljs-number">0</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/data2vec-text-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = Data2VecTextForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;facebook/data2vec-text-base&quot;</span>, num_labels=<span class="hljs-number">2</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([<span class="hljs-number">1</span>]).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">list</span>(logits.shape) `}}),za=new N({props:{code:`import torch from transformers import RobertaTokenizer, Data2VecTextForSequenceClassification torch.manual_seed(0) tokenizer = RobertaTokenizer.from_pretrained("facebook/data2vec-text-base") model = Data2VecTextForSequenceClassification.from_pretrained("facebook/data2vec-text-base", problem_type="multi_label_classification", num_labels=2) inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([[1, 1]], dtype=torch.float) # need dtype=float for BCEWithLogitsLoss outputs = model(**inputs, labels=labels) loss = outputs.loss list(logits.shape) `,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer, Data2VecTextForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span>torch.manual_seed(<span class="hljs-number">0</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/data2vec-text-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = Data2VecTextForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;facebook/data2vec-text-base&quot;</span>, problem_type=<span 
class="hljs-string">&quot;multi_label_classification&quot;</span>, num_labels=<span class="hljs-number">2</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([[<span class="hljs-number">1</span>, <span class="hljs-number">1</span>]], dtype=torch.<span class="hljs-built_in">float</span>) <span class="hljs-comment"># need dtype=float for BCEWithLogitsLoss</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">list</span>(logits.shape) `}}),Ea=new B({}),Pa=new A({props:{name:"class transformers.Data2VecTextForMultipleChoice",anchor:"transformers.Data2VecTextForMultipleChoice",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/data2vec/modeling_data2vec_text.py#L1265",parametersDescription:[{anchor:"transformers.Data2VecTextForMultipleChoice.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/data2vec#transformers.Data2VecTextConfig">Data2VecTextConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Oa=new A({props:{name:"forward",anchor:"transformers.Data2VecTextForMultipleChoice.forward",parameters:[{name:"input_ids",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"labels",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/data2vec/modeling_data2vec_text.py#L1278",parametersDescription:[{anchor:"transformers.Data2VecTextForMultipleChoice.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaTokenizer">RobertaTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.Data2VecTextForMultipleChoice.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.Data2VecTextForMultipleChoice.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.Data2VecTextForMultipleChoice.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.Data2VecTextForMultipleChoice.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.Data2VecTextForMultipleChoice.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.Data2VecTextForMultipleChoice.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.Data2VecTextForMultipleChoice.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.Data2VecTextForMultipleChoice.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.Data2VecTextForMultipleChoice.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the multiple choice classification loss. Indices should be in <code>[0, ..., num_choices-1]</code> where <code>num_choices</code> is the size of the second dimension of the input tensors. (See <code>input_ids</code> above)`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.MultipleChoiceModelOutput" >transformers.modeling_outputs.MultipleChoiceModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/data2vec#transformers.Data2VecTextConfig" >Data2VecTextConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <em>(1,)</em>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices)</code>) \u2014 <em>num_choices</em> is the second dimension of the input tensors. 
(see <em>input_ids</em> above).</p> <p>Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.MultipleChoiceModelOutput" >transformers.modeling_outputs.MultipleChoiceModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Lt=new ge({props:{$$slots:{default:[av]},$$scope:{ctx:$}}}),Ba=new N({props:{code:`from transformers import RobertaTokenizer, Data2VecTextForMultipleChoice import torch tokenizer = RobertaTokenizer.from_pretrained("facebook/data2vec-text-base") model = Data2VecTextForMultipleChoice.from_pretrained("facebook/data2vec-text-base") prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." choice0 = "It is eaten with a fork and a knife." choice1 = "It is eaten while held in the hand." 
labels = torch.tensor(0).unsqueeze(0) # choice0 is correct (according to Wikipedia ;)), batch size 1 encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors="pt", padding=True) outputs = model(**{k: v.unsqueeze(0) for k, v in encoding.items()}, labels=labels) # batch size is 1 # the linear classifier still needs to be trained loss = outputs.loss logits = outputs.logits`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer, Data2VecTextForMultipleChoice <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/data2vec-text-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = Data2VecTextForMultipleChoice.from_pretrained(<span class="hljs-string">&quot;facebook/data2vec-text-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice0 = <span class="hljs-string">&quot;It is eaten with a fork and a knife.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice1 = <span class="hljs-string">&quot;It is eaten while held in the hand.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor(<span class="hljs-number">0</span>).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># choice0 is correct (according to Wikipedia ;)), batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors=<span class="hljs-string">&quot;pt&quot;</span>, padding=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**{k: v.unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-keyword">for</span> k, v <span class="hljs-keyword">in</span> encoding.items()}, labels=labels) <span class="hljs-comment"># batch size is 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># the linear classifier still needs to be trained</span> <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Ha=new B({}),Ra=new A({props:{name:"class transformers.Data2VecTextForTokenClassification",anchor:"transformers.Data2VecTextForTokenClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/data2vec/modeling_data2vec_text.py#L1360",parametersDescription:[{anchor:"transformers.Data2VecTextForTokenClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/data2vec#transformers.Data2VecTextConfig">Data2VecTextConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Ka=new A({props:{name:"forward",anchor:"transformers.Data2VecTextForTokenClassification.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/data2vec/modeling_data2vec_text.py#L1378",parametersDescription:[{anchor:"transformers.Data2VecTextForTokenClassification.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaTokenizer">RobertaTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.Data2VecTextForTokenClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.Data2VecTextForTokenClassification.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.Data2VecTextForTokenClassification.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.Data2VecTextForTokenClassification.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.Data2VecTextForTokenClassification.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.Data2VecTextForTokenClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.Data2VecTextForTokenClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.Data2VecTextForTokenClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.Data2VecTextForTokenClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the token classification loss. 
Indices should be in <code>[0, ..., config.num_labels - 1]</code>.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/data2vec#transformers.Data2VecTextConfig" >Data2VecTextConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.num_labels)</code>) \u2014 Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Wt=new ge({props:{$$slots:{default:[nv]},$$scope:{ctx:$}}}),Ya=new N({props:{code:`from transformers import RobertaTokenizer, Data2VecTextForTokenClassification import torch tokenizer = RobertaTokenizer.from_pretrained("facebook/data2vec-text-base") model = Data2VecTextForTokenClassification.from_pretrained("facebook/data2vec-text-base") inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([1] * inputs["input_ids"].size(1)).unsqueeze(0) # Batch size 1 outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer, Data2VecTextForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/data2vec-text-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = Data2VecTextForTokenClassification.from_pretrained(<span class="hljs-string">&quot;facebook/data2vec-text-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, 
my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([<span class="hljs-number">1</span>] * inputs[<span class="hljs-string">&quot;input_ids&quot;</span>].size(<span class="hljs-number">1</span>)).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),Za=new B({}),en=new A({props:{name:"class transformers.Data2VecTextForQuestionAnswering",anchor:"transformers.Data2VecTextForQuestionAnswering",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/data2vec/modeling_data2vec_text.py#L1468",parametersDescription:[{anchor:"transformers.Data2VecTextForQuestionAnswering.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/data2vec#transformers.Data2VecTextConfig">Data2VecTextConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),rn=new A({props:{name:"forward",anchor:"transformers.Data2VecTextForQuestionAnswering.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"start_positions",val:" = None"},{name:"end_positions",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/data2vec/modeling_data2vec_text.py#L1482",parametersDescription:[{anchor:"transformers.Data2VecTextForQuestionAnswering.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaTokenizer">RobertaTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.Data2VecTextForQuestionAnswering.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.Data2VecTextForQuestionAnswering.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.Data2VecTextForQuestionAnswering.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.Data2VecTextForQuestionAnswering.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.Data2VecTextForQuestionAnswering.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.Data2VecTextForQuestionAnswering.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.Data2VecTextForQuestionAnswering.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.Data2VecTextForQuestionAnswering.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.Data2VecTextForQuestionAnswering.forward.start_positions",description:`<strong>start_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). Position outside of the sequence are not taken into account for computing the loss.`,name:"start_positions"},{anchor:"transformers.Data2VecTextForQuestionAnswering.forward.end_positions",description:`<strong>end_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). Position outside of the sequence are not taken into account for computing the loss.`,name:"end_positions"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.QuestionAnsweringModelOutput" >transformers.modeling_outputs.QuestionAnsweringModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/data2vec#transformers.Data2VecTextConfig" >Data2VecTextConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.</p> </li> <li> <p><strong>start_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-start scores (before SoftMax).</p> </li> <li> <p><strong>end_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-end scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the 
attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.QuestionAnsweringModelOutput" >transformers.modeling_outputs.QuestionAnsweringModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Ot=new ge({props:{$$slots:{default:[sv]},$$scope:{ctx:$}}}),dn=new N({props:{code:`from transformers import RobertaTokenizer, Data2VecTextForQuestionAnswering import torch torch.manual_seed(0) tokenizer = RobertaTokenizer.from_pretrained("facebook/data2vec-text-base") model = Data2VecTextForQuestionAnswering.from_pretrained("facebook/data2vec-text-base") question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" inputs = tokenizer(question, text, return_tensors="pt") start_positions = torch.tensor([1]) end_positions = torch.tensor([3]) outputs = model(**inputs, start_positions=start_positions, end_positions=end_positions) loss = outputs.loss round(loss.item(), 2) start_scores = outputs.start_logits list(start_scores.shape) end_scores = outputs.end_logits list(end_scores.shape) `,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer, Data2VecTextForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>torch.manual_seed(<span class="hljs-number">0</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/data2vec-text-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = Data2VecTextForQuestionAnswering.from_pretrained(<span class="hljs-string">&quot;facebook/data2vec-text-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>question, text = <span class="hljs-string">&quot;Who was Jim Henson?&quot;</span>, <span class="hljs-string">&quot;Jim Henson was a nice puppet&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(question, text, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>start_positions = torch.tensor([<span class="hljs-number">1</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>end_positions = torch.tensor([<span class="hljs-number">3</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, start_positions=start_positions, end_positions=end_positions) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">round</span>(loss.item(), <span class="hljs-number">2</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>start_scores = outputs.start_logits <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">list</span>(start_scores.shape) <span class="hljs-meta">&gt;&gt;&gt; </span>end_scores = outputs.end_logits <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">list</span>(end_scores.shape) `}}),{c(){p=s("meta"),x=l(),m=s("h1"),y=s("a"),D=s("span"),_(g.$$.fragment),u=l(),V=s("span"),xd=a("Data2Vec"),Qr=l(),ze=s("h2"),mt=s("a"),ms=s("span"),_(Ht.$$.fragment),Dd=l(),fs=s("span"),Vd=a("Overview"),Gr=l(),ft=s("p"),$d=a("The Data2Vec model was proposed in "),Rt=s("a"),Ad=a("data2vec: A General Framework for Self-supervised Learning in Speech, Vision and Language"),Fd=a(` by Alexei Baevski, Wei-Ning 
Hsu, Qiantong Xu, Arun Babu, Jiatao Gu and Michael Auli. Data2Vec proposes a unified framework for self-supervised learning across different data modalities - text, audio and images. Importantly, predicted targets for pre-training are contextualized latent representations of the inputs, rather than modality-specific, context-independent targets.`),Xr=l(),hn=s("p"),Cd=a("The abstract from the paper is the following:"),Ur=l(),un=s("p"),Qt=s("em"),Md=a(`While the general idea of self-supervised learning is identical across modalities, the actual algorithms and objectives differ widely because they were developed with a single modality in mind. To get us closer to general self-supervised learning, we present data2vec, a framework that uses the same learning method for either speech, NLP or computer vision. The core idea is to predict latent representations of the full input data based on a masked view of the input in a selfdistillation setup using a standard Transformer architecture. Instead of predicting modality-specific targets such as words, visual tokens or units of human speech which are local in nature, data2vec predicts contextualized latent representations that contain information from the entire input. Experiments on the major benchmarks of speech recognition, image classification, and natural language understanding demonstrate a new state of the art or competitive performance to predominant approaches. Models and code are available at `),Gt=s("a"),qd=a("www.github.com/pytorch/fairseq/tree/master/examples/data2vec"),jd=a("."),Jr=l(),mn=s("p"),zd=a("Tips:"),Kr=l(),fn=s("ul"),Xt=s("li"),Ed=a(`Both Data2VecAudio and Data2VecText have been trained using the same self-supervised learning method. In the case of Data2VecAudio, preprocessing is identical to `),gn=s("a"),Pd=a("RobertaModel"),Sd=a(", including tokenization."),Yr=l(),ve=s("p"),Ld=a("This model was contributed by "),Ut=s("a"),Nd=a("edugp"),Wd=a(`. The original code can be found `),Jt=s("a"),Id=a("here"),Od=a("."),Zr=l(),Ee=s("h2"),gt=s("a"),gs=s("span"),_(Kt.$$.fragment),Bd=l(),_s=s("span"),Hd=a("Data2VecTextConfig"),ei=l(),H=s("div"),_(Yt.$$.fragment),Rd=l(),_e=s("p"),Qd=a("This is the configuration class to store the configuration of a "),_n=s("a"),Gd=a("Data2VecTextModel"),Xd=a(" and "),vn=s("a"),Ud=a("Data2VecTextModel"),Jd=a(`. It is used to instantiate a Data2VecText model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Data2VecText `),Zt=s("a"),Kd=a("facebook/data2vec-text-base"),Yd=a(" architecture."),Zd=l(),Pe=s("p"),el=a("Configuration objects inherit from "),bn=s("a"),tl=a("PretrainedConfig"),ol=a(` and can be used to control the model outputs. Read the documentation from `),Tn=s("a"),al=a("PretrainedConfig"),nl=a(" for more information."),sl=l(),vs=s("p"),rl=a("Examples:"),il=l(),_(eo.$$.fragment),ti=l(),Se=s("h2"),_t=s("a"),bs=s("span"),_(to.$$.fragment),dl=l(),Ts=s("span"),ll=a("Data2VecAudioConfig"),oi=l(),R=s("div"),_(oo.$$.fragment),cl=l(),Le=s("p"),pl=a("This is the configuration class to store the configuration of a "),kn=s("a"),hl=a("Data2VecAudioModel"),ul=a(`. It is used to instantiate an Data2VecAudio model according to the specified arguments, defining the model architecture. 
Instantiating a configuration with the defaults will yield a similar configuration to that of the Data2VecAudio `),ao=s("a"),ml=a("facebook/data2vec-audio-base-960h"),fl=a(" architecture."),gl=l(),Ne=s("p"),_l=a("Configuration objects inherit from "),wn=s("a"),vl=a("PretrainedConfig"),bl=a(` and can be used to control the model outputs. Read the documentation from `),yn=s("a"),Tl=a("PretrainedConfig"),kl=a(" for more information."),wl=l(),ks=s("p"),yl=a("Example:"),xl=l(),_(no.$$.fragment),ai=l(),We=s("h2"),vt=s("a"),ws=s("span"),_(so.$$.fragment),Dl=l(),ys=s("span"),Vl=a("Data2VecAudioModel"),ni=l(),Q=s("div"),_(ro.$$.fragment),$l=l(),io=s("p"),Al=a(`The bare Data2VecAudio Model transformer outputting raw hidden-states without any specific head on top. Data2VecAudio was proposed in `),lo=s("a"),Fl=a(`data2vec: A General Framework for Self-supervised Learning in Speech, Vision and Language`),Cl=a(` by Alexei Baevski, Wei-Ning Hsu, Qiantong Xu, Arun Babu, Jiatao Gu and Michael Auli.`),Ml=l(),co=s("p"),ql=a("This model inherits from "),xn=s("a"),jl=a("PreTrainedModel"),zl=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.).`),El=l(),po=s("p"),Pl=a("This model is a PyTorch "),ho=s("a"),Sl=a("torch.nn.Module"),Ll=a(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Nl=l(),J=s("div"),_(uo.$$.fragment),Wl=l(),Ie=s("p"),Il=a("The "),Dn=s("a"),Ol=a("Data2VecAudioModel"),Bl=a(" forward method, overrides the "),xs=s("code"),Hl=a("__call__"),Rl=a(" special method."),Ql=l(),_(bt.$$.fragment),Gl=l(),Ds=s("p"),Xl=a("Example:"),Ul=l(),_(mo.$$.fragment),si=l(),Oe=s("h2"),Tt=s("a"),Vs=s("span"),_(fo.$$.fragment),Jl=l(),$s=s("span"),Kl=a("Data2VecAudioForAudioFrameClassification"),ri=l(),M=s("div"),_(go.$$.fragment),Yl=l(),As=s("p"),Zl=a("Data2VecAudio Model with a frame classification head on top for tasks like Speaker Diarization."),ec=l(),_o=s("p"),tc=a("Data2VecAudio was proposed in "),vo=s("a"),oc=a(`data2vec: A General Framework for Self-supervised Learning in Speech, Vision and Language`),ac=a(` by Alexei Baevski, Wei-Ning Hsu, Qiantong Xu, Arun Babu, Jiatao Gu and Michael Auli.`),nc=l(),bo=s("p"),sc=a("This model inherits from "),Vn=s("a"),rc=a("PreTrainedModel"),ic=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.).`),dc=l(),To=s("p"),lc=a("This model is a PyTorch "),ko=s("a"),cc=a("torch.nn.Module"),pc=a(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),hc=l(),K=s("div"),_(wo.$$.fragment),uc=l(),Be=s("p"),mc=a("The "),$n=s("a"),fc=a("Data2VecAudioForAudioFrameClassification"),gc=a(" forward method, overrides the "),Fs=s("code"),_c=a("__call__"),vc=a(" special method."),bc=l(),_(kt.$$.fragment),Tc=l(),Cs=s("p"),kc=a("Example:"),wc=l(),_(yo.$$.fragment),ii=l(),He=s("h2"),wt=s("a"),Ms=s("span"),_(xo.$$.fragment),yc=l(),qs=s("span"),xc=a("Data2VecAudioForCTC"),di=l(),G=s("div"),_(Do.$$.fragment),Dc=l(),Re=s("p"),Vc=a("Data2VecAudio Model with a "),js=s("code"),$c=a("language modeling"),Ac=a(` head on top for Connectionist Temporal Classification (CTC). 
Data2VecAudio was proposed in `),Vo=s("a"),Fc=a(`data2vec: A General Framework for Self-supervised Learning in Speech, Vision and Language`),Cc=a(` by Alexei Baevski, Wei-Ning Hsu, Qiantong Xu, Arun Babu, Jiatao Gu and Michael Auli.`),Mc=l(),$o=s("p"),qc=a("This model inherits from "),An=s("a"),jc=a("PreTrainedModel"),zc=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.).`),Ec=l(),Ao=s("p"),Pc=a("This model is a PyTorch "),Fo=s("a"),Sc=a("torch.nn.Module"),Lc=a(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Nc=l(),W=s("div"),_(Co.$$.fragment),Wc=l(),Qe=s("p"),Ic=a("The "),Fn=s("a"),Oc=a("Data2VecAudioForCTC"),Bc=a(" forward method, overrides the "),zs=s("code"),Hc=a("__call__"),Rc=a(" special method."),Qc=l(),_(yt.$$.fragment),Gc=l(),Es=s("p"),Xc=a("Example:"),Uc=l(),_(Mo.$$.fragment),Jc=l(),_(qo.$$.fragment),li=l(),Ge=s("h2"),xt=s("a"),Ps=s("span"),_(jo.$$.fragment),Kc=l(),Ss=s("span"),Yc=a("Data2VecAudioForSequenceClassification"),ci=l(),q=s("div"),_(zo.$$.fragment),Zc=l(),Ls=s("p"),ep=a(`Data2VecAudio Model with a sequence classification head on top (a linear layer over the pooled output) for tasks like SUPERB Keyword Spotting.`),tp=l(),Eo=s("p"),op=a("Data2VecAudio was proposed in "),Po=s("a"),ap=a(`data2vec: A General Framework for Self-supervised Learning in Speech, Vision and Language`),np=a(` by Alexei Baevski, Wei-Ning Hsu, Qiantong Xu, Arun Babu, Jiatao Gu and Michael Auli.`),sp=l(),So=s("p"),rp=a("This model inherits from "),Cn=s("a"),ip=a("PreTrainedModel"),dp=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.).`),lp=l(),Lo=s("p"),cp=a("This model is a PyTorch "),No=s("a"),pp=a("torch.nn.Module"),hp=a(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),up=l(),I=s("div"),_(Wo.$$.fragment),mp=l(),Xe=s("p"),fp=a("The "),Mn=s("a"),gp=a("Data2VecAudioForSequenceClassification"),_p=a(" forward method, overrides the "),Ns=s("code"),vp=a("__call__"),bp=a(" special method."),Tp=l(),_(Dt.$$.fragment),kp=l(),Ws=s("p"),wp=a("Example:"),yp=l(),_(Io.$$.fragment),xp=l(),_(Oo.$$.fragment),pi=l(),Ue=s("h2"),Vt=s("a"),Is=s("span"),_(Bo.$$.fragment),Dp=l(),Os=s("span"),Vp=a("Data2VecAudioForXVector"),hi=l(),j=s("div"),_(Ho.$$.fragment),$p=l(),Bs=s("p"),Ap=a("Data2VecAudio Model with an XVector feature extraction head on top for tasks like Speaker Verification."),Fp=l(),Ro=s("p"),Cp=a("Data2VecAudio was proposed in "),Qo=s("a"),Mp=a(`data2vec: A General Framework for Self-supervised Learning in Speech, Vision and Language`),qp=a(` by Alexei Baevski, Wei-Ning Hsu, Qiantong Xu, Arun Babu, Jiatao Gu and Michael Auli.`),jp=l(),Go=s("p"),zp=a("This model inherits from "),qn=s("a"),Ep=a("PreTrainedModel"),Pp=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.).`),Sp=l(),Xo=s("p"),Lp=a("This model is a PyTorch "),Uo=s("a"),Np=a("torch.nn.Module"),Wp=a(` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Ip=l(),Y=s("div"),_(Jo.$$.fragment),Op=l(),Je=s("p"),Bp=a("The "),jn=s("a"),Hp=a("Data2VecAudioForXVector"),Rp=a(" forward method, overrides the "),Hs=s("code"),Qp=a("__call__"),Gp=a(" special method."),Xp=l(),_($t.$$.fragment),Up=l(),Rs=s("p"),Jp=a("Example:"),Kp=l(),_(Ko.$$.fragment),ui=l(),Ke=s("h2"),At=s("a"),Qs=s("span"),_(Yo.$$.fragment),Yp=l(),Gs=s("span"),Zp=a("Data2VecTextModel"),mi=l(),F=s("div"),_(Zo.$$.fragment),eh=l(),ea=s("p"),th=a(`The bare Data2VecText Model for text transformer outputting raw hidden-states without any specific head on top. Data2VecText was proposed in `),ta=s("a"),oh=a(`data2vec: A General Framework for Self-supervised Learning in Speech, Vision and Language`),ah=a(` by Alexei Baevski, Wei-Ning Hsu, Qiantong Xu, Arun Babu, Jiatao Gu and Michael Auli.`),nh=l(),oa=s("p"),sh=a("This model inherits from "),zn=s("a"),rh=a("PreTrainedModel"),ih=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),dh=l(),aa=s("p"),lh=a("This model is also a PyTorch "),na=s("a"),ch=a("torch.nn.Module"),ph=a(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),hh=l(),sa=s("p"),uh=a(`The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of cross-attention is added between the self-attention layers, following the architecture described in `),Xs=s("em"),mh=a(`Attention is all you need`),fh=a(`_ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.`),gh=l(),z=s("p"),_h=a("To behave as an decoder the model needs to be initialized with the "),Us=s("code"),vh=a("is_decoder"),bh=a(` argument of the configuration set to `),Js=s("code"),Th=a("True"),kh=a(". To be used in a Seq2Seq model, the model needs to initialized with both "),Ks=s("code"),wh=a("is_decoder"),yh=a(` argument and `),Ys=s("code"),xh=a("add_cross_attention"),Dh=a(" set to "),Zs=s("code"),Vh=a("True"),$h=a("; an "),er=s("code"),Ah=a("encoder_hidden_states"),Fh=a(" is then expected as an input to the forward pass."),Ch=l(),Ft=s("p"),Mh=a(".. _"),tr=s("em"),qh=a("Attention is all you need"),jh=a(": "),ra=s("a"),zh=a("https://arxiv.org/abs/1706.03762"),Eh=l(),Z=s("div"),_(ia.$$.fragment),Ph=l(),Ye=s("p"),Sh=a("The "),En=s("a"),Lh=a("Data2VecTextModel"),Nh=a(" forward method, overrides the "),or=s("code"),Wh=a("__call__"),Ih=a(" special method."),Oh=l(),_(Ct.$$.fragment),Bh=l(),ar=s("p"),Hh=a("Example:"),Rh=l(),_(da.$$.fragment),fi=l(),Ze=s("h2"),Mt=s("a"),nr=s("span"),_(la.$$.fragment),Qh=l(),sr=s("span"),Gh=a("Data2VecTextForCausalLM"),gi=l(),X=s("div"),_(ca.$$.fragment),Xh=l(),et=s("p"),Uh=a("Data2VecText Model with a "),rr=s("code"),Jh=a("language modeling"),Kh=a(` head on top for CLM fine-tuning. Data2VecText was proposed in `),pa=s("a"),Yh=a(`data2vec: A General Framework for Self-supervised Learning in Speech, Vision and Language`),Zh=a(` by Alexei Baevski, Wei-Ning Hsu, Qiantong Xu, Arun Babu, Jiatao Gu and Michael Auli.`),eu=l(),ha=s("p"),tu=a("This model inherits from "),Pn=s("a"),ou=a("PreTrainedModel"),au=a(`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),nu=l(),ua=s("p"),su=a("This model is also a PyTorch "),ma=s("a"),ru=a("torch.nn.Module"),iu=a(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),du=l(),ee=s("div"),_(fa.$$.fragment),lu=l(),tt=s("p"),cu=a("The "),Sn=s("a"),pu=a("Data2VecTextForCausalLM"),hu=a(" forward method, overrides the "),ir=s("code"),uu=a("__call__"),mu=a(" special method."),fu=l(),_(qt.$$.fragment),gu=l(),dr=s("p"),_u=a("Example:"),vu=l(),_(ga.$$.fragment),_i=l(),ot=s("h2"),jt=s("a"),lr=s("span"),_(_a.$$.fragment),bu=l(),cr=s("span"),Tu=a("Data2VecTextForMaskedLM"),vi=l(),U=s("div"),_(va.$$.fragment),ku=l(),at=s("p"),wu=a("data2vec Model with a "),pr=s("code"),yu=a("language modeling"),xu=a(` head on top. Data2VecText was proposed in `),ba=s("a"),Du=a(`data2vec: A General Framework for Self-supervised Learning in Speech, Vision and Language`),Vu=a(` by Alexei Baevski, Wei-Ning Hsu, Qiantong Xu, Arun Babu, Jiatao Gu and Michael Auli.`),$u=l(),Ta=s("p"),Au=a("This model inherits from "),Ln=s("a"),Fu=a("PreTrainedModel"),Cu=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Mu=l(),ka=s("p"),qu=a("This model is also a PyTorch "),wa=s("a"),ju=a("torch.nn.Module"),zu=a(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Eu=l(),te=s("div"),_(ya.$$.fragment),Pu=l(),nt=s("p"),Su=a("The "),Nn=s("a"),Lu=a("Data2VecTextForMaskedLM"),Nu=a(" forward method, overrides the "),hr=s("code"),Wu=a("__call__"),Iu=a(" special method."),Ou=l(),_(zt.$$.fragment),Bu=l(),ur=s("p"),Hu=a("Example:"),Ru=l(),_(xa.$$.fragment),bi=l(),st=s("h2"),Et=s("a"),mr=s("span"),_(Da.$$.fragment),Qu=l(),fr=s("span"),Gu=a("Data2VecTextForSequenceClassification"),Ti=l(),E=s("div"),_(Va.$$.fragment),Xu=l(),gr=s("p"),Uu=a(`Data2VecText Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),Ju=l(),$a=s("p"),Ku=a("Data2VecText was proposed in "),Aa=s("a"),Yu=a(`data2vec: A General Framework for Self-supervised Learning in Speech, Vision and Language`),Zu=a(` by Alexei Baevski, Wei-Ning Hsu, Qiantong Xu, Arun Babu, Jiatao Gu and Michael Auli.`),em=l(),Fa=s("p"),tm=a("This model inherits from "),Wn=s("a"),om=a("PreTrainedModel"),am=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),nm=l(),Ca=s("p"),sm=a("This model is also a PyTorch "),Ma=s("a"),rm=a("torch.nn.Module"),im=a(` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),dm=l(),C=s("div"),_(qa.$$.fragment),lm=l(),rt=s("p"),cm=a("The "),In=s("a"),pm=a("Data2VecTextForSequenceClassification"),hm=a(" forward method, overrides the "),_r=s("code"),um=a("__call__"),mm=a(" special method."),fm=l(),_(Pt.$$.fragment),gm=l(),vr=s("p"),_m=a("Example of single-label classification:"),vm=l(),_(ja.$$.fragment),bm=l(),br=s("p"),Tm=a("Example of multi-label classification:"),km=l(),_(za.$$.fragment),ki=l(),it=s("h2"),St=s("a"),Tr=s("span"),_(Ea.$$.fragment),wm=l(),kr=s("span"),ym=a("Data2VecTextForMultipleChoice"),wi=l(),P=s("div"),_(Pa.$$.fragment),xm=l(),wr=s("p"),Dm=a(`Data2VecText Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.`),Vm=l(),Sa=s("p"),$m=a("Data2VecText was proposed in "),La=s("a"),Am=a(`data2vec: A General Framework for Self-supervised Learning in Speech, Vision and Language`),Fm=a(` by Alexei Baevski, Wei-Ning Hsu, Qiantong Xu, Arun Babu, Jiatao Gu and Michael Auli.`),Cm=l(),Na=s("p"),Mm=a("This model inherits from "),On=s("a"),qm=a("PreTrainedModel"),jm=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),zm=l(),Wa=s("p"),Em=a("This model is also a PyTorch "),Ia=s("a"),Pm=a("torch.nn.Module"),Sm=a(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Lm=l(),oe=s("div"),_(Oa.$$.fragment),Nm=l(),dt=s("p"),Wm=a("The "),Bn=s("a"),Im=a("Data2VecTextForMultipleChoice"),Om=a(" forward method, overrides the "),yr=s("code"),Bm=a("__call__"),Hm=a(" special method."),Rm=l(),_(Lt.$$.fragment),Qm=l(),xr=s("p"),Gm=a("Example:"),Xm=l(),_(Ba.$$.fragment),yi=l(),lt=s("h2"),Nt=s("a"),Dr=s("span"),_(Ha.$$.fragment),Um=l(),Vr=s("span"),Jm=a("Data2VecTextForTokenClassification"),xi=l(),S=s("div"),_(Ra.$$.fragment),Km=l(),$r=s("p"),Ym=a(`Data2VecText Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.`),Zm=l(),Qa=s("p"),ef=a("Data2VecText was proposed in "),Ga=s("a"),tf=a(`data2vec: A General Framework for Self-supervised Learning in Speech, Vision and Language`),of=a(` by Alexei Baevski, Wei-Ning Hsu, Qiantong Xu, Arun Babu, Jiatao Gu and Michael Auli.`),af=l(),Xa=s("p"),nf=a("This model inherits from "),Hn=s("a"),sf=a("PreTrainedModel"),rf=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),df=l(),Ua=s("p"),lf=a("This model is also a PyTorch "),Ja=s("a"),cf=a("torch.nn.Module"),pf=a(` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),hf=l(),ae=s("div"),_(Ka.$$.fragment),uf=l(),ct=s("p"),mf=a("The "),Rn=s("a"),ff=a("Data2VecTextForTokenClassification"),gf=a(" forward method, overrides the "),Ar=s("code"),_f=a("__call__"),vf=a(" special method."),bf=l(),_(Wt.$$.fragment),Tf=l(),Fr=s("p"),kf=a("Example:"),wf=l(),_(Ya.$$.fragment),Di=l(),pt=s("h2"),It=s("a"),Cr=s("span"),_(Za.$$.fragment),yf=l(),Mr=s("span"),xf=a("Data2VecTextForQuestionAnswering"),Vi=l(),L=s("div"),_(en.$$.fragment),Df=l(),ht=s("p"),Vf=a(`Data2VecText Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),qr=s("code"),$f=a("span start logits"),Af=a(" and "),jr=s("code"),Ff=a("span end logits"),Cf=a(")."),Mf=l(),tn=s("p"),qf=a("Data2VecText was proposed in "),on=s("a"),jf=a(`data2vec: A General Framework for Self-supervised Learning in Speech, Vision and Language`),zf=a(` by Alexei Baevski, Wei-Ning Hsu, Qiantong Xu, Arun Babu, Jiatao Gu and Michael Auli.`),Ef=l(),an=s("p"),Pf=a("This model inherits from "),Qn=s("a"),Sf=a("PreTrainedModel"),Lf=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Nf=l(),nn=s("p"),Wf=a("This model is also a PyTorch "),sn=s("a"),If=a("torch.nn.Module"),Of=a(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Bf=l(),ne=s("div"),_(rn.$$.fragment),Hf=l(),ut=s("p"),Rf=a("The "),Gn=s("a"),Qf=a("Data2VecTextForQuestionAnswering"),Gf=a(" forward method, overrides the "),zr=s("code"),Xf=a("__call__"),Uf=a(" special method."),Jf=l(),_(Ot.$$.fragment),Kf=l(),Er=s("p"),Yf=a("Example:"),Zf=l(),_(dn.$$.fragment),this.h()},l(t){const h=G2('[data-svelte="svelte-1phssyn"]',document.head);p=r(h,"META",{name:!0,content:!0}),h.forEach(o),x=c(t),m=r(t,"H1",{class:!0});var ln=i(m);y=r(ln,"A",{id:!0,class:!0,href:!0});var Pr=i(y);D=r(Pr,"SPAN",{});var Sr=i(D);v(g.$$.fragment,Sr),Sr.forEach(o),Pr.forEach(o),u=c(ln),V=r(ln,"SPAN",{});var Lr=i(V);xd=n(Lr,"Data2Vec"),Lr.forEach(o),ln.forEach(o),Qr=c(t),ze=r(t,"H2",{class:!0});var cn=i(ze);mt=r(cn,"A",{id:!0,class:!0,href:!0});var Nr=i(mt);ms=r(Nr,"SPAN",{});var Wr=i(ms);v(Ht.$$.fragment,Wr),Wr.forEach(o),Nr.forEach(o),Dd=c(cn),fs=r(cn,"SPAN",{});var Ir=i(fs);Vd=n(Ir,"Overview"),Ir.forEach(o),cn.forEach(o),Gr=c(t),ft=r(t,"P",{});var pn=i(ft);$d=n(pn,"The Data2Vec model was proposed in "),Rt=r(pn,"A",{href:!0,rel:!0});var Or=i(Rt);Ad=n(Or,"data2vec: A General Framework for Self-supervised Learning in Speech, Vision and Language"),Or.forEach(o),Fd=n(pn,` by Alexei Baevski, Wei-Ning Hsu, Qiantong Xu, Arun Babu, Jiatao Gu and Michael Auli. Data2Vec proposes a unified framework for self-supervised learning across different data modalities - text, audio and images. 
Importantly, predicted targets for pre-training are contextualized latent representations of the inputs, rather than modality-specific, context-independent targets.`),pn.forEach(o),Xr=c(t),hn=r(t,"P",{});var Br=i(hn);Cd=n(Br,"The abstract from the paper is the following:"),Br.forEach(o),Ur=c(t),un=r(t,"P",{});var Hr=i(un);Qt=r(Hr,"EM",{});var Ai=i(Qt);Md=n(Ai,`While the general idea of self-supervised learning is identical across modalities, the actual algorithms and objectives differ widely because they were developed with a single modality in mind. To get us closer to general self-supervised learning, we present data2vec, a framework that uses the same learning method for either speech, NLP or computer vision. The core idea is to predict latent representations of the full input data based on a masked view of the input in a selfdistillation setup using a standard Transformer architecture. Instead of predicting modality-specific targets such as words, visual tokens or units of human speech which are local in nature, data2vec predicts contextualized latent representations that contain information from the entire input. Experiments on the major benchmarks of speech recognition, image classification, and natural language understanding demonstrate a new state of the art or competitive performance to predominant approaches. Models and code are available at `),Gt=r(Ai,"A",{href:!0,rel:!0});var eg=i(Gt);qd=n(eg,"www.github.com/pytorch/fairseq/tree/master/examples/data2vec"),eg.forEach(o),jd=n(Ai,"."),Ai.forEach(o),Hr.forEach(o),Jr=c(t),mn=r(t,"P",{});var tg=i(mn);zd=n(tg,"Tips:"),tg.forEach(o),Kr=c(t),fn=r(t,"UL",{});var og=i(fn);Xt=r(og,"LI",{});var Fi=i(Xt);Ed=n(Fi,`Both Data2VecAudio and Data2VecText have been trained using the same self-supervised learning method. In the case of Data2VecAudio, preprocessing is identical to `),gn=r(Fi,"A",{href:!0});var ag=i(gn);Pd=n(ag,"RobertaModel"),ag.forEach(o),Sd=n(Fi,", including tokenization."),Fi.forEach(o),og.forEach(o),Yr=c(t),ve=r(t,"P",{});var Xn=i(ve);Ld=n(Xn,"This model was contributed by "),Ut=r(Xn,"A",{href:!0,rel:!0});var ng=i(Ut);Nd=n(ng,"edugp"),ng.forEach(o),Wd=n(Xn,`. The original code can be found `),Jt=r(Xn,"A",{href:!0,rel:!0});var sg=i(Jt);Id=n(sg,"here"),sg.forEach(o),Od=n(Xn,"."),Xn.forEach(o),Zr=c(t),Ee=r(t,"H2",{class:!0});var Ci=i(Ee);gt=r(Ci,"A",{id:!0,class:!0,href:!0});var rg=i(gt);gs=r(rg,"SPAN",{});var ig=i(gs);v(Kt.$$.fragment,ig),ig.forEach(o),rg.forEach(o),Bd=c(Ci),_s=r(Ci,"SPAN",{});var dg=i(_s);Hd=n(dg,"Data2VecTextConfig"),dg.forEach(o),Ci.forEach(o),ei=c(t),H=r(t,"DIV",{class:!0});var be=i(H);v(Yt.$$.fragment,be),Rd=c(be),_e=r(be,"P",{});var Bt=i(_e);Qd=n(Bt,"This is the configuration class to store the configuration of a "),_n=r(Bt,"A",{href:!0});var lg=i(_n);Gd=n(lg,"Data2VecTextModel"),lg.forEach(o),Xd=n(Bt," and "),vn=r(Bt,"A",{href:!0});var cg=i(vn);Ud=n(cg,"Data2VecTextModel"),cg.forEach(o),Jd=n(Bt,`. It is used to instantiate a Data2VecText model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Data2VecText `),Zt=r(Bt,"A",{href:!0,rel:!0});var pg=i(Zt);Kd=n(pg,"facebook/data2vec-text-base"),pg.forEach(o),Yd=n(Bt," architecture."),Bt.forEach(o),Zd=c(be),Pe=r(be,"P",{});var Un=i(Pe);el=n(Un,"Configuration objects inherit from "),bn=r(Un,"A",{href:!0});var hg=i(bn);tl=n(hg,"PretrainedConfig"),hg.forEach(o),ol=n(Un,` and can be used to control the model outputs. 
Read the documentation from `),Tn=r(Un,"A",{href:!0});var ug=i(Tn);al=n(ug,"PretrainedConfig"),ug.forEach(o),nl=n(Un," for more information."),Un.forEach(o),sl=c(be),vs=r(be,"P",{});var mg=i(vs);rl=n(mg,"Examples:"),mg.forEach(o),il=c(be),v(eo.$$.fragment,be),be.forEach(o),ti=c(t),Se=r(t,"H2",{class:!0});var Mi=i(Se);_t=r(Mi,"A",{id:!0,class:!0,href:!0});var fg=i(_t);bs=r(fg,"SPAN",{});var gg=i(bs);v(to.$$.fragment,gg),gg.forEach(o),fg.forEach(o),dl=c(Mi),Ts=r(Mi,"SPAN",{});var _g=i(Ts);ll=n(_g,"Data2VecAudioConfig"),_g.forEach(o),Mi.forEach(o),oi=c(t),R=r(t,"DIV",{class:!0});var Te=i(R);v(oo.$$.fragment,Te),cl=c(Te),Le=r(Te,"P",{});var Jn=i(Le);pl=n(Jn,"This is the configuration class to store the configuration of a "),kn=r(Jn,"A",{href:!0});var vg=i(kn);hl=n(vg,"Data2VecAudioModel"),vg.forEach(o),ul=n(Jn,`. It is used to instantiate an Data2VecAudio model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Data2VecAudio `),ao=r(Jn,"A",{href:!0,rel:!0});var bg=i(ao);ml=n(bg,"facebook/data2vec-audio-base-960h"),bg.forEach(o),fl=n(Jn," architecture."),Jn.forEach(o),gl=c(Te),Ne=r(Te,"P",{});var Kn=i(Ne);_l=n(Kn,"Configuration objects inherit from "),wn=r(Kn,"A",{href:!0});var Tg=i(wn);vl=n(Tg,"PretrainedConfig"),Tg.forEach(o),bl=n(Kn,` and can be used to control the model outputs. Read the documentation from `),yn=r(Kn,"A",{href:!0});var kg=i(yn);Tl=n(kg,"PretrainedConfig"),kg.forEach(o),kl=n(Kn," for more information."),Kn.forEach(o),wl=c(Te),ks=r(Te,"P",{});var wg=i(ks);yl=n(wg,"Example:"),wg.forEach(o),xl=c(Te),v(no.$$.fragment,Te),Te.forEach(o),ai=c(t),We=r(t,"H2",{class:!0});var qi=i(We);vt=r(qi,"A",{id:!0,class:!0,href:!0});var yg=i(vt);ws=r(yg,"SPAN",{});var xg=i(ws);v(so.$$.fragment,xg),xg.forEach(o),yg.forEach(o),Dl=c(qi),ys=r(qi,"SPAN",{});var Dg=i(ys);Vl=n(Dg,"Data2VecAudioModel"),Dg.forEach(o),qi.forEach(o),ni=c(t),Q=r(t,"DIV",{class:!0});var ke=i(Q);v(ro.$$.fragment,ke),$l=c(ke),io=r(ke,"P",{});var ji=i(io);Al=n(ji,`The bare Data2VecAudio Model transformer outputting raw hidden-states without any specific head on top. Data2VecAudio was proposed in `),lo=r(ji,"A",{href:!0,rel:!0});var Vg=i(lo);Fl=n(Vg,`data2vec: A General Framework for Self-supervised Learning in Speech, Vision and Language`),Vg.forEach(o),Cl=n(ji,` by Alexei Baevski, Wei-Ning Hsu, Qiantong Xu, Arun Babu, Jiatao Gu and Michael Auli.`),ji.forEach(o),Ml=c(ke),co=r(ke,"P",{});var zi=i(co);ql=n(zi,"This model inherits from "),xn=r(zi,"A",{href:!0});var $g=i(xn);jl=n($g,"PreTrainedModel"),$g.forEach(o),zl=n(zi,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.).`),zi.forEach(o),El=c(ke),po=r(ke,"P",{});var Ei=i(po);Pl=n(Ei,"This model is a PyTorch "),ho=r(Ei,"A",{href:!0,rel:!0});var Ag=i(ho);Sl=n(Ag,"torch.nn.Module"),Ag.forEach(o),Ll=n(Ei,` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Ei.forEach(o),Nl=c(ke),J=r(ke,"DIV",{class:!0});var we=i(J);v(uo.$$.fragment,we),Wl=c(we),Ie=r(we,"P",{});var Yn=i(Ie);Il=n(Yn,"The "),Dn=r(Yn,"A",{href:!0});var Fg=i(Dn);Ol=n(Fg,"Data2VecAudioModel"),Fg.forEach(o),Bl=n(Yn," forward method, overrides the "),xs=r(Yn,"CODE",{});var Cg=i(xs);Hl=n(Cg,"__call__"),Cg.forEach(o),Rl=n(Yn," special method."),Yn.forEach(o),Ql=c(we),v(bt.$$.fragment,we),Gl=c(we),Ds=r(we,"P",{});var Mg=i(Ds);Xl=n(Mg,"Example:"),Mg.forEach(o),Ul=c(we),v(mo.$$.fragment,we),we.forEach(o),ke.forEach(o),si=c(t),Oe=r(t,"H2",{class:!0});var Pi=i(Oe);Tt=r(Pi,"A",{id:!0,class:!0,href:!0});var qg=i(Tt);Vs=r(qg,"SPAN",{});var jg=i(Vs);v(fo.$$.fragment,jg),jg.forEach(o),qg.forEach(o),Jl=c(Pi),$s=r(Pi,"SPAN",{});var zg=i($s);Kl=n(zg,"Data2VecAudioForAudioFrameClassification"),zg.forEach(o),Pi.forEach(o),ri=c(t),M=r(t,"DIV",{class:!0});var ie=i(M);v(go.$$.fragment,ie),Yl=c(ie),As=r(ie,"P",{});var Eg=i(As);Zl=n(Eg,"Data2VecAudio Model with a frame classification head on top for tasks like Speaker Diarization."),Eg.forEach(o),ec=c(ie),_o=r(ie,"P",{});var Si=i(_o);tc=n(Si,"Data2VecAudio was proposed in "),vo=r(Si,"A",{href:!0,rel:!0});var Pg=i(vo);oc=n(Pg,`data2vec: A General Framework for Self-supervised Learning in Speech, Vision and Language`),Pg.forEach(o),ac=n(Si,` by Alexei Baevski, Wei-Ning Hsu, Qiantong Xu, Arun Babu, Jiatao Gu and Michael Auli.`),Si.forEach(o),nc=c(ie),bo=r(ie,"P",{});var Li=i(bo);sc=n(Li,"This model inherits from "),Vn=r(Li,"A",{href:!0});var Sg=i(Vn);rc=n(Sg,"PreTrainedModel"),Sg.forEach(o),ic=n(Li,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.).`),Li.forEach(o),dc=c(ie),To=r(ie,"P",{});var Ni=i(To);lc=n(Ni,"This model is a PyTorch "),ko=r(Ni,"A",{href:!0,rel:!0});var Lg=i(ko);cc=n(Lg,"torch.nn.Module"),Lg.forEach(o),pc=n(Ni,` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Ni.forEach(o),hc=c(ie),K=r(ie,"DIV",{class:!0});var ye=i(K);v(wo.$$.fragment,ye),uc=c(ye),Be=r(ye,"P",{});var Zn=i(Be);mc=n(Zn,"The "),$n=r(Zn,"A",{href:!0});var Ng=i($n);fc=n(Ng,"Data2VecAudioForAudioFrameClassification"),Ng.forEach(o),gc=n(Zn," forward method, overrides the "),Fs=r(Zn,"CODE",{});var Wg=i(Fs);_c=n(Wg,"__call__"),Wg.forEach(o),vc=n(Zn," special method."),Zn.forEach(o),bc=c(ye),v(kt.$$.fragment,ye),Tc=c(ye),Cs=r(ye,"P",{});var Ig=i(Cs);kc=n(Ig,"Example:"),Ig.forEach(o),wc=c(ye),v(yo.$$.fragment,ye),ye.forEach(o),ie.forEach(o),ii=c(t),He=r(t,"H2",{class:!0});var Wi=i(He);wt=r(Wi,"A",{id:!0,class:!0,href:!0});var Og=i(wt);Ms=r(Og,"SPAN",{});var Bg=i(Ms);v(xo.$$.fragment,Bg),Bg.forEach(o),Og.forEach(o),yc=c(Wi),qs=r(Wi,"SPAN",{});var Hg=i(qs);xc=n(Hg,"Data2VecAudioForCTC"),Hg.forEach(o),Wi.forEach(o),di=c(t),G=r(t,"DIV",{class:!0});var xe=i(G);v(Do.$$.fragment,xe),Dc=c(xe),Re=r(xe,"P",{});var es=i(Re);Vc=n(es,"Data2VecAudio Model with a "),js=r(es,"CODE",{});var Rg=i(js);$c=n(Rg,"language modeling"),Rg.forEach(o),Ac=n(es,` head on top for Connectionist Temporal Classification (CTC). 
Data2VecAudio was proposed in `),Vo=r(es,"A",{href:!0,rel:!0});var Qg=i(Vo);Fc=n(Qg,`data2vec: A General Framework for Self-supervised Learning in Speech, Vision and Language`),Qg.forEach(o),Cc=n(es,` by Alexei Baevski, Wei-Ning Hsu, Qiantong Xu, Arun Babu, Jiatao Gu and Michael Auli.`),es.forEach(o),Mc=c(xe),$o=r(xe,"P",{});var Ii=i($o);qc=n(Ii,"This model inherits from "),An=r(Ii,"A",{href:!0});var Gg=i(An);jc=n(Gg,"PreTrainedModel"),Gg.forEach(o),zc=n(Ii,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.).`),Ii.forEach(o),Ec=c(xe),Ao=r(xe,"P",{});var Oi=i(Ao);Pc=n(Oi,"This model is a PyTorch "),Fo=r(Oi,"A",{href:!0,rel:!0});var Xg=i(Fo);Sc=n(Xg,"torch.nn.Module"),Xg.forEach(o),Lc=n(Oi,` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Oi.forEach(o),Nc=c(xe),W=r(xe,"DIV",{class:!0});var de=i(W);v(Co.$$.fragment,de),Wc=c(de),Qe=r(de,"P",{});var ts=i(Qe);Ic=n(ts,"The "),Fn=r(ts,"A",{href:!0});var Ug=i(Fn);Oc=n(Ug,"Data2VecAudioForCTC"),Ug.forEach(o),Bc=n(ts," forward method, overrides the "),zs=r(ts,"CODE",{});var Jg=i(zs);Hc=n(Jg,"__call__"),Jg.forEach(o),Rc=n(ts," special method."),ts.forEach(o),Qc=c(de),v(yt.$$.fragment,de),Gc=c(de),Es=r(de,"P",{});var Kg=i(Es);Xc=n(Kg,"Example:"),Kg.forEach(o),Uc=c(de),v(Mo.$$.fragment,de),Jc=c(de),v(qo.$$.fragment,de),de.forEach(o),xe.forEach(o),li=c(t),Ge=r(t,"H2",{class:!0});var Bi=i(Ge);xt=r(Bi,"A",{id:!0,class:!0,href:!0});var Yg=i(xt);Ps=r(Yg,"SPAN",{});var Zg=i(Ps);v(jo.$$.fragment,Zg),Zg.forEach(o),Yg.forEach(o),Kc=c(Bi),Ss=r(Bi,"SPAN",{});var e_=i(Ss);Yc=n(e_,"Data2VecAudioForSequenceClassification"),e_.forEach(o),Bi.forEach(o),ci=c(t),q=r(t,"DIV",{class:!0});var le=i(q);v(zo.$$.fragment,le),Zc=c(le),Ls=r(le,"P",{});var t_=i(Ls);ep=n(t_,`Data2VecAudio Model with a sequence classification head on top (a linear layer over the pooled output) for tasks like SUPERB Keyword Spotting.`),t_.forEach(o),tp=c(le),Eo=r(le,"P",{});var Hi=i(Eo);op=n(Hi,"Data2VecAudio was proposed in "),Po=r(Hi,"A",{href:!0,rel:!0});var o_=i(Po);ap=n(o_,`data2vec: A General Framework for Self-supervised Learning in Speech, Vision and Language`),o_.forEach(o),np=n(Hi,` by Alexei Baevski, Wei-Ning Hsu, Qiantong Xu, Arun Babu, Jiatao Gu and Michael Auli.`),Hi.forEach(o),sp=c(le),So=r(le,"P",{});var Ri=i(So);rp=n(Ri,"This model inherits from "),Cn=r(Ri,"A",{href:!0});var a_=i(Cn);ip=n(a_,"PreTrainedModel"),a_.forEach(o),dp=n(Ri,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.).`),Ri.forEach(o),lp=c(le),Lo=r(le,"P",{});var Qi=i(Lo);cp=n(Qi,"This model is a PyTorch "),No=r(Qi,"A",{href:!0,rel:!0});var n_=i(No);pp=n(n_,"torch.nn.Module"),n_.forEach(o),hp=n(Qi,` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Qi.forEach(o),up=c(le),I=r(le,"DIV",{class:!0});var ce=i(I);v(Wo.$$.fragment,ce),mp=c(ce),Xe=r(ce,"P",{});var os=i(Xe);fp=n(os,"The "),Mn=r(os,"A",{href:!0});var s_=i(Mn);gp=n(s_,"Data2VecAudioForSequenceClassification"),s_.forEach(o),_p=n(os," forward method, overrides the "),Ns=r(os,"CODE",{});var r_=i(Ns);vp=n(r_,"__call__"),r_.forEach(o),bp=n(os," special method."),os.forEach(o),Tp=c(ce),v(Dt.$$.fragment,ce),kp=c(ce),Ws=r(ce,"P",{});var i_=i(Ws);wp=n(i_,"Example:"),i_.forEach(o),yp=c(ce),v(Io.$$.fragment,ce),xp=c(ce),v(Oo.$$.fragment,ce),ce.forEach(o),le.forEach(o),pi=c(t),Ue=r(t,"H2",{class:!0});var Gi=i(Ue);Vt=r(Gi,"A",{id:!0,class:!0,href:!0});var d_=i(Vt);Is=r(d_,"SPAN",{});var l_=i(Is);v(Bo.$$.fragment,l_),l_.forEach(o),d_.forEach(o),Dp=c(Gi),Os=r(Gi,"SPAN",{});var c_=i(Os);Vp=n(c_,"Data2VecAudioForXVector"),c_.forEach(o),Gi.forEach(o),hi=c(t),j=r(t,"DIV",{class:!0});var pe=i(j);v(Ho.$$.fragment,pe),$p=c(pe),Bs=r(pe,"P",{});var p_=i(Bs);Ap=n(p_,"Data2VecAudio Model with an XVector feature extraction head on top for tasks like Speaker Verification."),p_.forEach(o),Fp=c(pe),Ro=r(pe,"P",{});var Xi=i(Ro);Cp=n(Xi,"Data2VecAudio was proposed in "),Qo=r(Xi,"A",{href:!0,rel:!0});var h_=i(Qo);Mp=n(h_,`data2vec: A General Framework for Self-supervised Learning in Speech, Vision and Language`),h_.forEach(o),qp=n(Xi,` by Alexei Baevski, Wei-Ning Hsu, Qiantong Xu, Arun Babu, Jiatao Gu and Michael Auli.`),Xi.forEach(o),jp=c(pe),Go=r(pe,"P",{});var Ui=i(Go);zp=n(Ui,"This model inherits from "),qn=r(Ui,"A",{href:!0});var u_=i(qn);Ep=n(u_,"PreTrainedModel"),u_.forEach(o),Pp=n(Ui,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.).`),Ui.forEach(o),Sp=c(pe),Xo=r(pe,"P",{});var Ji=i(Xo);Lp=n(Ji,"This model is a PyTorch "),Uo=r(Ji,"A",{href:!0,rel:!0});var m_=i(Uo);Np=n(m_,"torch.nn.Module"),m_.forEach(o),Wp=n(Ji,` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Ji.forEach(o),Ip=c(pe),Y=r(pe,"DIV",{class:!0});var De=i(Y);v(Jo.$$.fragment,De),Op=c(De),Je=r(De,"P",{});var as=i(Je);Bp=n(as,"The "),jn=r(as,"A",{href:!0});var f_=i(jn);Hp=n(f_,"Data2VecAudioForXVector"),f_.forEach(o),Rp=n(as," forward method, overrides the "),Hs=r(as,"CODE",{});var g_=i(Hs);Qp=n(g_,"__call__"),g_.forEach(o),Gp=n(as," special method."),as.forEach(o),Xp=c(De),v($t.$$.fragment,De),Up=c(De),Rs=r(De,"P",{});var __=i(Rs);Jp=n(__,"Example:"),__.forEach(o),Kp=c(De),v(Ko.$$.fragment,De),De.forEach(o),pe.forEach(o),ui=c(t),Ke=r(t,"H2",{class:!0});var Ki=i(Ke);At=r(Ki,"A",{id:!0,class:!0,href:!0});var v_=i(At);Qs=r(v_,"SPAN",{});var b_=i(Qs);v(Yo.$$.fragment,b_),b_.forEach(o),v_.forEach(o),Yp=c(Ki),Gs=r(Ki,"SPAN",{});var T_=i(Gs);Zp=n(T_,"Data2VecTextModel"),T_.forEach(o),Ki.forEach(o),mi=c(t),F=r(t,"DIV",{class:!0});var O=i(F);v(Zo.$$.fragment,O),eh=c(O),ea=r(O,"P",{});var Yi=i(ea);th=n(Yi,`The bare Data2VecText Model for text transformer outputting raw hidden-states without any specific head on top. 
Data2VecText was proposed in `),ta=r(Yi,"A",{href:!0,rel:!0});var k_=i(ta);oh=n(k_,`data2vec: A General Framework for Self-supervised Learning in Speech, Vision and Language`),k_.forEach(o),ah=n(Yi,` by Alexei Baevski, Wei-Ning Hsu, Qiantong Xu, Arun Babu, Jiatao Gu and Michael Auli.`),Yi.forEach(o),nh=c(O),oa=r(O,"P",{});var Zi=i(oa);sh=n(Zi,"This model inherits from "),zn=r(Zi,"A",{href:!0});var w_=i(zn);rh=n(w_,"PreTrainedModel"),w_.forEach(o),ih=n(Zi,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Zi.forEach(o),dh=c(O),aa=r(O,"P",{});var ed=i(aa);lh=n(ed,"This model is also a PyTorch "),na=r(ed,"A",{href:!0,rel:!0});var y_=i(na);ch=n(y_,"torch.nn.Module"),y_.forEach(o),ph=n(ed,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),ed.forEach(o),hh=c(O),sa=r(O,"P",{});var td=i(sa);uh=n(td,`The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of cross-attention is added between the self-attention layers, following the architecture described in `),Xs=r(td,"EM",{});var x_=i(Xs);mh=n(x_,`Attention is all you need`),x_.forEach(o),fh=n(td,`_ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.`),td.forEach(o),gh=c(O),z=r(O,"P",{});var se=i(z);_h=n(se,"To behave as an decoder the model needs to be initialized with the "),Us=r(se,"CODE",{});var D_=i(Us);vh=n(D_,"is_decoder"),D_.forEach(o),bh=n(se,` argument of the configuration set to `),Js=r(se,"CODE",{});var V_=i(Js);Th=n(V_,"True"),V_.forEach(o),kh=n(se,". To be used in a Seq2Seq model, the model needs to initialized with both "),Ks=r(se,"CODE",{});var $_=i(Ks);wh=n($_,"is_decoder"),$_.forEach(o),yh=n(se,` argument and `),Ys=r(se,"CODE",{});var A_=i(Ys);xh=n(A_,"add_cross_attention"),A_.forEach(o),Dh=n(se," set to "),Zs=r(se,"CODE",{});var F_=i(Zs);Vh=n(F_,"True"),F_.forEach(o),$h=n(se,"; an "),er=r(se,"CODE",{});var C_=i(er);Ah=n(C_,"encoder_hidden_states"),C_.forEach(o),Fh=n(se," is then expected as an input to the forward pass."),se.forEach(o),Ch=c(O),Ft=r(O,"P",{});var Rr=i(Ft);Mh=n(Rr,".. 
_"),tr=r(Rr,"EM",{});var M_=i(tr);qh=n(M_,"Attention is all you need"),M_.forEach(o),jh=n(Rr,": "),ra=r(Rr,"A",{href:!0,rel:!0});var q_=i(ra);zh=n(q_,"https://arxiv.org/abs/1706.03762"),q_.forEach(o),Rr.forEach(o),Eh=c(O),Z=r(O,"DIV",{class:!0});var Ve=i(Z);v(ia.$$.fragment,Ve),Ph=c(Ve),Ye=r(Ve,"P",{});var ns=i(Ye);Sh=n(ns,"The "),En=r(ns,"A",{href:!0});var j_=i(En);Lh=n(j_,"Data2VecTextModel"),j_.forEach(o),Nh=n(ns," forward method, overrides the "),or=r(ns,"CODE",{});var z_=i(or);Wh=n(z_,"__call__"),z_.forEach(o),Ih=n(ns," special method."),ns.forEach(o),Oh=c(Ve),v(Ct.$$.fragment,Ve),Bh=c(Ve),ar=r(Ve,"P",{});var E_=i(ar);Hh=n(E_,"Example:"),E_.forEach(o),Rh=c(Ve),v(da.$$.fragment,Ve),Ve.forEach(o),O.forEach(o),fi=c(t),Ze=r(t,"H2",{class:!0});var od=i(Ze);Mt=r(od,"A",{id:!0,class:!0,href:!0});var P_=i(Mt);nr=r(P_,"SPAN",{});var S_=i(nr);v(la.$$.fragment,S_),S_.forEach(o),P_.forEach(o),Qh=c(od),sr=r(od,"SPAN",{});var L_=i(sr);Gh=n(L_,"Data2VecTextForCausalLM"),L_.forEach(o),od.forEach(o),gi=c(t),X=r(t,"DIV",{class:!0});var $e=i(X);v(ca.$$.fragment,$e),Xh=c($e),et=r($e,"P",{});var ss=i(et);Uh=n(ss,"Data2VecText Model with a "),rr=r(ss,"CODE",{});var N_=i(rr);Jh=n(N_,"language modeling"),N_.forEach(o),Kh=n(ss,` head on top for CLM fine-tuning. Data2VecText was proposed in `),pa=r(ss,"A",{href:!0,rel:!0});var W_=i(pa);Yh=n(W_,`data2vec: A General Framework for Self-supervised Learning in Speech, Vision and Language`),W_.forEach(o),Zh=n(ss,` by Alexei Baevski, Wei-Ning Hsu, Qiantong Xu, Arun Babu, Jiatao Gu and Michael Auli.`),ss.forEach(o),eu=c($e),ha=r($e,"P",{});var ad=i(ha);tu=n(ad,"This model inherits from "),Pn=r(ad,"A",{href:!0});var I_=i(Pn);ou=n(I_,"PreTrainedModel"),I_.forEach(o),au=n(ad,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),ad.forEach(o),nu=c($e),ua=r($e,"P",{});var nd=i(ua);su=n(nd,"This model is also a PyTorch "),ma=r(nd,"A",{href:!0,rel:!0});var O_=i(ma);ru=n(O_,"torch.nn.Module"),O_.forEach(o),iu=n(nd,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),nd.forEach(o),du=c($e),ee=r($e,"DIV",{class:!0});var Ae=i(ee);v(fa.$$.fragment,Ae),lu=c(Ae),tt=r(Ae,"P",{});var rs=i(tt);cu=n(rs,"The "),Sn=r(rs,"A",{href:!0});var B_=i(Sn);pu=n(B_,"Data2VecTextForCausalLM"),B_.forEach(o),hu=n(rs," forward method, overrides the "),ir=r(rs,"CODE",{});var H_=i(ir);uu=n(H_,"__call__"),H_.forEach(o),mu=n(rs," special method."),rs.forEach(o),fu=c(Ae),v(qt.$$.fragment,Ae),gu=c(Ae),dr=r(Ae,"P",{});var R_=i(dr);_u=n(R_,"Example:"),R_.forEach(o),vu=c(Ae),v(ga.$$.fragment,Ae),Ae.forEach(o),$e.forEach(o),_i=c(t),ot=r(t,"H2",{class:!0});var sd=i(ot);jt=r(sd,"A",{id:!0,class:!0,href:!0});var Q_=i(jt);lr=r(Q_,"SPAN",{});var G_=i(lr);v(_a.$$.fragment,G_),G_.forEach(o),Q_.forEach(o),bu=c(sd),cr=r(sd,"SPAN",{});var X_=i(cr);Tu=n(X_,"Data2VecTextForMaskedLM"),X_.forEach(o),sd.forEach(o),vi=c(t),U=r(t,"DIV",{class:!0});var Fe=i(U);v(va.$$.fragment,Fe),ku=c(Fe),at=r(Fe,"P",{});var is=i(at);wu=n(is,"data2vec Model with a "),pr=r(is,"CODE",{});var U_=i(pr);yu=n(U_,"language modeling"),U_.forEach(o),xu=n(is,` head on top. 
Data2VecText was proposed in `),ba=r(is,"A",{href:!0,rel:!0});var J_=i(ba);Du=n(J_,`data2vec: A General Framework for Self-supervised Learning in Speech, Vision and Language`),J_.forEach(o),Vu=n(is,` by Alexei Baevski, Wei-Ning Hsu, Qiantong Xu, Arun Babu, Jiatao Gu and Michael Auli.`),is.forEach(o),$u=c(Fe),Ta=r(Fe,"P",{});var rd=i(Ta);Au=n(rd,"This model inherits from "),Ln=r(rd,"A",{href:!0});var K_=i(Ln);Fu=n(K_,"PreTrainedModel"),K_.forEach(o),Cu=n(rd,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),rd.forEach(o),Mu=c(Fe),ka=r(Fe,"P",{});var id=i(ka);qu=n(id,"This model is also a PyTorch "),wa=r(id,"A",{href:!0,rel:!0});var Y_=i(wa);ju=n(Y_,"torch.nn.Module"),Y_.forEach(o),zu=n(id,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),id.forEach(o),Eu=c(Fe),te=r(Fe,"DIV",{class:!0});var Ce=i(te);v(ya.$$.fragment,Ce),Pu=c(Ce),nt=r(Ce,"P",{});var ds=i(nt);Su=n(ds,"The "),Nn=r(ds,"A",{href:!0});var Z_=i(Nn);Lu=n(Z_,"Data2VecTextForMaskedLM"),Z_.forEach(o),Nu=n(ds," forward method, overrides the "),hr=r(ds,"CODE",{});var e2=i(hr);Wu=n(e2,"__call__"),e2.forEach(o),Iu=n(ds," special method."),ds.forEach(o),Ou=c(Ce),v(zt.$$.fragment,Ce),Bu=c(Ce),ur=r(Ce,"P",{});var t2=i(ur);Hu=n(t2,"Example:"),t2.forEach(o),Ru=c(Ce),v(xa.$$.fragment,Ce),Ce.forEach(o),Fe.forEach(o),bi=c(t),st=r(t,"H2",{class:!0});var dd=i(st);Et=r(dd,"A",{id:!0,class:!0,href:!0});var o2=i(Et);mr=r(o2,"SPAN",{});var a2=i(mr);v(Da.$$.fragment,a2),a2.forEach(o),o2.forEach(o),Qu=c(dd),fr=r(dd,"SPAN",{});var n2=i(fr);Gu=n(n2,"Data2VecTextForSequenceClassification"),n2.forEach(o),dd.forEach(o),Ti=c(t),E=r(t,"DIV",{class:!0});var he=i(E);v(Va.$$.fragment,he),Xu=c(he),gr=r(he,"P",{});var s2=i(gr);Uu=n(s2,`Data2VecText Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),s2.forEach(o),Ju=c(he),$a=r(he,"P",{});var ld=i($a);Ku=n(ld,"Data2VecText was proposed in "),Aa=r(ld,"A",{href:!0,rel:!0});var r2=i(Aa);Yu=n(r2,`data2vec: A General Framework for Self-supervised Learning in Speech, Vision and Language`),r2.forEach(o),Zu=n(ld,` by Alexei Baevski, Wei-Ning Hsu, Qiantong Xu, Arun Babu, Jiatao Gu and Michael Auli.`),ld.forEach(o),em=c(he),Fa=r(he,"P",{});var cd=i(Fa);tm=n(cd,"This model inherits from "),Wn=r(cd,"A",{href:!0});var i2=i(Wn);om=n(i2,"PreTrainedModel"),i2.forEach(o),am=n(cd,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),cd.forEach(o),nm=c(he),Ca=r(he,"P",{});var pd=i(Ca);sm=n(pd,"This model is also a PyTorch "),Ma=r(pd,"A",{href:!0,rel:!0});var d2=i(Ma);rm=n(d2,"torch.nn.Module"),d2.forEach(o),im=n(pd,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),pd.forEach(o),dm=c(he),C=r(he,"DIV",{class:!0});var re=i(C);v(qa.$$.fragment,re),lm=c(re),rt=r(re,"P",{});var ls=i(rt);cm=n(ls,"The "),In=r(ls,"A",{href:!0});var l2=i(In);pm=n(l2,"Data2VecTextForSequenceClassification"),l2.forEach(o),hm=n(ls," forward method, overrides the "),_r=r(ls,"CODE",{});var c2=i(_r);um=n(c2,"__call__"),c2.forEach(o),mm=n(ls," special method."),ls.forEach(o),fm=c(re),v(Pt.$$.fragment,re),gm=c(re),vr=r(re,"P",{});var p2=i(vr);_m=n(p2,"Example of single-label classification:"),p2.forEach(o),vm=c(re),v(ja.$$.fragment,re),bm=c(re),br=r(re,"P",{});var h2=i(br);Tm=n(h2,"Example of multi-label classification:"),h2.forEach(o),km=c(re),v(za.$$.fragment,re),re.forEach(o),he.forEach(o),ki=c(t),it=r(t,"H2",{class:!0});var hd=i(it);St=r(hd,"A",{id:!0,class:!0,href:!0});var u2=i(St);Tr=r(u2,"SPAN",{});var m2=i(Tr);v(Ea.$$.fragment,m2),m2.forEach(o),u2.forEach(o),wm=c(hd),kr=r(hd,"SPAN",{});var f2=i(kr);ym=n(f2,"Data2VecTextForMultipleChoice"),f2.forEach(o),hd.forEach(o),wi=c(t),P=r(t,"DIV",{class:!0});var ue=i(P);v(Pa.$$.fragment,ue),xm=c(ue),wr=r(ue,"P",{});var g2=i(wr);Dm=n(g2,`Data2VecText Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.`),g2.forEach(o),Vm=c(ue),Sa=r(ue,"P",{});var ud=i(Sa);$m=n(ud,"Data2VecText was proposed in "),La=r(ud,"A",{href:!0,rel:!0});var _2=i(La);Am=n(_2,`data2vec: A General Framework for Self-supervised Learning in Speech, Vision and Language`),_2.forEach(o),Fm=n(ud,` by Alexei Baevski, Wei-Ning Hsu, Qiantong Xu, Arun Babu, Jiatao Gu and Michael Auli.`),ud.forEach(o),Cm=c(ue),Na=r(ue,"P",{});var md=i(Na);Mm=n(md,"This model inherits from "),On=r(md,"A",{href:!0});var v2=i(On);qm=n(v2,"PreTrainedModel"),v2.forEach(o),jm=n(md,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),md.forEach(o),zm=c(ue),Wa=r(ue,"P",{});var fd=i(Wa);Em=n(fd,"This model is also a PyTorch "),Ia=r(fd,"A",{href:!0,rel:!0});var b2=i(Ia);Pm=n(b2,"torch.nn.Module"),b2.forEach(o),Sm=n(fd,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),fd.forEach(o),Lm=c(ue),oe=r(ue,"DIV",{class:!0});var Me=i(oe);v(Oa.$$.fragment,Me),Nm=c(Me),dt=r(Me,"P",{});var cs=i(dt);Wm=n(cs,"The "),Bn=r(cs,"A",{href:!0});var T2=i(Bn);Im=n(T2,"Data2VecTextForMultipleChoice"),T2.forEach(o),Om=n(cs," forward method, overrides the "),yr=r(cs,"CODE",{});var k2=i(yr);Bm=n(k2,"__call__"),k2.forEach(o),Hm=n(cs," special method."),cs.forEach(o),Rm=c(Me),v(Lt.$$.fragment,Me),Qm=c(Me),xr=r(Me,"P",{});var w2=i(xr);Gm=n(w2,"Example:"),w2.forEach(o),Xm=c(Me),v(Ba.$$.fragment,Me),Me.forEach(o),ue.forEach(o),yi=c(t),lt=r(t,"H2",{class:!0});var gd=i(lt);Nt=r(gd,"A",{id:!0,class:!0,href:!0});var y2=i(Nt);Dr=r(y2,"SPAN",{});var x2=i(Dr);v(Ha.$$.fragment,x2),x2.forEach(o),y2.forEach(o),Um=c(gd),Vr=r(gd,"SPAN",{});var D2=i(Vr);Jm=n(D2,"Data2VecTextForTokenClassification"),D2.forEach(o),gd.forEach(o),xi=c(t),S=r(t,"DIV",{class:!0});var me=i(S);v(Ra.$$.fragment,me),Km=c(me),$r=r(me,"P",{});var V2=i($r);Ym=n(V2,`Data2VecText Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. 
for Named-Entity-Recognition (NER) tasks.`),V2.forEach(o),Zm=c(me),Qa=r(me,"P",{});var _d=i(Qa);ef=n(_d,"Data2VecText was proposed in "),Ga=r(_d,"A",{href:!0,rel:!0});var $2=i(Ga);tf=n($2,`data2vec: A General Framework for Self-supervised Learning in Speech, Vision and Language`),$2.forEach(o),of=n(_d,` by Alexei Baevski, Wei-Ning Hsu, Qiantong Xu, Arun Babu, Jiatao Gu and Michael Auli.`),_d.forEach(o),af=c(me),Xa=r(me,"P",{});var vd=i(Xa);nf=n(vd,"This model inherits from "),Hn=r(vd,"A",{href:!0});var A2=i(Hn);sf=n(A2,"PreTrainedModel"),A2.forEach(o),rf=n(vd,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),vd.forEach(o),df=c(me),Ua=r(me,"P",{});var bd=i(Ua);lf=n(bd,"This model is also a PyTorch "),Ja=r(bd,"A",{href:!0,rel:!0});var F2=i(Ja);cf=n(F2,"torch.nn.Module"),F2.forEach(o),pf=n(bd,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),bd.forEach(o),hf=c(me),ae=r(me,"DIV",{class:!0});var qe=i(ae);v(Ka.$$.fragment,qe),uf=c(qe),ct=r(qe,"P",{});var ps=i(ct);mf=n(ps,"The "),Rn=r(ps,"A",{href:!0});var C2=i(Rn);ff=n(C2,"Data2VecTextForTokenClassification"),C2.forEach(o),gf=n(ps," forward method, overrides the "),Ar=r(ps,"CODE",{});var M2=i(Ar);_f=n(M2,"__call__"),M2.forEach(o),vf=n(ps," special method."),ps.forEach(o),bf=c(qe),v(Wt.$$.fragment,qe),Tf=c(qe),Fr=r(qe,"P",{});var q2=i(Fr);kf=n(q2,"Example:"),q2.forEach(o),wf=c(qe),v(Ya.$$.fragment,qe),qe.forEach(o),me.forEach(o),Di=c(t),pt=r(t,"H2",{class:!0});var Td=i(pt);It=r(Td,"A",{id:!0,class:!0,href:!0});var j2=i(It);Cr=r(j2,"SPAN",{});var z2=i(Cr);v(Za.$$.fragment,z2),z2.forEach(o),j2.forEach(o),yf=c(Td),Mr=r(Td,"SPAN",{});var E2=i(Mr);xf=n(E2,"Data2VecTextForQuestionAnswering"),E2.forEach(o),Td.forEach(o),Vi=c(t),L=r(t,"DIV",{class:!0});var fe=i(L);v(en.$$.fragment,fe),Df=c(fe),ht=r(fe,"P",{});var hs=i(ht);Vf=n(hs,`Data2VecText Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),qr=r(hs,"CODE",{});var P2=i(qr);$f=n(P2,"span start logits"),P2.forEach(o),Af=n(hs," and "),jr=r(hs,"CODE",{});var S2=i(jr);Ff=n(S2,"span end logits"),S2.forEach(o),Cf=n(hs,")."),hs.forEach(o),Mf=c(fe),tn=r(fe,"P",{});var kd=i(tn);qf=n(kd,"Data2VecText was proposed in "),on=r(kd,"A",{href:!0,rel:!0});var L2=i(on);jf=n(L2,`data2vec: A General Framework for Self-supervised Learning in Speech, Vision and Language`),L2.forEach(o),zf=n(kd,` by Alexei Baevski, Wei-Ning Hsu, Qiantong Xu, Arun Babu, Jiatao Gu and Michael Auli.`),kd.forEach(o),Ef=c(fe),an=r(fe,"P",{});var wd=i(an);Pf=n(wd,"This model inherits from "),Qn=r(wd,"A",{href:!0});var N2=i(Qn);Sf=n(N2,"PreTrainedModel"),N2.forEach(o),Lf=n(wd,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),wd.forEach(o),Nf=c(fe),nn=r(fe,"P",{});var yd=i(nn);Wf=n(yd,"This model is also a PyTorch "),sn=r(yd,"A",{href:!0,rel:!0});var W2=i(sn);If=n(W2,"torch.nn.Module"),W2.forEach(o),Of=n(yd,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),yd.forEach(o),Bf=c(fe),ne=r(fe,"DIV",{class:!0});var je=i(ne);v(rn.$$.fragment,je),Hf=c(je),ut=r(je,"P",{});var us=i(ut);Rf=n(us,"The "),Gn=r(us,"A",{href:!0});var I2=i(Gn);Qf=n(I2,"Data2VecTextForQuestionAnswering"),I2.forEach(o),Gf=n(us," forward method, overrides the "),zr=r(us,"CODE",{});var O2=i(zr);Xf=n(O2,"__call__"),O2.forEach(o),Uf=n(us," special method."),us.forEach(o),Jf=c(je),v(Ot.$$.fragment,je),Kf=c(je),Er=r(je,"P",{});var B2=i(Er);Yf=n(B2,"Example:"),B2.forEach(o),Zf=c(je),v(dn.$$.fragment,je),je.forEach(o),fe.forEach(o),this.h()},h(){d(p,"name","hf:doc:metadata"),d(p,"content",JSON.stringify(iv)),d(y,"id","data2vec"),d(y,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(y,"href","#data2vec"),d(m,"class","relative group"),d(mt,"id","overview"),d(mt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(mt,"href","#overview"),d(ze,"class","relative group"),d(Rt,"href","https://arxiv.org/pdf/2202.03555"),d(Rt,"rel","nofollow"),d(Gt,"href","http://www.github.com/pytorch/fairseq/tree/master/examples/data2vec"),d(Gt,"rel","nofollow"),d(gn,"href","/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaModel"),d(Ut,"href","https://huggingface.co/edugp"),d(Ut,"rel","nofollow"),d(Jt,"href","https://github.com/pytorch/fairseq/tree/main/examples/data2vec"),d(Jt,"rel","nofollow"),d(gt,"id","transformers.Data2VecTextConfig"),d(gt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(gt,"href","#transformers.Data2VecTextConfig"),d(Ee,"class","relative group"),d(_n,"href","/docs/transformers/pr_16143/en/model_doc/data2vec#transformers.Data2VecTextModel"),d(vn,"href","/docs/transformers/pr_16143/en/model_doc/data2vec#transformers.Data2VecTextModel"),d(Zt,"href","https://huggingface.co/facebook/data2vec-text-base"),d(Zt,"rel","nofollow"),d(bn,"href","/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig"),d(Tn,"href","/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig"),d(H,"class","docstring"),d(_t,"id","transformers.Data2VecAudioConfig"),d(_t,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(_t,"href","#transformers.Data2VecAudioConfig"),d(Se,"class","relative group"),d(kn,"href","/docs/transformers/pr_16143/en/model_doc/data2vec#transformers.Data2VecAudioModel"),d(ao,"href","https://huggingface.co/facebook/data2vec-audio-base-960h"),d(ao,"rel","nofollow"),d(wn,"href","/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig"),d(yn,"href","/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig"),d(R,"class","docstring"),d(vt,"id","transformers.Data2VecAudioModel"),d(vt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(vt,"href","#transformers.Data2VecAudioModel"),d(We,"class","relative 
group"),d(lo,"href","https://arxiv.org/pdf/2202.03555"),d(lo,"rel","nofollow"),d(xn,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel"),d(ho,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),d(ho,"rel","nofollow"),d(Dn,"href","/docs/transformers/pr_16143/en/model_doc/data2vec#transformers.Data2VecAudioModel"),d(J,"class","docstring"),d(Q,"class","docstring"),d(Tt,"id","transformers.Data2VecAudioForAudioFrameClassification"),d(Tt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(Tt,"href","#transformers.Data2VecAudioForAudioFrameClassification"),d(Oe,"class","relative group"),d(vo,"href","https://arxiv.org/pdf/2202.03555"),d(vo,"rel","nofollow"),d(Vn,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel"),d(ko,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),d(ko,"rel","nofollow"),d($n,"href","/docs/transformers/pr_16143/en/model_doc/data2vec#transformers.Data2VecAudioForAudioFrameClassification"),d(K,"class","docstring"),d(M,"class","docstring"),d(wt,"id","transformers.Data2VecAudioForCTC"),d(wt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(wt,"href","#transformers.Data2VecAudioForCTC"),d(He,"class","relative group"),d(Vo,"href","https://arxiv.org/pdf/2202.03555"),d(Vo,"rel","nofollow"),d(An,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel"),d(Fo,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),d(Fo,"rel","nofollow"),d(Fn,"href","/docs/transformers/pr_16143/en/model_doc/data2vec#transformers.Data2VecAudioForCTC"),d(W,"class","docstring"),d(G,"class","docstring"),d(xt,"id","transformers.Data2VecAudioForSequenceClassification"),d(xt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(xt,"href","#transformers.Data2VecAudioForSequenceClassification"),d(Ge,"class","relative group"),d(Po,"href","https://arxiv.org/pdf/2202.03555"),d(Po,"rel","nofollow"),d(Cn,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel"),d(No,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),d(No,"rel","nofollow"),d(Mn,"href","/docs/transformers/pr_16143/en/model_doc/data2vec#transformers.Data2VecAudioForSequenceClassification"),d(I,"class","docstring"),d(q,"class","docstring"),d(Vt,"id","transformers.Data2VecAudioForXVector"),d(Vt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(Vt,"href","#transformers.Data2VecAudioForXVector"),d(Ue,"class","relative group"),d(Qo,"href","https://arxiv.org/pdf/2202.03555"),d(Qo,"rel","nofollow"),d(qn,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel"),d(Uo,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),d(Uo,"rel","nofollow"),d(jn,"href","/docs/transformers/pr_16143/en/model_doc/data2vec#transformers.Data2VecAudioForXVector"),d(Y,"class","docstring"),d(j,"class","docstring"),d(At,"id","transformers.Data2VecTextModel"),d(At,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 
with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(At,"href","#transformers.Data2VecTextModel"),d(Ke,"class","relative group"),d(ta,"href","https://arxiv.org/pdf/2202.03555"),d(ta,"rel","nofollow"),d(zn,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel"),d(na,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),d(na,"rel","nofollow"),d(ra,"href","https://arxiv.org/abs/1706.03762"),d(ra,"rel","nofollow"),d(En,"href","/docs/transformers/pr_16143/en/model_doc/data2vec#transformers.Data2VecTextModel"),d(Z,"class","docstring"),d(F,"class","docstring"),d(Mt,"id","transformers.Data2VecTextForCausalLM"),d(Mt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(Mt,"href","#transformers.Data2VecTextForCausalLM"),d(Ze,"class","relative group"),d(pa,"href","https://arxiv.org/pdf/2202.03555"),d(pa,"rel","nofollow"),d(Pn,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel"),d(ma,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),d(ma,"rel","nofollow"),d(Sn,"href","/docs/transformers/pr_16143/en/model_doc/data2vec#transformers.Data2VecTextForCausalLM"),d(ee,"class","docstring"),d(X,"class","docstring"),d(jt,"id","transformers.Data2VecTextForMaskedLM"),d(jt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(jt,"href","#transformers.Data2VecTextForMaskedLM"),d(ot,"class","relative group"),d(ba,"href","https://arxiv.org/pdf/2202.03555"),d(ba,"rel","nofollow"),d(Ln,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel"),d(wa,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),d(wa,"rel","nofollow"),d(Nn,"href","/docs/transformers/pr_16143/en/model_doc/data2vec#transformers.Data2VecTextForMaskedLM"),d(te,"class","docstring"),d(U,"class","docstring"),d(Et,"id","transformers.Data2VecTextForSequenceClassification"),d(Et,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(Et,"href","#transformers.Data2VecTextForSequenceClassification"),d(st,"class","relative group"),d(Aa,"href","https://arxiv.org/pdf/2202.03555"),d(Aa,"rel","nofollow"),d(Wn,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel"),d(Ma,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),d(Ma,"rel","nofollow"),d(In,"href","/docs/transformers/pr_16143/en/model_doc/data2vec#transformers.Data2VecTextForSequenceClassification"),d(C,"class","docstring"),d(E,"class","docstring"),d(St,"id","transformers.Data2VecTextForMultipleChoice"),d(St,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(St,"href","#transformers.Data2VecTextForMultipleChoice"),d(it,"class","relative 
group"),d(La,"href","https://arxiv.org/pdf/2202.03555"),d(La,"rel","nofollow"),d(On,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel"),d(Ia,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),d(Ia,"rel","nofollow"),d(Bn,"href","/docs/transformers/pr_16143/en/model_doc/data2vec#transformers.Data2VecTextForMultipleChoice"),d(oe,"class","docstring"),d(P,"class","docstring"),d(Nt,"id","transformers.Data2VecTextForTokenClassification"),d(Nt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(Nt,"href","#transformers.Data2VecTextForTokenClassification"),d(lt,"class","relative group"),d(Ga,"href","https://arxiv.org/pdf/2202.03555"),d(Ga,"rel","nofollow"),d(Hn,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel"),d(Ja,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),d(Ja,"rel","nofollow"),d(Rn,"href","/docs/transformers/pr_16143/en/model_doc/data2vec#transformers.Data2VecTextForTokenClassification"),d(ae,"class","docstring"),d(S,"class","docstring"),d(It,"id","transformers.Data2VecTextForQuestionAnswering"),d(It,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(It,"href","#transformers.Data2VecTextForQuestionAnswering"),d(pt,"class","relative group"),d(on,"href","https://arxiv.org/pdf/2202.03555"),d(on,"rel","nofollow"),d(Qn,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel"),d(sn,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),d(sn,"rel","nofollow"),d(Gn,"href","/docs/transformers/pr_16143/en/model_doc/data2vec#transformers.Data2VecTextForQuestionAnswering"),d(ne,"class","docstring"),d(L,"class","docstring")},m(t,h){e(document.head,p),f(t,x,h),f(t,m,h),e(m,y),e(y,D),b(g,D,null),e(m,u),e(m,V),e(V,xd),f(t,Qr,h),f(t,ze,h),e(ze,mt),e(mt,ms),b(Ht,ms,null),e(ze,Dd),e(ze,fs),e(fs,Vd),f(t,Gr,h),f(t,ft,h),e(ft,$d),e(ft,Rt),e(Rt,Ad),e(ft,Fd),f(t,Xr,h),f(t,hn,h),e(hn,Cd),f(t,Ur,h),f(t,un,h),e(un,Qt),e(Qt,Md),e(Qt,Gt),e(Gt,qd),e(Qt,jd),f(t,Jr,h),f(t,mn,h),e(mn,zd),f(t,Kr,h),f(t,fn,h),e(fn,Xt),e(Xt,Ed),e(Xt,gn),e(gn,Pd),e(Xt,Sd),f(t,Yr,h),f(t,ve,h),e(ve,Ld),e(ve,Ut),e(Ut,Nd),e(ve,Wd),e(ve,Jt),e(Jt,Id),e(ve,Od),f(t,Zr,h),f(t,Ee,h),e(Ee,gt),e(gt,gs),b(Kt,gs,null),e(Ee,Bd),e(Ee,_s),e(_s,Hd),f(t,ei,h),f(t,H,h),b(Yt,H,null),e(H,Rd),e(H,_e),e(_e,Qd),e(_e,_n),e(_n,Gd),e(_e,Xd),e(_e,vn),e(vn,Ud),e(_e,Jd),e(_e,Zt),e(Zt,Kd),e(_e,Yd),e(H,Zd),e(H,Pe),e(Pe,el),e(Pe,bn),e(bn,tl),e(Pe,ol),e(Pe,Tn),e(Tn,al),e(Pe,nl),e(H,sl),e(H,vs),e(vs,rl),e(H,il),b(eo,H,null),f(t,ti,h),f(t,Se,h),e(Se,_t),e(_t,bs),b(to,bs,null),e(Se,dl),e(Se,Ts),e(Ts,ll),f(t,oi,h),f(t,R,h),b(oo,R,null),e(R,cl),e(R,Le),e(Le,pl),e(Le,kn),e(kn,hl),e(Le,ul),e(Le,ao),e(ao,ml),e(Le,fl),e(R,gl),e(R,Ne),e(Ne,_l),e(Ne,wn),e(wn,vl),e(Ne,bl),e(Ne,yn),e(yn,Tl),e(Ne,kl),e(R,wl),e(R,ks),e(ks,yl),e(R,xl),b(no,R,null),f(t,ai,h),f(t,We,h),e(We,vt),e(vt,ws),b(so,ws,null),e(We,Dl),e(We,ys),e(ys,Vl),f(t,ni,h),f(t,Q,h),b(ro,Q,null),e(Q,$l),e(Q,io),e(io,Al),e(io,lo),e(lo,Fl),e(io,Cl),e(Q,Ml),e(Q,co),e(co,ql),e(co,xn),e(xn,jl),e(co,zl),e(Q,El),e(Q,po),e(po,Pl),e(po,ho),e(ho,Sl),e(po,Ll),e(Q,Nl),e(Q,J),b(uo,J,null),e(J,Wl),e(J,Ie),e(Ie,Il),e(Ie,Dn),e(Dn,Ol),e(Ie,Bl),e(Ie,xs),e(xs,Hl),e(Ie,Rl),e(J,Ql),b(bt,J,null),e(J,Gl),e(J,Ds),e(Ds,Xl),e(J,Ul),b(mo,J,null),f(t,si,h),f(t,Oe,h),
e(Oe,Tt),e(Tt,Vs),b(fo,Vs,null),e(Oe,Jl),e(Oe,$s),e($s,Kl),f(t,ri,h),f(t,M,h),b(go,M,null),e(M,Yl),e(M,As),e(As,Zl),e(M,ec),e(M,_o),e(_o,tc),e(_o,vo),e(vo,oc),e(_o,ac),e(M,nc),e(M,bo),e(bo,sc),e(bo,Vn),e(Vn,rc),e(bo,ic),e(M,dc),e(M,To),e(To,lc),e(To,ko),e(ko,cc),e(To,pc),e(M,hc),e(M,K),b(wo,K,null),e(K,uc),e(K,Be),e(Be,mc),e(Be,$n),e($n,fc),e(Be,gc),e(Be,Fs),e(Fs,_c),e(Be,vc),e(K,bc),b(kt,K,null),e(K,Tc),e(K,Cs),e(Cs,kc),e(K,wc),b(yo,K,null),f(t,ii,h),f(t,He,h),e(He,wt),e(wt,Ms),b(xo,Ms,null),e(He,yc),e(He,qs),e(qs,xc),f(t,di,h),f(t,G,h),b(Do,G,null),e(G,Dc),e(G,Re),e(Re,Vc),e(Re,js),e(js,$c),e(Re,Ac),e(Re,Vo),e(Vo,Fc),e(Re,Cc),e(G,Mc),e(G,$o),e($o,qc),e($o,An),e(An,jc),e($o,zc),e(G,Ec),e(G,Ao),e(Ao,Pc),e(Ao,Fo),e(Fo,Sc),e(Ao,Lc),e(G,Nc),e(G,W),b(Co,W,null),e(W,Wc),e(W,Qe),e(Qe,Ic),e(Qe,Fn),e(Fn,Oc),e(Qe,Bc),e(Qe,zs),e(zs,Hc),e(Qe,Rc),e(W,Qc),b(yt,W,null),e(W,Gc),e(W,Es),e(Es,Xc),e(W,Uc),b(Mo,W,null),e(W,Jc),b(qo,W,null),f(t,li,h),f(t,Ge,h),e(Ge,xt),e(xt,Ps),b(jo,Ps,null),e(Ge,Kc),e(Ge,Ss),e(Ss,Yc),f(t,ci,h),f(t,q,h),b(zo,q,null),e(q,Zc),e(q,Ls),e(Ls,ep),e(q,tp),e(q,Eo),e(Eo,op),e(Eo,Po),e(Po,ap),e(Eo,np),e(q,sp),e(q,So),e(So,rp),e(So,Cn),e(Cn,ip),e(So,dp),e(q,lp),e(q,Lo),e(Lo,cp),e(Lo,No),e(No,pp),e(Lo,hp),e(q,up),e(q,I),b(Wo,I,null),e(I,mp),e(I,Xe),e(Xe,fp),e(Xe,Mn),e(Mn,gp),e(Xe,_p),e(Xe,Ns),e(Ns,vp),e(Xe,bp),e(I,Tp),b(Dt,I,null),e(I,kp),e(I,Ws),e(Ws,wp),e(I,yp),b(Io,I,null),e(I,xp),b(Oo,I,null),f(t,pi,h),f(t,Ue,h),e(Ue,Vt),e(Vt,Is),b(Bo,Is,null),e(Ue,Dp),e(Ue,Os),e(Os,Vp),f(t,hi,h),f(t,j,h),b(Ho,j,null),e(j,$p),e(j,Bs),e(Bs,Ap),e(j,Fp),e(j,Ro),e(Ro,Cp),e(Ro,Qo),e(Qo,Mp),e(Ro,qp),e(j,jp),e(j,Go),e(Go,zp),e(Go,qn),e(qn,Ep),e(Go,Pp),e(j,Sp),e(j,Xo),e(Xo,Lp),e(Xo,Uo),e(Uo,Np),e(Xo,Wp),e(j,Ip),e(j,Y),b(Jo,Y,null),e(Y,Op),e(Y,Je),e(Je,Bp),e(Je,jn),e(jn,Hp),e(Je,Rp),e(Je,Hs),e(Hs,Qp),e(Je,Gp),e(Y,Xp),b($t,Y,null),e(Y,Up),e(Y,Rs),e(Rs,Jp),e(Y,Kp),b(Ko,Y,null),f(t,ui,h),f(t,Ke,h),e(Ke,At),e(At,Qs),b(Yo,Qs,null),e(Ke,Yp),e(Ke,Gs),e(Gs,Zp),f(t,mi,h),f(t,F,h),b(Zo,F,null),e(F,eh),e(F,ea),e(ea,th),e(ea,ta),e(ta,oh),e(ea,ah),e(F,nh),e(F,oa),e(oa,sh),e(oa,zn),e(zn,rh),e(oa,ih),e(F,dh),e(F,aa),e(aa,lh),e(aa,na),e(na,ch),e(aa,ph),e(F,hh),e(F,sa),e(sa,uh),e(sa,Xs),e(Xs,mh),e(sa,fh),e(F,gh),e(F,z),e(z,_h),e(z,Us),e(Us,vh),e(z,bh),e(z,Js),e(Js,Th),e(z,kh),e(z,Ks),e(Ks,wh),e(z,yh),e(z,Ys),e(Ys,xh),e(z,Dh),e(z,Zs),e(Zs,Vh),e(z,$h),e(z,er),e(er,Ah),e(z,Fh),e(F,Ch),e(F,Ft),e(Ft,Mh),e(Ft,tr),e(tr,qh),e(Ft,jh),e(Ft,ra),e(ra,zh),e(F,Eh),e(F,Z),b(ia,Z,null),e(Z,Ph),e(Z,Ye),e(Ye,Sh),e(Ye,En),e(En,Lh),e(Ye,Nh),e(Ye,or),e(or,Wh),e(Ye,Ih),e(Z,Oh),b(Ct,Z,null),e(Z,Bh),e(Z,ar),e(ar,Hh),e(Z,Rh),b(da,Z,null),f(t,fi,h),f(t,Ze,h),e(Ze,Mt),e(Mt,nr),b(la,nr,null),e(Ze,Qh),e(Ze,sr),e(sr,Gh),f(t,gi,h),f(t,X,h),b(ca,X,null),e(X,Xh),e(X,et),e(et,Uh),e(et,rr),e(rr,Jh),e(et,Kh),e(et,pa),e(pa,Yh),e(et,Zh),e(X,eu),e(X,ha),e(ha,tu),e(ha,Pn),e(Pn,ou),e(ha,au),e(X,nu),e(X,ua),e(ua,su),e(ua,ma),e(ma,ru),e(ua,iu),e(X,du),e(X,ee),b(fa,ee,null),e(ee,lu),e(ee,tt),e(tt,cu),e(tt,Sn),e(Sn,pu),e(tt,hu),e(tt,ir),e(ir,uu),e(tt,mu),e(ee,fu),b(qt,ee,null),e(ee,gu),e(ee,dr),e(dr,_u),e(ee,vu),b(ga,ee,null),f(t,_i,h),f(t,ot,h),e(ot,jt),e(jt,lr),b(_a,lr,null),e(ot,bu),e(ot,cr),e(cr,Tu),f(t,vi,h),f(t,U,h),b(va,U,null),e(U,ku),e(U,at),e(at,wu),e(at,pr),e(pr,yu),e(at,xu),e(at,ba),e(ba,Du),e(at,Vu),e(U,$u),e(U,Ta),e(Ta,Au),e(Ta,Ln),e(Ln,Fu),e(Ta,Cu),e(U,Mu),e(U,ka),e(ka,qu),e(ka,wa),e(wa,ju),e(ka,zu),e(U,Eu),e(U,te),b(ya,te,null),e(te,Pu),e(te,nt),e(nt,Su),e(nt,Nn),e(Nn,Lu),e(nt,Nu),e(nt,hr),e(hr,Wu),e(nt,Iu),e(te,Ou),b(zt,te,null),e(te,Bu),e(te,ur),e(ur,Hu
),e(te,Ru),b(xa,te,null),f(t,bi,h),f(t,st,h),e(st,Et),e(Et,mr),b(Da,mr,null),e(st,Qu),e(st,fr),e(fr,Gu),f(t,Ti,h),f(t,E,h),b(Va,E,null),e(E,Xu),e(E,gr),e(gr,Uu),e(E,Ju),e(E,$a),e($a,Ku),e($a,Aa),e(Aa,Yu),e($a,Zu),e(E,em),e(E,Fa),e(Fa,tm),e(Fa,Wn),e(Wn,om),e(Fa,am),e(E,nm),e(E,Ca),e(Ca,sm),e(Ca,Ma),e(Ma,rm),e(Ca,im),e(E,dm),e(E,C),b(qa,C,null),e(C,lm),e(C,rt),e(rt,cm),e(rt,In),e(In,pm),e(rt,hm),e(rt,_r),e(_r,um),e(rt,mm),e(C,fm),b(Pt,C,null),e(C,gm),e(C,vr),e(vr,_m),e(C,vm),b(ja,C,null),e(C,bm),e(C,br),e(br,Tm),e(C,km),b(za,C,null),f(t,ki,h),f(t,it,h),e(it,St),e(St,Tr),b(Ea,Tr,null),e(it,wm),e(it,kr),e(kr,ym),f(t,wi,h),f(t,P,h),b(Pa,P,null),e(P,xm),e(P,wr),e(wr,Dm),e(P,Vm),e(P,Sa),e(Sa,$m),e(Sa,La),e(La,Am),e(Sa,Fm),e(P,Cm),e(P,Na),e(Na,Mm),e(Na,On),e(On,qm),e(Na,jm),e(P,zm),e(P,Wa),e(Wa,Em),e(Wa,Ia),e(Ia,Pm),e(Wa,Sm),e(P,Lm),e(P,oe),b(Oa,oe,null),e(oe,Nm),e(oe,dt),e(dt,Wm),e(dt,Bn),e(Bn,Im),e(dt,Om),e(dt,yr),e(yr,Bm),e(dt,Hm),e(oe,Rm),b(Lt,oe,null),e(oe,Qm),e(oe,xr),e(xr,Gm),e(oe,Xm),b(Ba,oe,null),f(t,yi,h),f(t,lt,h),e(lt,Nt),e(Nt,Dr),b(Ha,Dr,null),e(lt,Um),e(lt,Vr),e(Vr,Jm),f(t,xi,h),f(t,S,h),b(Ra,S,null),e(S,Km),e(S,$r),e($r,Ym),e(S,Zm),e(S,Qa),e(Qa,ef),e(Qa,Ga),e(Ga,tf),e(Qa,of),e(S,af),e(S,Xa),e(Xa,nf),e(Xa,Hn),e(Hn,sf),e(Xa,rf),e(S,df),e(S,Ua),e(Ua,lf),e(Ua,Ja),e(Ja,cf),e(Ua,pf),e(S,hf),e(S,ae),b(Ka,ae,null),e(ae,uf),e(ae,ct),e(ct,mf),e(ct,Rn),e(Rn,ff),e(ct,gf),e(ct,Ar),e(Ar,_f),e(ct,vf),e(ae,bf),b(Wt,ae,null),e(ae,Tf),e(ae,Fr),e(Fr,kf),e(ae,wf),b(Ya,ae,null),f(t,Di,h),f(t,pt,h),e(pt,It),e(It,Cr),b(Za,Cr,null),e(pt,yf),e(pt,Mr),e(Mr,xf),f(t,Vi,h),f(t,L,h),b(en,L,null),e(L,Df),e(L,ht),e(ht,Vf),e(ht,qr),e(qr,$f),e(ht,Af),e(ht,jr),e(jr,Ff),e(ht,Cf),e(L,Mf),e(L,tn),e(tn,qf),e(tn,on),e(on,jf),e(tn,zf),e(L,Ef),e(L,an),e(an,Pf),e(an,Qn),e(Qn,Sf),e(an,Lf),e(L,Nf),e(L,nn),e(nn,Wf),e(nn,sn),e(sn,If),e(nn,Of),e(L,Bf),e(L,ne),b(rn,ne,null),e(ne,Hf),e(ne,ut),e(ut,Rf),e(ut,Gn),e(Gn,Qf),e(ut,Gf),e(ut,zr),e(zr,Xf),e(ut,Uf),e(ne,Jf),b(Ot,ne,null),e(ne,Kf),e(ne,Er),e(Er,Yf),e(ne,Zf),b(dn,ne,null),$i=!0},p(t,[h]){const ln={};h&2&&(ln.$$scope={dirty:h,ctx:t}),bt.$set(ln);const Pr={};h&2&&(Pr.$$scope={dirty:h,ctx:t}),kt.$set(Pr);const Sr={};h&2&&(Sr.$$scope={dirty:h,ctx:t}),yt.$set(Sr);const Lr={};h&2&&(Lr.$$scope={dirty:h,ctx:t}),Dt.$set(Lr);const cn={};h&2&&(cn.$$scope={dirty:h,ctx:t}),$t.$set(cn);const Nr={};h&2&&(Nr.$$scope={dirty:h,ctx:t}),Ct.$set(Nr);const Wr={};h&2&&(Wr.$$scope={dirty:h,ctx:t}),qt.$set(Wr);const Ir={};h&2&&(Ir.$$scope={dirty:h,ctx:t}),zt.$set(Ir);const pn={};h&2&&(pn.$$scope={dirty:h,ctx:t}),Pt.$set(pn);const Or={};h&2&&(Or.$$scope={dirty:h,ctx:t}),Lt.$set(Or);const Br={};h&2&&(Br.$$scope={dirty:h,ctx:t}),Wt.$set(Br);const 
Hr={};h&2&&(Hr.$$scope={dirty:h,ctx:t}),Ot.$set(Hr)},i(t){$i||(T(g.$$.fragment,t),T(Ht.$$.fragment,t),T(Kt.$$.fragment,t),T(Yt.$$.fragment,t),T(eo.$$.fragment,t),T(to.$$.fragment,t),T(oo.$$.fragment,t),T(no.$$.fragment,t),T(so.$$.fragment,t),T(ro.$$.fragment,t),T(uo.$$.fragment,t),T(bt.$$.fragment,t),T(mo.$$.fragment,t),T(fo.$$.fragment,t),T(go.$$.fragment,t),T(wo.$$.fragment,t),T(kt.$$.fragment,t),T(yo.$$.fragment,t),T(xo.$$.fragment,t),T(Do.$$.fragment,t),T(Co.$$.fragment,t),T(yt.$$.fragment,t),T(Mo.$$.fragment,t),T(qo.$$.fragment,t),T(jo.$$.fragment,t),T(zo.$$.fragment,t),T(Wo.$$.fragment,t),T(Dt.$$.fragment,t),T(Io.$$.fragment,t),T(Oo.$$.fragment,t),T(Bo.$$.fragment,t),T(Ho.$$.fragment,t),T(Jo.$$.fragment,t),T($t.$$.fragment,t),T(Ko.$$.fragment,t),T(Yo.$$.fragment,t),T(Zo.$$.fragment,t),T(ia.$$.fragment,t),T(Ct.$$.fragment,t),T(da.$$.fragment,t),T(la.$$.fragment,t),T(ca.$$.fragment,t),T(fa.$$.fragment,t),T(qt.$$.fragment,t),T(ga.$$.fragment,t),T(_a.$$.fragment,t),T(va.$$.fragment,t),T(ya.$$.fragment,t),T(zt.$$.fragment,t),T(xa.$$.fragment,t),T(Da.$$.fragment,t),T(Va.$$.fragment,t),T(qa.$$.fragment,t),T(Pt.$$.fragment,t),T(ja.$$.fragment,t),T(za.$$.fragment,t),T(Ea.$$.fragment,t),T(Pa.$$.fragment,t),T(Oa.$$.fragment,t),T(Lt.$$.fragment,t),T(Ba.$$.fragment,t),T(Ha.$$.fragment,t),T(Ra.$$.fragment,t),T(Ka.$$.fragment,t),T(Wt.$$.fragment,t),T(Ya.$$.fragment,t),T(Za.$$.fragment,t),T(en.$$.fragment,t),T(rn.$$.fragment,t),T(Ot.$$.fragment,t),T(dn.$$.fragment,t),$i=!0)},o(t){k(g.$$.fragment,t),k(Ht.$$.fragment,t),k(Kt.$$.fragment,t),k(Yt.$$.fragment,t),k(eo.$$.fragment,t),k(to.$$.fragment,t),k(oo.$$.fragment,t),k(no.$$.fragment,t),k(so.$$.fragment,t),k(ro.$$.fragment,t),k(uo.$$.fragment,t),k(bt.$$.fragment,t),k(mo.$$.fragment,t),k(fo.$$.fragment,t),k(go.$$.fragment,t),k(wo.$$.fragment,t),k(kt.$$.fragment,t),k(yo.$$.fragment,t),k(xo.$$.fragment,t),k(Do.$$.fragment,t),k(Co.$$.fragment,t),k(yt.$$.fragment,t),k(Mo.$$.fragment,t),k(qo.$$.fragment,t),k(jo.$$.fragment,t),k(zo.$$.fragment,t),k(Wo.$$.fragment,t),k(Dt.$$.fragment,t),k(Io.$$.fragment,t),k(Oo.$$.fragment,t),k(Bo.$$.fragment,t),k(Ho.$$.fragment,t),k(Jo.$$.fragment,t),k($t.$$.fragment,t),k(Ko.$$.fragment,t),k(Yo.$$.fragment,t),k(Zo.$$.fragment,t),k(ia.$$.fragment,t),k(Ct.$$.fragment,t),k(da.$$.fragment,t),k(la.$$.fragment,t),k(ca.$$.fragment,t),k(fa.$$.fragment,t),k(qt.$$.fragment,t),k(ga.$$.fragment,t),k(_a.$$.fragment,t),k(va.$$.fragment,t),k(ya.$$.fragment,t),k(zt.$$.fragment,t),k(xa.$$.fragment,t),k(Da.$$.fragment,t),k(Va.$$.fragment,t),k(qa.$$.fragment,t),k(Pt.$$.fragment,t),k(ja.$$.fragment,t),k(za.$$.fragment,t),k(Ea.$$.fragment,t),k(Pa.$$.fragment,t),k(Oa.$$.fragment,t),k(Lt.$$.fragment,t),k(Ba.$$.fragment,t),k(Ha.$$.fragment,t),k(Ra.$$.fragment,t),k(Ka.$$.fragment,t),k(Wt.$$.fragment,t),k(Ya.$$.fragment,t),k(Za.$$.fragment,t),k(en.$$.fragment,t),k(rn.$$.fragment,t),k(Ot.$$.fragment,t),k(dn.$$.fragment,t),$i=!1},d(t){o(p),t&&o(x),t&&o(m),w(g),t&&o(Qr),t&&o(ze),w(Ht),t&&o(Gr),t&&o(ft),t&&o(Xr),t&&o(hn),t&&o(Ur),t&&o(un),t&&o(Jr),t&&o(mn),t&&o(Kr),t&&o(fn),t&&o(Yr),t&&o(ve),t&&o(Zr),t&&o(Ee),w(Kt),t&&o(ei),t&&o(H),w(Yt),w(eo),t&&o(ti),t&&o(Se),w(to),t&&o(oi),t&&o(R),w(oo),w(no),t&&o(ai),t&&o(We),w(so),t&&o(ni),t&&o(Q),w(ro),w(uo),w(bt),w(mo),t&&o(si),t&&o(Oe),w(fo),t&&o(ri),t&&o(M),w(go),w(wo),w(kt),w(yo),t&&o(ii),t&&o(He),w(xo),t&&o(di),t&&o(G),w(Do),w(Co),w(yt),w(Mo),w(qo),t&&o(li),t&&o(Ge),w(jo),t&&o(ci),t&&o(q),w(zo),w(Wo),w(Dt),w(Io),w(Oo),t&&o(pi),t&&o(Ue),w(Bo),t&&o(hi),t&&o(j),w(Ho),w(Jo),w($t),w(Ko),t&&o(ui),t&&o(Ke),w(Yo),t&&o
(mi),t&&o(F),w(Zo),w(ia),w(Ct),w(da),t&&o(fi),t&&o(Ze),w(la),t&&o(gi),t&&o(X),w(ca),w(fa),w(qt),w(ga),t&&o(_i),t&&o(ot),w(_a),t&&o(vi),t&&o(U),w(va),w(ya),w(zt),w(xa),t&&o(bi),t&&o(st),w(Da),t&&o(Ti),t&&o(E),w(Va),w(qa),w(Pt),w(ja),w(za),t&&o(ki),t&&o(it),w(Ea),t&&o(wi),t&&o(P),w(Pa),w(Oa),w(Lt),w(Ba),t&&o(yi),t&&o(lt),w(Ha),t&&o(xi),t&&o(S),w(Ra),w(Ka),w(Wt),w(Ya),t&&o(Di),t&&o(pt),w(Za),t&&o(Vi),t&&o(L),w(en),w(rn),w(Ot),w(dn)}}}const iv={local:"data2vec",sections:[{local:"overview",title:"Overview"},{local:"transformers.Data2VecTextConfig",title:"Data2VecTextConfig"},{local:"transformers.Data2VecAudioConfig",title:"Data2VecAudioConfig"},{local:"transformers.Data2VecAudioModel",title:"Data2VecAudioModel"},{local:"transformers.Data2VecAudioForAudioFrameClassification",title:"Data2VecAudioForAudioFrameClassification"},{local:"transformers.Data2VecAudioForCTC",title:"Data2VecAudioForCTC"},{local:"transformers.Data2VecAudioForSequenceClassification",title:"Data2VecAudioForSequenceClassification"},{local:"transformers.Data2VecAudioForXVector",title:"Data2VecAudioForXVector"},{local:"transformers.Data2VecTextModel",title:"Data2VecTextModel"},{local:"transformers.Data2VecTextForCausalLM",title:"Data2VecTextForCausalLM"},{local:"transformers.Data2VecTextForMaskedLM",title:"Data2VecTextForMaskedLM"},{local:"transformers.Data2VecTextForSequenceClassification",title:"Data2VecTextForSequenceClassification"},{local:"transformers.Data2VecTextForMultipleChoice",title:"Data2VecTextForMultipleChoice"},{local:"transformers.Data2VecTextForTokenClassification",title:"Data2VecTextForTokenClassification"},{local:"transformers.Data2VecTextForQuestionAnswering",title:"Data2VecTextForQuestionAnswering"}],title:"Data2Vec"};function dv($,p,x){let{fw:m}=p;return $.$$set=y=>{"fw"in y&&x(0,m=y.fw)},[m]}class fv extends H2{constructor(p){super();R2(this,p,dv,rv,Q2,{fw:0})}}export{fv as default,iv as metadata};
293
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages/model_doc/segformer.mdx-50f6b8f8.js
import{S as Hc,i as Rc,s as Bc,e as o,k as d,w as u,t as s,M as Wc,c as a,d as t,m as c,a as n,x as _,h as i,b as l,N as Uc,F as e,g as f,y as v,q as b,o as S,B as w}from"../../chunks/vendor-4833417e.js";import{T as va}from"../../chunks/Tip-fffd6df1.js";import{D as re}from"../../chunks/Docstring-4f315ed9.js";import{C as ba}from"../../chunks/CodeBlock-6a3d1b46.js";import{I as He}from"../../chunks/IconCopyLink-4b81c553.js";import"../../chunks/CopyButton-dacfbfaf.js";function Vc(Q){let h,$;return{c(){h=o("p"),$=s(`NumPy arrays and PyTorch tensors are converted to PIL images when resizing, so the most efficient is to pass PIL images.`)},l(g){h=a(g,"P",{});var p=n(h);$=i(p,`NumPy arrays and PyTorch tensors are converted to PIL images when resizing, so the most efficient is to pass PIL images.`),p.forEach(t)},m(g,p){f(g,h,p),e(h,$)},d(g){g&&t(h)}}}function Gc(Q){let h,$,g,p,F;return{c(){h=o("p"),$=s("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=o("code"),p=s("Module"),F=s(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(T){h=a(T,"P",{});var E=n(h);$=i(E,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=a(E,"CODE",{});var L=n(g);p=i(L,"Module"),L.forEach(t),F=i(E,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),E.forEach(t)},m(T,E){f(T,h,E),e(h,$),e(h,g),e(g,p),e(h,F)},d(T){T&&t(h)}}}function Kc(Q){let h,$,g,p,F;return{c(){h=o("p"),$=s("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=o("code"),p=s("Module"),F=s(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(T){h=a(T,"P",{});var E=n(h);$=i(E,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=a(E,"CODE",{});var L=n(g);p=i(L,"Module"),L.forEach(t),F=i(E,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),E.forEach(t)},m(T,E){f(T,h,E),e(h,$),e(h,g),e(g,p),e(h,F)},d(T){T&&t(h)}}}function Jc(Q){let h,$,g,p,F;return{c(){h=o("p"),$=s("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=o("code"),p=s("Module"),F=s(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(T){h=a(T,"P",{});var E=n(h);$=i(E,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=a(E,"CODE",{});var L=n(g);p=i(L,"Module"),L.forEach(t),F=i(E,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),E.forEach(t)},m(T,E){f(T,h,E),e(h,$),e(h,g),e(g,p),e(h,F)},d(T){T&&t(h)}}}function Xc(Q){let 
h,$,g,p,F,T,E,L,Sa,Do,me,$e,kr,Re,wa,Mr,Ta,zo,xe,Ea,Be,ya,$a,jo,It,xa,Ao,kt,Pr,Fa,Lo,Fe,Ia,We,ka,Ma,No,Ue,Pl,qo,oe,Pa,Ve,Ca,Da,Ge,za,ja,Oo,Mt,Aa,Ho,N,R,La,Pt,Na,qa,Ct,Oa,Ha,Dt,Ra,Ba,Ke,Wa,Ua,Va,Je,Ga,Xe,Ka,Ja,Xa,fe,Ya,zt,Za,Qa,Ye,en,tn,rn,y,on,jt,an,nn,Cr,sn,ln,Dr,dn,cn,zr,mn,fn,jr,hn,gn,Ar,pn,un,At,_n,vn,Lr,bn,Sn,Nr,wn,Tn,En,qr,yn,Ro,Ie,Or,I,Lt,Hr,$n,xn,Rr,Br,Fn,In,Wr,Ur,kn,Mn,Nt,Vr,Pn,Cn,qt,Gr,Dn,zn,Ot,Kr,jn,An,k,M,Ht,Ln,Nn,Jr,qn,On,Xr,Hn,Rn,Rt,Bn,Wn,Bt,Un,Vn,Wt,Gn,Kn,P,Ut,Jn,Xn,Yr,Yn,Zn,Zr,Qn,es,Vt,ts,rs,Gt,os,as,Kt,ns,ss,C,Jt,is,ls,Qr,ds,cs,eo,ms,fs,Xt,hs,gs,Yt,ps,us,Zt,_s,vs,D,Qt,bs,Ss,to,ws,Ts,ro,Es,ys,er,$s,xs,tr,Fs,Is,rr,ks,Ms,z,or,Ps,Cs,oo,Ds,zs,ao,js,As,ar,Ls,Ns,nr,qs,Os,sr,Hs,Rs,j,ir,Bs,Ws,no,Us,Vs,so,Gs,Ks,lr,Js,Xs,dr,Ys,Zs,cr,Qs,Bo,he,ke,io,Ze,ei,lo,ti,Wo,A,Qe,ri,ge,oi,mr,ai,ni,et,si,ii,li,pe,di,fr,ci,mi,hr,fi,hi,gi,co,pi,ui,tt,Uo,ue,Me,mo,rt,_i,fo,vi,Vo,B,ot,bi,ho,Si,wi,at,Ti,gr,Ei,yi,$i,ae,nt,xi,go,Fi,Ii,Pe,Go,_e,Ce,po,st,ki,uo,Mi,Ko,ee,it,Pi,lt,Ci,dt,Di,zi,ji,q,ct,Ai,ve,Li,pr,Ni,qi,_o,Oi,Hi,Ri,De,Bi,vo,Wi,Ui,mt,Jo,be,ze,bo,ft,Vi,So,Gi,Xo,Se,ht,Ki,ur,gt,Yo,we,je,wo,pt,Ji,To,Xi,Zo,W,ut,Yi,Eo,Zi,Qi,_t,el,vt,tl,rl,ol,O,bt,al,Te,nl,_r,sl,il,yo,ll,dl,cl,Ae,ml,$o,fl,hl,St,Qo,Ee,Le,xo,wt,gl,Fo,pl,ea,te,Tt,ul,Et,_l,yt,vl,bl,Sl,H,$t,wl,ye,Tl,vr,El,yl,Io,$l,xl,Fl,Ne,Il,ko,kl,Ml,xt,ta;return T=new He({}),Re=new He({}),Ze=new He({}),Qe=new re({props:{name:"class transformers.SegformerConfig",anchor:"transformers.SegformerConfig",parameters:[{name:"image_size",val:" = 224"},{name:"num_channels",val:" = 3"},{name:"num_encoder_blocks",val:" = 4"},{name:"depths",val:" = [2, 2, 2, 2]"},{name:"sr_ratios",val:" = [8, 4, 2, 1]"},{name:"hidden_sizes",val:" = [32, 64, 160, 256]"},{name:"downsampling_rates",val:" = [1, 4, 8, 16]"},{name:"patch_sizes",val:" = [7, 3, 3, 3]"},{name:"strides",val:" = [4, 2, 2, 2]"},{name:"num_attention_heads",val:" = [1, 2, 5, 8]"},{name:"mlp_ratios",val:" = [4, 4, 4, 4]"},{name:"hidden_act",val:" = 'gelu'"},{name:"hidden_dropout_prob",val:" = 0.0"},{name:"attention_probs_dropout_prob",val:" = 0.0"},{name:"classifier_dropout_prob",val:" = 0.1"},{name:"initializer_range",val:" = 0.02"},{name:"drop_path_rate",val:" = 0.1"},{name:"layer_norm_eps",val:" = 1e-06"},{name:"decoder_hidden_size",val:" = 256"},{name:"is_encoder_decoder",val:" = False"},{name:"semantic_loss_ignore_index",val:" = 255"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/segformer/configuration_segformer.py#L31",parametersDescription:[{anchor:"transformers.SegformerConfig.image_size",description:`<strong>image_size</strong> (<code>int</code>, <em>optional</em>, defaults to 512) &#x2014; The size (resolution) of each image.`,name:"image_size"},{anchor:"transformers.SegformerConfig.num_channels",description:`<strong>num_channels</strong> (<code>int</code>, <em>optional</em>, defaults to 3) &#x2014; The number of input channels.`,name:"num_channels"},{anchor:"transformers.SegformerConfig.num_encoder_blocks",description:`<strong>num_encoder_blocks</strong> (<code>int</code>, <em>optional</em>, defaults to 4) &#x2014; The number of encoder blocks (i.e. 
stages in the Mix Transformer encoder).`,name:"num_encoder_blocks"},{anchor:"transformers.SegformerConfig.depths",description:`<strong>depths</strong> (<code>List[int]</code>, <em>optional</em>, defaults to [2, 2, 2, 2]) &#x2014; The number of layers in each encoder block.`,name:"depths"},{anchor:"transformers.SegformerConfig.sr_ratios",description:`<strong>sr_ratios</strong> (<code>List[int]</code>, <em>optional</em>, defaults to [8, 4, 2, 1]) &#x2014; Sequence reduction ratios in each encoder block.`,name:"sr_ratios"},{anchor:"transformers.SegformerConfig.hidden_sizes",description:`<strong>hidden_sizes</strong> (<code>List[int]</code>, <em>optional</em>, defaults to [32, 64, 160, 256]) &#x2014; Dimension of each of the encoder blocks.`,name:"hidden_sizes"},{anchor:"transformers.SegformerConfig.downsampling_rates",description:`<strong>downsampling_rates</strong> (<code>List[int]</code>, <em>optional</em>, defaults to [1, 4, 8, 16]) &#x2014; Downsample rate of the image resolution compared to the original image size before each encoder block.`,name:"downsampling_rates"},{anchor:"transformers.SegformerConfig.patch_sizes",description:`<strong>patch_sizes</strong> (<code>List[int]</code>, <em>optional</em>, defaults to [7, 3, 3, 3]) &#x2014; Patch size before each encoder block.`,name:"patch_sizes"},{anchor:"transformers.SegformerConfig.strides",description:`<strong>strides</strong> (<code>List[int]</code>, <em>optional</em>, defaults to [4, 2, 2, 2]) &#x2014; Stride before each encoder block.`,name:"strides"},{anchor:"transformers.SegformerConfig.num_attention_heads",description:`<strong>num_attention_heads</strong> (<code>List[int]</code>, <em>optional</em>, defaults to [1, 2, 4, 8]) &#x2014; Number of attention heads for each attention layer in each block of the Transformer encoder.`,name:"num_attention_heads"},{anchor:"transformers.SegformerConfig.mlp_ratios",description:`<strong>mlp_ratios</strong> (<code>List[int]</code>, <em>optional</em>, defaults to [4, 4, 4, 4]) &#x2014; Ratio of the size of the hidden layer compared to the size of the input layer of the Mix FFNs in the encoder blocks.`,name:"mlp_ratios"},{anchor:"transformers.SegformerConfig.hidden_act",description:`<strong>hidden_act</strong> (<code>str</code> or <code>function</code>, <em>optional</em>, defaults to <code>&quot;gelu&quot;</code>) &#x2014; The non-linear activation function (function or string) in the encoder and pooler. 
If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;selu&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.`,name:"hidden_act"},{anchor:"transformers.SegformerConfig.hidden_dropout_prob",description:`<strong>hidden_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.`,name:"hidden_dropout_prob"},{anchor:"transformers.SegformerConfig.attention_probs_dropout_prob",description:`<strong>attention_probs_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout ratio for the attention probabilities.`,name:"attention_probs_dropout_prob"},{anchor:"transformers.SegformerConfig.classifier_dropout_prob",description:`<strong>classifier_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability before the classification head.`,name:"classifier_dropout_prob"},{anchor:"transformers.SegformerConfig.initializer_range",description:`<strong>initializer_range</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices.`,name:"initializer_range"},{anchor:"transformers.SegformerConfig.drop_path_rate",description:`<strong>drop_path_rate</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability for stochastic depth, used in the blocks of the Transformer encoder.`,name:"drop_path_rate"},{anchor:"transformers.SegformerConfig.layer_norm_eps",description:`<strong>layer_norm_eps</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-6) &#x2014; The epsilon used by the layer normalization layers.`,name:"layer_norm_eps"},{anchor:"transformers.SegformerConfig.decoder_hidden_size",description:`<strong>decoder_hidden_size</strong> (<code>int</code>, <em>optional</em>, defaults to 256) &#x2014; The dimension of the all-MLP decode head.`,name:"decoder_hidden_size"},{anchor:"transformers.SegformerConfig.semantic_loss_ignore_index",description:`<strong>semantic_loss_ignore_index</strong> (<code>int</code>, <em>optional</em>, defaults to 255) &#x2014; The index that is ignored by the loss function of the semantic segmentation model.`,name:"semantic_loss_ignore_index"}]}}),tt=new ba({props:{code:`from transformers import SegformerModel, SegformerConfig # Initializing a SegFormer nvidia/segformer-b0-finetuned-ade-512-512 style configuration configuration = SegformerConfig() # Initializing a model from the nvidia/segformer-b0-finetuned-ade-512-512 style configuration model = SegformerModel(configuration) # Accessing the model configuration configuration = model.config`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> SegformerModel, SegformerConfig <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a SegFormer nvidia/segformer-b0-finetuned-ade-512-512 style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = SegformerConfig() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a model from the nvidia/segformer-b0-finetuned-ade-512-512 style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = SegformerModel(configuration) <span class="hljs-meta">&gt;&gt;&gt; </span><span 
class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = model.config`}}),rt=new He({}),ot=new re({props:{name:"class transformers.SegformerFeatureExtractor",anchor:"transformers.SegformerFeatureExtractor",parameters:[{name:"do_resize",val:" = True"},{name:"size",val:" = 512"},{name:"resample",val:" = 2"},{name:"do_normalize",val:" = True"},{name:"image_mean",val:" = None"},{name:"image_std",val:" = None"},{name:"reduce_labels",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/segformer/feature_extraction_segformer.py#L37",parametersDescription:[{anchor:"transformers.SegformerFeatureExtractor.do_resize",description:`<strong>do_resize</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether to resize the input based on a certain <code>size</code>.`,name:"do_resize"},{anchor:"transformers.SegformerFeatureExtractor.size",description:`<strong>size</strong> (<code>int</code> or <code>Tuple(int)</code>, <em>optional</em>, defaults to 512) &#x2014; Resize the input to the given size. If a tuple is provided, it should be (width, height). If only an integer is provided, then the input will be resized to (size, size). Only has an effect if <code>do_resize</code> is set to <code>True</code>.`,name:"size"},{anchor:"transformers.SegformerFeatureExtractor.resample",description:`<strong>resample</strong> (<code>int</code>, <em>optional</em>, defaults to <code>PIL.Image.BILINEAR</code>) &#x2014; An optional resampling filter. This can be one of <code>PIL.Image.NEAREST</code>, <code>PIL.Image.BOX</code>, <code>PIL.Image.BILINEAR</code>, <code>PIL.Image.HAMMING</code>, <code>PIL.Image.BICUBIC</code> or <code>PIL.Image.LANCZOS</code>. Only has an effect if <code>do_resize</code> is set to <code>True</code>.`,name:"resample"},{anchor:"transformers.SegformerFeatureExtractor.do_normalize",description:`<strong>do_normalize</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to normalize the input with mean and standard deviation.`,name:"do_normalize"},{anchor:"transformers.SegformerFeatureExtractor.image_mean",description:`<strong>image_mean</strong> (<code>int</code>, <em>optional</em>, defaults to <code>[0.485, 0.456, 0.406]</code>) &#x2014; The sequence of means for each channel, to be used when normalizing images. Defaults to the ImageNet mean.`,name:"image_mean"},{anchor:"transformers.SegformerFeatureExtractor.image_std",description:`<strong>image_std</strong> (<code>int</code>, <em>optional</em>, defaults to <code>[0.229, 0.224, 0.225]</code>) &#x2014; The sequence of standard deviations for each channel, to be used when normalizing images. Defaults to the ImageNet std.`,name:"image_std"},{anchor:"transformers.SegformerFeatureExtractor.reduce_labels",description:`<strong>reduce_labels</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background, and background itself is not included in all classes of a dataset (e.g. ADE20k). 
The background label will be replaced by 255.`,name:"reduce_labels"}]}}),nt=new re({props:{name:"__call__",anchor:"transformers.SegformerFeatureExtractor.__call__",parameters:[{name:"images",val:": typing.Union[PIL.Image.Image, numpy.ndarray, ForwardRef('torch.Tensor'), typing.List[PIL.Image.Image], typing.List[numpy.ndarray], typing.List[ForwardRef('torch.Tensor')]]"},{name:"segmentation_maps",val:": typing.Union[PIL.Image.Image, numpy.ndarray, ForwardRef('torch.Tensor'), typing.List[PIL.Image.Image], typing.List[numpy.ndarray], typing.List[ForwardRef('torch.Tensor')]] = None"},{name:"return_tensors",val:": typing.Union[str, transformers.file_utils.TensorType, NoneType] = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/segformer/feature_extraction_segformer.py#L90",parametersDescription:[{anchor:"transformers.SegformerFeatureExtractor.__call__.images",description:`<strong>images</strong> (<code>PIL.Image.Image</code>, <code>np.ndarray</code>, <code>torch.Tensor</code>, <code>List[PIL.Image.Image]</code>, <code>List[np.ndarray]</code>, <code>List[torch.Tensor]</code>) &#x2014; The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch tensor. In case of a NumPy array/PyTorch tensor, each image should be of shape (C, H, W), where C is the number of channels, H and W are image height and width.`,name:"images"},{anchor:"transformers.SegformerFeatureExtractor.__call__.segmentation_maps",description:`<strong>segmentation_maps</strong> (<code>PIL.Image.Image</code>, <code>np.ndarray</code>, <code>torch.Tensor</code>, <code>List[PIL.Image.Image]</code>, <code>List[np.ndarray]</code>, <code>List[torch.Tensor]</code>, <em>optional</em>) &#x2014; Optionally, the corresponding semantic segmentation maps with the pixel-wise annotations.`,name:"segmentation_maps"},{anchor:"transformers.SegformerFeatureExtractor.__call__.return_tensors",description:`<strong>return_tensors</strong> (<code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>, defaults to <code>&apos;np&apos;</code>) &#x2014; If set, will return tensors of a particular framework. 
Acceptable values are:</p> <ul> <li><code>&apos;tf&apos;</code>: Return TensorFlow <code>tf.constant</code> objects.</li> <li><code>&apos;pt&apos;</code>: Return PyTorch <code>torch.Tensor</code> objects.</li> <li><code>&apos;np&apos;</code>: Return NumPy <code>np.ndarray</code> objects.</li> <li><code>&apos;jax&apos;</code>: Return JAX <code>jnp.ndarray</code> objects.</li> </ul>`,name:"return_tensors"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/main_classes/feature_extractor#transformers.BatchFeature" >BatchFeature</a> with the following fields:</p> <ul> <li><strong>pixel_values</strong> \u2014 Pixel values to be fed to a model, of shape (batch_size, num_channels, height, width).</li> <li><strong>labels</strong> \u2014 Optional labels to be fed to a model (when <code>segmentation_maps</code> are provided)</li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/main_classes/feature_extractor#transformers.BatchFeature" >BatchFeature</a></p> `}}),Pe=new va({props:{warning:"&lcub;true}",$$slots:{default:[Vc]},$$scope:{ctx:Q}}}),st=new He({}),it=new re({props:{name:"class transformers.SegformerModel",anchor:"transformers.SegformerModel",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/segformer/modeling_segformer.py#L477",parametersDescription:[{anchor:"transformers.SegformerModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/segformer#transformers.SegformerConfig">SegformerConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),ct=new re({props:{name:"forward",anchor:"transformers.SegformerModel.forward",parameters:[{name:"pixel_values",val:": FloatTensor"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/segformer/modeling_segformer.py#L496",parametersDescription:[{anchor:"transformers.SegformerModel.forward.pixel_values",description:`<strong>pixel_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_channels, height, width)</code>) &#x2014; Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/segformer#transformers.SegformerFeatureExtractor">SegformerFeatureExtractor</a>. See <a href="/docs/transformers/pr_16143/en/model_doc/segformer#transformers.SegformerFeatureExtractor.__call__">SegformerFeatureExtractor.<strong>call</strong>()</a> for details.`,name:"pixel_values"},{anchor:"transformers.SegformerModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.SegformerModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.SegformerModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.BaseModelOutput" >transformers.modeling_outputs.BaseModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/segformer#transformers.SegformerConfig" >SegformerConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.BaseModelOutput" >transformers.modeling_outputs.BaseModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),De=new va({props:{$$slots:{default:[Gc]},$$scope:{ctx:Q}}}),mt=new ba({props:{code:`from transformers import SegformerFeatureExtractor, SegformerModel import torch from datasets import load_dataset dataset = load_dataset("huggingface/cats-image") image = dataset["test"]["image"][0] feature_extractor = SegformerFeatureExtractor.from_pretrained("nvidia/mit-b0") model = SegformerModel.from_pretrained("nvidia/mit-b0") inputs = feature_extractor(image, return_tensors="pt") with torch.no_grad(): outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state list(last_hidden_states.shape)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> SegformerFeatureExtractor, SegformerModel <span 
class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span>dataset = load_dataset(<span class="hljs-string">&quot;huggingface/cats-image&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>image = dataset[<span class="hljs-string">&quot;test&quot;</span>][<span class="hljs-string">&quot;image&quot;</span>][<span class="hljs-number">0</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = SegformerFeatureExtractor.from_pretrained(<span class="hljs-string">&quot;nvidia/mit-b0&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = SegformerModel.from_pretrained(<span class="hljs-string">&quot;nvidia/mit-b0&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = feature_extractor(image, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">with</span> torch.no_grad(): <span class="hljs-meta">... </span> outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">list</span>(last_hidden_states.shape) [<span class="hljs-number">1</span>, <span class="hljs-number">256</span>, <span class="hljs-number">16</span>, <span class="hljs-number">16</span>]`}}),ft=new He({}),ht=new re({props:{name:"class transformers.SegformerDecodeHead",anchor:"transformers.SegformerDecodeHead",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/segformer/modeling_segformer.py#L650"}}),gt=new re({props:{name:"forward",anchor:"transformers.SegformerDecodeHead.forward",parameters:[{name:"encoder_hidden_states",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/segformer/modeling_segformer.py#L675"}}),pt=new He({}),ut=new re({props:{name:"class transformers.SegformerForImageClassification",anchor:"transformers.SegformerForImageClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/segformer/modeling_segformer.py#L543",parametersDescription:[{anchor:"transformers.SegformerForImageClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/segformer#transformers.SegformerConfig">SegformerConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),bt=new re({props:{name:"forward",anchor:"transformers.SegformerForImageClassification.forward",parameters:[{name:"pixel_values",val:": typing.Optional[torch.FloatTensor] = None"},{name:"labels",val:": typing.Optional[torch.LongTensor] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/segformer/modeling_segformer.py#L556",parametersDescription:[{anchor:"transformers.SegformerForImageClassification.forward.pixel_values",description:`<strong>pixel_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_channels, height, width)</code>) &#x2014; Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/segformer#transformers.SegformerFeatureExtractor">SegformerFeatureExtractor</a>. See <a href="/docs/transformers/pr_16143/en/model_doc/segformer#transformers.SegformerFeatureExtractor.__call__">SegformerFeatureExtractor.<strong>call</strong>()</a> for details.`,name:"pixel_values"},{anchor:"transformers.SegformerForImageClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.SegformerForImageClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.SegformerForImageClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.SegformerForImageClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the image classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. 
If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/segformer#transformers.SegformerConfig" >SegformerConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Ae=new va({props:{$$slots:{default:[Kc]},$$scope:{ctx:Q}}}),St=new ba({props:{code:`from transformers import SegformerFeatureExtractor, SegformerForImageClassification import torch from datasets import load_dataset dataset = load_dataset("huggingface/cats-image") image = dataset["test"]["image"][0] feature_extractor = SegformerFeatureExtractor.from_pretrained("nvidia/mit-b0") model = SegformerForImageClassification.from_pretrained("nvidia/mit-b0") inputs = feature_extractor(image, return_tensors="pt") with torch.no_grad(): logits = model(**inputs).logits # model predicts one of the 1000 ImageNet classes predicted_label = logits.argmax(-1).item() print(model.config.id2label[predicted_label])`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> SegformerFeatureExtractor, SegformerForImageClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span 
class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span>dataset = load_dataset(<span class="hljs-string">&quot;huggingface/cats-image&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>image = dataset[<span class="hljs-string">&quot;test&quot;</span>][<span class="hljs-string">&quot;image&quot;</span>][<span class="hljs-number">0</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = SegformerFeatureExtractor.from_pretrained(<span class="hljs-string">&quot;nvidia/mit-b0&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = SegformerForImageClassification.from_pretrained(<span class="hljs-string">&quot;nvidia/mit-b0&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = feature_extractor(image, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">with</span> torch.no_grad(): <span class="hljs-meta">... </span> logits = model(**inputs).logits <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># model predicts one of the 1000 ImageNet classes</span> <span class="hljs-meta">&gt;&gt;&gt; </span>predicted_label = logits.argmax(-<span class="hljs-number">1</span>).item() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(model.config.id2label[predicted_label]) tabby, tabby cat`}}),wt=new He({}),Tt=new re({props:{name:"class transformers.SegformerForSemanticSegmentation",anchor:"transformers.SegformerForSemanticSegmentation",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/segformer/modeling_segformer.py#L712",parametersDescription:[{anchor:"transformers.SegformerForSemanticSegmentation.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/segformer#transformers.SegformerConfig">SegformerConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),$t=new re({props:{name:"forward",anchor:"transformers.SegformerForSemanticSegmentation.forward",parameters:[{name:"pixel_values",val:": FloatTensor"},{name:"labels",val:": typing.Optional[torch.LongTensor] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/segformer/modeling_segformer.py#L721",parametersDescription:[{anchor:"transformers.SegformerForSemanticSegmentation.forward.pixel_values",description:`<strong>pixel_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_channels, height, width)</code>) &#x2014; Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/segformer#transformers.SegformerFeatureExtractor">SegformerFeatureExtractor</a>. 
See <a href="/docs/transformers/pr_16143/en/model_doc/segformer#transformers.SegformerFeatureExtractor.__call__">SegformerFeatureExtractor.<strong>call</strong>()</a> for details.`,name:"pixel_values"},{anchor:"transformers.SegformerForSemanticSegmentation.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.SegformerForSemanticSegmentation.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.SegformerForSemanticSegmentation.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.SegformerForSemanticSegmentation.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, height, width)</code>, <em>optional</em>) &#x2014; Ground truth semantic segmentation maps for computing the loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. If <code>config.num_labels &gt; 1</code>, a classification loss is computed (Cross-Entropy).`,name:"labels"}],returnDescription:` <p>A <code>transformers.modeling_outputs.SemanticSegmentationModelOutput</code>or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/segformer#transformers.SegformerConfig" >SegformerConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels, logits_height, logits_width)</code>) \u2014 Classification scores for each pixel.</p> <Tip warning={true}> <p>The logits returned do not necessarily have the same size as the <code>pixel_values</code> passed as inputs. This is to avoid doing two interpolations and lose some quality when a user needs to resize the logits to the original image size as post-processing. 
You should always check your logits shape and resize as needed.</p> </Tip> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, patch_size, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, patch_size, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><code>transformers.modeling_outputs.SemanticSegmentationModelOutput</code>or <code>tuple(torch.FloatTensor)</code></p> `}}),Ne=new va({props:{$$slots:{default:[Jc]},$$scope:{ctx:Q}}}),xt=new ba({props:{code:`from transformers import SegformerFeatureExtractor, SegformerForSemanticSegmentation from PIL import Image import requests feature_extractor = SegformerFeatureExtractor.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512") model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512") url = "http://images.cocodataset.org/val2017/000000039769.jpg" image = Image.open(requests.get(url, stream=True).raw) inputs = feature_extractor(images=image, return_tensors="pt") outputs = model(**inputs) logits = outputs.logits # shape (batch_size, num_labels, height, width)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> SegformerFeatureExtractor, SegformerForSemanticSegmentation <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> requests <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = SegformerFeatureExtractor.from_pretrained(<span class="hljs-string">&quot;nvidia/segformer-b0-finetuned-ade-512-512&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = SegformerForSemanticSegmentation.from_pretrained(<span class="hljs-string">&quot;nvidia/segformer-b0-finetuned-ade-512-512&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>url = <span class="hljs-string">&quot;http://images.cocodataset.org/val2017/000000039769.jpg&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>image = Image.<span class="hljs-built_in">open</span>(requests.get(url, stream=<span class="hljs-literal">True</span>).raw) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = feature_extractor(images=image, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits <span class="hljs-comment"># shape (batch_size, num_labels, height, 
width)</span>`}}),{c(){h=o("meta"),$=d(),g=o("h1"),p=o("a"),F=o("span"),u(T.$$.fragment),E=d(),L=o("span"),Sa=s("SegFormer"),Do=d(),me=o("h2"),$e=o("a"),kr=o("span"),u(Re.$$.fragment),wa=d(),Mr=o("span"),Ta=s("Overview"),zo=d(),xe=o("p"),Ea=s("The SegFormer model was proposed in "),Be=o("a"),ya=s("SegFormer: Simple and Efficient Design for Semantic Segmentation with Transformers"),$a=s(` by Enze Xie, Wenhai Wang, Zhiding Yu, Anima Anandkumar, Jose M. Alvarez, Ping Luo. The model consists of a hierarchical Transformer encoder and a lightweight all-MLP decode head to achieve great results on image segmentation benchmarks such as ADE20K and Cityscapes.`),jo=d(),It=o("p"),xa=s("The abstract from the paper is the following:"),Ao=d(),kt=o("p"),Pr=o("em"),Fa=s(`We present SegFormer, a simple, efficient yet powerful semantic segmentation framework which unifies Transformers with lightweight multilayer perception (MLP) decoders. SegFormer has two appealing features: 1) SegFormer comprises a novel hierarchically structured Transformer encoder which outputs multiscale features. It does not need positional encoding, thereby avoiding the interpolation of positional codes which leads to decreased performance when the testing resolution differs from training. 2) SegFormer avoids complex decoders. The proposed MLP decoder aggregates information from different layers, and thus combining both local attention and global attention to render powerful representations. We show that this simple and lightweight design is the key to efficient segmentation on Transformers. We scale our approach up to obtain a series of models from SegFormer-B0 to SegFormer-B5, reaching significantly better performance and efficiency than previous counterparts. For example, SegFormer-B4 achieves 50.3% mIoU on ADE20K with 64M parameters, being 5x smaller and 2.2% better than the previous best method. Our best model, SegFormer-B5, achieves 84.0% mIoU on Cityscapes validation set and shows excellent zero-shot robustness on Cityscapes-C.`),Lo=d(),Fe=o("p"),Ia=s("The figure below illustrates the architecture of SegFormer. Taken from the "),We=o("a"),ka=s("original paper"),Ma=s("."),No=d(),Ue=o("img"),qo=d(),oe=o("p"),Pa=s("This model was contributed by "),Ve=o("a"),Ca=s("nielsr"),Da=s(". The original code can be found "),Ge=o("a"),za=s("here"),ja=s("."),Oo=d(),Mt=o("p"),Aa=s("Tips:"),Ho=d(),N=o("ul"),R=o("li"),La=s(`SegFormer consists of a hierarchical Transformer encoder, and a lightweight all-MLP decode head. `),Pt=o("a"),Na=s("SegformerModel"),qa=s(` is the hierarchical Transformer encoder (which in the paper is also referred to as Mix Transformer or MiT). `),Ct=o("a"),Oa=s("SegformerForSemanticSegmentation"),Ha=s(` adds the all-MLP decode head on top to perform semantic segmentation of images. In addition, there\u2019s `),Dt=o("a"),Ra=s("SegformerForImageClassification"),Ba=s(` which can be used to - you guessed it - classify images. The authors of SegFormer first pre-trained the Transformer encoder on ImageNet-1k to classify images. Next, they throw away the classification head, and replace it by the all-MLP decode head. Next, they fine-tune the model altogether on ADE20K, Cityscapes and COCO-stuff, which are important benchmarks for semantic segmentation. 
All checkpoints can be found on the `),Ke=o("a"),Wa=s("hub"),Ua=s("."),Va=d(),Je=o("li"),Ga=s("The quickest way to get started with SegFormer is by checking the "),Xe=o("a"),Ka=s("example notebooks"),Ja=s(` (which showcase both inference and fine-tuning on custom data).`),Xa=d(),fe=o("li"),Ya=s("One can use "),zt=o("a"),Za=s("SegformerFeatureExtractor"),Qa=s(` to prepare images and corresponding segmentation maps for the model. Note that this feature extractor is fairly basic and does not include all data augmentations used in the original paper. The original preprocessing pipelines (for the ADE20k dataset for instance) can be found `),Ye=o("a"),en=s("here"),tn=s(`. The most important preprocessing step is that images and segmentation maps are randomly cropped and padded to the same size, such as 512x512 or 640x640, after which they are normalized.`),rn=d(),y=o("li"),on=s("One additional thing to keep in mind is that one can initialize "),jt=o("a"),an=s("SegformerFeatureExtractor"),nn=s(` with `),Cr=o("code"),sn=s("reduce_labels"),ln=s(" set to "),Dr=o("code"),dn=s("True"),cn=s(" or "),zr=o("code"),mn=s("False"),fn=s(`. In some datasets (like ADE20k), the 0 index is used in the annotated segmentation maps for background. However, ADE20k doesn\u2019t include the \u201Cbackground\u201D class in its 150 labels. Therefore, `),jr=o("code"),hn=s("reduce_labels"),gn=s(` is used to reduce all labels by 1, and to make sure no loss is computed for the background class (i.e. it replaces 0 in the annotated maps by 255, which is the `),Ar=o("em"),pn=s("ignore_index"),un=s(` of the loss function used by `),At=o("a"),_n=s("SegformerForSemanticSegmentation"),vn=s(`). However, other datasets use the 0 index as background class and include this class as part of all labels. 
In that case, `),Lr=o("code"),bn=s("reduce_labels"),Sn=s(` should be set to `),Nr=o("code"),wn=s("False"),Tn=s(", as loss should also be computed for the background class."),En=d(),qr=o("li"),yn=s("As most models, SegFormer comes in different sizes, the details of which can be found in the table below."),Ro=d(),Ie=o("table"),Or=o("thead"),I=o("tr"),Lt=o("th"),Hr=o("strong"),$n=s("Model variant"),xn=d(),Rr=o("th"),Br=o("strong"),Fn=s("Depths"),In=d(),Wr=o("th"),Ur=o("strong"),kn=s("Hidden sizes"),Mn=d(),Nt=o("th"),Vr=o("strong"),Pn=s("Decoder hidden size"),Cn=d(),qt=o("th"),Gr=o("strong"),Dn=s("Params (M)"),zn=d(),Ot=o("th"),Kr=o("strong"),jn=s("ImageNet-1k Top 1"),An=d(),k=o("tbody"),M=o("tr"),Ht=o("td"),Ln=s("MiT-b0"),Nn=d(),Jr=o("td"),qn=s("[2, 2, 2, 2]"),On=d(),Xr=o("td"),Hn=s("[32, 64, 160, 256]"),Rn=d(),Rt=o("td"),Bn=s("256"),Wn=d(),Bt=o("td"),Un=s("3.7"),Vn=d(),Wt=o("td"),Gn=s("70.5"),Kn=d(),P=o("tr"),Ut=o("td"),Jn=s("MiT-b1"),Xn=d(),Yr=o("td"),Yn=s("[2, 2, 2, 2]"),Zn=d(),Zr=o("td"),Qn=s("[64, 128, 320, 512]"),es=d(),Vt=o("td"),ts=s("256"),rs=d(),Gt=o("td"),os=s("14.0"),as=d(),Kt=o("td"),ns=s("78.7"),ss=d(),C=o("tr"),Jt=o("td"),is=s("MiT-b2"),ls=d(),Qr=o("td"),ds=s("[3, 4, 6, 3]"),cs=d(),eo=o("td"),ms=s("[64, 128, 320, 512]"),fs=d(),Xt=o("td"),hs=s("768"),gs=d(),Yt=o("td"),ps=s("25.4"),us=d(),Zt=o("td"),_s=s("81.6"),vs=d(),D=o("tr"),Qt=o("td"),bs=s("MiT-b3"),Ss=d(),to=o("td"),ws=s("[3, 4, 18, 3]"),Ts=d(),ro=o("td"),Es=s("[64, 128, 320, 512]"),ys=d(),er=o("td"),$s=s("768"),xs=d(),tr=o("td"),Fs=s("45.2"),Is=d(),rr=o("td"),ks=s("83.1"),Ms=d(),z=o("tr"),or=o("td"),Ps=s("MiT-b4"),Cs=d(),oo=o("td"),Ds=s("[3, 8, 27, 3]"),zs=d(),ao=o("td"),js=s("[64, 128, 320, 512]"),As=d(),ar=o("td"),Ls=s("768"),Ns=d(),nr=o("td"),qs=s("62.6"),Os=d(),sr=o("td"),Hs=s("83.6"),Rs=d(),j=o("tr"),ir=o("td"),Bs=s("MiT-b5"),Ws=d(),no=o("td"),Us=s("[3, 6, 40, 3]"),Vs=d(),so=o("td"),Gs=s("[64, 128, 320, 512]"),Ks=d(),lr=o("td"),Js=s("768"),Xs=d(),dr=o("td"),Ys=s("82.0"),Zs=d(),cr=o("td"),Qs=s("83.8"),Bo=d(),he=o("h2"),ke=o("a"),io=o("span"),u(Ze.$$.fragment),ei=d(),lo=o("span"),ti=s("SegformerConfig"),Wo=d(),A=o("div"),u(Qe.$$.fragment),ri=d(),ge=o("p"),oi=s("This is the configuration class to store the configuration of a "),mr=o("a"),ai=s("SegformerModel"),ni=s(`. It is used to instantiate an SegFormer model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the SegFormer `),et=o("a"),si=s("nvidia/segformer-b0-finetuned-ade-512-512"),ii=s(` architecture.`),li=d(),pe=o("p"),di=s("Configuration objects inherit from "),fr=o("a"),ci=s("PretrainedConfig"),mi=s(` and can be used to control the model outputs. Read the documentation from `),hr=o("a"),fi=s("PretrainedConfig"),hi=s(" for more information."),gi=d(),co=o("p"),pi=s("Example:"),ui=d(),u(tt.$$.fragment),Uo=d(),ue=o("h2"),Me=o("a"),mo=o("span"),u(rt.$$.fragment),_i=d(),fo=o("span"),vi=s("SegformerFeatureExtractor"),Vo=d(),B=o("div"),u(ot.$$.fragment),bi=d(),ho=o("p"),Si=s("Constructs a SegFormer feature extractor."),wi=d(),at=o("p"),Ti=s("This feature extractor inherits from "),gr=o("a"),Ei=s("FeatureExtractionMixin"),yi=s(` which contains most of the main methods. 
Users should refer to this superclass for more information regarding those methods.`),$i=d(),ae=o("div"),u(nt.$$.fragment),xi=d(),go=o("p"),Fi=s("Main method to prepare for the model one or several image(s) and optional corresponding segmentation maps."),Ii=d(),u(Pe.$$.fragment),Go=d(),_e=o("h2"),Ce=o("a"),po=o("span"),u(st.$$.fragment),ki=d(),uo=o("span"),Mi=s("SegformerModel"),Ko=d(),ee=o("div"),u(it.$$.fragment),Pi=d(),lt=o("p"),Ci=s(`The bare SegFormer encoder (Mix-Transformer) outputting raw hidden-states without any specific head on top. This model is a PyTorch `),dt=o("a"),Di=s("torch.nn.Module"),zi=s(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),ji=d(),q=o("div"),u(ct.$$.fragment),Ai=d(),ve=o("p"),Li=s("The "),pr=o("a"),Ni=s("SegformerModel"),qi=s(" forward method, overrides the "),_o=o("code"),Oi=s("__call__"),Hi=s(" special method."),Ri=d(),u(De.$$.fragment),Bi=d(),vo=o("p"),Wi=s("Example:"),Ui=d(),u(mt.$$.fragment),Jo=d(),be=o("h2"),ze=o("a"),bo=o("span"),u(ft.$$.fragment),Vi=d(),So=o("span"),Gi=s("SegformerDecodeHead"),Xo=d(),Se=o("div"),u(ht.$$.fragment),Ki=d(),ur=o("div"),u(gt.$$.fragment),Yo=d(),we=o("h2"),je=o("a"),wo=o("span"),u(pt.$$.fragment),Ji=d(),To=o("span"),Xi=s("SegformerForImageClassification"),Zo=d(),W=o("div"),u(ut.$$.fragment),Yi=d(),Eo=o("p"),Zi=s(`SegFormer Model transformer with an image classification head on top (a linear layer on top of the final hidden states) e.g. for ImageNet.`),Qi=d(),_t=o("p"),el=s("This model is a PyTorch "),vt=o("a"),tl=s("torch.nn.Module"),rl=s(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),ol=d(),O=o("div"),u(bt.$$.fragment),al=d(),Te=o("p"),nl=s("The "),_r=o("a"),sl=s("SegformerForImageClassification"),il=s(" forward method, overrides the "),yo=o("code"),ll=s("__call__"),dl=s(" special method."),cl=d(),u(Ae.$$.fragment),ml=d(),$o=o("p"),fl=s("Example:"),hl=d(),u(St.$$.fragment),Qo=d(),Ee=o("h2"),Le=o("a"),xo=o("span"),u(wt.$$.fragment),gl=d(),Fo=o("span"),pl=s("SegformerForSemanticSegmentation"),ea=d(),te=o("div"),u(Tt.$$.fragment),ul=d(),Et=o("p"),_l=s(`SegFormer Model transformer with an all-MLP decode head on top e.g. for ADE20k, CityScapes. This model is a PyTorch `),yt=o("a"),vl=s("torch.nn.Module"),bl=s(` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Sl=d(),H=o("div"),u($t.$$.fragment),wl=d(),ye=o("p"),Tl=s("The "),vr=o("a"),El=s("SegformerForSemanticSegmentation"),yl=s(" forward method, overrides the "),Io=o("code"),$l=s("__call__"),xl=s(" special method."),Fl=d(),u(Ne.$$.fragment),Il=d(),ko=o("p"),kl=s("Examples:"),Ml=d(),u(xt.$$.fragment),this.h()},l(r){const m=Wc('[data-svelte="svelte-1phssyn"]',document.head);h=a(m,"META",{name:!0,content:!0}),m.forEach(t),$=c(r),g=a(r,"H1",{class:!0});var Ft=n(g);p=a(Ft,"A",{id:!0,class:!0,href:!0});var Mo=n(p);F=a(Mo,"SPAN",{});var Po=n(F);_(T.$$.fragment,Po),Po.forEach(t),Mo.forEach(t),E=c(Ft),L=a(Ft,"SPAN",{});var Co=n(L);Sa=i(Co,"SegFormer"),Co.forEach(t),Ft.forEach(t),Do=c(r),me=a(r,"H2",{class:!0});var ra=n(me);$e=a(ra,"A",{id:!0,class:!0,href:!0});var Cl=n($e);kr=a(Cl,"SPAN",{});var Dl=n(kr);_(Re.$$.fragment,Dl),Dl.forEach(t),Cl.forEach(t),wa=c(ra),Mr=a(ra,"SPAN",{});var zl=n(Mr);Ta=i(zl,"Overview"),zl.forEach(t),ra.forEach(t),zo=c(r),xe=a(r,"P",{});var oa=n(xe);Ea=i(oa,"The SegFormer model was proposed in "),Be=a(oa,"A",{href:!0,rel:!0});var jl=n(Be);ya=i(jl,"SegFormer: Simple and Efficient Design for Semantic Segmentation with Transformers"),jl.forEach(t),$a=i(oa,` by Enze Xie, Wenhai Wang, Zhiding Yu, Anima Anandkumar, Jose M. Alvarez, Ping Luo. The model consists of a hierarchical Transformer encoder and a lightweight all-MLP decode head to achieve great results on image segmentation benchmarks such as ADE20K and Cityscapes.`),oa.forEach(t),jo=c(r),It=a(r,"P",{});var Al=n(It);xa=i(Al,"The abstract from the paper is the following:"),Al.forEach(t),Ao=c(r),kt=a(r,"P",{});var Ll=n(kt);Pr=a(Ll,"EM",{});var Nl=n(Pr);Fa=i(Nl,`We present SegFormer, a simple, efficient yet powerful semantic segmentation framework which unifies Transformers with lightweight multilayer perception (MLP) decoders. SegFormer has two appealing features: 1) SegFormer comprises a novel hierarchically structured Transformer encoder which outputs multiscale features. It does not need positional encoding, thereby avoiding the interpolation of positional codes which leads to decreased performance when the testing resolution differs from training. 2) SegFormer avoids complex decoders. The proposed MLP decoder aggregates information from different layers, and thus combining both local attention and global attention to render powerful representations. We show that this simple and lightweight design is the key to efficient segmentation on Transformers. We scale our approach up to obtain a series of models from SegFormer-B0 to SegFormer-B5, reaching significantly better performance and efficiency than previous counterparts. For example, SegFormer-B4 achieves 50.3% mIoU on ADE20K with 64M parameters, being 5x smaller and 2.2% better than the previous best method. Our best model, SegFormer-B5, achieves 84.0% mIoU on Cityscapes validation set and shows excellent zero-shot robustness on Cityscapes-C.`),Nl.forEach(t),Ll.forEach(t),Lo=c(r),Fe=a(r,"P",{});var aa=n(Fe);Ia=i(aa,"The figure below illustrates the architecture of SegFormer. Taken from the "),We=a(aa,"A",{href:!0,rel:!0});var ql=n(We);ka=i(ql,"original paper"),ql.forEach(t),Ma=i(aa,"."),aa.forEach(t),No=c(r),Ue=a(r,"IMG",{width:!0,src:!0}),qo=c(r),oe=a(r,"P",{});var br=n(oe);Pa=i(br,"This model was contributed by "),Ve=a(br,"A",{href:!0,rel:!0});var Ol=n(Ve);Ca=i(Ol,"nielsr"),Ol.forEach(t),Da=i(br,". 
The original code can be found "),Ge=a(br,"A",{href:!0,rel:!0});var Hl=n(Ge);za=i(Hl,"here"),Hl.forEach(t),ja=i(br,"."),br.forEach(t),Oo=c(r),Mt=a(r,"P",{});var Rl=n(Mt);Aa=i(Rl,"Tips:"),Rl.forEach(t),Ho=c(r),N=a(r,"UL",{});var ne=n(N);R=a(ne,"LI",{});var se=n(R);La=i(se,`SegFormer consists of a hierarchical Transformer encoder, and a lightweight all-MLP decode head. `),Pt=a(se,"A",{href:!0});var Bl=n(Pt);Na=i(Bl,"SegformerModel"),Bl.forEach(t),qa=i(se,` is the hierarchical Transformer encoder (which in the paper is also referred to as Mix Transformer or MiT). `),Ct=a(se,"A",{href:!0});var Wl=n(Ct);Oa=i(Wl,"SegformerForSemanticSegmentation"),Wl.forEach(t),Ha=i(se,` adds the all-MLP decode head on top to perform semantic segmentation of images. In addition, there\u2019s `),Dt=a(se,"A",{href:!0});var Ul=n(Dt);Ra=i(Ul,"SegformerForImageClassification"),Ul.forEach(t),Ba=i(se,` which can be used to - you guessed it - classify images. The authors of SegFormer first pre-trained the Transformer encoder on ImageNet-1k to classify images. Next, they throw away the classification head, and replace it by the all-MLP decode head. Next, they fine-tune the model altogether on ADE20K, Cityscapes and COCO-stuff, which are important benchmarks for semantic segmentation. All checkpoints can be found on the `),Ke=a(se,"A",{href:!0,rel:!0});var Vl=n(Ke);Wa=i(Vl,"hub"),Vl.forEach(t),Ua=i(se,"."),se.forEach(t),Va=c(ne),Je=a(ne,"LI",{});var na=n(Je);Ga=i(na,"The quickest way to get started with SegFormer is by checking the "),Xe=a(na,"A",{href:!0,rel:!0});var Gl=n(Xe);Ka=i(Gl,"example notebooks"),Gl.forEach(t),Ja=i(na,` (which showcase both inference and fine-tuning on custom data).`),na.forEach(t),Xa=c(ne),fe=a(ne,"LI",{});var Sr=n(fe);Ya=i(Sr,"One can use "),zt=a(Sr,"A",{href:!0});var Kl=n(zt);Za=i(Kl,"SegformerFeatureExtractor"),Kl.forEach(t),Qa=i(Sr,` to prepare images and corresponding segmentation maps for the model. Note that this feature extractor is fairly basic and does not include all data augmentations used in the original paper. The original preprocessing pipelines (for the ADE20k dataset for instance) can be found `),Ye=a(Sr,"A",{href:!0,rel:!0});var Jl=n(Ye);en=i(Jl,"here"),Jl.forEach(t),tn=i(Sr,`. The most important preprocessing step is that images and segmentation maps are randomly cropped and padded to the same size, such as 512x512 or 640x640, after which they are normalized.`),Sr.forEach(t),rn=c(ne),y=a(ne,"LI",{});var x=n(y);on=i(x,"One additional thing to keep in mind is that one can initialize "),jt=a(x,"A",{href:!0});var Xl=n(jt);an=i(Xl,"SegformerFeatureExtractor"),Xl.forEach(t),nn=i(x,` with `),Cr=a(x,"CODE",{});var Yl=n(Cr);sn=i(Yl,"reduce_labels"),Yl.forEach(t),ln=i(x," set to "),Dr=a(x,"CODE",{});var Zl=n(Dr);dn=i(Zl,"True"),Zl.forEach(t),cn=i(x," or "),zr=a(x,"CODE",{});var Ql=n(zr);mn=i(Ql,"False"),Ql.forEach(t),fn=i(x,`. In some datasets (like ADE20k), the 0 index is used in the annotated segmentation maps for background. However, ADE20k doesn\u2019t include the \u201Cbackground\u201D class in its 150 labels. Therefore, `),jr=a(x,"CODE",{});var ed=n(jr);hn=i(ed,"reduce_labels"),ed.forEach(t),gn=i(x,` is used to reduce all labels by 1, and to make sure no loss is computed for the background class (i.e. it replaces 0 in the annotated maps by 255, which is the `),Ar=a(x,"EM",{});var td=n(Ar);pn=i(td,"ignore_index"),td.forEach(t),un=i(x,` of the loss function used by `),At=a(x,"A",{href:!0});var rd=n(At);_n=i(rd,"SegformerForSemanticSegmentation"),rd.forEach(t),vn=i(x,`). 
However, other datasets use the 0 index as background class and include this class as part of all labels. In that case, `),Lr=a(x,"CODE",{});var od=n(Lr);bn=i(od,"reduce_labels"),od.forEach(t),Sn=i(x,` should be set to `),Nr=a(x,"CODE",{});var ad=n(Nr);wn=i(ad,"False"),ad.forEach(t),Tn=i(x,", as loss should also be computed for the background class."),x.forEach(t),En=c(ne),qr=a(ne,"LI",{});var nd=n(qr);yn=i(nd,"As most models, SegFormer comes in different sizes, the details of which can be found in the table below."),nd.forEach(t),ne.forEach(t),Ro=c(r),Ie=a(r,"TABLE",{});var sa=n(Ie);Or=a(sa,"THEAD",{});var sd=n(Or);I=a(sd,"TR",{});var U=n(I);Lt=a(U,"TH",{align:!0});var id=n(Lt);Hr=a(id,"STRONG",{});var ld=n(Hr);$n=i(ld,"Model variant"),ld.forEach(t),id.forEach(t),xn=c(U),Rr=a(U,"TH",{});var dd=n(Rr);Br=a(dd,"STRONG",{});var cd=n(Br);Fn=i(cd,"Depths"),cd.forEach(t),dd.forEach(t),In=c(U),Wr=a(U,"TH",{});var md=n(Wr);Ur=a(md,"STRONG",{});var fd=n(Ur);kn=i(fd,"Hidden sizes"),fd.forEach(t),md.forEach(t),Mn=c(U),Nt=a(U,"TH",{align:!0});var hd=n(Nt);Vr=a(hd,"STRONG",{});var gd=n(Vr);Pn=i(gd,"Decoder hidden size"),gd.forEach(t),hd.forEach(t),Cn=c(U),qt=a(U,"TH",{align:!0});var pd=n(qt);Gr=a(pd,"STRONG",{});var ud=n(Gr);Dn=i(ud,"Params (M)"),ud.forEach(t),pd.forEach(t),zn=c(U),Ot=a(U,"TH",{align:!0});var _d=n(Ot);Kr=a(_d,"STRONG",{});var vd=n(Kr);jn=i(vd,"ImageNet-1k Top 1"),vd.forEach(t),_d.forEach(t),U.forEach(t),sd.forEach(t),An=c(sa),k=a(sa,"TBODY",{});var V=n(k);M=a(V,"TR",{});var G=n(M);Ht=a(G,"TD",{align:!0});var bd=n(Ht);Ln=i(bd,"MiT-b0"),bd.forEach(t),Nn=c(G),Jr=a(G,"TD",{});var Sd=n(Jr);qn=i(Sd,"[2, 2, 2, 2]"),Sd.forEach(t),On=c(G),Xr=a(G,"TD",{});var wd=n(Xr);Hn=i(wd,"[32, 64, 160, 256]"),wd.forEach(t),Rn=c(G),Rt=a(G,"TD",{align:!0});var Td=n(Rt);Bn=i(Td,"256"),Td.forEach(t),Wn=c(G),Bt=a(G,"TD",{align:!0});var Ed=n(Bt);Un=i(Ed,"3.7"),Ed.forEach(t),Vn=c(G),Wt=a(G,"TD",{align:!0});var yd=n(Wt);Gn=i(yd,"70.5"),yd.forEach(t),G.forEach(t),Kn=c(V),P=a(V,"TR",{});var K=n(P);Ut=a(K,"TD",{align:!0});var $d=n(Ut);Jn=i($d,"MiT-b1"),$d.forEach(t),Xn=c(K),Yr=a(K,"TD",{});var xd=n(Yr);Yn=i(xd,"[2, 2, 2, 2]"),xd.forEach(t),Zn=c(K),Zr=a(K,"TD",{});var Fd=n(Zr);Qn=i(Fd,"[64, 128, 320, 512]"),Fd.forEach(t),es=c(K),Vt=a(K,"TD",{align:!0});var Id=n(Vt);ts=i(Id,"256"),Id.forEach(t),rs=c(K),Gt=a(K,"TD",{align:!0});var kd=n(Gt);os=i(kd,"14.0"),kd.forEach(t),as=c(K),Kt=a(K,"TD",{align:!0});var Md=n(Kt);ns=i(Md,"78.7"),Md.forEach(t),K.forEach(t),ss=c(V),C=a(V,"TR",{});var J=n(C);Jt=a(J,"TD",{align:!0});var Pd=n(Jt);is=i(Pd,"MiT-b2"),Pd.forEach(t),ls=c(J),Qr=a(J,"TD",{});var Cd=n(Qr);ds=i(Cd,"[3, 4, 6, 3]"),Cd.forEach(t),cs=c(J),eo=a(J,"TD",{});var Dd=n(eo);ms=i(Dd,"[64, 128, 320, 512]"),Dd.forEach(t),fs=c(J),Xt=a(J,"TD",{align:!0});var zd=n(Xt);hs=i(zd,"768"),zd.forEach(t),gs=c(J),Yt=a(J,"TD",{align:!0});var jd=n(Yt);ps=i(jd,"25.4"),jd.forEach(t),us=c(J),Zt=a(J,"TD",{align:!0});var Ad=n(Zt);_s=i(Ad,"81.6"),Ad.forEach(t),J.forEach(t),vs=c(V),D=a(V,"TR",{});var X=n(D);Qt=a(X,"TD",{align:!0});var Ld=n(Qt);bs=i(Ld,"MiT-b3"),Ld.forEach(t),Ss=c(X),to=a(X,"TD",{});var Nd=n(to);ws=i(Nd,"[3, 4, 18, 3]"),Nd.forEach(t),Ts=c(X),ro=a(X,"TD",{});var qd=n(ro);Es=i(qd,"[64, 128, 320, 512]"),qd.forEach(t),ys=c(X),er=a(X,"TD",{align:!0});var Od=n(er);$s=i(Od,"768"),Od.forEach(t),xs=c(X),tr=a(X,"TD",{align:!0});var Hd=n(tr);Fs=i(Hd,"45.2"),Hd.forEach(t),Is=c(X),rr=a(X,"TD",{align:!0});var Rd=n(rr);ks=i(Rd,"83.1"),Rd.forEach(t),X.forEach(t),Ms=c(V),z=a(V,"TR",{});var Y=n(z);or=a(Y,"TD",{align:!0});var 
Bd=n(or);Ps=i(Bd,"MiT-b4"),Bd.forEach(t),Cs=c(Y),oo=a(Y,"TD",{});var Wd=n(oo);Ds=i(Wd,"[3, 8, 27, 3]"),Wd.forEach(t),zs=c(Y),ao=a(Y,"TD",{});var Ud=n(ao);js=i(Ud,"[64, 128, 320, 512]"),Ud.forEach(t),As=c(Y),ar=a(Y,"TD",{align:!0});var Vd=n(ar);Ls=i(Vd,"768"),Vd.forEach(t),Ns=c(Y),nr=a(Y,"TD",{align:!0});var Gd=n(nr);qs=i(Gd,"62.6"),Gd.forEach(t),Os=c(Y),sr=a(Y,"TD",{align:!0});var Kd=n(sr);Hs=i(Kd,"83.6"),Kd.forEach(t),Y.forEach(t),Rs=c(V),j=a(V,"TR",{});var Z=n(j);ir=a(Z,"TD",{align:!0});var Jd=n(ir);Bs=i(Jd,"MiT-b5"),Jd.forEach(t),Ws=c(Z),no=a(Z,"TD",{});var Xd=n(no);Us=i(Xd,"[3, 6, 40, 3]"),Xd.forEach(t),Vs=c(Z),so=a(Z,"TD",{});var Yd=n(so);Gs=i(Yd,"[64, 128, 320, 512]"),Yd.forEach(t),Ks=c(Z),lr=a(Z,"TD",{align:!0});var Zd=n(lr);Js=i(Zd,"768"),Zd.forEach(t),Xs=c(Z),dr=a(Z,"TD",{align:!0});var Qd=n(dr);Ys=i(Qd,"82.0"),Qd.forEach(t),Zs=c(Z),cr=a(Z,"TD",{align:!0});var ec=n(cr);Qs=i(ec,"83.8"),ec.forEach(t),Z.forEach(t),V.forEach(t),sa.forEach(t),Bo=c(r),he=a(r,"H2",{class:!0});var ia=n(he);ke=a(ia,"A",{id:!0,class:!0,href:!0});var tc=n(ke);io=a(tc,"SPAN",{});var rc=n(io);_(Ze.$$.fragment,rc),rc.forEach(t),tc.forEach(t),ei=c(ia),lo=a(ia,"SPAN",{});var oc=n(lo);ti=i(oc,"SegformerConfig"),oc.forEach(t),ia.forEach(t),Wo=c(r),A=a(r,"DIV",{class:!0});var ie=n(A);_(Qe.$$.fragment,ie),ri=c(ie),ge=a(ie,"P",{});var wr=n(ge);oi=i(wr,"This is the configuration class to store the configuration of a "),mr=a(wr,"A",{href:!0});var ac=n(mr);ai=i(ac,"SegformerModel"),ac.forEach(t),ni=i(wr,`. It is used to instantiate an SegFormer model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the SegFormer `),et=a(wr,"A",{href:!0,rel:!0});var nc=n(et);si=i(nc,"nvidia/segformer-b0-finetuned-ade-512-512"),nc.forEach(t),ii=i(wr,` architecture.`),wr.forEach(t),li=c(ie),pe=a(ie,"P",{});var Tr=n(pe);di=i(Tr,"Configuration objects inherit from "),fr=a(Tr,"A",{href:!0});var sc=n(fr);ci=i(sc,"PretrainedConfig"),sc.forEach(t),mi=i(Tr,` and can be used to control the model outputs. Read the documentation from `),hr=a(Tr,"A",{href:!0});var ic=n(hr);fi=i(ic,"PretrainedConfig"),ic.forEach(t),hi=i(Tr," for more information."),Tr.forEach(t),gi=c(ie),co=a(ie,"P",{});var lc=n(co);pi=i(lc,"Example:"),lc.forEach(t),ui=c(ie),_(tt.$$.fragment,ie),ie.forEach(t),Uo=c(r),ue=a(r,"H2",{class:!0});var la=n(ue);Me=a(la,"A",{id:!0,class:!0,href:!0});var dc=n(Me);mo=a(dc,"SPAN",{});var cc=n(mo);_(rt.$$.fragment,cc),cc.forEach(t),dc.forEach(t),_i=c(la),fo=a(la,"SPAN",{});var mc=n(fo);vi=i(mc,"SegformerFeatureExtractor"),mc.forEach(t),la.forEach(t),Vo=c(r),B=a(r,"DIV",{class:!0});var qe=n(B);_(ot.$$.fragment,qe),bi=c(qe),ho=a(qe,"P",{});var fc=n(ho);Si=i(fc,"Constructs a SegFormer feature extractor."),fc.forEach(t),wi=c(qe),at=a(qe,"P",{});var da=n(at);Ti=i(da,"This feature extractor inherits from "),gr=a(da,"A",{href:!0});var hc=n(gr);Ei=i(hc,"FeatureExtractionMixin"),hc.forEach(t),yi=i(da,` which contains most of the main methods. 
Users should refer to this superclass for more information regarding those methods.`),da.forEach(t),$i=c(qe),ae=a(qe,"DIV",{class:!0});var Er=n(ae);_(nt.$$.fragment,Er),xi=c(Er),go=a(Er,"P",{});var gc=n(go);Fi=i(gc,"Main method to prepare for the model one or several image(s) and optional corresponding segmentation maps."),gc.forEach(t),Ii=c(Er),_(Pe.$$.fragment,Er),Er.forEach(t),qe.forEach(t),Go=c(r),_e=a(r,"H2",{class:!0});var ca=n(_e);Ce=a(ca,"A",{id:!0,class:!0,href:!0});var pc=n(Ce);po=a(pc,"SPAN",{});var uc=n(po);_(st.$$.fragment,uc),uc.forEach(t),pc.forEach(t),ki=c(ca),uo=a(ca,"SPAN",{});var _c=n(uo);Mi=i(_c,"SegformerModel"),_c.forEach(t),ca.forEach(t),Ko=c(r),ee=a(r,"DIV",{class:!0});var yr=n(ee);_(it.$$.fragment,yr),Pi=c(yr),lt=a(yr,"P",{});var ma=n(lt);Ci=i(ma,`The bare SegFormer encoder (Mix-Transformer) outputting raw hidden-states without any specific head on top. This model is a PyTorch `),dt=a(ma,"A",{href:!0,rel:!0});var vc=n(dt);Di=i(vc,"torch.nn.Module"),vc.forEach(t),zi=i(ma,` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),ma.forEach(t),ji=c(yr),q=a(yr,"DIV",{class:!0});var le=n(q);_(ct.$$.fragment,le),Ai=c(le),ve=a(le,"P",{});var $r=n(ve);Li=i($r,"The "),pr=a($r,"A",{href:!0});var bc=n(pr);Ni=i(bc,"SegformerModel"),bc.forEach(t),qi=i($r," forward method, overrides the "),_o=a($r,"CODE",{});var Sc=n(_o);Oi=i(Sc,"__call__"),Sc.forEach(t),Hi=i($r," special method."),$r.forEach(t),Ri=c(le),_(De.$$.fragment,le),Bi=c(le),vo=a(le,"P",{});var wc=n(vo);Wi=i(wc,"Example:"),wc.forEach(t),Ui=c(le),_(mt.$$.fragment,le),le.forEach(t),yr.forEach(t),Jo=c(r),be=a(r,"H2",{class:!0});var fa=n(be);ze=a(fa,"A",{id:!0,class:!0,href:!0});var Tc=n(ze);bo=a(Tc,"SPAN",{});var Ec=n(bo);_(ft.$$.fragment,Ec),Ec.forEach(t),Tc.forEach(t),Vi=c(fa),So=a(fa,"SPAN",{});var yc=n(So);Gi=i(yc,"SegformerDecodeHead"),yc.forEach(t),fa.forEach(t),Xo=c(r),Se=a(r,"DIV",{class:!0});var ha=n(Se);_(ht.$$.fragment,ha),Ki=c(ha),ur=a(ha,"DIV",{class:!0});var $c=n(ur);_(gt.$$.fragment,$c),$c.forEach(t),ha.forEach(t),Yo=c(r),we=a(r,"H2",{class:!0});var ga=n(we);je=a(ga,"A",{id:!0,class:!0,href:!0});var xc=n(je);wo=a(xc,"SPAN",{});var Fc=n(wo);_(pt.$$.fragment,Fc),Fc.forEach(t),xc.forEach(t),Ji=c(ga),To=a(ga,"SPAN",{});var Ic=n(To);Xi=i(Ic,"SegformerForImageClassification"),Ic.forEach(t),ga.forEach(t),Zo=c(r),W=a(r,"DIV",{class:!0});var Oe=n(W);_(ut.$$.fragment,Oe),Yi=c(Oe),Eo=a(Oe,"P",{});var kc=n(Eo);Zi=i(kc,`SegFormer Model transformer with an image classification head on top (a linear layer on top of the final hidden states) e.g. for ImageNet.`),kc.forEach(t),Qi=c(Oe),_t=a(Oe,"P",{});var pa=n(_t);el=i(pa,"This model is a PyTorch "),vt=a(pa,"A",{href:!0,rel:!0});var Mc=n(vt);tl=i(Mc,"torch.nn.Module"),Mc.forEach(t),rl=i(pa,` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),pa.forEach(t),ol=c(Oe),O=a(Oe,"DIV",{class:!0});var de=n(O);_(bt.$$.fragment,de),al=c(de),Te=a(de,"P",{});var xr=n(Te);nl=i(xr,"The "),_r=a(xr,"A",{href:!0});var Pc=n(_r);sl=i(Pc,"SegformerForImageClassification"),Pc.forEach(t),il=i(xr," forward method, overrides the "),yo=a(xr,"CODE",{});var Cc=n(yo);ll=i(Cc,"__call__"),Cc.forEach(t),dl=i(xr," special method."),xr.forEach(t),cl=c(de),_(Ae.$$.fragment,de),ml=c(de),$o=a(de,"P",{});var Dc=n($o);fl=i(Dc,"Example:"),Dc.forEach(t),hl=c(de),_(St.$$.fragment,de),de.forEach(t),Oe.forEach(t),Qo=c(r),Ee=a(r,"H2",{class:!0});var ua=n(Ee);Le=a(ua,"A",{id:!0,class:!0,href:!0});var zc=n(Le);xo=a(zc,"SPAN",{});var jc=n(xo);_(wt.$$.fragment,jc),jc.forEach(t),zc.forEach(t),gl=c(ua),Fo=a(ua,"SPAN",{});var Ac=n(Fo);pl=i(Ac,"SegformerForSemanticSegmentation"),Ac.forEach(t),ua.forEach(t),ea=c(r),te=a(r,"DIV",{class:!0});var Fr=n(te);_(Tt.$$.fragment,Fr),ul=c(Fr),Et=a(Fr,"P",{});var _a=n(Et);_l=i(_a,`SegFormer Model transformer with an all-MLP decode head on top e.g. for ADE20k, CityScapes. This model is a PyTorch `),yt=a(_a,"A",{href:!0,rel:!0});var Lc=n(yt);vl=i(Lc,"torch.nn.Module"),Lc.forEach(t),bl=i(_a,` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),_a.forEach(t),Sl=c(Fr),H=a(Fr,"DIV",{class:!0});var ce=n(H);_($t.$$.fragment,ce),wl=c(ce),ye=a(ce,"P",{});var Ir=n(ye);Tl=i(Ir,"The "),vr=a(Ir,"A",{href:!0});var Nc=n(vr);El=i(Nc,"SegformerForSemanticSegmentation"),Nc.forEach(t),yl=i(Ir," forward method, overrides the "),Io=a(Ir,"CODE",{});var qc=n(Io);$l=i(qc,"__call__"),qc.forEach(t),xl=i(Ir," special method."),Ir.forEach(t),Fl=c(ce),_(Ne.$$.fragment,ce),Il=c(ce),ko=a(ce,"P",{});var Oc=n(ko);kl=i(Oc,"Examples:"),Oc.forEach(t),Ml=c(ce),_(xt.$$.fragment,ce),ce.forEach(t),Fr.forEach(t),this.h()},h(){l(h,"name","hf:doc:metadata"),l(h,"content",JSON.stringify(Yc)),l(p,"id","segformer"),l(p,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(p,"href","#segformer"),l(g,"class","relative group"),l($e,"id","overview"),l($e,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l($e,"href","#overview"),l(me,"class","relative 
group"),l(Be,"href","https://arxiv.org/abs/2105.15203"),l(Be,"rel","nofollow"),l(We,"href","https://arxiv.org/abs/2105.15203"),l(We,"rel","nofollow"),l(Ue,"width","600"),Uc(Ue.src,Pl="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/segformer_architecture.png")||l(Ue,"src",Pl),l(Ve,"href","https://huggingface.co/nielsr"),l(Ve,"rel","nofollow"),l(Ge,"href","https://github.com/NVlabs/SegFormer"),l(Ge,"rel","nofollow"),l(Pt,"href","/docs/transformers/pr_16143/en/model_doc/segformer#transformers.SegformerModel"),l(Ct,"href","/docs/transformers/pr_16143/en/model_doc/segformer#transformers.SegformerForSemanticSegmentation"),l(Dt,"href","/docs/transformers/pr_16143/en/model_doc/segformer#transformers.SegformerForImageClassification"),l(Ke,"href","https://huggingface.co/models?other=segformer"),l(Ke,"rel","nofollow"),l(Xe,"href","https://github.com/NielsRogge/Transformers-Tutorials/tree/master/SegFormer"),l(Xe,"rel","nofollow"),l(zt,"href","/docs/transformers/pr_16143/en/model_doc/segformer#transformers.SegformerFeatureExtractor"),l(Ye,"href","https://github.com/NVlabs/SegFormer/blob/master/local_configs/_base_/datasets/ade20k_repeat.py"),l(Ye,"rel","nofollow"),l(jt,"href","/docs/transformers/pr_16143/en/model_doc/segformer#transformers.SegformerFeatureExtractor"),l(At,"href","/docs/transformers/pr_16143/en/model_doc/segformer#transformers.SegformerForSemanticSegmentation"),l(Lt,"align","center"),l(Nt,"align","center"),l(qt,"align","center"),l(Ot,"align","center"),l(Ht,"align","center"),l(Rt,"align","center"),l(Bt,"align","center"),l(Wt,"align","center"),l(Ut,"align","center"),l(Vt,"align","center"),l(Gt,"align","center"),l(Kt,"align","center"),l(Jt,"align","center"),l(Xt,"align","center"),l(Yt,"align","center"),l(Zt,"align","center"),l(Qt,"align","center"),l(er,"align","center"),l(tr,"align","center"),l(rr,"align","center"),l(or,"align","center"),l(ar,"align","center"),l(nr,"align","center"),l(sr,"align","center"),l(ir,"align","center"),l(lr,"align","center"),l(dr,"align","center"),l(cr,"align","center"),l(ke,"id","transformers.SegformerConfig"),l(ke,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(ke,"href","#transformers.SegformerConfig"),l(he,"class","relative group"),l(mr,"href","/docs/transformers/pr_16143/en/model_doc/segformer#transformers.SegformerModel"),l(et,"href","https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512"),l(et,"rel","nofollow"),l(fr,"href","/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig"),l(hr,"href","/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig"),l(A,"class","docstring"),l(Me,"id","transformers.SegformerFeatureExtractor"),l(Me,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(Me,"href","#transformers.SegformerFeatureExtractor"),l(ue,"class","relative group"),l(gr,"href","/docs/transformers/pr_16143/en/main_classes/feature_extractor#transformers.FeatureExtractionMixin"),l(ae,"class","docstring"),l(B,"class","docstring"),l(Ce,"id","transformers.SegformerModel"),l(Ce,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full"),l(Ce,"href","#transformers.SegformerModel"),l(_e,"class","relative group"),l(dt,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(dt,"rel","nofollow"),l(pr,"href","/docs/transformers/pr_16143/en/model_doc/segformer#transformers.SegformerModel"),l(q,"class","docstring"),l(ee,"class","docstring"),l(ze,"id","transformers.SegformerDecodeHead"),l(ze,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(ze,"href","#transformers.SegformerDecodeHead"),l(be,"class","relative group"),l(ur,"class","docstring"),l(Se,"class","docstring"),l(je,"id","transformers.SegformerForImageClassification"),l(je,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(je,"href","#transformers.SegformerForImageClassification"),l(we,"class","relative group"),l(vt,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(vt,"rel","nofollow"),l(_r,"href","/docs/transformers/pr_16143/en/model_doc/segformer#transformers.SegformerForImageClassification"),l(O,"class","docstring"),l(W,"class","docstring"),l(Le,"id","transformers.SegformerForSemanticSegmentation"),l(Le,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(Le,"href","#transformers.SegformerForSemanticSegmentation"),l(Ee,"class","relative group"),l(yt,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(yt,"rel","nofollow"),l(vr,"href","/docs/transformers/pr_16143/en/model_doc/segformer#transformers.SegformerForSemanticSegmentation"),l(H,"class","docstring"),l(te,"class","docstring")},m(r,m){e(document.head,h),f(r,$,m),f(r,g,m),e(g,p),e(p,F),v(T,F,null),e(g,E),e(g,L),e(L,Sa),f(r,Do,m),f(r,me,m),e(me,$e),e($e,kr),v(Re,kr,null),e(me,wa),e(me,Mr),e(Mr,Ta),f(r,zo,m),f(r,xe,m),e(xe,Ea),e(xe,Be),e(Be,ya),e(xe,$a),f(r,jo,m),f(r,It,m),e(It,xa),f(r,Ao,m),f(r,kt,m),e(kt,Pr),e(Pr,Fa),f(r,Lo,m),f(r,Fe,m),e(Fe,Ia),e(Fe,We),e(We,ka),e(Fe,Ma),f(r,No,m),f(r,Ue,m),f(r,qo,m),f(r,oe,m),e(oe,Pa),e(oe,Ve),e(Ve,Ca),e(oe,Da),e(oe,Ge),e(Ge,za),e(oe,ja),f(r,Oo,m),f(r,Mt,m),e(Mt,Aa),f(r,Ho,m),f(r,N,m),e(N,R),e(R,La),e(R,Pt),e(Pt,Na),e(R,qa),e(R,Ct),e(Ct,Oa),e(R,Ha),e(R,Dt),e(Dt,Ra),e(R,Ba),e(R,Ke),e(Ke,Wa),e(R,Ua),e(N,Va),e(N,Je),e(Je,Ga),e(Je,Xe),e(Xe,Ka),e(Je,Ja),e(N,Xa),e(N,fe),e(fe,Ya),e(fe,zt),e(zt,Za),e(fe,Qa),e(fe,Ye),e(Ye,en),e(fe,tn),e(N,rn),e(N,y),e(y,on),e(y,jt),e(jt,an),e(y,nn),e(y,Cr),e(Cr,sn),e(y,ln),e(y,Dr),e(Dr,dn),e(y,cn),e(y,zr),e(zr,mn),e(y,fn),e(y,jr),e(jr,hn),e(y,gn),e(y,Ar),e(Ar,pn),e(y,un),e(y,At),e(At,_n),e(y,vn),e(y,Lr),e(Lr,bn),e(y,Sn),e(y,Nr),e(Nr,wn),e(y,Tn),e(N,En),e(N,qr),e(qr,yn),f(r,Ro,m),f(r,Ie,m),e(Ie,Or),e(Or,I),e(I,Lt),e(Lt,Hr),e(Hr,$n),e(I,xn),e(I,Rr),e(Rr,Br),e(Br,Fn),e(I,In),e(I,Wr),e(Wr,Ur),e(Ur,kn),e(I,Mn),e(I,Nt),e(Nt,Vr),e(Vr,Pn),e(I,Cn),e(I,qt),e(qt,Gr),e(Gr,Dn),e(I,zn),e(I,Ot),e(Ot,Kr),e(Kr,jn),e(Ie,An),e(Ie,k),e(k,M),e(M,Ht),e(Ht,Ln),e(M,Nn),e(M,Jr),e(Jr,qn),e(M,On),e(M,Xr),e(Xr,Hn),e(M,Rn),e(M,Rt),e(Rt,Bn),e(M,Wn),e(M,Bt),e(Bt,Un),e(M,Vn),e(M,Wt),e(Wt,Gn),e(k,Kn),e(k,P),e(P,Ut),e(Ut,Jn),e(P,Xn),e(P,Yr),e(Yr,Yn),e(P,Zn),e(P,Zr),e(Zr,Qn),e(P,es),e(P,Vt),e(Vt,ts),e(P,rs),e(P,Gt),e(Gt,os),e(P,as),e(P,Kt),e(Kt,ns),e(k,ss),e(k,C),e(C,Jt),e(Jt,is),e(C,ls),e(C,Qr),e(Qr,ds),e(C,cs),e(C,eo),e(eo,ms),e(C,fs),e(C,Xt),e(Xt,hs),e
(C,gs),e(C,Yt),e(Yt,ps),e(C,us),e(C,Zt),e(Zt,_s),e(k,vs),e(k,D),e(D,Qt),e(Qt,bs),e(D,Ss),e(D,to),e(to,ws),e(D,Ts),e(D,ro),e(ro,Es),e(D,ys),e(D,er),e(er,$s),e(D,xs),e(D,tr),e(tr,Fs),e(D,Is),e(D,rr),e(rr,ks),e(k,Ms),e(k,z),e(z,or),e(or,Ps),e(z,Cs),e(z,oo),e(oo,Ds),e(z,zs),e(z,ao),e(ao,js),e(z,As),e(z,ar),e(ar,Ls),e(z,Ns),e(z,nr),e(nr,qs),e(z,Os),e(z,sr),e(sr,Hs),e(k,Rs),e(k,j),e(j,ir),e(ir,Bs),e(j,Ws),e(j,no),e(no,Us),e(j,Vs),e(j,so),e(so,Gs),e(j,Ks),e(j,lr),e(lr,Js),e(j,Xs),e(j,dr),e(dr,Ys),e(j,Zs),e(j,cr),e(cr,Qs),f(r,Bo,m),f(r,he,m),e(he,ke),e(ke,io),v(Ze,io,null),e(he,ei),e(he,lo),e(lo,ti),f(r,Wo,m),f(r,A,m),v(Qe,A,null),e(A,ri),e(A,ge),e(ge,oi),e(ge,mr),e(mr,ai),e(ge,ni),e(ge,et),e(et,si),e(ge,ii),e(A,li),e(A,pe),e(pe,di),e(pe,fr),e(fr,ci),e(pe,mi),e(pe,hr),e(hr,fi),e(pe,hi),e(A,gi),e(A,co),e(co,pi),e(A,ui),v(tt,A,null),f(r,Uo,m),f(r,ue,m),e(ue,Me),e(Me,mo),v(rt,mo,null),e(ue,_i),e(ue,fo),e(fo,vi),f(r,Vo,m),f(r,B,m),v(ot,B,null),e(B,bi),e(B,ho),e(ho,Si),e(B,wi),e(B,at),e(at,Ti),e(at,gr),e(gr,Ei),e(at,yi),e(B,$i),e(B,ae),v(nt,ae,null),e(ae,xi),e(ae,go),e(go,Fi),e(ae,Ii),v(Pe,ae,null),f(r,Go,m),f(r,_e,m),e(_e,Ce),e(Ce,po),v(st,po,null),e(_e,ki),e(_e,uo),e(uo,Mi),f(r,Ko,m),f(r,ee,m),v(it,ee,null),e(ee,Pi),e(ee,lt),e(lt,Ci),e(lt,dt),e(dt,Di),e(lt,zi),e(ee,ji),e(ee,q),v(ct,q,null),e(q,Ai),e(q,ve),e(ve,Li),e(ve,pr),e(pr,Ni),e(ve,qi),e(ve,_o),e(_o,Oi),e(ve,Hi),e(q,Ri),v(De,q,null),e(q,Bi),e(q,vo),e(vo,Wi),e(q,Ui),v(mt,q,null),f(r,Jo,m),f(r,be,m),e(be,ze),e(ze,bo),v(ft,bo,null),e(be,Vi),e(be,So),e(So,Gi),f(r,Xo,m),f(r,Se,m),v(ht,Se,null),e(Se,Ki),e(Se,ur),v(gt,ur,null),f(r,Yo,m),f(r,we,m),e(we,je),e(je,wo),v(pt,wo,null),e(we,Ji),e(we,To),e(To,Xi),f(r,Zo,m),f(r,W,m),v(ut,W,null),e(W,Yi),e(W,Eo),e(Eo,Zi),e(W,Qi),e(W,_t),e(_t,el),e(_t,vt),e(vt,tl),e(_t,rl),e(W,ol),e(W,O),v(bt,O,null),e(O,al),e(O,Te),e(Te,nl),e(Te,_r),e(_r,sl),e(Te,il),e(Te,yo),e(yo,ll),e(Te,dl),e(O,cl),v(Ae,O,null),e(O,ml),e(O,$o),e($o,fl),e(O,hl),v(St,O,null),f(r,Qo,m),f(r,Ee,m),e(Ee,Le),e(Le,xo),v(wt,xo,null),e(Ee,gl),e(Ee,Fo),e(Fo,pl),f(r,ea,m),f(r,te,m),v(Tt,te,null),e(te,ul),e(te,Et),e(Et,_l),e(Et,yt),e(yt,vl),e(Et,bl),e(te,Sl),e(te,H),v($t,H,null),e(H,wl),e(H,ye),e(ye,Tl),e(ye,vr),e(vr,El),e(ye,yl),e(ye,Io),e(Io,$l),e(ye,xl),e(H,Fl),v(Ne,H,null),e(H,Il),e(H,ko),e(ko,kl),e(H,Ml),v(xt,H,null),ta=!0},p(r,[m]){const Ft={};m&2&&(Ft.$$scope={dirty:m,ctx:r}),Pe.$set(Ft);const Mo={};m&2&&(Mo.$$scope={dirty:m,ctx:r}),De.$set(Mo);const Po={};m&2&&(Po.$$scope={dirty:m,ctx:r}),Ae.$set(Po);const 
Co={};m&2&&(Co.$$scope={dirty:m,ctx:r}),Ne.$set(Co)},i(r){ta||(b(T.$$.fragment,r),b(Re.$$.fragment,r),b(Ze.$$.fragment,r),b(Qe.$$.fragment,r),b(tt.$$.fragment,r),b(rt.$$.fragment,r),b(ot.$$.fragment,r),b(nt.$$.fragment,r),b(Pe.$$.fragment,r),b(st.$$.fragment,r),b(it.$$.fragment,r),b(ct.$$.fragment,r),b(De.$$.fragment,r),b(mt.$$.fragment,r),b(ft.$$.fragment,r),b(ht.$$.fragment,r),b(gt.$$.fragment,r),b(pt.$$.fragment,r),b(ut.$$.fragment,r),b(bt.$$.fragment,r),b(Ae.$$.fragment,r),b(St.$$.fragment,r),b(wt.$$.fragment,r),b(Tt.$$.fragment,r),b($t.$$.fragment,r),b(Ne.$$.fragment,r),b(xt.$$.fragment,r),ta=!0)},o(r){S(T.$$.fragment,r),S(Re.$$.fragment,r),S(Ze.$$.fragment,r),S(Qe.$$.fragment,r),S(tt.$$.fragment,r),S(rt.$$.fragment,r),S(ot.$$.fragment,r),S(nt.$$.fragment,r),S(Pe.$$.fragment,r),S(st.$$.fragment,r),S(it.$$.fragment,r),S(ct.$$.fragment,r),S(De.$$.fragment,r),S(mt.$$.fragment,r),S(ft.$$.fragment,r),S(ht.$$.fragment,r),S(gt.$$.fragment,r),S(pt.$$.fragment,r),S(ut.$$.fragment,r),S(bt.$$.fragment,r),S(Ae.$$.fragment,r),S(St.$$.fragment,r),S(wt.$$.fragment,r),S(Tt.$$.fragment,r),S($t.$$.fragment,r),S(Ne.$$.fragment,r),S(xt.$$.fragment,r),ta=!1},d(r){t(h),r&&t($),r&&t(g),w(T),r&&t(Do),r&&t(me),w(Re),r&&t(zo),r&&t(xe),r&&t(jo),r&&t(It),r&&t(Ao),r&&t(kt),r&&t(Lo),r&&t(Fe),r&&t(No),r&&t(Ue),r&&t(qo),r&&t(oe),r&&t(Oo),r&&t(Mt),r&&t(Ho),r&&t(N),r&&t(Ro),r&&t(Ie),r&&t(Bo),r&&t(he),w(Ze),r&&t(Wo),r&&t(A),w(Qe),w(tt),r&&t(Uo),r&&t(ue),w(rt),r&&t(Vo),r&&t(B),w(ot),w(nt),w(Pe),r&&t(Go),r&&t(_e),w(st),r&&t(Ko),r&&t(ee),w(it),w(ct),w(De),w(mt),r&&t(Jo),r&&t(be),w(ft),r&&t(Xo),r&&t(Se),w(ht),w(gt),r&&t(Yo),r&&t(we),w(pt),r&&t(Zo),r&&t(W),w(ut),w(bt),w(Ae),w(St),r&&t(Qo),r&&t(Ee),w(wt),r&&t(ea),r&&t(te),w(Tt),w($t),w(Ne),w(xt)}}}const Yc={local:"segformer",sections:[{local:"overview",title:"Overview"},{local:"transformers.SegformerConfig",title:"SegformerConfig"},{local:"transformers.SegformerFeatureExtractor",title:"SegformerFeatureExtractor"},{local:"transformers.SegformerModel",title:"SegformerModel"},{local:"transformers.SegformerDecodeHead",title:"SegformerDecodeHead"},{local:"transformers.SegformerForImageClassification",title:"SegformerForImageClassification"},{local:"transformers.SegformerForSemanticSegmentation",title:"SegformerForSemanticSegmentation"}],title:"SegFormer"};function Zc(Q,h,$){let{fw:g}=h;return Q.$$set=p=>{"fw"in p&&$(0,g=p.fw)},[g]}class nm extends Hc{constructor(h){super();Rc(this,h,Zc,Xc,Bc,{fw:0})}}export{nm as default,Yc as metadata};
294
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages/model_doc/luke.mdx-33abb078.js
import{S as op,i as sp,s as ap,e as a,k as l,w as _,t as o,M as rp,c as r,d as t,m as c,a as i,x as k,h as s,b as d,F as e,g as u,y,q as b,o as v,B as T}from"../../chunks/vendor-4833417e.js";import{T as Ro}from"../../chunks/Tip-fffd6df1.js";import{D as W}from"../../chunks/Docstring-4f315ed9.js";import{C as Sn}from"../../chunks/CodeBlock-6a3d1b46.js";import{I as ze}from"../../chunks/IconCopyLink-4b81c553.js";import"../../chunks/CopyButton-dacfbfaf.js";function ip(U){let h,L,m,w,E;return{c(){h=a("p"),L=o("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=a("code"),w=o("Module"),E=o(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){h=r(g,"P",{});var f=i(h);L=s(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(f,"CODE",{});var z=i(m);w=s(z,"Module"),z.forEach(t),E=s(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(t)},m(g,f){u(g,h,f),e(h,L),e(h,m),e(m,w),e(h,E)},d(g){g&&t(h)}}}function dp(U){let h,L,m,w,E;return{c(){h=a("p"),L=o("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=a("code"),w=o("Module"),E=o(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){h=r(g,"P",{});var f=i(h);L=s(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(f,"CODE",{});var z=i(m);w=s(z,"Module"),z.forEach(t),E=s(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(t)},m(g,f){u(g,h,f),e(h,L),e(h,m),e(m,w),e(h,E)},d(g){g&&t(h)}}}function lp(U){let h,L,m,w,E;return{c(){h=a("p"),L=o("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=a("code"),w=o("Module"),E=o(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){h=r(g,"P",{});var f=i(h);L=s(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(f,"CODE",{});var z=i(m);w=s(z,"Module"),z.forEach(t),E=s(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(t)},m(g,f){u(g,h,f),e(h,L),e(h,m),e(m,w),e(h,E)},d(g){g&&t(h)}}}function cp(U){let h,L,m,w,E;return{c(){h=a("p"),L=o("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=a("code"),w=o("Module"),E=o(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){h=r(g,"P",{});var f=i(h);L=s(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(f,"CODE",{});var z=i(m);w=s(z,"Module"),z.forEach(t),E=s(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(t)},m(g,f){u(g,h,f),e(h,L),e(h,m),e(m,w),e(h,E)},d(g){g&&t(h)}}}function pp(U){let 
h,L,m,w,E;return{c(){h=a("p"),L=o("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=a("code"),w=o("Module"),E=o(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){h=r(g,"P",{});var f=i(h);L=s(f,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r(f,"CODE",{});var z=i(m);w=s(z,"Module"),z.forEach(t),E=s(f,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),f.forEach(t)},m(g,f){u(g,h,f),e(h,L),e(h,m),e(m,w),e(h,E)},d(g){g&&t(h)}}}function hp(U){let h,L,m,w,E,g,f,z,Ds,Ho,le,xe,Nn,He,Bs,Dn,Ws,Vo,$e,Us,Ve,Ks,Rs,Yo,Jt,Hs,Qo,Xt,Bn,Vs,Jo,Gt,Ys,Xo,R,Wn,Ye,Qs,Zt,Js,Xs,Gs,Un,q,Zs,Kn,ea,ta,Rn,na,oa,Hn,sa,aa,Vn,ra,ia,en,da,la,ca,Qe,H,tn,pa,ha,Yn,ua,ma,Qn,fa,ga,Jn,_a,ka,ya,Je,nn,Xn,ba,va,Ta,on,Gn,wa,La,Ea,K,Zn,za,xa,ce,qe,sn,$a,qa,Xe,Fa,Ca,Ma,Fe,an,Pa,ja,Ge,Aa,Ia,Oa,rn,dn,Sa,Na,Da,O,ln,Ba,Wa,eo,Ua,Ka,to,Ra,Ha,no,Va,Ya,oo,Qa,Ja,Xa,pe,Ga,cn,Za,er,Ze,tr,nr,or,et,sr,tt,ar,rr,Go,pn,ir,Zo,nt,es,V,dr,ot,lr,cr,st,pr,hr,at,ur,mr,ts,he,Ce,so,rt,fr,ao,gr,ns,F,it,_r,dt,kr,hn,yr,br,vr,ue,Tr,un,wr,Lr,mn,Er,zr,xr,ro,$r,qr,lt,os,me,Me,io,ct,Fr,lo,Cr,ss,C,pt,Mr,co,Pr,jr,x,Ar,fn,Ir,Or,gn,Sr,Nr,_n,Dr,Br,po,Wr,Ur,ho,Kr,Rr,uo,Hr,Vr,mo,Yr,Qr,Jr,Pe,ht,Xr,fo,Gr,Zr,kn,ut,as,fe,je,go,mt,ei,_o,ti,rs,M,ft,ni,ko,oi,si,gt,ai,yn,ri,ii,di,_t,li,kt,ci,pi,hi,S,yt,ui,ge,mi,bn,fi,gi,yo,_i,ki,yi,Ae,bi,bo,vi,Ti,bt,is,_e,Ie,vo,vt,wi,To,Li,ds,P,Tt,Ei,wo,zi,xi,wt,$i,vn,qi,Fi,Ci,Lt,Mi,Et,Pi,ji,Ai,J,zt,Ii,ke,Oi,Tn,Si,Ni,Lo,Di,Bi,Wi,Oe,ls,ye,Se,Eo,xt,Ui,zo,Ki,cs,j,$t,Ri,xo,Hi,Vi,qt,Yi,wn,Qi,Ji,Xi,Ft,Gi,Ct,Zi,ed,td,N,Mt,nd,be,od,Ln,sd,ad,$o,rd,id,dd,Ne,ld,qo,cd,pd,Pt,ps,ve,De,Fo,jt,hd,Co,ud,hs,A,At,md,Mo,fd,gd,It,_d,En,kd,yd,bd,Ot,vd,St,Td,wd,Ld,D,Nt,Ed,Te,zd,zn,xd,$d,Po,qd,Fd,Cd,Be,Md,jo,Pd,jd,Dt,us,we,We,Ao,Bt,Ad,Io,Id,ms,I,Wt,Od,Oo,Sd,Nd,Ut,Dd,xn,Bd,Wd,Ud,Kt,Kd,Rt,Rd,Hd,Vd,B,Ht,Yd,Le,Qd,$n,Jd,Xd,So,Gd,Zd,el,Ue,tl,No,nl,ol,Vt,fs;return g=new ze({}),He=new ze({}),nt=new Sn({props:{code:`from transformers import LukeTokenizer, LukeModel, LukeForEntityPairClassification model = LukeModel.from_pretrained("studio-ousia/luke-base") tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-base") text = "Beyonc\xE9 lives in Los Angeles." 
entity_spans = [(0, 7)] # character-based entity span corresponding to "Beyonc\xE9" inputs = tokenizer(text, entity_spans=entity_spans, add_prefix_space=True, return_tensors="pt") outputs = model(**inputs) word_last_hidden_state = outputs.last_hidden_state entity_last_hidden_state = outputs.entity_last_hidden_state entities = [ "Beyonc\xE9", "Los Angeles", ] # Wikipedia entity titles corresponding to the entity mentions "Beyonc\xE9" and "Los Angeles" entity_spans = [(0, 7), (17, 28)] # character-based entity spans corresponding to "Beyonc\xE9" and "Los Angeles" inputs = tokenizer(text, entities=entities, entity_spans=entity_spans, add_prefix_space=True, return_tensors="pt") outputs = model(**inputs) word_last_hidden_state = outputs.last_hidden_state entity_last_hidden_state = outputs.entity_last_hidden_state model = LukeForEntityPairClassification.from_pretrained("studio-ousia/luke-large-finetuned-tacred") tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-large-finetuned-tacred") entity_spans = [(0, 7), (17, 28)] # character-based entity spans corresponding to "Beyonc\xE9" and "Los Angeles" inputs = tokenizer(text, entity_spans=entity_spans, return_tensors="pt") outputs = model(**inputs) logits = outputs.logits predicted_class_idx = int(logits[0].argmax()) print("Predicted class:", model.config.id2label[predicted_class_idx])`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> LukeTokenizer, LukeModel, LukeForEntityPairClassification <span class="hljs-meta">&gt;&gt;&gt; </span>model = LukeModel.from_pretrained(<span class="hljs-string">&quot;studio-ousia/luke-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = LukeTokenizer.from_pretrained(<span class="hljs-string">&quot;studio-ousia/luke-base&quot;</span>) <span class="hljs-comment"># Example 1: Computing the contextualized entity representation corresponding to the entity mention &quot;Beyonc\xE9&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>text = <span class="hljs-string">&quot;Beyonc\xE9 lives in Los Angeles.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>entity_spans = [(<span class="hljs-number">0</span>, <span class="hljs-number">7</span>)] <span class="hljs-comment"># character-based entity span corresponding to &quot;Beyonc\xE9&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(text, entity_spans=entity_spans, add_prefix_space=<span class="hljs-literal">True</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>word_last_hidden_state = outputs.last_hidden_state <span class="hljs-meta">&gt;&gt;&gt; </span>entity_last_hidden_state = outputs.entity_last_hidden_state <span class="hljs-comment"># Example 2: Inputting Wikipedia entities to obtain enriched contextualized representations</span> <span class="hljs-meta">&gt;&gt;&gt; </span>entities = [ <span class="hljs-meta">... </span> <span class="hljs-string">&quot;Beyonc\xE9&quot;</span>, <span class="hljs-meta">... 
</span> <span class="hljs-string">&quot;Los Angeles&quot;</span>, <span class="hljs-meta">&gt;&gt;&gt; </span>] <span class="hljs-comment"># Wikipedia entity titles corresponding to the entity mentions &quot;Beyonc\xE9&quot; and &quot;Los Angeles&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>entity_spans = [(<span class="hljs-number">0</span>, <span class="hljs-number">7</span>), (<span class="hljs-number">17</span>, <span class="hljs-number">28</span>)] <span class="hljs-comment"># character-based entity spans corresponding to &quot;Beyonc\xE9&quot; and &quot;Los Angeles&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(text, entities=entities, entity_spans=entity_spans, add_prefix_space=<span class="hljs-literal">True</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>word_last_hidden_state = outputs.last_hidden_state <span class="hljs-meta">&gt;&gt;&gt; </span>entity_last_hidden_state = outputs.entity_last_hidden_state <span class="hljs-comment"># Example 3: Classifying the relationship between two entities using LukeForEntityPairClassification head model</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = LukeForEntityPairClassification.from_pretrained(<span class="hljs-string">&quot;studio-ousia/luke-large-finetuned-tacred&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = LukeTokenizer.from_pretrained(<span class="hljs-string">&quot;studio-ousia/luke-large-finetuned-tacred&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>entity_spans = [(<span class="hljs-number">0</span>, <span class="hljs-number">7</span>), (<span class="hljs-number">17</span>, <span class="hljs-number">28</span>)] <span class="hljs-comment"># character-based entity spans corresponding to &quot;Beyonc\xE9&quot; and &quot;Los Angeles&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(text, entity_spans=entity_spans, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits <span class="hljs-meta">&gt;&gt;&gt; </span>predicted_class_idx = <span class="hljs-built_in">int</span>(logits[<span class="hljs-number">0</span>].argmax()) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(<span class="hljs-string">&quot;Predicted class:&quot;</span>, model.config.id2label[predicted_class_idx])`}}),rt=new ze({}),it=new W({props:{name:"class transformers.LukeConfig",anchor:"transformers.LukeConfig",parameters:[{name:"vocab_size",val:" = 50267"},{name:"entity_vocab_size",val:" = 500000"},{name:"hidden_size",val:" = 768"},{name:"entity_emb_size",val:" = 256"},{name:"num_hidden_layers",val:" = 12"},{name:"num_attention_heads",val:" = 12"},{name:"intermediate_size",val:" = 3072"},{name:"hidden_act",val:" = 'gelu'"},{name:"hidden_dropout_prob",val:" = 0.1"},{name:"attention_probs_dropout_prob",val:" = 0.1"},{name:"max_position_embeddings",val:" = 512"},{name:"type_vocab_size",val:" = 2"},{name:"initializer_range",val:" = 0.02"},{name:"layer_norm_eps",val:" = 1e-12"},{name:"use_entity_aware_attention",val:" = True"},{name:"pad_token_id",val:" = 1"},{name:"bos_token_id",val:" = 0"},{name:"eos_token_id",val:" = 
2"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/luke/configuration_luke.py#L29",parametersDescription:[{anchor:"transformers.LukeConfig.vocab_size",description:`<strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 30522) &#x2014; Vocabulary size of the LUKE model. Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed when calling <a href="/docs/transformers/pr_16143/en/model_doc/luke#transformers.LukeModel">LukeModel</a>.`,name:"vocab_size"},{anchor:"transformers.LukeConfig.entity_vocab_size",description:`<strong>entity_vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 500000) &#x2014; Entity vocabulary size of the LUKE model. Defines the number of different entities that can be represented by the <code>entity_ids</code> passed when calling <a href="/docs/transformers/pr_16143/en/model_doc/luke#transformers.LukeModel">LukeModel</a>.`,name:"entity_vocab_size"},{anchor:"transformers.LukeConfig.hidden_size",description:`<strong>hidden_size</strong> (<code>int</code>, <em>optional</em>, defaults to 768) &#x2014; Dimensionality of the encoder layers and the pooler layer.`,name:"hidden_size"},{anchor:"transformers.LukeConfig.entity_emb_size",description:`<strong>entity_emb_size</strong> (<code>int</code>, <em>optional</em>, defaults to 256) &#x2014; The number of dimensions of the entity embedding.`,name:"entity_emb_size"},{anchor:"transformers.LukeConfig.num_hidden_layers",description:`<strong>num_hidden_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of hidden layers in the Transformer encoder.`,name:"num_hidden_layers"},{anchor:"transformers.LukeConfig.num_attention_heads",description:`<strong>num_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.`,name:"num_attention_heads"},{anchor:"transformers.LukeConfig.intermediate_size",description:`<strong>intermediate_size</strong> (<code>int</code>, <em>optional</em>, defaults to 3072) &#x2014; Dimensionality of the &#x201C;intermediate&#x201D; (often named feed-forward) layer in the Transformer encoder.`,name:"intermediate_size"},{anchor:"transformers.LukeConfig.hidden_act",description:`<strong>hidden_act</strong> (<code>str</code> or <code>Callable</code>, <em>optional</em>, defaults to <code>&quot;gelu&quot;</code>) &#x2014; The non-linear activation function (function or string) in the encoder and pooler. 
If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;silu&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.`,name:"hidden_act"},{anchor:"transformers.LukeConfig.hidden_dropout_prob",description:`<strong>hidden_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.`,name:"hidden_dropout_prob"},{anchor:"transformers.LukeConfig.attention_probs_dropout_prob",description:`<strong>attention_probs_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout ratio for the attention probabilities.`,name:"attention_probs_dropout_prob"},{anchor:"transformers.LukeConfig.max_position_embeddings",description:`<strong>max_position_embeddings</strong> (<code>int</code>, <em>optional</em>, defaults to 512) &#x2014; The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048).`,name:"max_position_embeddings"},{anchor:"transformers.LukeConfig.type_vocab_size",description:`<strong>type_vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 2) &#x2014; The vocabulary size of the <code>token_type_ids</code> passed when calling <a href="/docs/transformers/pr_16143/en/model_doc/luke#transformers.LukeModel">LukeModel</a>.`,name:"type_vocab_size"},{anchor:"transformers.LukeConfig.initializer_range",description:`<strong>initializer_range</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices.`,name:"initializer_range"},{anchor:"transformers.LukeConfig.layer_norm_eps",description:`<strong>layer_norm_eps</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-12) &#x2014; The epsilon used by the layer normalization layers.`,name:"layer_norm_eps"},{anchor:"transformers.LukeConfig.use_entity_aware_attention",description:`<strong>use_entity_aware_attention</strong> (<code>bool</code>, defaults to <code>True</code>) &#x2014; Whether or not the model should use the entity-aware self-attention mechanism proposed in <a href="https://arxiv.org/abs/2010.01057" rel="nofollow">LUKE: Deep Contextualized Entity Representations with Entity-aware Self-attention (Yamada et al.)</a>.`,name:"use_entity_aware_attention"}]}}),lt=new Sn({props:{code:`from transformers import LukeConfig, LukeModel # Initializing a LUKE configuration configuration = LukeConfig() # Initializing a model from the configuration model = LukeModel(configuration) # Accessing the model configuration configuration = model.config`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> LukeConfig, LukeModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a LUKE configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = LukeConfig() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a model from the configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = LukeModel(configuration) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = model.config`}}),ct=new ze({}),pt=new W({props:{name:"class 
transformers.LukeTokenizer",anchor:"transformers.LukeTokenizer",parameters:[{name:"vocab_file",val:""},{name:"merges_file",val:""},{name:"entity_vocab_file",val:""},{name:"task",val:" = None"},{name:"max_entity_length",val:" = 32"},{name:"max_mention_length",val:" = 30"},{name:"entity_token_1",val:" = '<ent>'"},{name:"entity_token_2",val:" = '<ent2>'"},{name:"entity_unk_token",val:" = '[UNK]'"},{name:"entity_pad_token",val:" = '[PAD]'"},{name:"entity_mask_token",val:" = '[MASK]'"},{name:"entity_mask2_token",val:" = '[MASK2]'"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/luke/tokenization_luke.py#L152",parametersDescription:[{anchor:"transformers.LukeTokenizer.vocab_file",description:`<strong>vocab_file</strong> (<code>str</code>) &#x2014; Path to the vocabulary file.`,name:"vocab_file"},{anchor:"transformers.LukeTokenizer.merges_file",description:`<strong>merges_file</strong> (<code>str</code>) &#x2014; Path to the merges file.`,name:"merges_file"},{anchor:"transformers.LukeTokenizer.entity_vocab_file",description:`<strong>entity_vocab_file</strong> (<code>str</code>) &#x2014; Path to the entity vocabulary file.`,name:"entity_vocab_file"},{anchor:"transformers.LukeTokenizer.task",description:`<strong>task</strong> (<code>str</code>, <em>optional</em>) &#x2014; Task for which you want to prepare sequences. One of <code>&quot;entity_classification&quot;</code>, <code>&quot;entity_pair_classification&quot;</code>, or <code>&quot;entity_span_classification&quot;</code>. If you specify this argument, the entity sequence is automatically created based on the given entity span(s).`,name:"task"},{anchor:"transformers.LukeTokenizer.max_entity_length",description:`<strong>max_entity_length</strong> (<code>int</code>, <em>optional</em>, defaults to 32) &#x2014; The maximum length of <code>entity_ids</code>.`,name:"max_entity_length"},{anchor:"transformers.LukeTokenizer.max_mention_length",description:`<strong>max_mention_length</strong> (<code>int</code>, <em>optional</em>, defaults to 30) &#x2014; The maximum number of tokens inside an entity span.`,name:"max_mention_length"},{anchor:"transformers.LukeTokenizer.entity_token_1",description:`<strong>entity_token_1</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&lt;ent&gt;</code>) &#x2014; The special token used to represent an entity span in a word token sequence. This token is only used when <code>task</code> is set to <code>&quot;entity_classification&quot;</code> or <code>&quot;entity_pair_classification&quot;</code>.`,name:"entity_token_1"},{anchor:"transformers.LukeTokenizer.entity_token_2",description:`<strong>entity_token_2</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&lt;ent2&gt;</code>) &#x2014; The special token used to represent an entity span in a word token sequence. 
This token is only used when <code>task</code> is set to <code>&quot;entity_pair_classification&quot;</code>.`,name:"entity_token_2"}]}}),ht=new W({props:{name:"__call__",anchor:"transformers.LukeTokenizer.__call__",parameters:[{name:"text",val:": typing.Union[str, typing.List[str]]"},{name:"text_pair",val:": typing.Union[str, typing.List[str], NoneType] = None"},{name:"entity_spans",val:": typing.Union[typing.List[typing.Tuple[int, int]], typing.List[typing.List[typing.Tuple[int, int]]], NoneType] = None"},{name:"entity_spans_pair",val:": typing.Union[typing.List[typing.Tuple[int, int]], typing.List[typing.List[typing.Tuple[int, int]]], NoneType] = None"},{name:"entities",val:": typing.Union[typing.List[str], typing.List[typing.List[str]], NoneType] = None"},{name:"entities_pair",val:": typing.Union[typing.List[str], typing.List[typing.List[str]], NoneType] = None"},{name:"add_special_tokens",val:": bool = True"},{name:"padding",val:": typing.Union[bool, str, transformers.file_utils.PaddingStrategy] = False"},{name:"truncation",val:": typing.Union[bool, str, transformers.tokenization_utils_base.TruncationStrategy] = False"},{name:"max_length",val:": typing.Optional[int] = None"},{name:"max_entity_length",val:": typing.Optional[int] = None"},{name:"stride",val:": int = 0"},{name:"is_split_into_words",val:": typing.Optional[bool] = False"},{name:"pad_to_multiple_of",val:": typing.Optional[int] = None"},{name:"return_tensors",val:": typing.Union[str, transformers.file_utils.TensorType, NoneType] = None"},{name:"return_token_type_ids",val:": typing.Optional[bool] = None"},{name:"return_attention_mask",val:": typing.Optional[bool] = None"},{name:"return_overflowing_tokens",val:": bool = False"},{name:"return_special_tokens_mask",val:": bool = False"},{name:"return_offsets_mapping",val:": bool = False"},{name:"return_length",val:": bool = False"},{name:"verbose",val:": bool = True"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/luke/tokenization_luke.py#L261",parametersDescription:[{anchor:"transformers.LukeTokenizer.__call__.text",description:`<strong>text</strong> (<code>str</code>, <code>List[str]</code>, <code>List[List[str]]</code>) &#x2014; The sequence or batch of sequences to be encoded. Each sequence must be a string. Note that this tokenizer does not support tokenization based on pretokenized strings.`,name:"text"},{anchor:"transformers.LukeTokenizer.__call__.text_pair",description:`<strong>text_pair</strong> (<code>str</code>, <code>List[str]</code>, <code>List[List[str]]</code>) &#x2014; The sequence or batch of sequences to be encoded. Each sequence must be a string. Note that this tokenizer does not support tokenization based on pretokenized strings.`,name:"text_pair"},{anchor:"transformers.LukeTokenizer.__call__.entity_spans",description:`<strong>entity_spans</strong> (<code>List[Tuple[int, int]]</code>, <code>List[List[Tuple[int, int]]]</code>, <em>optional</em>) &#x2014; The sequence or batch of sequences of entity spans to be encoded. Each sequence consists of tuples each with two integers denoting character-based start and end positions of entities. If you specify <code>&quot;entity_classification&quot;</code> or <code>&quot;entity_pair_classification&quot;</code> as the <code>task</code> argument in the constructor, the length of each sequence must be 1 or 2, respectively. 
If you specify <code>entities</code>, the length of each sequence must be equal to the length of each sequence of <code>entities</code>.`,name:"entity_spans"},{anchor:"transformers.LukeTokenizer.__call__.entity_spans_pair",description:`<strong>entity_spans_pair</strong> (<code>List[Tuple[int, int]]</code>, <code>List[List[Tuple[int, int]]]</code>, <em>optional</em>) &#x2014; The sequence or batch of sequences of entity spans to be encoded. Each sequence consists of tuples each with two integers denoting character-based start and end positions of entities. If you specify the <code>task</code> argument in the constructor, this argument is ignored. If you specify <code>entities_pair</code>, the length of each sequence must be equal to the length of each sequence of <code>entities_pair</code>.`,name:"entity_spans_pair"},{anchor:"transformers.LukeTokenizer.__call__.entities",description:`<strong>entities</strong> (<code>List[str]</code>, <code>List[List[str]]</code>, <em>optional</em>) &#x2014; The sequence or batch of sequences of entities to be encoded. Each sequence consists of strings representing entities, i.e., special entities (e.g., [MASK]) or entity titles of Wikipedia (e.g., Los Angeles). This argument is ignored if you specify the <code>task</code> argument in the constructor. The length of each sequence must be equal to the length of each sequence of <code>entity_spans</code>. If you specify <code>entity_spans</code> without specifying this argument, the entity sequence or the batch of entity sequences is automatically constructed by filling it with the [MASK] entity.`,name:"entities"},{anchor:"transformers.LukeTokenizer.__call__.entities_pair",description:`<strong>entities_pair</strong> (<code>List[str]</code>, <code>List[List[str]]</code>, <em>optional</em>) &#x2014; The sequence or batch of sequences of entities to be encoded. Each sequence consists of strings representing entities, i.e., special entities (e.g., [MASK]) or entity titles of Wikipedia (e.g., Los Angeles). This argument is ignored if you specify the <code>task</code> argument in the constructor. The length of each sequence must be equal to the length of each sequence of <code>entity_spans_pair</code>. If you specify <code>entity_spans_pair</code> without specifying this argument, the entity sequence or the batch of entity sequences is automatically constructed by filling it with the [MASK] entity.`,name:"entities_pair"},{anchor:"transformers.LukeTokenizer.__call__.max_entity_length",description:`<strong>max_entity_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; The maximum length of <code>entity_ids</code>.`,name:"max_entity_length"},{anchor:"transformers.LukeTokenizer.__call__.add_special_tokens",description:`<strong>add_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to encode the sequences with the special tokens relative to their model.`,name:"add_special_tokens"},{anchor:"transformers.LukeTokenizer.__call__.padding",description:`<strong>padding</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/file_utils#transformers.file_utils.PaddingStrategy">PaddingStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls padding. 
Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest&apos;</code>: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided).</li> <li><code>&apos;max_length&apos;</code>: Pad to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided.</li> <li><code>False</code> or <code>&apos;do_not_pad&apos;</code> (default): No padding (i.e., can output a batch with sequences of different lengths).</li> </ul>`,name:"padding"},{anchor:"transformers.LukeTokenizer.__call__.truncation",description:`<strong>truncation</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.tokenization_utils_base.TruncationStrategy">TruncationStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls truncation. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_second&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>False</code> or <code>&apos;do_not_truncate&apos;</code> (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size).</li> </ul>`,name:"truncation"},{anchor:"transformers.LukeTokenizer.__call__.max_length",description:`<strong>max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; Controls the maximum length to use by one of the truncation/padding parameters.</p> <p>If left unset or set to <code>None</code>, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated.`,name:"max_length"},{anchor:"transformers.LukeTokenizer.__call__.stride",description:`<strong>stride</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If set to a number along with <code>max_length</code>, the overflowing tokens returned when <code>return_overflowing_tokens=True</code> will contain some tokens from the end of the truncated sequence returned to provide some overlap between truncated and overflowing sequences. 
The value of this argument defines the number of overlapping tokens.`,name:"stride"},{anchor:"transformers.LukeTokenizer.__call__.is_split_into_words",description:`<strong>is_split_into_words</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the input is already pre-tokenized (e.g., split into words). If set to <code>True</code>, the tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace) which it will tokenize. This is useful for NER or token classification.`,name:"is_split_into_words"},{anchor:"transformers.LukeTokenizer.__call__.pad_to_multiple_of",description:`<strong>pad_to_multiple_of</strong> (<code>int</code>, <em>optional</em>) &#x2014; If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability &gt;= 7.5 (Volta).`,name:"pad_to_multiple_of"},{anchor:"transformers.LukeTokenizer.__call__.return_tensors",description:`<strong>return_tensors</strong> (<code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>) &#x2014; If set, will return tensors instead of list of python integers. Acceptable values are:</p> <ul> <li><code>&apos;tf&apos;</code>: Return TensorFlow <code>tf.constant</code> objects.</li> <li><code>&apos;pt&apos;</code>: Return PyTorch <code>torch.Tensor</code> objects.</li> <li><code>&apos;np&apos;</code>: Return Numpy <code>np.ndarray</code> objects.</li> </ul>`,name:"return_tensors"},{anchor:"transformers.LukeTokenizer.__call__.return_token_type_ids",description:`<strong>return_token_type_ids</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to return token type IDs. If left to the default, will return the token type IDs according to the specific tokenizer&#x2019;s default, defined by the <code>return_outputs</code> attribute.</p> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"return_token_type_ids"},{anchor:"transformers.LukeTokenizer.__call__.return_attention_mask",description:`<strong>return_attention_mask</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to return the attention mask. If left to the default, will return the attention mask according to the specific tokenizer&#x2019;s default, defined by the <code>return_outputs</code> attribute.</p> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"return_attention_mask"},{anchor:"transformers.LukeTokenizer.__call__.return_overflowing_tokens",description:`<strong>return_overflowing_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return overflowing token sequences. 
If a pair of sequences of input ids (or a batch of pairs) is provided with <code>truncation_strategy = longest_first</code> or <code>True</code>, an error is raised instead of returning overflowing tokens.`,name:"return_overflowing_tokens"},{anchor:"transformers.LukeTokenizer.__call__.return_special_tokens_mask",description:`<strong>return_special_tokens_mask</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return special tokens mask information.`,name:"return_special_tokens_mask"},{anchor:"transformers.LukeTokenizer.__call__.return_offsets_mapping",description:`<strong>return_offsets_mapping</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return <code>(char_start, char_end)</code> for each token.</p> <p>This is only available on fast tokenizers inheriting from <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast">PreTrainedTokenizerFast</a>, if using Python&#x2019;s tokenizer, this method will raise <code>NotImplementedError</code>.`,name:"return_offsets_mapping"},{anchor:"transformers.LukeTokenizer.__call__.return_length",description:`<strong>return_length</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the lengths of the encoded inputs.`,name:"return_length"},{anchor:"transformers.LukeTokenizer.__call__.verbose",description:`<strong>verbose</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to print more information and warnings. **kwargs &#x2014; passed to the <code>self.tokenize()</code> method`,name:"verbose"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a> with the following fields:</p> <ul> <li> <p><strong>input_ids</strong> \u2014 List of token ids to be fed to a model.</p> <p><a href="../glossary#input-ids">What are input IDs?</a></p> </li> <li> <p><strong>token_type_ids</strong> \u2014 List of token type ids to be fed to a model (when <code>return_token_type_ids=True</code> or if <em>\u201Ctoken_type_ids\u201D</em> is in <code>self.model_input_names</code>).</p> <p><a href="../glossary#token-type-ids">What are token type IDs?</a></p> </li> <li> <p><strong>attention_mask</strong> \u2014 List of indices specifying which tokens should be attended to by the model (when <code>return_attention_mask=True</code> or if <em>\u201Cattention_mask\u201D</em> is in <code>self.model_input_names</code>).</p> <p><a href="../glossary#attention-mask">What are attention masks?</a></p> </li> <li> <p><strong>entity_ids</strong> \u2014 List of entity ids to be fed to a model.</p> <p><a href="../glossary#input-ids">What are input IDs?</a></p> </li> <li> <p><strong>entity_position_ids</strong> \u2014 List of entity positions in the input sequence to be fed to a model.</p> </li> <li> <p><strong>entity_token_type_ids</strong> \u2014 List of entity token type ids to be fed to a model (when <code>return_token_type_ids=True</code> or if <em>\u201Centity_token_type_ids\u201D</em> is in <code>self.model_input_names</code>).</p> <p><a href="../glossary#token-type-ids">What are token type IDs?</a></p> </li> <li> <p><strong>entity_attention_mask</strong> \u2014 List of indices specifying which entities should be attended to by the model (when <code>return_attention_mask=True</code> or if <em>\u201Centity_attention_mask\u201D</em> is in 
<code>self.model_input_names</code>).</p> <p><a href="../glossary#attention-mask">What are attention masks?</a></p> </li> <li> <p><strong>entity_start_positions</strong> \u2014 List of the start positions of entities in the word token sequence (when <code>task="entity_span_classification"</code>).</p> </li> <li> <p><strong>entity_end_positions</strong> \u2014 List of the end positions of entities in the word token sequence (when <code>task="entity_span_classification"</code>).</p> </li> <li> <p><strong>overflowing_tokens</strong> \u2014 List of overflowing tokens sequences (when a <code>max_length</code> is specified and <code>return_overflowing_tokens=True</code>).</p> </li> <li> <p><strong>num_truncated_tokens</strong> \u2014 Number of tokens truncated (when a <code>max_length</code> is specified and <code>return_overflowing_tokens=True</code>).</p> </li> <li> <p><strong>special_tokens_mask</strong> \u2014 List of 0s and 1s, with 1 specifying added special tokens and 0 specifying regular sequence tokens (when <code>add_special_tokens=True</code> and <code>return_special_tokens_mask=True</code>).</p> </li> <li> <p><strong>length</strong> \u2014 The length of the inputs (when <code>return_length=True</code>)</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a></p> `}}),ut=new W({props:{name:"save_vocabulary",anchor:"transformers.LukeTokenizer.save_vocabulary",parameters:[{name:"save_directory",val:": str"},{name:"filename_prefix",val:": typing.Optional[str] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/luke/tokenization_luke.py#L1379"}}),mt=new ze({}),ft=new W({props:{name:"class transformers.LukeModel",anchor:"transformers.LukeModel",parameters:[{name:"config",val:": LukeConfig"},{name:"add_pooling_layer",val:": bool = True"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/luke/modeling_luke.py#L879",parametersDescription:[{anchor:"transformers.LukeModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/luke#transformers.LukeConfig">LukeConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),yt=new W({props:{name:"forward",anchor:"transformers.LukeModel.forward",parameters:[{name:"input_ids",val:": typing.Optional[torch.LongTensor] = None"},{name:"attention_mask",val:": typing.Optional[torch.FloatTensor] = None"},{name:"token_type_ids",val:": typing.Optional[torch.LongTensor] = None"},{name:"position_ids",val:": typing.Optional[torch.LongTensor] = None"},{name:"entity_ids",val:": typing.Optional[torch.LongTensor] = None"},{name:"entity_attention_mask",val:": typing.Optional[torch.FloatTensor] = None"},{name:"entity_token_type_ids",val:": typing.Optional[torch.LongTensor] = None"},{name:"entity_position_ids",val:": typing.Optional[torch.LongTensor] = None"},{name:"head_mask",val:": typing.Optional[torch.FloatTensor] = None"},{name:"inputs_embeds",val:": typing.Optional[torch.FloatTensor] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/luke/modeling_luke.py#L911",parametersDescription:[{anchor:"transformers.LukeModel.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/luke#transformers.LukeTokenizer">LukeTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.LukeModel.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.LukeModel.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.LukeModel.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.LukeModel.forward.entity_ids",description:`<strong>entity_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, entity_length)</code>) &#x2014; Indices of entity tokens in the entity vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/luke#transformers.LukeTokenizer">LukeTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.`,name:"entity_ids"},{anchor:"transformers.LukeModel.forward.entity_attention_mask",description:`<strong>entity_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, entity_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding entity token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for entity tokens that are <strong>not masked</strong>,</li> <li>0 for entity tokens that are <strong>masked</strong>.</li> </ul>`,name:"entity_attention_mask"},{anchor:"transformers.LukeModel.forward.entity_token_type_ids",description:`<strong>entity_token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, entity_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the entity token inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>portion A</em> entity token,</li> <li>1 corresponds to a <em>portion B</em> entity token.</li> </ul>`,name:"entity_token_type_ids"},{anchor:"transformers.LukeModel.forward.entity_position_ids",description:`<strong>entity_position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, entity_length, max_mention_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input entity in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"entity_position_ids"},{anchor:"transformers.LukeModel.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.LukeModel.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.LukeModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.LukeModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.LukeModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <code>transformers.models.luke.modeling_luke.BaseLukeModelOutputWithPooling</code>or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/luke#transformers.LukeConfig" >LukeConfig</a>) and inputs.</p> <ul> <li><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</li> <li><strong>entity_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, entity_length, hidden_size)</code>) \u2014 Sequence of entity hidden-states at the output of the last layer of the model.</li> <li><strong>pooler_output</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, hidden_size)</code>) \u2014 Last layer hidden-state of the first token of the sequence (classification token) further processed by a Linear layer and a Tanh activation function.</li> <li><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>. Hidden-states of the model at the output of each layer plus the initial embedding outputs.</li> <li><strong>entity_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, entity_length, hidden_size)</code>. 
Entity hidden-states of the model at the output of each layer plus the initial entity embedding outputs.</li> <li><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length + entity_length, sequence_length + entity_length)</code>. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</li> </ul> `,returnType:` <p><code>transformers.models.luke.modeling_luke.BaseLukeModelOutputWithPooling</code>or <code>tuple(torch.FloatTensor)</code></p> `}}),Ae=new Ro({props:{$$slots:{default:[ip]},$$scope:{ctx:U}}}),bt=new Sn({props:{code:`from transformers import LukeTokenizer, LukeModel tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-base") model = LukeModel.from_pretrained("studio-ousia/luke-base") text = "Beyonc\xE9 lives in Los Angeles." entity_spans = [(0, 7)] # character-based entity span corresponding to "Beyonc\xE9" encoding = tokenizer(text, entity_spans=entity_spans, add_prefix_space=True, return_tensors="pt") outputs = model(**encoding) word_last_hidden_state = outputs.last_hidden_state entity_last_hidden_state = outputs.entity_last_hidden_state text = "Beyonc\xE9 lives in Los Angeles." entities = [ "Beyonc\xE9", "Los Angeles", ] # Wikipedia entity titles corresponding to the entity mentions "Beyonc\xE9" and "Los Angeles" entity_spans = [ (0, 7), (17, 28), ] # character-based entity spans corresponding to "Beyonc\xE9" and "Los Angeles" encoding = tokenizer( text, entities=entities, entity_spans=entity_spans, add_prefix_space=True, return_tensors="pt" ) outputs = model(**encoding) word_last_hidden_state = outputs.last_hidden_state entity_last_hidden_state = outputs.entity_last_hidden_state`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> LukeTokenizer, LukeModel <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = LukeTokenizer.from_pretrained(<span class="hljs-string">&quot;studio-ousia/luke-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = LukeModel.from_pretrained(<span class="hljs-string">&quot;studio-ousia/luke-base&quot;</span>) <span class="hljs-comment"># Compute the contextualized entity representation corresponding to the entity mention &quot;Beyonc\xE9&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>text = <span class="hljs-string">&quot;Beyonc\xE9 lives in Los Angeles.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>entity_spans = [(<span class="hljs-number">0</span>, <span class="hljs-number">7</span>)] <span class="hljs-comment"># character-based entity span corresponding to &quot;Beyonc\xE9&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer(text, entity_spans=entity_spans, add_prefix_space=<span class="hljs-literal">True</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**encoding) <span class="hljs-meta">&gt;&gt;&gt; </span>word_last_hidden_state = outputs.last_hidden_state <span class="hljs-meta">&gt;&gt;&gt; </span>entity_last_hidden_state = outputs.entity_last_hidden_state <span class="hljs-comment"># Input Wikipedia entities to obtain enriched contextualized representations of word tokens</span> 
<span class="hljs-meta">&gt;&gt;&gt; </span>text = <span class="hljs-string">&quot;Beyonc\xE9 lives in Los Angeles.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>entities = [ <span class="hljs-meta">... </span> <span class="hljs-string">&quot;Beyonc\xE9&quot;</span>, <span class="hljs-meta">... </span> <span class="hljs-string">&quot;Los Angeles&quot;</span>, <span class="hljs-meta">&gt;&gt;&gt; </span>] <span class="hljs-comment"># Wikipedia entity titles corresponding to the entity mentions &quot;Beyonc\xE9&quot; and &quot;Los Angeles&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>entity_spans = [ <span class="hljs-meta">... </span> (<span class="hljs-number">0</span>, <span class="hljs-number">7</span>), <span class="hljs-meta">... </span> (<span class="hljs-number">17</span>, <span class="hljs-number">28</span>), <span class="hljs-meta">&gt;&gt;&gt; </span>] <span class="hljs-comment"># character-based entity spans corresponding to &quot;Beyonc\xE9&quot; and &quot;Los Angeles&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer( <span class="hljs-meta">... </span> text, entities=entities, entity_spans=entity_spans, add_prefix_space=<span class="hljs-literal">True</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span> <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**encoding) <span class="hljs-meta">&gt;&gt;&gt; </span>word_last_hidden_state = outputs.last_hidden_state <span class="hljs-meta">&gt;&gt;&gt; </span>entity_last_hidden_state = outputs.entity_last_hidden_state`}}),vt=new ze({}),Tt=new W({props:{name:"class transformers.LukeForMaskedLM",anchor:"transformers.LukeForMaskedLM",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/luke/modeling_luke.py#L1132",parametersDescription:[{anchor:"transformers.LukeForMaskedLM.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/luke#transformers.LukeConfig">LukeConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),zt=new W({props:{name:"forward",anchor:"transformers.LukeForMaskedLM.forward",parameters:[{name:"input_ids",val:": typing.Optional[torch.LongTensor] = None"},{name:"attention_mask",val:": typing.Optional[torch.FloatTensor] = None"},{name:"token_type_ids",val:": typing.Optional[torch.LongTensor] = None"},{name:"position_ids",val:": typing.Optional[torch.LongTensor] = None"},{name:"entity_ids",val:": typing.Optional[torch.LongTensor] = None"},{name:"entity_attention_mask",val:": typing.Optional[torch.LongTensor] = None"},{name:"entity_token_type_ids",val:": typing.Optional[torch.LongTensor] = None"},{name:"entity_position_ids",val:": typing.Optional[torch.LongTensor] = None"},{name:"labels",val:": typing.Optional[torch.LongTensor] = None"},{name:"entity_labels",val:": typing.Optional[torch.LongTensor] = None"},{name:"head_mask",val:": typing.Optional[torch.FloatTensor] = None"},{name:"inputs_embeds",val:": typing.Optional[torch.FloatTensor] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/luke/modeling_luke.py#L1168",parametersDescription:[{anchor:"transformers.LukeForMaskedLM.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/luke#transformers.LukeTokenizer">LukeTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.LukeForMaskedLM.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.LukeForMaskedLM.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.LukeForMaskedLM.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.LukeForMaskedLM.forward.entity_ids",description:`<strong>entity_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, entity_length)</code>) &#x2014; Indices of entity tokens in the entity vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/luke#transformers.LukeTokenizer">LukeTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.`,name:"entity_ids"},{anchor:"transformers.LukeForMaskedLM.forward.entity_attention_mask",description:`<strong>entity_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, entity_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding entity token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for entity tokens that are <strong>not masked</strong>,</li> <li>0 for entity tokens that are <strong>masked</strong>.</li> </ul>`,name:"entity_attention_mask"},{anchor:"transformers.LukeForMaskedLM.forward.entity_token_type_ids",description:`<strong>entity_token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, entity_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the entity token inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>portion A</em> entity token,</li> <li>1 corresponds to a <em>portion B</em> entity token.</li> </ul>`,name:"entity_token_type_ids"},{anchor:"transformers.LukeForMaskedLM.forward.entity_position_ids",description:`<strong>entity_position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, entity_length, max_mention_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input entity in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"entity_position_ids"},{anchor:"transformers.LukeForMaskedLM.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.LukeForMaskedLM.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.LukeForMaskedLM.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.LukeForMaskedLM.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.LukeForMaskedLM.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.LukeForMaskedLM.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>`,name:"labels"},{anchor:"transformers.LukeForMaskedLM.forward.entity_labels",description:`<strong>entity_labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, entity_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. 
Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>`,name:"entity_labels"}],returnDescription:` <p>A <code>transformers.models.luke.modeling_luke.LukeMaskedLMOutput</code>or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/luke#transformers.LukeConfig" >LukeConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 The sum of masked language modeling (MLM) loss and entity prediction loss.</p> </li> <li> <p><strong>mlm_loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Masked language modeling (MLM) loss.</p> </li> <li> <p><strong>mep_loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Masked entity prediction (MEP) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>entity_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the entity prediction head (scores for each entity vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>entity_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, entity_length, hidden_size)</code>. 
Entity hidden-states of the model at the output of each layer plus the initial entity embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><code>transformers.models.luke.modeling_luke.LukeMaskedLMOutput</code>or <code>tuple(torch.FloatTensor)</code></p> `}}),Oe=new Ro({props:{$$slots:{default:[dp]},$$scope:{ctx:U}}}),xt=new ze({}),$t=new W({props:{name:"class transformers.LukeForEntityClassification",anchor:"transformers.LukeForEntityClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/luke/modeling_luke.py#L1268",parametersDescription:[{anchor:"transformers.LukeForEntityClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/luke#transformers.LukeConfig">LukeConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Mt=new W({props:{name:"forward",anchor:"transformers.LukeForEntityClassification.forward",parameters:[{name:"input_ids",val:": typing.Optional[torch.LongTensor] = None"},{name:"attention_mask",val:": typing.Optional[torch.FloatTensor] = None"},{name:"token_type_ids",val:": typing.Optional[torch.LongTensor] = None"},{name:"position_ids",val:": typing.Optional[torch.LongTensor] = None"},{name:"entity_ids",val:": typing.Optional[torch.LongTensor] = None"},{name:"entity_attention_mask",val:": typing.Optional[torch.FloatTensor] = None"},{name:"entity_token_type_ids",val:": typing.Optional[torch.LongTensor] = None"},{name:"entity_position_ids",val:": typing.Optional[torch.LongTensor] = None"},{name:"head_mask",val:": typing.Optional[torch.FloatTensor] = None"},{name:"inputs_embeds",val:": typing.Optional[torch.FloatTensor] = None"},{name:"labels",val:": typing.Optional[torch.FloatTensor] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/luke/modeling_luke.py#L1281",parametersDescription:[{anchor:"transformers.LukeForEntityClassification.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/luke#transformers.LukeTokenizer">LukeTokenizer</a>. 
See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.LukeForEntityClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.LukeForEntityClassification.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.LukeForEntityClassification.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.LukeForEntityClassification.forward.entity_ids",description:`<strong>entity_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, entity_length)</code>) &#x2014; Indices of entity tokens in the entity vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/luke#transformers.LukeTokenizer">LukeTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.`,name:"entity_ids"},{anchor:"transformers.LukeForEntityClassification.forward.entity_attention_mask",description:`<strong>entity_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, entity_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding entity token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for entity tokens that are <strong>not masked</strong>,</li> <li>0 for entity tokens that are <strong>masked</strong>.</li> </ul>`,name:"entity_attention_mask"},{anchor:"transformers.LukeForEntityClassification.forward.entity_token_type_ids",description:`<strong>entity_token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, entity_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the entity token inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>portion A</em> entity token,</li> <li>1 corresponds to a <em>portion B</em> entity token.</li> </ul>`,name:"entity_token_type_ids"},{anchor:"transformers.LukeForEntityClassification.forward.entity_position_ids",description:`<strong>entity_position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, entity_length, max_mention_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input entity in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"entity_position_ids"},{anchor:"transformers.LukeForEntityClassification.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.LukeForEntityClassification.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.LukeForEntityClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.LukeForEntityClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.LukeForEntityClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.LukeForEntityClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code> or <code>(batch_size, num_labels)</code>, <em>optional</em>) &#x2014; Labels for computing the classification loss. If the shape is <code>(batch_size,)</code>, the cross entropy loss is used for the single-label classification. In this case, labels should contain the indices that should be in <code>[0, ..., config.num_labels - 1]</code>. If the shape is <code>(batch_size, num_labels)</code>, the binary cross entropy loss is used for the multi-label classification. In this case, labels should only contain <code>[0, 1]</code>, where 0 and 1 indicate false and true, respectively.`,name:"labels"}],returnDescription:` <p>A <code>transformers.models.luke.modeling_luke.EntityClassificationOutput</code>or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/luke#transformers.LukeConfig" >LukeConfig</a>) and inputs.</p> <ul> <li><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification loss.</li> <li><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification scores (before SoftMax).</li> <li><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>. Hidden-states of the model at the output of each layer plus the initial embedding outputs.</li> <li><strong>entity_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, entity_length, hidden_size)</code>. Entity hidden-states of the model at the output of each layer plus the initial entity embedding outputs.</li> <li><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. 
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</li> </ul> `,returnType:` <p><code>transformers.models.luke.modeling_luke.EntityClassificationOutput</code>or <code>tuple(torch.FloatTensor)</code></p> `}}),Ne=new Ro({props:{$$slots:{default:[lp]},$$scope:{ctx:U}}}),Pt=new Sn({props:{code:`from transformers import LukeTokenizer, LukeForEntityClassification tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-large-finetuned-open-entity") model = LukeForEntityClassification.from_pretrained("studio-ousia/luke-large-finetuned-open-entity") text = "Beyonc\xE9 lives in Los Angeles." entity_spans = [(0, 7)] # character-based entity span corresponding to "Beyonc\xE9" inputs = tokenizer(text, entity_spans=entity_spans, return_tensors="pt") outputs = model(**inputs) logits = outputs.logits predicted_class_idx = logits.argmax(-1).item() print("Predicted class:", model.config.id2label[predicted_class_idx])`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> LukeTokenizer, LukeForEntityClassification <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = LukeTokenizer.from_pretrained(<span class="hljs-string">&quot;studio-ousia/luke-large-finetuned-open-entity&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = LukeForEntityClassification.from_pretrained(<span class="hljs-string">&quot;studio-ousia/luke-large-finetuned-open-entity&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>text = <span class="hljs-string">&quot;Beyonc\xE9 lives in Los Angeles.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>entity_spans = [(<span class="hljs-number">0</span>, <span class="hljs-number">7</span>)] <span class="hljs-comment"># character-based entity span corresponding to &quot;Beyonc\xE9&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(text, entity_spans=entity_spans, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits <span class="hljs-meta">&gt;&gt;&gt; </span>predicted_class_idx = logits.argmax(-<span class="hljs-number">1</span>).item() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(<span class="hljs-string">&quot;Predicted class:&quot;</span>, model.config.id2label[predicted_class_idx]) Predicted <span class="hljs-keyword">class</span>: person`}}),jt=new ze({}),At=new W({props:{name:"class transformers.LukeForEntityPairClassification",anchor:"transformers.LukeForEntityPairClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/luke/modeling_luke.py#L1383",parametersDescription:[{anchor:"transformers.LukeForEntityPairClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/luke#transformers.LukeConfig">LukeConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Nt=new W({props:{name:"forward",anchor:"transformers.LukeForEntityPairClassification.forward",parameters:[{name:"input_ids",val:": typing.Optional[torch.LongTensor] = None"},{name:"attention_mask",val:": typing.Optional[torch.FloatTensor] = None"},{name:"token_type_ids",val:": typing.Optional[torch.LongTensor] = None"},{name:"position_ids",val:": typing.Optional[torch.LongTensor] = None"},{name:"entity_ids",val:": typing.Optional[torch.LongTensor] = None"},{name:"entity_attention_mask",val:": typing.Optional[torch.FloatTensor] = None"},{name:"entity_token_type_ids",val:": typing.Optional[torch.LongTensor] = None"},{name:"entity_position_ids",val:": typing.Optional[torch.LongTensor] = None"},{name:"head_mask",val:": typing.Optional[torch.FloatTensor] = None"},{name:"inputs_embeds",val:": typing.Optional[torch.FloatTensor] = None"},{name:"labels",val:": typing.Optional[torch.LongTensor] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/luke/modeling_luke.py#L1396",parametersDescription:[{anchor:"transformers.LukeForEntityPairClassification.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/luke#transformers.LukeTokenizer">LukeTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.LukeForEntityPairClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.LukeForEntityPairClassification.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.LukeForEntityPairClassification.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.LukeForEntityPairClassification.forward.entity_ids",description:`<strong>entity_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, entity_length)</code>) &#x2014; Indices of entity tokens in the entity vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/luke#transformers.LukeTokenizer">LukeTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.`,name:"entity_ids"},{anchor:"transformers.LukeForEntityPairClassification.forward.entity_attention_mask",description:`<strong>entity_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, entity_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding entity token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for entity tokens that are <strong>not masked</strong>,</li> <li>0 for entity tokens that are <strong>masked</strong>.</li> </ul>`,name:"entity_attention_mask"},{anchor:"transformers.LukeForEntityPairClassification.forward.entity_token_type_ids",description:`<strong>entity_token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, entity_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the entity token inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>portion A</em> entity token,</li> <li>1 corresponds to a <em>portion B</em> entity token.</li> </ul>`,name:"entity_token_type_ids"},{anchor:"transformers.LukeForEntityPairClassification.forward.entity_position_ids",description:`<strong>entity_position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, entity_length, max_mention_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input entity in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"entity_position_ids"},{anchor:"transformers.LukeForEntityPairClassification.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.LukeForEntityPairClassification.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.LukeForEntityPairClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.LukeForEntityPairClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.LukeForEntityPairClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.LukeForEntityPairClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code> or <code>(batch_size, num_labels)</code>, <em>optional</em>) &#x2014; Labels for computing the classification loss. If the shape is <code>(batch_size,)</code>, the cross entropy loss is used for the single-label classification. In this case, labels should contain the indices that should be in <code>[0, ..., config.num_labels - 1]</code>. If the shape is <code>(batch_size, num_labels)</code>, the binary cross entropy loss is used for the multi-label classification. 
In this case, labels should only contain <code>[0, 1]</code>, where 0 and 1 indicate false and true, respectively.`,name:"labels"}],returnDescription:` <p>A <code>transformers.models.luke.modeling_luke.EntityPairClassificationOutput</code>or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/luke#transformers.LukeConfig" >LukeConfig</a>) and inputs.</p> <ul> <li><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification loss.</li> <li><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification scores (before SoftMax).</li> <li><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>. Hidden-states of the model at the output of each layer plus the initial embedding outputs.</li> <li><strong>entity_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, entity_length, hidden_size)</code>. Entity hidden-states of the model at the output of each layer plus the initial entity embedding outputs.</li> <li><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</li> </ul> `,returnType:` <p><code>transformers.models.luke.modeling_luke.EntityPairClassificationOutput</code>or <code>tuple(torch.FloatTensor)</code></p> `}}),Be=new Ro({props:{$$slots:{default:[cp]},$$scope:{ctx:U}}}),Dt=new Sn({props:{code:`from transformers import LukeTokenizer, LukeForEntityPairClassification tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-large-finetuned-tacred") model = LukeForEntityPairClassification.from_pretrained("studio-ousia/luke-large-finetuned-tacred") text = "Beyonc\xE9 lives in Los Angeles." 
entity_spans = [ (0, 7), (17, 28), ] # character-based entity spans corresponding to "Beyonc\xE9" and "Los Angeles" inputs = tokenizer(text, entity_spans=entity_spans, return_tensors="pt") outputs = model(**inputs) logits = outputs.logits predicted_class_idx = logits.argmax(-1).item() print("Predicted class:", model.config.id2label[predicted_class_idx])`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> LukeTokenizer, LukeForEntityPairClassification <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = LukeTokenizer.from_pretrained(<span class="hljs-string">&quot;studio-ousia/luke-large-finetuned-tacred&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = LukeForEntityPairClassification.from_pretrained(<span class="hljs-string">&quot;studio-ousia/luke-large-finetuned-tacred&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>text = <span class="hljs-string">&quot;Beyonc\xE9 lives in Los Angeles.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>entity_spans = [ <span class="hljs-meta">... </span> (<span class="hljs-number">0</span>, <span class="hljs-number">7</span>), <span class="hljs-meta">... </span> (<span class="hljs-number">17</span>, <span class="hljs-number">28</span>), <span class="hljs-meta">&gt;&gt;&gt; </span>] <span class="hljs-comment"># character-based entity spans corresponding to &quot;Beyonc\xE9&quot; and &quot;Los Angeles&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(text, entity_spans=entity_spans, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits <span class="hljs-meta">&gt;&gt;&gt; </span>predicted_class_idx = logits.argmax(-<span class="hljs-number">1</span>).item() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(<span class="hljs-string">&quot;Predicted class:&quot;</span>, model.config.id2label[predicted_class_idx]) Predicted <span class="hljs-keyword">class</span>: per:cities_of_residence`}}),Bt=new ze({}),Wt=new W({props:{name:"class transformers.LukeForEntitySpanClassification",anchor:"transformers.LukeForEntitySpanClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/luke/modeling_luke.py#L1503",parametersDescription:[{anchor:"transformers.LukeForEntitySpanClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/luke#transformers.LukeConfig">LukeConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Ht=new W({props:{name:"forward",anchor:"transformers.LukeForEntitySpanClassification.forward",parameters:[{name:"input_ids",val:": typing.Optional[torch.LongTensor] = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:": typing.Optional[torch.LongTensor] = None"},{name:"position_ids",val:": typing.Optional[torch.LongTensor] = None"},{name:"entity_ids",val:": typing.Optional[torch.LongTensor] = None"},{name:"entity_attention_mask",val:": typing.Optional[torch.LongTensor] = None"},{name:"entity_token_type_ids",val:": typing.Optional[torch.LongTensor] = None"},{name:"entity_position_ids",val:": typing.Optional[torch.LongTensor] = None"},{name:"entity_start_positions",val:": typing.Optional[torch.LongTensor] = None"},{name:"entity_end_positions",val:": typing.Optional[torch.LongTensor] = None"},{name:"head_mask",val:": typing.Optional[torch.FloatTensor] = None"},{name:"inputs_embeds",val:": typing.Optional[torch.FloatTensor] = None"},{name:"labels",val:": typing.Optional[torch.LongTensor] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/luke/modeling_luke.py#L1516",parametersDescription:[{anchor:"transformers.LukeForEntitySpanClassification.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/luke#transformers.LukeTokenizer">LukeTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.LukeForEntitySpanClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.LukeForEntitySpanClassification.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.LukeForEntitySpanClassification.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.LukeForEntitySpanClassification.forward.entity_ids",description:`<strong>entity_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, entity_length)</code>) &#x2014; Indices of entity tokens in the entity vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/luke#transformers.LukeTokenizer">LukeTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.`,name:"entity_ids"},{anchor:"transformers.LukeForEntitySpanClassification.forward.entity_attention_mask",description:`<strong>entity_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, entity_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding entity token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for entity tokens that are <strong>not masked</strong>,</li> <li>0 for entity tokens that are <strong>masked</strong>.</li> </ul>`,name:"entity_attention_mask"},{anchor:"transformers.LukeForEntitySpanClassification.forward.entity_token_type_ids",description:`<strong>entity_token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, entity_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the entity token inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>portion A</em> entity token,</li> <li>1 corresponds to a <em>portion B</em> entity token.</li> </ul>`,name:"entity_token_type_ids"},{anchor:"transformers.LukeForEntitySpanClassification.forward.entity_position_ids",description:`<strong>entity_position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, entity_length, max_mention_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input entity in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"entity_position_ids"},{anchor:"transformers.LukeForEntitySpanClassification.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.LukeForEntitySpanClassification.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.LukeForEntitySpanClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.LukeForEntitySpanClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.LukeForEntitySpanClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.LukeForEntitySpanClassification.forward.entity_start_positions",description:`<strong>entity_start_positions</strong> (<code>torch.LongTensor</code>) &#x2014; The start positions of entities in the word token sequence.`,name:"entity_start_positions"},{anchor:"transformers.LukeForEntitySpanClassification.forward.entity_end_positions",description:`<strong>entity_end_positions</strong> (<code>torch.LongTensor</code>) &#x2014; The end positions of entities in the word token sequence.`,name:"entity_end_positions"},{anchor:"transformers.LukeForEntitySpanClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, entity_length)</code> or <code>(batch_size, entity_length, num_labels)</code>, <em>optional</em>) &#x2014; Labels for computing the classification loss. If the shape is <code>(batch_size, entity_length)</code>, the cross entropy loss is used for the single-label classification. In this case, labels should contain the indices that should be in <code>[0, ..., config.num_labels - 1]</code>. If the shape is <code>(batch_size, entity_length, num_labels)</code>, the binary cross entropy loss is used for the multi-label classification. 
In this case, labels should only contain <code>[0, 1]</code>, where 0 and 1 indicate false and true, respectively.`,name:"labels"}],returnDescription:` <p>A <code>transformers.models.luke.modeling_luke.EntitySpanClassificationOutput</code>or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/luke#transformers.LukeConfig" >LukeConfig</a>) and inputs.</p> <ul> <li><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification loss.</li> <li><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification scores (before SoftMax).</li> <li><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>. Hidden-states of the model at the output of each layer plus the initial embedding outputs.</li> <li><strong>entity_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, entity_length, hidden_size)</code>. Entity hidden-states of the model at the output of each layer plus the initial entity embedding outputs.</li> <li><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. 
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</li> </ul> `,returnType:` <p><code>transformers.models.luke.modeling_luke.EntitySpanClassificationOutput</code>or <code>tuple(torch.FloatTensor)</code></p> `}}),Ue=new Ro({props:{$$slots:{default:[pp]},$$scope:{ctx:U}}}),Vt=new Sn({props:{code:`from transformers import LukeTokenizer, LukeForEntitySpanClassification tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-large-finetuned-conll-2003") model = LukeForEntitySpanClassification.from_pretrained("studio-ousia/luke-large-finetuned-conll-2003") text = "Beyonc\xE9 lives in Los Angeles" word_start_positions = [0, 8, 14, 17, 21] # character-based start positions of word tokens word_end_positions = [7, 13, 16, 20, 28] # character-based end positions of word tokens entity_spans = [] for i, start_pos in enumerate(word_start_positions): for end_pos in word_end_positions[i:]: entity_spans.append((start_pos, end_pos)) inputs = tokenizer(text, entity_spans=entity_spans, return_tensors="pt") outputs = model(**inputs) logits = outputs.logits predicted_class_indices = logits.argmax(-1).squeeze().tolist() for span, predicted_class_idx in zip(entity_spans, predicted_class_indices): if predicted_class_idx != 0: print(text[span[0] : span[1]], model.config.id2label[predicted_class_idx])`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> LukeTokenizer, LukeForEntitySpanClassification <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = LukeTokenizer.from_pretrained(<span class="hljs-string">&quot;studio-ousia/luke-large-finetuned-conll-2003&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = LukeForEntitySpanClassification.from_pretrained(<span class="hljs-string">&quot;studio-ousia/luke-large-finetuned-conll-2003&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>text = <span class="hljs-string">&quot;Beyonc\xE9 lives in Los Angeles&quot;</span> <span class="hljs-comment"># List all possible entity spans in the text</span> <span class="hljs-meta">&gt;&gt;&gt; </span>word_start_positions = [<span class="hljs-number">0</span>, <span class="hljs-number">8</span>, <span class="hljs-number">14</span>, <span class="hljs-number">17</span>, <span class="hljs-number">21</span>] <span class="hljs-comment"># character-based start positions of word tokens</span> <span class="hljs-meta">&gt;&gt;&gt; </span>word_end_positions = [<span class="hljs-number">7</span>, <span class="hljs-number">13</span>, <span class="hljs-number">16</span>, <span class="hljs-number">20</span>, <span class="hljs-number">28</span>] <span class="hljs-comment"># character-based end positions of word tokens</span> <span class="hljs-meta">&gt;&gt;&gt; </span>entity_spans = [] <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">for</span> i, start_pos <span class="hljs-keyword">in</span> <span class="hljs-built_in">enumerate</span>(word_start_positions): <span class="hljs-meta">... </span> <span class="hljs-keyword">for</span> end_pos <span class="hljs-keyword">in</span> word_end_positions[i:]: <span class="hljs-meta">... 
</span> entity_spans.append((start_pos, end_pos)) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(text, entity_spans=entity_spans, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits <span class="hljs-meta">&gt;&gt;&gt; </span>predicted_class_indices = logits.argmax(-<span class="hljs-number">1</span>).squeeze().tolist() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">for</span> span, predicted_class_idx <span class="hljs-keyword">in</span> <span class="hljs-built_in">zip</span>(entity_spans, predicted_class_indices): <span class="hljs-meta">... </span> <span class="hljs-keyword">if</span> predicted_class_idx != <span class="hljs-number">0</span>: <span class="hljs-meta">... </span> <span class="hljs-built_in">print</span>(text[span[<span class="hljs-number">0</span>] : span[<span class="hljs-number">1</span>]], model.config.id2label[predicted_class_idx]) Beyonc\xE9 PER Los Angeles LOC`}}),{c(){h=a("meta"),L=l(),m=a("h1"),w=a("a"),E=a("span"),_(g.$$.fragment),f=l(),z=a("span"),Ds=o("LUKE"),Ho=l(),le=a("h2"),xe=a("a"),Nn=a("span"),_(He.$$.fragment),Bs=l(),Dn=a("span"),Ws=o("Overview"),Vo=l(),$e=a("p"),Us=o("The LUKE model was proposed in "),Ve=a("a"),Ks=o("LUKE: Deep Contextualized Entity Representations with Entity-aware Self-attention"),Rs=o(` by Ikuya Yamada, Akari Asai, Hiroyuki Shindo, Hideaki Takeda and Yuji Matsumoto. It is based on RoBERTa and adds entity embeddings as well as an entity-aware self-attention mechanism, which helps improve performance on various downstream tasks involving reasoning about entities such as named entity recognition, extractive and cloze-style question answering, entity typing, and relation classification.`),Yo=l(),Jt=a("p"),Hs=o("The abstract from the paper is the following:"),Qo=l(),Xt=a("p"),Bn=a("em"),Vs=o(`Entity representations are useful in natural language tasks involving entities. In this paper, we propose new pretrained contextualized representations of words and entities based on the bidirectional transformer. The proposed model treats words and entities in a given text as independent tokens, and outputs contextualized representations of them. Our model is trained using a new pretraining task based on the masked language model of BERT. The task involves predicting randomly masked words and entities in a large entity-annotated corpus retrieved from Wikipedia. We also propose an entity-aware self-attention mechanism that is an extension of the self-attention mechanism of the transformer, and considers the types of tokens (words or entities) when computing attention scores. The proposed model achieves impressive empirical performance on a wide range of entity-related tasks. 
In particular, it obtains state-of-the-art results on five well-known datasets: Open Entity (entity typing), TACRED (relation classification), CoNLL-2003 (named entity recognition), ReCoRD (cloze-style question answering), and SQuAD 1.1 (extractive question answering).`),Jo=l(),Gt=a("p"),Ys=o("Tips:"),Xo=l(),R=a("ul"),Wn=a("li"),Ye=a("p"),Qs=o("This implementation is the same as "),Zt=a("a"),Js=o("RobertaModel"),Xs=o(` with the addition of entity embeddings as well as an entity-aware self-attention mechanism, which improves performance on tasks involving reasoning about entities.`),Gs=l(),Un=a("li"),q=a("p"),Zs=o("LUKE treats entities as input tokens; therefore, it takes "),Kn=a("code"),ea=o("entity_ids"),ta=o(", "),Rn=a("code"),na=o("entity_attention_mask"),oa=o(`, `),Hn=a("code"),sa=o("entity_token_type_ids"),aa=o(" and "),Vn=a("code"),ra=o("entity_position_ids"),ia=o(` as extra input. You can obtain those using `),en=a("a"),da=o("LukeTokenizer"),la=o("."),ca=l(),Qe=a("li"),H=a("p"),tn=a("a"),pa=o("LukeTokenizer"),ha=o(" takes "),Yn=a("code"),ua=o("entities"),ma=o(" and "),Qn=a("code"),fa=o("entity_spans"),ga=o(` (character-based start and end positions of the entities in the input text) as extra input. `),Jn=a("code"),_a=o("entities"),ka=o(` typically consist of [MASK] entities or Wikipedia entities. The brief description when inputting these entities are as follows:`),ya=l(),Je=a("ul"),nn=a("li"),Xn=a("em"),ba=o("Inputting [MASK] entities to compute entity representations"),va=o(`: The [MASK] entity is used to mask entities to be predicted during pretraining. When LUKE receives the [MASK] entity, it tries to predict the original entity by gathering the information about the entity from the input text. Therefore, the [MASK] entity can be used to address downstream tasks requiring the information of entities in text such as entity typing, relation classification, and named entity recognition.`),Ta=l(),on=a("li"),Gn=a("em"),wa=o("Inputting Wikipedia entities to compute knowledge-enhanced token representations"),La=o(`: LUKE learns rich information (or knowledge) about Wikipedia entities during pretraining and stores the information in its entity embedding. By using Wikipedia entities as input tokens, LUKE outputs token representations enriched by the information stored in the embeddings of these entities. This is particularly effective for tasks requiring real-world knowledge, such as question answering.`),Ea=l(),K=a("li"),Zn=a("p"),za=o("There are three head models for the former use case:"),xa=l(),ce=a("ul"),qe=a("li"),sn=a("a"),$a=o("LukeForEntityClassification"),qa=o(`, for tasks to classify a single entity in an input text such as entity typing, e.g. the `),Xe=a("a"),Fa=o("Open Entity dataset"),Ca=o(`. This model places a linear head on top of the output entity representation.`),Ma=l(),Fe=a("li"),an=a("a"),Pa=o("LukeForEntityPairClassification"),ja=o(`, for tasks to classify the relationship between two entities such as relation classification, e.g. the `),Ge=a("a"),Aa=o("TACRED dataset"),Ia=o(`. This model places a linear head on top of the concatenated output representation of the pair of given entities.`),Oa=l(),rn=a("li"),dn=a("a"),Sa=o("LukeForEntitySpanClassification"),Na=o(`, for tasks to classify the sequence of entity spans, such as named entity recognition (NER). This model places a linear head on top of the output entity representations. 
You can address NER using this model by inputting all possible entity spans in the text to the model.`),Da=l(),O=a("p"),ln=a("a"),Ba=o("LukeTokenizer"),Wa=o(" has a "),eo=a("code"),Ua=o("task"),Ka=o(` argument, which enables you to easily create an input to these head models by specifying `),to=a("code"),Ra=o('task="entity_classification"'),Ha=o(", "),no=a("code"),Va=o('task="entity_pair_classification"'),Ya=o(`, or `),oo=a("code"),Qa=o('task="entity_span_classification"'),Ja=o(". Please refer to the example code of each head models."),Xa=l(),pe=a("p"),Ga=o("A demo notebook on how to fine-tune "),cn=a("a"),Za=o("LukeForEntityPairClassification"),er=o(` for relation classification can be found `),Ze=a("a"),tr=o("here"),nr=o("."),or=l(),et=a("p"),sr=o(`There are also 3 notebooks available, which showcase how you can reproduce the results as reported in the paper with the HuggingFace implementation of LUKE. They can be found `),tt=a("a"),ar=o("here"),rr=o("."),Go=l(),pn=a("p"),ir=o("Example:"),Zo=l(),_(nt.$$.fragment),es=l(),V=a("p"),dr=o("This model was contributed by "),ot=a("a"),lr=o("ikuyamada"),cr=o(" and "),st=a("a"),pr=o("nielsr"),hr=o(". The original code can be found "),at=a("a"),ur=o("here"),mr=o("."),ts=l(),he=a("h2"),Ce=a("a"),so=a("span"),_(rt.$$.fragment),fr=l(),ao=a("span"),gr=o("LukeConfig"),ns=l(),F=a("div"),_(it.$$.fragment),_r=l(),dt=a("p"),kr=o("This is the configuration class to store the configuration of a "),hn=a("a"),yr=o("LukeModel"),br=o(`. It is used to instantiate a LUKE model according to the specified arguments, defining the model architecture.`),vr=l(),ue=a("p"),Tr=o("Configuration objects inherit from "),un=a("a"),wr=o("PretrainedConfig"),Lr=o(` and can be used to control the model outputs. Read the documentation from `),mn=a("a"),Er=o("PretrainedConfig"),zr=o(" for more information."),xr=l(),ro=a("p"),$r=o("Examples:"),qr=l(),_(lt.$$.fragment),os=l(),me=a("h2"),Me=a("a"),io=a("span"),_(ct.$$.fragment),Fr=l(),lo=a("span"),Cr=o("LukeTokenizer"),ss=l(),C=a("div"),_(pt.$$.fragment),Mr=l(),co=a("p"),Pr=o("Construct a LUKE tokenizer."),jr=l(),x=a("p"),Ar=o("This tokenizer inherits from "),fn=a("a"),Ir=o("RobertaTokenizer"),Or=o(` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Compared to `),gn=a("a"),Sr=o("RobertaTokenizer"),Nr=o(", "),_n=a("a"),Dr=o("LukeTokenizer"),Br=o(` also creates entity sequences, namely `),po=a("code"),Wr=o("entity_ids"),Ur=o(", "),ho=a("code"),Kr=o("entity_attention_mask"),Rr=o(", "),uo=a("code"),Hr=o("entity_token_type_ids"),Vr=o(`, and `),mo=a("code"),Yr=o("entity_position_ids"),Qr=o(" to be used by the LUKE model."),Jr=l(),Pe=a("div"),_(ht.$$.fragment),Xr=l(),fo=a("p"),Gr=o(`Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of sequences, depending on the task you want to prepare them for.`),Zr=l(),kn=a("div"),_(ut.$$.fragment),as=l(),fe=a("h2"),je=a("a"),go=a("span"),_(mt.$$.fragment),ei=l(),_o=a("span"),ti=o("LukeModel"),rs=l(),M=a("div"),_(ft.$$.fragment),ni=l(),ko=a("p"),oi=o("The bare LUKE model transformer outputting raw hidden-states for both word tokens and entities without any specific head on top."),si=l(),gt=a("p"),ai=o("This model inherits from "),yn=a("a"),ri=o("PreTrainedModel"),ii=o(`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),di=l(),_t=a("p"),li=o("This model is also a PyTorch "),kt=a("a"),ci=o("torch.nn.Module"),pi=o(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),hi=l(),S=a("div"),_(yt.$$.fragment),ui=l(),ge=a("p"),mi=o("The "),bn=a("a"),fi=o("LukeModel"),gi=o(" forward method, overrides the "),yo=a("code"),_i=o("__call__"),ki=o(" special method."),yi=l(),_(Ae.$$.fragment),bi=l(),bo=a("p"),vi=o("Examples:"),Ti=l(),_(bt.$$.fragment),is=l(),_e=a("h2"),Ie=a("a"),vo=a("span"),_(vt.$$.fragment),wi=l(),To=a("span"),Li=o("LukeForMaskedLM"),ds=l(),P=a("div"),_(Tt.$$.fragment),Ei=l(),wo=a("p"),zi=o(`The LUKE model with a language modeling head and entity prediction head on top for masked language modeling and masked entity prediction.`),xi=l(),wt=a("p"),$i=o("This model inherits from "),vn=a("a"),qi=o("PreTrainedModel"),Fi=o(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Ci=l(),Lt=a("p"),Mi=o("This model is also a PyTorch "),Et=a("a"),Pi=o("torch.nn.Module"),ji=o(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Ai=l(),J=a("div"),_(zt.$$.fragment),Ii=l(),ke=a("p"),Oi=o("The "),Tn=a("a"),Si=o("LukeForMaskedLM"),Ni=o(" forward method, overrides the "),Lo=a("code"),Di=o("__call__"),Bi=o(" special method."),Wi=l(),_(Oe.$$.fragment),ls=l(),ye=a("h2"),Se=a("a"),Eo=a("span"),_(xt.$$.fragment),Ui=l(),zo=a("span"),Ki=o("LukeForEntityClassification"),cs=l(),j=a("div"),_($t.$$.fragment),Ri=l(),xo=a("p"),Hi=o(`The LUKE model with a classification head on top (a linear layer on top of the hidden state of the first entity token) for entity classification tasks, such as Open Entity.`),Vi=l(),qt=a("p"),Yi=o("This model inherits from "),wn=a("a"),Qi=o("PreTrainedModel"),Ji=o(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Xi=l(),Ft=a("p"),Gi=o("This model is also a PyTorch "),Ct=a("a"),Zi=o("torch.nn.Module"),ed=o(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),td=l(),N=a("div"),_(Mt.$$.fragment),nd=l(),be=a("p"),od=o("The "),Ln=a("a"),sd=o("LukeForEntityClassification"),ad=o(" forward method, overrides the "),$o=a("code"),rd=o("__call__"),id=o(" special method."),dd=l(),_(Ne.$$.fragment),ld=l(),qo=a("p"),cd=o("Examples:"),pd=l(),_(Pt.$$.fragment),ps=l(),ve=a("h2"),De=a("a"),Fo=a("span"),_(jt.$$.fragment),hd=l(),Co=a("span"),ud=o("LukeForEntityPairClassification"),hs=l(),A=a("div"),_(At.$$.fragment),md=l(),Mo=a("p"),fd=o(`The LUKE model with a classification head on top (a linear layer on top of the hidden states of the two entity tokens) for entity pair classification tasks, such as TACRED.`),gd=l(),It=a("p"),_d=o("This model inherits from "),En=a("a"),kd=o("PreTrainedModel"),yd=o(`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),bd=l(),Ot=a("p"),vd=o("This model is also a PyTorch "),St=a("a"),Td=o("torch.nn.Module"),wd=o(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Ld=l(),D=a("div"),_(Nt.$$.fragment),Ed=l(),Te=a("p"),zd=o("The "),zn=a("a"),xd=o("LukeForEntityPairClassification"),$d=o(" forward method, overrides the "),Po=a("code"),qd=o("__call__"),Fd=o(" special method."),Cd=l(),_(Be.$$.fragment),Md=l(),jo=a("p"),Pd=o("Examples:"),jd=l(),_(Dt.$$.fragment),us=l(),we=a("h2"),We=a("a"),Ao=a("span"),_(Bt.$$.fragment),Ad=l(),Io=a("span"),Id=o("LukeForEntitySpanClassification"),ms=l(),I=a("div"),_(Wt.$$.fragment),Od=l(),Oo=a("p"),Sd=o(`The LUKE model with a span classification head on top (a linear layer on top of the hidden states output) for tasks such as named entity recognition.`),Nd=l(),Ut=a("p"),Dd=o("This model inherits from "),xn=a("a"),Bd=o("PreTrainedModel"),Wd=o(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Ud=l(),Kt=a("p"),Kd=o("This model is also a PyTorch "),Rt=a("a"),Rd=o("torch.nn.Module"),Hd=o(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Vd=l(),B=a("div"),_(Ht.$$.fragment),Yd=l(),Le=a("p"),Qd=o("The "),$n=a("a"),Jd=o("LukeForEntitySpanClassification"),Xd=o(" forward method, overrides the "),So=a("code"),Gd=o("__call__"),Zd=o(" special method."),el=l(),_(Ue.$$.fragment),tl=l(),No=a("p"),nl=o("Examples:"),ol=l(),_(Vt.$$.fragment),this.h()},l(n){const p=rp('[data-svelte="svelte-1phssyn"]',document.head);h=r(p,"META",{name:!0,content:!0}),p.forEach(t),L=c(n),m=r(n,"H1",{class:!0});var Yt=i(m);w=r(Yt,"A",{id:!0,class:!0,href:!0});var Do=i(w);E=r(Do,"SPAN",{});var Bo=i(E);k(g.$$.fragment,Bo),Bo.forEach(t),Do.forEach(t),f=c(Yt),z=r(Yt,"SPAN",{});var Wo=i(z);Ds=s(Wo,"LUKE"),Wo.forEach(t),Yt.forEach(t),Ho=c(n),le=r(n,"H2",{class:!0});var Qt=i(le);xe=r(Qt,"A",{id:!0,class:!0,href:!0});var il=i(xe);Nn=r(il,"SPAN",{});var dl=i(Nn);k(He.$$.fragment,dl),dl.forEach(t),il.forEach(t),Bs=c(Qt),Dn=r(Qt,"SPAN",{});var ll=i(Dn);Ws=s(ll,"Overview"),ll.forEach(t),Qt.forEach(t),Vo=c(n),$e=r(n,"P",{});var gs=i($e);Us=s(gs,"The LUKE model was proposed in "),Ve=r(gs,"A",{href:!0,rel:!0});var cl=i(Ve);Ks=s(cl,"LUKE: Deep Contextualized Entity Representations with Entity-aware Self-attention"),cl.forEach(t),Rs=s(gs,` by Ikuya Yamada, Akari Asai, Hiroyuki Shindo, Hideaki Takeda and Yuji Matsumoto. It is based on RoBERTa and adds entity embeddings as well as an entity-aware self-attention mechanism, which helps improve performance on various downstream tasks involving reasoning about entities such as named entity recognition, extractive and cloze-style question answering, entity typing, and relation classification.`),gs.forEach(t),Yo=c(n),Jt=r(n,"P",{});var pl=i(Jt);Hs=s(pl,"The abstract from the paper is the following:"),pl.forEach(t),Qo=c(n),Xt=r(n,"P",{});var hl=i(Xt);Bn=r(hl,"EM",{});var ul=i(Bn);Vs=s(ul,`Entity representations are useful in natural language tasks involving entities. In this paper, we propose new pretrained contextualized representations of words and entities based on the bidirectional transformer. 
The proposed model treats words and entities in a given text as independent tokens, and outputs contextualized representations of them. Our model is trained using a new pretraining task based on the masked language model of BERT. The task involves predicting randomly masked words and entities in a large entity-annotated corpus retrieved from Wikipedia. We also propose an entity-aware self-attention mechanism that is an extension of the self-attention mechanism of the transformer, and considers the types of tokens (words or entities) when computing attention scores. The proposed model achieves impressive empirical performance on a wide range of entity-related tasks. In particular, it obtains state-of-the-art results on five well-known datasets: Open Entity (entity typing), TACRED (relation classification), CoNLL-2003 (named entity recognition), ReCoRD (cloze-style question answering), and SQuAD 1.1 (extractive question answering).`),ul.forEach(t),hl.forEach(t),Jo=c(n),Gt=r(n,"P",{});var ml=i(Gt);Ys=s(ml,"Tips:"),ml.forEach(t),Xo=c(n),R=r(n,"UL",{});var Ke=i(R);Wn=r(Ke,"LI",{});var fl=i(Wn);Ye=r(fl,"P",{});var _s=i(Ye);Qs=s(_s,"This implementation is the same as "),Zt=r(_s,"A",{href:!0});var gl=i(Zt);Js=s(gl,"RobertaModel"),gl.forEach(t),Xs=s(_s,` with the addition of entity embeddings as well as an entity-aware self-attention mechanism, which improves performance on tasks involving reasoning about entities.`),_s.forEach(t),fl.forEach(t),Gs=c(Ke),Un=r(Ke,"LI",{});var _l=i(Un);q=r(_l,"P",{});var Y=i(q);Zs=s(Y,"LUKE treats entities as input tokens; therefore, it takes "),Kn=r(Y,"CODE",{});var kl=i(Kn);ea=s(kl,"entity_ids"),kl.forEach(t),ta=s(Y,", "),Rn=r(Y,"CODE",{});var yl=i(Rn);na=s(yl,"entity_attention_mask"),yl.forEach(t),oa=s(Y,`, `),Hn=r(Y,"CODE",{});var bl=i(Hn);sa=s(bl,"entity_token_type_ids"),bl.forEach(t),aa=s(Y," and "),Vn=r(Y,"CODE",{});var vl=i(Vn);ra=s(vl,"entity_position_ids"),vl.forEach(t),ia=s(Y,` as extra input. You can obtain those using `),en=r(Y,"A",{href:!0});var Tl=i(en);da=s(Tl,"LukeTokenizer"),Tl.forEach(t),la=s(Y,"."),Y.forEach(t),_l.forEach(t),ca=c(Ke),Qe=r(Ke,"LI",{});var ks=i(Qe);H=r(ks,"P",{});var Ee=i(H);tn=r(Ee,"A",{href:!0});var wl=i(tn);pa=s(wl,"LukeTokenizer"),wl.forEach(t),ha=s(Ee," takes "),Yn=r(Ee,"CODE",{});var Ll=i(Yn);ua=s(Ll,"entities"),Ll.forEach(t),ma=s(Ee," and "),Qn=r(Ee,"CODE",{});var El=i(Qn);fa=s(El,"entity_spans"),El.forEach(t),ga=s(Ee,` (character-based start and end positions of the entities in the input text) as extra input. `),Jn=r(Ee,"CODE",{});var zl=i(Jn);_a=s(zl,"entities"),zl.forEach(t),ka=s(Ee,` typically consist of [MASK] entities or Wikipedia entities. The brief description when inputting these entities are as follows:`),Ee.forEach(t),ya=c(ks),Je=r(ks,"UL",{});var ys=i(Je);nn=r(ys,"LI",{});var sl=i(nn);Xn=r(sl,"EM",{});var xl=i(Xn);ba=s(xl,"Inputting [MASK] entities to compute entity representations"),xl.forEach(t),va=s(sl,`: The [MASK] entity is used to mask entities to be predicted during pretraining. When LUKE receives the [MASK] entity, it tries to predict the original entity by gathering the information about the entity from the input text. 
Therefore, the [MASK] entity can be used to address downstream tasks requiring the information of entities in text such as entity typing, relation classification, and named entity recognition.`),sl.forEach(t),Ta=c(ys),on=r(ys,"LI",{});var al=i(on);Gn=r(al,"EM",{});var $l=i(Gn);wa=s($l,"Inputting Wikipedia entities to compute knowledge-enhanced token representations"),$l.forEach(t),La=s(al,`: LUKE learns rich information (or knowledge) about Wikipedia entities during pretraining and stores the information in its entity embedding. By using Wikipedia entities as input tokens, LUKE outputs token representations enriched by the information stored in the embeddings of these entities. This is particularly effective for tasks requiring real-world knowledge, such as question answering.`),al.forEach(t),ys.forEach(t),ks.forEach(t),Ea=c(Ke),K=r(Ke,"LI",{});var X=i(K);Zn=r(X,"P",{});var ql=i(Zn);za=s(ql,"There are three head models for the former use case:"),ql.forEach(t),xa=c(X),ce=r(X,"UL",{});var qn=i(ce);qe=r(qn,"LI",{});var Uo=i(qe);sn=r(Uo,"A",{href:!0});var Fl=i(sn);$a=s(Fl,"LukeForEntityClassification"),Fl.forEach(t),qa=s(Uo,`, for tasks to classify a single entity in an input text such as entity typing, e.g. the `),Xe=r(Uo,"A",{href:!0,rel:!0});var Cl=i(Xe);Fa=s(Cl,"Open Entity dataset"),Cl.forEach(t),Ca=s(Uo,`. This model places a linear head on top of the output entity representation.`),Uo.forEach(t),Ma=c(qn),Fe=r(qn,"LI",{});var Ko=i(Fe);an=r(Ko,"A",{href:!0});var Ml=i(an);Pa=s(Ml,"LukeForEntityPairClassification"),Ml.forEach(t),ja=s(Ko,`, for tasks to classify the relationship between two entities such as relation classification, e.g. the `),Ge=r(Ko,"A",{href:!0,rel:!0});var Pl=i(Ge);Aa=s(Pl,"TACRED dataset"),Pl.forEach(t),Ia=s(Ko,`. This model places a linear head on top of the concatenated output representation of the pair of given entities.`),Ko.forEach(t),Oa=c(qn),rn=r(qn,"LI",{});var rl=i(rn);dn=r(rl,"A",{href:!0});var jl=i(dn);Sa=s(jl,"LukeForEntitySpanClassification"),jl.forEach(t),Na=s(rl,`, for tasks to classify the sequence of entity spans, such as named entity recognition (NER). This model places a linear head on top of the output entity representations. You can address NER using this model by inputting all possible entity spans in the text to the model.`),rl.forEach(t),qn.forEach(t),Da=c(X),O=r(X,"P",{});var Q=i(O);ln=r(Q,"A",{href:!0});var Al=i(ln);Ba=s(Al,"LukeTokenizer"),Al.forEach(t),Wa=s(Q," has a "),eo=r(Q,"CODE",{});var Il=i(eo);Ua=s(Il,"task"),Il.forEach(t),Ka=s(Q,` argument, which enables you to easily create an input to these head models by specifying `),to=r(Q,"CODE",{});var Ol=i(to);Ra=s(Ol,'task="entity_classification"'),Ol.forEach(t),Ha=s(Q,", "),no=r(Q,"CODE",{});var Sl=i(no);Va=s(Sl,'task="entity_pair_classification"'),Sl.forEach(t),Ya=s(Q,`, or `),oo=r(Q,"CODE",{});var Nl=i(oo);Qa=s(Nl,'task="entity_span_classification"'),Nl.forEach(t),Ja=s(Q,". Please refer to the example code of each head models."),Q.forEach(t),Xa=c(X),pe=r(X,"P",{});var Fn=i(pe);Ga=s(Fn,"A demo notebook on how to fine-tune "),cn=r(Fn,"A",{href:!0});var Dl=i(cn);Za=s(Dl,"LukeForEntityPairClassification"),Dl.forEach(t),er=s(Fn,` for relation classification can be found `),Ze=r(Fn,"A",{href:!0,rel:!0});var Bl=i(Ze);tr=s(Bl,"here"),Bl.forEach(t),nr=s(Fn,"."),Fn.forEach(t),or=c(X),et=r(X,"P",{});var bs=i(et);sr=s(bs,`There are also 3 notebooks available, which showcase how you can reproduce the results as reported in the paper with the HuggingFace implementation of LUKE. 
They can be found `),tt=r(bs,"A",{href:!0,rel:!0});var Wl=i(tt);ar=s(Wl,"here"),Wl.forEach(t),rr=s(bs,"."),bs.forEach(t),X.forEach(t),Ke.forEach(t),Go=c(n),pn=r(n,"P",{});var Ul=i(pn);ir=s(Ul,"Example:"),Ul.forEach(t),Zo=c(n),k(nt.$$.fragment,n),es=c(n),V=r(n,"P",{});var Re=i(V);dr=s(Re,"This model was contributed by "),ot=r(Re,"A",{href:!0,rel:!0});var Kl=i(ot);lr=s(Kl,"ikuyamada"),Kl.forEach(t),cr=s(Re," and "),st=r(Re,"A",{href:!0,rel:!0});var Rl=i(st);pr=s(Rl,"nielsr"),Rl.forEach(t),hr=s(Re,". The original code can be found "),at=r(Re,"A",{href:!0,rel:!0});var Hl=i(at);ur=s(Hl,"here"),Hl.forEach(t),mr=s(Re,"."),Re.forEach(t),ts=c(n),he=r(n,"H2",{class:!0});var vs=i(he);Ce=r(vs,"A",{id:!0,class:!0,href:!0});var Vl=i(Ce);so=r(Vl,"SPAN",{});var Yl=i(so);k(rt.$$.fragment,Yl),Yl.forEach(t),Vl.forEach(t),fr=c(vs),ao=r(vs,"SPAN",{});var Ql=i(ao);gr=s(Ql,"LukeConfig"),Ql.forEach(t),vs.forEach(t),ns=c(n),F=r(n,"DIV",{class:!0});var G=i(F);k(it.$$.fragment,G),_r=c(G),dt=r(G,"P",{});var Ts=i(dt);kr=s(Ts,"This is the configuration class to store the configuration of a "),hn=r(Ts,"A",{href:!0});var Jl=i(hn);yr=s(Jl,"LukeModel"),Jl.forEach(t),br=s(Ts,`. It is used to instantiate a LUKE model according to the specified arguments, defining the model architecture.`),Ts.forEach(t),vr=c(G),ue=r(G,"P",{});var Cn=i(ue);Tr=s(Cn,"Configuration objects inherit from "),un=r(Cn,"A",{href:!0});var Xl=i(un);wr=s(Xl,"PretrainedConfig"),Xl.forEach(t),Lr=s(Cn,` and can be used to control the model outputs. Read the documentation from `),mn=r(Cn,"A",{href:!0});var Gl=i(mn);Er=s(Gl,"PretrainedConfig"),Gl.forEach(t),zr=s(Cn," for more information."),Cn.forEach(t),xr=c(G),ro=r(G,"P",{});var Zl=i(ro);$r=s(Zl,"Examples:"),Zl.forEach(t),qr=c(G),k(lt.$$.fragment,G),G.forEach(t),os=c(n),me=r(n,"H2",{class:!0});var ws=i(me);Me=r(ws,"A",{id:!0,class:!0,href:!0});var ec=i(Me);io=r(ec,"SPAN",{});var tc=i(io);k(ct.$$.fragment,tc),tc.forEach(t),ec.forEach(t),Fr=c(ws),lo=r(ws,"SPAN",{});var nc=i(lo);Cr=s(nc,"LukeTokenizer"),nc.forEach(t),ws.forEach(t),ss=c(n),C=r(n,"DIV",{class:!0});var Z=i(C);k(pt.$$.fragment,Z),Mr=c(Z),co=r(Z,"P",{});var oc=i(co);Pr=s(oc,"Construct a LUKE tokenizer."),oc.forEach(t),jr=c(Z),x=r(Z,"P",{});var $=i(x);Ar=s($,"This tokenizer inherits from "),fn=r($,"A",{href:!0});var sc=i(fn);Ir=s(sc,"RobertaTokenizer"),sc.forEach(t),Or=s($,` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. 
Compared to `),gn=r($,"A",{href:!0});var ac=i(gn);Sr=s(ac,"RobertaTokenizer"),ac.forEach(t),Nr=s($,", "),_n=r($,"A",{href:!0});var rc=i(_n);Dr=s(rc,"LukeTokenizer"),rc.forEach(t),Br=s($,` also creates entity sequences, namely `),po=r($,"CODE",{});var ic=i(po);Wr=s(ic,"entity_ids"),ic.forEach(t),Ur=s($,", "),ho=r($,"CODE",{});var dc=i(ho);Kr=s(dc,"entity_attention_mask"),dc.forEach(t),Rr=s($,", "),uo=r($,"CODE",{});var lc=i(uo);Hr=s(lc,"entity_token_type_ids"),lc.forEach(t),Vr=s($,`, and `),mo=r($,"CODE",{});var cc=i(mo);Yr=s(cc,"entity_position_ids"),cc.forEach(t),Qr=s($," to be used by the LUKE model."),$.forEach(t),Jr=c(Z),Pe=r(Z,"DIV",{class:!0});var Ls=i(Pe);k(ht.$$.fragment,Ls),Xr=c(Ls),fo=r(Ls,"P",{});var pc=i(fo);Gr=s(pc,`Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of sequences, depending on the task you want to prepare them for.`),pc.forEach(t),Ls.forEach(t),Zr=c(Z),kn=r(Z,"DIV",{class:!0});var hc=i(kn);k(ut.$$.fragment,hc),hc.forEach(t),Z.forEach(t),as=c(n),fe=r(n,"H2",{class:!0});var Es=i(fe);je=r(Es,"A",{id:!0,class:!0,href:!0});var uc=i(je);go=r(uc,"SPAN",{});var mc=i(go);k(mt.$$.fragment,mc),mc.forEach(t),uc.forEach(t),ei=c(Es),_o=r(Es,"SPAN",{});var fc=i(_o);ti=s(fc,"LukeModel"),fc.forEach(t),Es.forEach(t),rs=c(n),M=r(n,"DIV",{class:!0});var ee=i(M);k(ft.$$.fragment,ee),ni=c(ee),ko=r(ee,"P",{});var gc=i(ko);oi=s(gc,"The bare LUKE model transformer outputting raw hidden-states for both word tokens and entities without any specific head on top."),gc.forEach(t),si=c(ee),gt=r(ee,"P",{});var zs=i(gt);ai=s(zs,"This model inherits from "),yn=r(zs,"A",{href:!0});var _c=i(yn);ri=s(_c,"PreTrainedModel"),_c.forEach(t),ii=s(zs,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),zs.forEach(t),di=c(ee),_t=r(ee,"P",{});var xs=i(_t);li=s(xs,"This model is also a PyTorch "),kt=r(xs,"A",{href:!0,rel:!0});var kc=i(kt);ci=s(kc,"torch.nn.Module"),kc.forEach(t),pi=s(xs,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),xs.forEach(t),hi=c(ee),S=r(ee,"DIV",{class:!0});var te=i(S);k(yt.$$.fragment,te),ui=c(te),ge=r(te,"P",{});var Mn=i(ge);mi=s(Mn,"The "),bn=r(Mn,"A",{href:!0});var yc=i(bn);fi=s(yc,"LukeModel"),yc.forEach(t),gi=s(Mn," forward method, overrides the "),yo=r(Mn,"CODE",{});var bc=i(yo);_i=s(bc,"__call__"),bc.forEach(t),ki=s(Mn," special method."),Mn.forEach(t),yi=c(te),k(Ae.$$.fragment,te),bi=c(te),bo=r(te,"P",{});var vc=i(bo);vi=s(vc,"Examples:"),vc.forEach(t),Ti=c(te),k(bt.$$.fragment,te),te.forEach(t),ee.forEach(t),is=c(n),_e=r(n,"H2",{class:!0});var $s=i(_e);Ie=r($s,"A",{id:!0,class:!0,href:!0});var Tc=i(Ie);vo=r(Tc,"SPAN",{});var wc=i(vo);k(vt.$$.fragment,wc),wc.forEach(t),Tc.forEach(t),wi=c($s),To=r($s,"SPAN",{});var Lc=i(To);Li=s(Lc,"LukeForMaskedLM"),Lc.forEach(t),$s.forEach(t),ds=c(n),P=r(n,"DIV",{class:!0});var ne=i(P);k(Tt.$$.fragment,ne),Ei=c(ne),wo=r(ne,"P",{});var Ec=i(wo);zi=s(Ec,`The LUKE model with a language modeling head and entity prediction head on top for masked language modeling and masked entity prediction.`),Ec.forEach(t),xi=c(ne),wt=r(ne,"P",{});var qs=i(wt);$i=s(qs,"This model inherits from "),vn=r(qs,"A",{href:!0});var zc=i(vn);qi=s(zc,"PreTrainedModel"),zc.forEach(t),Fi=s(qs,`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),qs.forEach(t),Ci=c(ne),Lt=r(ne,"P",{});var Fs=i(Lt);Mi=s(Fs,"This model is also a PyTorch "),Et=r(Fs,"A",{href:!0,rel:!0});var xc=i(Et);Pi=s(xc,"torch.nn.Module"),xc.forEach(t),ji=s(Fs,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Fs.forEach(t),Ai=c(ne),J=r(ne,"DIV",{class:!0});var Pn=i(J);k(zt.$$.fragment,Pn),Ii=c(Pn),ke=r(Pn,"P",{});var jn=i(ke);Oi=s(jn,"The "),Tn=r(jn,"A",{href:!0});var $c=i(Tn);Si=s($c,"LukeForMaskedLM"),$c.forEach(t),Ni=s(jn," forward method, overrides the "),Lo=r(jn,"CODE",{});var qc=i(Lo);Di=s(qc,"__call__"),qc.forEach(t),Bi=s(jn," special method."),jn.forEach(t),Wi=c(Pn),k(Oe.$$.fragment,Pn),Pn.forEach(t),ne.forEach(t),ls=c(n),ye=r(n,"H2",{class:!0});var Cs=i(ye);Se=r(Cs,"A",{id:!0,class:!0,href:!0});var Fc=i(Se);Eo=r(Fc,"SPAN",{});var Cc=i(Eo);k(xt.$$.fragment,Cc),Cc.forEach(t),Fc.forEach(t),Ui=c(Cs),zo=r(Cs,"SPAN",{});var Mc=i(zo);Ki=s(Mc,"LukeForEntityClassification"),Mc.forEach(t),Cs.forEach(t),cs=c(n),j=r(n,"DIV",{class:!0});var oe=i(j);k($t.$$.fragment,oe),Ri=c(oe),xo=r(oe,"P",{});var Pc=i(xo);Hi=s(Pc,`The LUKE model with a classification head on top (a linear layer on top of the hidden state of the first entity token) for entity classification tasks, such as Open Entity.`),Pc.forEach(t),Vi=c(oe),qt=r(oe,"P",{});var Ms=i(qt);Yi=s(Ms,"This model inherits from "),wn=r(Ms,"A",{href:!0});var jc=i(wn);Qi=s(jc,"PreTrainedModel"),jc.forEach(t),Ji=s(Ms,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Ms.forEach(t),Xi=c(oe),Ft=r(oe,"P",{});var Ps=i(Ft);Gi=s(Ps,"This model is also a PyTorch "),Ct=r(Ps,"A",{href:!0,rel:!0});var Ac=i(Ct);Zi=s(Ac,"torch.nn.Module"),Ac.forEach(t),ed=s(Ps,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Ps.forEach(t),td=c(oe),N=r(oe,"DIV",{class:!0});var se=i(N);k(Mt.$$.fragment,se),nd=c(se),be=r(se,"P",{});var An=i(be);od=s(An,"The "),Ln=r(An,"A",{href:!0});var Ic=i(Ln);sd=s(Ic,"LukeForEntityClassification"),Ic.forEach(t),ad=s(An," forward method, overrides the "),$o=r(An,"CODE",{});var Oc=i($o);rd=s(Oc,"__call__"),Oc.forEach(t),id=s(An," special method."),An.forEach(t),dd=c(se),k(Ne.$$.fragment,se),ld=c(se),qo=r(se,"P",{});var Sc=i(qo);cd=s(Sc,"Examples:"),Sc.forEach(t),pd=c(se),k(Pt.$$.fragment,se),se.forEach(t),oe.forEach(t),ps=c(n),ve=r(n,"H2",{class:!0});var js=i(ve);De=r(js,"A",{id:!0,class:!0,href:!0});var Nc=i(De);Fo=r(Nc,"SPAN",{});var Dc=i(Fo);k(jt.$$.fragment,Dc),Dc.forEach(t),Nc.forEach(t),hd=c(js),Co=r(js,"SPAN",{});var Bc=i(Co);ud=s(Bc,"LukeForEntityPairClassification"),Bc.forEach(t),js.forEach(t),hs=c(n),A=r(n,"DIV",{class:!0});var ae=i(A);k(At.$$.fragment,ae),md=c(ae),Mo=r(ae,"P",{});var Wc=i(Mo);fd=s(Wc,`The LUKE model with a classification head on top (a linear layer on top of the hidden states of the two entity tokens) for entity pair classification tasks, such as TACRED.`),Wc.forEach(t),gd=c(ae),It=r(ae,"P",{});var As=i(It);_d=s(As,"This model inherits from "),En=r(As,"A",{href:!0});var Uc=i(En);kd=s(Uc,"PreTrainedModel"),Uc.forEach(t),yd=s(As,`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),As.forEach(t),bd=c(ae),Ot=r(ae,"P",{});var Is=i(Ot);vd=s(Is,"This model is also a PyTorch "),St=r(Is,"A",{href:!0,rel:!0});var Kc=i(St);Td=s(Kc,"torch.nn.Module"),Kc.forEach(t),wd=s(Is,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Is.forEach(t),Ld=c(ae),D=r(ae,"DIV",{class:!0});var re=i(D);k(Nt.$$.fragment,re),Ed=c(re),Te=r(re,"P",{});var In=i(Te);zd=s(In,"The "),zn=r(In,"A",{href:!0});var Rc=i(zn);xd=s(Rc,"LukeForEntityPairClassification"),Rc.forEach(t),$d=s(In," forward method, overrides the "),Po=r(In,"CODE",{});var Hc=i(Po);qd=s(Hc,"__call__"),Hc.forEach(t),Fd=s(In," special method."),In.forEach(t),Cd=c(re),k(Be.$$.fragment,re),Md=c(re),jo=r(re,"P",{});var Vc=i(jo);Pd=s(Vc,"Examples:"),Vc.forEach(t),jd=c(re),k(Dt.$$.fragment,re),re.forEach(t),ae.forEach(t),us=c(n),we=r(n,"H2",{class:!0});var Os=i(we);We=r(Os,"A",{id:!0,class:!0,href:!0});var Yc=i(We);Ao=r(Yc,"SPAN",{});var Qc=i(Ao);k(Bt.$$.fragment,Qc),Qc.forEach(t),Yc.forEach(t),Ad=c(Os),Io=r(Os,"SPAN",{});var Jc=i(Io);Id=s(Jc,"LukeForEntitySpanClassification"),Jc.forEach(t),Os.forEach(t),ms=c(n),I=r(n,"DIV",{class:!0});var ie=i(I);k(Wt.$$.fragment,ie),Od=c(ie),Oo=r(ie,"P",{});var Xc=i(Oo);Sd=s(Xc,`The LUKE model with a span classification head on top (a linear layer on top of the hidden states output) for tasks such as named entity recognition.`),Xc.forEach(t),Nd=c(ie),Ut=r(ie,"P",{});var Ss=i(Ut);Dd=s(Ss,"This model inherits from "),xn=r(Ss,"A",{href:!0});var Gc=i(xn);Bd=s(Gc,"PreTrainedModel"),Gc.forEach(t),Wd=s(Ss,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Ss.forEach(t),Ud=c(ie),Kt=r(ie,"P",{});var Ns=i(Kt);Kd=s(Ns,"This model is also a PyTorch "),Rt=r(Ns,"A",{href:!0,rel:!0});var Zc=i(Rt);Rd=s(Zc,"torch.nn.Module"),Zc.forEach(t),Hd=s(Ns,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Ns.forEach(t),Vd=c(ie),B=r(ie,"DIV",{class:!0});var de=i(B);k(Ht.$$.fragment,de),Yd=c(de),Le=r(de,"P",{});var On=i(Le);Qd=s(On,"The "),$n=r(On,"A",{href:!0});var ep=i($n);Jd=s(ep,"LukeForEntitySpanClassification"),ep.forEach(t),Xd=s(On," forward method, overrides the "),So=r(On,"CODE",{});var tp=i(So);Gd=s(tp,"__call__"),tp.forEach(t),Zd=s(On," special method."),On.forEach(t),el=c(de),k(Ue.$$.fragment,de),tl=c(de),No=r(de,"P",{});var np=i(No);nl=s(np,"Examples:"),np.forEach(t),ol=c(de),k(Vt.$$.fragment,de),de.forEach(t),ie.forEach(t),this.h()},h(){d(h,"name","hf:doc:metadata"),d(h,"content",JSON.stringify(up)),d(w,"id","luke"),d(w,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(w,"href","#luke"),d(m,"class","relative group"),d(xe,"id","overview"),d(xe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(xe,"href","#overview"),d(le,"class","relative group"),d(Ve,"href","https://arxiv.org/abs/2010.01057"),d(Ve,"rel","nofollow"),d(Zt,"href","/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaModel"),d(en,"href","/docs/transformers/pr_16143/en/model_doc/luke#transformers.LukeTokenizer"),d(tn,"href","/docs/transformers/pr_16143/en/model_doc/luke#transformers.LukeTokenizer"),d(sn,"href","/docs/transformers/pr_16143/en/model_doc/luke#transformers.LukeForEntityClassification"),d(Xe,"href","https://www.cs.utexas.edu/~eunsol/html_pages/open_entity.html"),d(Xe,"rel","nofollow"),d(an,"href","/docs/transformers/pr_16143/en/model_doc/luke#transformers.LukeForEntityPairClassification"),d(Ge,"href","https://nlp.stanford.edu/projects/tacred/"),d(Ge,"rel","nofollow"),d(dn,"href","/docs/transformers/pr_16143/en/model_doc/luke#transformers.LukeForEntitySpanClassification"),d(ln,"href","/docs/transformers/pr_16143/en/model_doc/luke#transformers.LukeTokenizer"),d(cn,"href","/docs/transformers/pr_16143/en/model_doc/luke#transformers.LukeForEntityPairClassification"),d(Ze,"href","https://github.com/NielsRogge/Transformers-Tutorials/tree/master/LUKE"),d(Ze,"rel","nofollow"),d(tt,"href","https://github.com/studio-ousia/luke/tree/master/notebooks"),d(tt,"rel","nofollow"),d(ot,"href","https://huggingface.co/ikuyamada"),d(ot,"rel","nofollow"),d(st,"href","https://huggingface.co/nielsr"),d(st,"rel","nofollow"),d(at,"href","https://github.com/studio-ousia/luke"),d(at,"rel","nofollow"),d(Ce,"id","transformers.LukeConfig"),d(Ce,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(Ce,"href","#transformers.LukeConfig"),d(he,"class","relative group"),d(hn,"href","/docs/transformers/pr_16143/en/model_doc/luke#transformers.LukeModel"),d(un,"href","/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig"),d(mn,"href","/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig"),d(F,"class","docstring"),d(Me,"id","transformers.LukeTokenizer"),d(Me,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full"),d(Me,"href","#transformers.LukeTokenizer"),d(me,"class","relative group"),d(fn,"href","/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaTokenizer"),d(gn,"href","/docs/transformers/pr_16143/en/model_doc/roberta#transformers.RobertaTokenizer"),d(_n,"href","/docs/transformers/pr_16143/en/model_doc/luke#transformers.LukeTokenizer"),d(Pe,"class","docstring"),d(kn,"class","docstring"),d(C,"class","docstring"),d(je,"id","transformers.LukeModel"),d(je,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(je,"href","#transformers.LukeModel"),d(fe,"class","relative group"),d(yn,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel"),d(kt,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),d(kt,"rel","nofollow"),d(bn,"href","/docs/transformers/pr_16143/en/model_doc/luke#transformers.LukeModel"),d(S,"class","docstring"),d(M,"class","docstring"),d(Ie,"id","transformers.LukeForMaskedLM"),d(Ie,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(Ie,"href","#transformers.LukeForMaskedLM"),d(_e,"class","relative group"),d(vn,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel"),d(Et,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),d(Et,"rel","nofollow"),d(Tn,"href","/docs/transformers/pr_16143/en/model_doc/luke#transformers.LukeForMaskedLM"),d(J,"class","docstring"),d(P,"class","docstring"),d(Se,"id","transformers.LukeForEntityClassification"),d(Se,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(Se,"href","#transformers.LukeForEntityClassification"),d(ye,"class","relative group"),d(wn,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel"),d(Ct,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),d(Ct,"rel","nofollow"),d(Ln,"href","/docs/transformers/pr_16143/en/model_doc/luke#transformers.LukeForEntityClassification"),d(N,"class","docstring"),d(j,"class","docstring"),d(De,"id","transformers.LukeForEntityPairClassification"),d(De,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(De,"href","#transformers.LukeForEntityPairClassification"),d(ve,"class","relative group"),d(En,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel"),d(St,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),d(St,"rel","nofollow"),d(zn,"href","/docs/transformers/pr_16143/en/model_doc/luke#transformers.LukeForEntityPairClassification"),d(D,"class","docstring"),d(A,"class","docstring"),d(We,"id","transformers.LukeForEntitySpanClassification"),d(We,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(We,"href","#transformers.LukeForEntitySpanClassification"),d(we,"class","relative 
group"),d(xn,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel"),d(Rt,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),d(Rt,"rel","nofollow"),d($n,"href","/docs/transformers/pr_16143/en/model_doc/luke#transformers.LukeForEntitySpanClassification"),d(B,"class","docstring"),d(I,"class","docstring")},m(n,p){e(document.head,h),u(n,L,p),u(n,m,p),e(m,w),e(w,E),y(g,E,null),e(m,f),e(m,z),e(z,Ds),u(n,Ho,p),u(n,le,p),e(le,xe),e(xe,Nn),y(He,Nn,null),e(le,Bs),e(le,Dn),e(Dn,Ws),u(n,Vo,p),u(n,$e,p),e($e,Us),e($e,Ve),e(Ve,Ks),e($e,Rs),u(n,Yo,p),u(n,Jt,p),e(Jt,Hs),u(n,Qo,p),u(n,Xt,p),e(Xt,Bn),e(Bn,Vs),u(n,Jo,p),u(n,Gt,p),e(Gt,Ys),u(n,Xo,p),u(n,R,p),e(R,Wn),e(Wn,Ye),e(Ye,Qs),e(Ye,Zt),e(Zt,Js),e(Ye,Xs),e(R,Gs),e(R,Un),e(Un,q),e(q,Zs),e(q,Kn),e(Kn,ea),e(q,ta),e(q,Rn),e(Rn,na),e(q,oa),e(q,Hn),e(Hn,sa),e(q,aa),e(q,Vn),e(Vn,ra),e(q,ia),e(q,en),e(en,da),e(q,la),e(R,ca),e(R,Qe),e(Qe,H),e(H,tn),e(tn,pa),e(H,ha),e(H,Yn),e(Yn,ua),e(H,ma),e(H,Qn),e(Qn,fa),e(H,ga),e(H,Jn),e(Jn,_a),e(H,ka),e(Qe,ya),e(Qe,Je),e(Je,nn),e(nn,Xn),e(Xn,ba),e(nn,va),e(Je,Ta),e(Je,on),e(on,Gn),e(Gn,wa),e(on,La),e(R,Ea),e(R,K),e(K,Zn),e(Zn,za),e(K,xa),e(K,ce),e(ce,qe),e(qe,sn),e(sn,$a),e(qe,qa),e(qe,Xe),e(Xe,Fa),e(qe,Ca),e(ce,Ma),e(ce,Fe),e(Fe,an),e(an,Pa),e(Fe,ja),e(Fe,Ge),e(Ge,Aa),e(Fe,Ia),e(ce,Oa),e(ce,rn),e(rn,dn),e(dn,Sa),e(rn,Na),e(K,Da),e(K,O),e(O,ln),e(ln,Ba),e(O,Wa),e(O,eo),e(eo,Ua),e(O,Ka),e(O,to),e(to,Ra),e(O,Ha),e(O,no),e(no,Va),e(O,Ya),e(O,oo),e(oo,Qa),e(O,Ja),e(K,Xa),e(K,pe),e(pe,Ga),e(pe,cn),e(cn,Za),e(pe,er),e(pe,Ze),e(Ze,tr),e(pe,nr),e(K,or),e(K,et),e(et,sr),e(et,tt),e(tt,ar),e(et,rr),u(n,Go,p),u(n,pn,p),e(pn,ir),u(n,Zo,p),y(nt,n,p),u(n,es,p),u(n,V,p),e(V,dr),e(V,ot),e(ot,lr),e(V,cr),e(V,st),e(st,pr),e(V,hr),e(V,at),e(at,ur),e(V,mr),u(n,ts,p),u(n,he,p),e(he,Ce),e(Ce,so),y(rt,so,null),e(he,fr),e(he,ao),e(ao,gr),u(n,ns,p),u(n,F,p),y(it,F,null),e(F,_r),e(F,dt),e(dt,kr),e(dt,hn),e(hn,yr),e(dt,br),e(F,vr),e(F,ue),e(ue,Tr),e(ue,un),e(un,wr),e(ue,Lr),e(ue,mn),e(mn,Er),e(ue,zr),e(F,xr),e(F,ro),e(ro,$r),e(F,qr),y(lt,F,null),u(n,os,p),u(n,me,p),e(me,Me),e(Me,io),y(ct,io,null),e(me,Fr),e(me,lo),e(lo,Cr),u(n,ss,p),u(n,C,p),y(pt,C,null),e(C,Mr),e(C,co),e(co,Pr),e(C,jr),e(C,x),e(x,Ar),e(x,fn),e(fn,Ir),e(x,Or),e(x,gn),e(gn,Sr),e(x,Nr),e(x,_n),e(_n,Dr),e(x,Br),e(x,po),e(po,Wr),e(x,Ur),e(x,ho),e(ho,Kr),e(x,Rr),e(x,uo),e(uo,Hr),e(x,Vr),e(x,mo),e(mo,Yr),e(x,Qr),e(C,Jr),e(C,Pe),y(ht,Pe,null),e(Pe,Xr),e(Pe,fo),e(fo,Gr),e(C,Zr),e(C,kn),y(ut,kn,null),u(n,as,p),u(n,fe,p),e(fe,je),e(je,go),y(mt,go,null),e(fe,ei),e(fe,_o),e(_o,ti),u(n,rs,p),u(n,M,p),y(ft,M,null),e(M,ni),e(M,ko),e(ko,oi),e(M,si),e(M,gt),e(gt,ai),e(gt,yn),e(yn,ri),e(gt,ii),e(M,di),e(M,_t),e(_t,li),e(_t,kt),e(kt,ci),e(_t,pi),e(M,hi),e(M,S),y(yt,S,null),e(S,ui),e(S,ge),e(ge,mi),e(ge,bn),e(bn,fi),e(ge,gi),e(ge,yo),e(yo,_i),e(ge,ki),e(S,yi),y(Ae,S,null),e(S,bi),e(S,bo),e(bo,vi),e(S,Ti),y(bt,S,null),u(n,is,p),u(n,_e,p),e(_e,Ie),e(Ie,vo),y(vt,vo,null),e(_e,wi),e(_e,To),e(To,Li),u(n,ds,p),u(n,P,p),y(Tt,P,null),e(P,Ei),e(P,wo),e(wo,zi),e(P,xi),e(P,wt),e(wt,$i),e(wt,vn),e(vn,qi),e(wt,Fi),e(P,Ci),e(P,Lt),e(Lt,Mi),e(Lt,Et),e(Et,Pi),e(Lt,ji),e(P,Ai),e(P,J),y(zt,J,null),e(J,Ii),e(J,ke),e(ke,Oi),e(ke,Tn),e(Tn,Si),e(ke,Ni),e(ke,Lo),e(Lo,Di),e(ke,Bi),e(J,Wi),y(Oe,J,null),u(n,ls,p),u(n,ye,p),e(ye,Se),e(Se,Eo),y(xt,Eo,null),e(ye,Ui),e(ye,zo),e(zo,Ki),u(n,cs,p),u(n,j,p),y($t,j,null),e(j,Ri),e(j,xo),e(xo,Hi),e(j,Vi),e(j,qt),e(qt,Yi),e(qt,wn),e(wn,Qi),e(qt,Ji),e(j,Xi),e(j,Ft),e(Ft,Gi),e(Ft,Ct),e(Ct,Zi),e(Ft,ed),e(j,td),e(j,N),y(Mt,N,null),e(N,nd),e(N,be),e(
be,od),e(be,Ln),e(Ln,sd),e(be,ad),e(be,$o),e($o,rd),e(be,id),e(N,dd),y(Ne,N,null),e(N,ld),e(N,qo),e(qo,cd),e(N,pd),y(Pt,N,null),u(n,ps,p),u(n,ve,p),e(ve,De),e(De,Fo),y(jt,Fo,null),e(ve,hd),e(ve,Co),e(Co,ud),u(n,hs,p),u(n,A,p),y(At,A,null),e(A,md),e(A,Mo),e(Mo,fd),e(A,gd),e(A,It),e(It,_d),e(It,En),e(En,kd),e(It,yd),e(A,bd),e(A,Ot),e(Ot,vd),e(Ot,St),e(St,Td),e(Ot,wd),e(A,Ld),e(A,D),y(Nt,D,null),e(D,Ed),e(D,Te),e(Te,zd),e(Te,zn),e(zn,xd),e(Te,$d),e(Te,Po),e(Po,qd),e(Te,Fd),e(D,Cd),y(Be,D,null),e(D,Md),e(D,jo),e(jo,Pd),e(D,jd),y(Dt,D,null),u(n,us,p),u(n,we,p),e(we,We),e(We,Ao),y(Bt,Ao,null),e(we,Ad),e(we,Io),e(Io,Id),u(n,ms,p),u(n,I,p),y(Wt,I,null),e(I,Od),e(I,Oo),e(Oo,Sd),e(I,Nd),e(I,Ut),e(Ut,Dd),e(Ut,xn),e(xn,Bd),e(Ut,Wd),e(I,Ud),e(I,Kt),e(Kt,Kd),e(Kt,Rt),e(Rt,Rd),e(Kt,Hd),e(I,Vd),e(I,B),y(Ht,B,null),e(B,Yd),e(B,Le),e(Le,Qd),e(Le,$n),e($n,Jd),e(Le,Xd),e(Le,So),e(So,Gd),e(Le,Zd),e(B,el),y(Ue,B,null),e(B,tl),e(B,No),e(No,nl),e(B,ol),y(Vt,B,null),fs=!0},p(n,[p]){const Yt={};p&2&&(Yt.$$scope={dirty:p,ctx:n}),Ae.$set(Yt);const Do={};p&2&&(Do.$$scope={dirty:p,ctx:n}),Oe.$set(Do);const Bo={};p&2&&(Bo.$$scope={dirty:p,ctx:n}),Ne.$set(Bo);const Wo={};p&2&&(Wo.$$scope={dirty:p,ctx:n}),Be.$set(Wo);const Qt={};p&2&&(Qt.$$scope={dirty:p,ctx:n}),Ue.$set(Qt)},i(n){fs||(b(g.$$.fragment,n),b(He.$$.fragment,n),b(nt.$$.fragment,n),b(rt.$$.fragment,n),b(it.$$.fragment,n),b(lt.$$.fragment,n),b(ct.$$.fragment,n),b(pt.$$.fragment,n),b(ht.$$.fragment,n),b(ut.$$.fragment,n),b(mt.$$.fragment,n),b(ft.$$.fragment,n),b(yt.$$.fragment,n),b(Ae.$$.fragment,n),b(bt.$$.fragment,n),b(vt.$$.fragment,n),b(Tt.$$.fragment,n),b(zt.$$.fragment,n),b(Oe.$$.fragment,n),b(xt.$$.fragment,n),b($t.$$.fragment,n),b(Mt.$$.fragment,n),b(Ne.$$.fragment,n),b(Pt.$$.fragment,n),b(jt.$$.fragment,n),b(At.$$.fragment,n),b(Nt.$$.fragment,n),b(Be.$$.fragment,n),b(Dt.$$.fragment,n),b(Bt.$$.fragment,n),b(Wt.$$.fragment,n),b(Ht.$$.fragment,n),b(Ue.$$.fragment,n),b(Vt.$$.fragment,n),fs=!0)},o(n){v(g.$$.fragment,n),v(He.$$.fragment,n),v(nt.$$.fragment,n),v(rt.$$.fragment,n),v(it.$$.fragment,n),v(lt.$$.fragment,n),v(ct.$$.fragment,n),v(pt.$$.fragment,n),v(ht.$$.fragment,n),v(ut.$$.fragment,n),v(mt.$$.fragment,n),v(ft.$$.fragment,n),v(yt.$$.fragment,n),v(Ae.$$.fragment,n),v(bt.$$.fragment,n),v(vt.$$.fragment,n),v(Tt.$$.fragment,n),v(zt.$$.fragment,n),v(Oe.$$.fragment,n),v(xt.$$.fragment,n),v($t.$$.fragment,n),v(Mt.$$.fragment,n),v(Ne.$$.fragment,n),v(Pt.$$.fragment,n),v(jt.$$.fragment,n),v(At.$$.fragment,n),v(Nt.$$.fragment,n),v(Be.$$.fragment,n),v(Dt.$$.fragment,n),v(Bt.$$.fragment,n),v(Wt.$$.fragment,n),v(Ht.$$.fragment,n),v(Ue.$$.fragment,n),v(Vt.$$.fragment,n),fs=!1},d(n){t(h),n&&t(L),n&&t(m),T(g),n&&t(Ho),n&&t(le),T(He),n&&t(Vo),n&&t($e),n&&t(Yo),n&&t(Jt),n&&t(Qo),n&&t(Xt),n&&t(Jo),n&&t(Gt),n&&t(Xo),n&&t(R),n&&t(Go),n&&t(pn),n&&t(Zo),T(nt,n),n&&t(es),n&&t(V),n&&t(ts),n&&t(he),T(rt),n&&t(ns),n&&t(F),T(it),T(lt),n&&t(os),n&&t(me),T(ct),n&&t(ss),n&&t(C),T(pt),T(ht),T(ut),n&&t(as),n&&t(fe),T(mt),n&&t(rs),n&&t(M),T(ft),T(yt),T(Ae),T(bt),n&&t(is),n&&t(_e),T(vt),n&&t(ds),n&&t(P),T(Tt),T(zt),T(Oe),n&&t(ls),n&&t(ye),T(xt),n&&t(cs),n&&t(j),T($t),T(Mt),T(Ne),T(Pt),n&&t(ps),n&&t(ve),T(jt),n&&t(hs),n&&t(A),T(At),T(Nt),T(Be),T(Dt),n&&t(us),n&&t(we),T(Bt),n&&t(ms),n&&t(I),T(Wt),T(Ht),T(Ue),T(Vt)}}}const 
up={local:"luke",sections:[{local:"overview",title:"Overview"},{local:"transformers.LukeConfig",title:"LukeConfig"},{local:"transformers.LukeTokenizer",title:"LukeTokenizer"},{local:"transformers.LukeModel",title:"LukeModel"},{local:"transformers.LukeForMaskedLM",title:"LukeForMaskedLM"},{local:"transformers.LukeForEntityClassification",title:"LukeForEntityClassification"},{local:"transformers.LukeForEntityPairClassification",title:"LukeForEntityPairClassification"},{local:"transformers.LukeForEntitySpanClassification",title:"LukeForEntitySpanClassification"}],title:"LUKE"};function mp(U,h,L){let{fw:m}=h;return U.$$set=w=>{"fw"in w&&L(0,m=w.fw)},[m]}class vp extends op{constructor(h){super();sp(this,h,mp,hp,ap,{fw:0})}}export{vp as default,up as metadata};
295
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages/model_doc/trocr.mdx-6100bb6b.js
import{S as _i,i as vi,s as Ti,e as n,k as l,w as v,t as o,M as Ci,c as s,d as r,m as c,a as i,x as T,h as a,b as d,N as bi,F as e,g as f,y as C,q as b,o as w,B as k}from"../../chunks/vendor-4833417e.js";import{T as gi}from"../../chunks/Tip-fffd6df1.js";import{D as J}from"../../chunks/Docstring-4f315ed9.js";import{C as Jn}from"../../chunks/CodeBlock-6a3d1b46.js";import{I as zt}from"../../chunks/IconCopyLink-4b81c553.js";import"../../chunks/CopyButton-dacfbfaf.js";function wi(be){let m,z,p,E,D,y,j,A;return{c(){m=n("p"),z=o(`This class method is simply calling the feature extractor `),p=n("a"),E=o("from_pretrained()"),D=o(` and the tokenizer `),y=n("code"),j=o("from_pretrained"),A=o(` methods. Please refer to the docstrings of the methods above for more information.`),this.h()},l(x){m=s(x,"P",{});var _=i(m);z=a(_,`This class method is simply calling the feature extractor `),p=s(_,"A",{href:!0});var P=i(p);E=a(P,"from_pretrained()"),P.forEach(r),D=a(_,` and the tokenizer `),y=s(_,"CODE",{});var L=i(y);j=a(L,"from_pretrained"),L.forEach(r),A=a(_,` methods. Please refer to the docstrings of the methods above for more information.`),_.forEach(r),this.h()},h(){d(p,"href","/docs/transformers/pr_16143/en/main_classes/feature_extractor#transformers.FeatureExtractionMixin.from_pretrained")},m(x,_){f(x,m,_),e(m,z),e(m,p),e(p,E),e(m,D),e(m,y),e(y,j),e(m,A)},d(x){x&&r(m)}}}function ki(be){let m,z,p,E,D,y,j,A;return{c(){m=n("p"),z=o("This class method is simply calling "),p=n("a"),E=o("save_pretrained()"),D=o(` and `),y=n("code"),j=o("save_pretrained"),A=o(`. Please refer to the docstrings of the methods above for more information.`),this.h()},l(x){m=s(x,"P",{});var _=i(m);z=a(_,"This class method is simply calling "),p=s(_,"A",{href:!0});var P=i(p);E=a(P,"save_pretrained()"),P.forEach(r),D=a(_,` and `),y=s(_,"CODE",{});var L=i(y);j=a(L,"save_pretrained"),L.forEach(r),A=a(_,`. 
Please refer to the docstrings of the methods above for more information.`),_.forEach(r),this.h()},h(){d(p,"href","/docs/transformers/pr_16143/en/main_classes/feature_extractor#transformers.FeatureExtractionMixin.save_pretrained")},m(x,_){f(x,m,_),e(m,z),e(m,p),e(p,E),e(m,D),e(m,y),e(y,j),e(m,A)},d(x){x&&r(m)}}}function Oi(be){let m,z,p,E,D,y,j,A,x,_,P,L,Lt,we,ro,Dt,oo,gr,S,ao,ke,no,so,Oe,io,lo,_r,nt,co,vr,st,At,ho,Tr,oe,Yn,Cr,ae,fo,it,mo,po,br,ne,uo,It,go,_o,wr,W,vo,Ee,To,Co,ye,bo,wo,kr,dt,ko,Or,H,Re,Oo,$e,Eo,yo,Ro,q,$o,xe,xo,Po,Pe,Mo,Fo,Me,zo,Lo,Do,Fe,Ao,lt,Io,jo,Er,Y,se,jt,ze,qo,qt,Vo,yr,B,No,Vt,So,Wo,ct,Ho,Bo,Rr,u,Uo,Nt,Xo,Go,St,Zo,Jo,Wt,Yo,Ko,Ht,Qo,ea,ht,ta,ra,Bt,oa,aa,Ut,na,sa,Xt,ia,da,Gt,la,ca,$r,ft,Zt,ha,xr,Le,Pr,ie,fa,De,ma,pa,Mr,K,de,Jt,Ae,ua,Yt,ga,Fr,M,Ie,_a,Q,va,mt,Ta,Ca,je,ba,wa,ka,ee,Oa,pt,Ea,ya,ut,Ra,$a,xa,Kt,Pa,Ma,qe,zr,te,le,Qt,Ve,Fa,er,za,Lr,g,Ne,La,tr,Da,Aa,R,gt,Ia,ja,rr,qa,Va,or,Na,Sa,ar,Wa,Ha,nr,Ba,Ua,Se,sr,Xa,Ga,Za,_t,Ja,Ya,Ka,ce,We,Qa,V,en,ir,tn,rn,vt,on,an,dr,nn,sn,dn,U,He,ln,lr,cn,hn,he,fn,X,Be,mn,Ue,pn,Tt,un,gn,_n,fe,vn,me,Xe,Tn,Ge,Cn,Ct,bn,wn,kn,pe,Ze,On,Je,En,bt,yn,Rn,$n,ue,Ye,xn,cr,Pn,Dr,re,ge,hr,Ke,Mn,fr,Fn,Ar,I,Qe,zn,N,Ln,wt,Dn,An,mr,In,jn,kt,qn,Vn,Nn,et,Sn,tt,Wn,Hn,Bn,G,rt,Un,pr,Xn,Gn,ot,Ir;return y=new zt({}),we=new zt({}),ze=new zt({}),Le=new Jn({props:{code:`from transformers import TrOCRProcessor, VisionEncoderDecoderModel import requests from PIL import Image processor = TrOCRProcessor.from_pretrained("microsoft/trocr-base-handwritten") model = VisionEncoderDecoderModel.from_pretrained("microsoft/trocr-base-handwritten") # load image from the IAM dataset url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" image = Image.open(requests.get(url, stream=True).raw).convert("RGB") pixel_values = processor(image, return_tensors="pt").pixel_values generated_ids = model.generate(pixel_values) generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] `,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TrOCRProcessor, VisionEncoderDecoderModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> requests <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image <span class="hljs-meta">&gt;&gt;&gt; </span>processor = TrOCRProcessor.from_pretrained(<span class="hljs-string">&quot;microsoft/trocr-base-handwritten&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = VisionEncoderDecoderModel.from_pretrained(<span class="hljs-string">&quot;microsoft/trocr-base-handwritten&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># load image from the IAM dataset </span> <span class="hljs-meta">&gt;&gt;&gt; </span>url = <span class="hljs-string">&quot;https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>image = Image.<span class="hljs-built_in">open</span>(requests.get(url, stream=<span class="hljs-literal">True</span>).raw).convert(<span class="hljs-string">&quot;RGB&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>pixel_values = processor(image, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).pixel_values <span class="hljs-meta">&gt;&gt;&gt; </span>generated_ids = model.generate(pixel_values) <span class="hljs-meta">&gt;&gt;&gt; </span>generated_text = processor.batch_decode(generated_ids, 
skip_special_tokens=<span class="hljs-literal">True</span>)[<span class="hljs-number">0</span>] `}}),Ae=new zt({}),Ie=new J({props:{name:"class transformers.TrOCRConfig",anchor:"transformers.TrOCRConfig",parameters:[{name:"vocab_size",val:" = 50265"},{name:"d_model",val:" = 1024"},{name:"decoder_layers",val:" = 12"},{name:"decoder_attention_heads",val:" = 16"},{name:"decoder_ffn_dim",val:" = 4096"},{name:"activation_function",val:" = 'gelu'"},{name:"max_position_embeddings",val:" = 512"},{name:"dropout",val:" = 0.1"},{name:"attention_dropout",val:" = 0.0"},{name:"activation_dropout",val:" = 0.0"},{name:"decoder_start_token_id",val:" = 2"},{name:"classifier_dropout",val:" = 0.0"},{name:"init_std",val:" = 0.02"},{name:"decoder_layerdrop",val:" = 0.0"},{name:"use_cache",val:" = False"},{name:"scale_embedding",val:" = False"},{name:"use_learned_position_embeddings",val:" = True"},{name:"layernorm_embedding",val:" = True"},{name:"pad_token_id",val:" = 1"},{name:"bos_token_id",val:" = 0"},{name:"eos_token_id",val:" = 2"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/trocr/configuration_trocr.py#L29",parametersDescription:[{anchor:"transformers.TrOCRConfig.vocab_size",description:`<strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 50265) &#x2014; Vocabulary size of the TrOCR model. Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed when calling <a href="/docs/transformers/pr_16143/en/model_doc/trocr#transformers.TrOCRForCausalLM">TrOCRForCausalLM</a>.`,name:"vocab_size"},{anchor:"transformers.TrOCRConfig.d_model",description:`<strong>d_model</strong> (<code>int</code>, <em>optional</em>, defaults to 1024) &#x2014; Dimensionality of the layers and the pooler layer.`,name:"d_model"},{anchor:"transformers.TrOCRConfig.decoder_layers",description:`<strong>decoder_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of decoder layers.`,name:"decoder_layers"},{anchor:"transformers.TrOCRConfig.decoder_attention_heads",description:`<strong>decoder_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 16) &#x2014; Number of attention heads for each attention layer in the Transformer decoder.`,name:"decoder_attention_heads"},{anchor:"transformers.TrOCRConfig.decoder_ffn_dim",description:`<strong>decoder_ffn_dim</strong> (<code>int</code>, <em>optional</em>, defaults to 4096) &#x2014; Dimensionality of the &#x201C;intermediate&#x201D; (often named feed-forward) layer in decoder.`,name:"decoder_ffn_dim"},{anchor:"transformers.TrOCRConfig.activation_function",description:`<strong>activation_function</strong> (<code>str</code> or <code>function</code>, <em>optional</em>, defaults to <code>&quot;gelu&quot;</code>) &#x2014; The non-linear activation function (function or string) in the pooler. If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;silu&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.`,name:"activation_function"},{anchor:"transformers.TrOCRConfig.max_position_embeddings",description:`<strong>max_position_embeddings</strong> (<code>int</code>, <em>optional</em>, defaults to 512) &#x2014; The maximum sequence length that this model might ever be used with. 
Typically set this to something large just in case (e.g., 512 or 1024 or 2048).`,name:"max_position_embeddings"},{anchor:"transformers.TrOCRConfig.dropout",description:`<strong>dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability for all fully connected layers in the embeddings, and pooler.`,name:"dropout"},{anchor:"transformers.TrOCRConfig.attention_dropout",description:`<strong>attention_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout ratio for the attention probabilities.`,name:"attention_dropout"},{anchor:"transformers.TrOCRConfig.activation_dropout",description:`<strong>activation_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout ratio for activations inside the fully connected layer.`,name:"activation_dropout"},{anchor:"transformers.TrOCRConfig.classifier_dropout",description:`<strong>classifier_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout ratio for classifier.`,name:"classifier_dropout"},{anchor:"transformers.TrOCRConfig.init_std",description:`<strong>init_std</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices. decoder_layerdrop &#x2014; (<code>float</code>, <em>optional</em>, defaults to 0.0): The LayerDrop probability for the decoder. See the [LayerDrop paper](see <a href="https://arxiv.org/abs/1909.11556" rel="nofollow">https://arxiv.org/abs/1909.11556</a>) for more details.`,name:"init_std"},{anchor:"transformers.TrOCRConfig.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not the model should return the last key/values attentions (not used by all models).`,name:"use_cache"},{anchor:"transformers.TrOCRConfig.scale_embedding",description:`<strong>scale_embedding</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to scale the word embeddings by sqrt(d_model).`,name:"scale_embedding"},{anchor:"transformers.TrOCRConfig.use_learned_position_embeddings",description:`<strong>use_learned_position_embeddings</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to use learned position embeddings. 
If not, sinusoidal position embeddings will be used.`,name:"use_learned_position_embeddings"},{anchor:"transformers.TrOCRConfig.layernorm_embedding",description:`<strong>layernorm_embedding</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to use a layernorm after the word + position embeddings.`,name:"layernorm_embedding"}]}}),qe=new Jn({props:{code:`from transformers import TrOCRForCausalLM, TrOCRConfig # Initializing a TrOCR-base style configuration configuration = TrOCRConfig() # Initializing a model from the TrOCR-base style configuration model = TrOCRForCausalLM(configuration) # Accessing the model configuration configuration = model.config`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TrOCRForCausalLM, TrOCRConfig <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a TrOCR-base style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = TrOCRConfig() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a model from the TrOCR-base style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = TrOCRForCausalLM(configuration) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = model.config`}}),Ve=new zt({}),Ne=new J({props:{name:"class transformers.TrOCRProcessor",anchor:"transformers.TrOCRProcessor",parameters:[{name:"feature_extractor",val:""},{name:"tokenizer",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/trocr/processing_trocr.py#L23",parametersDescription:[{anchor:"transformers.TrOCRProcessor.feature_extractor",description:`<strong>feature_extractor</strong> ([<code>ViTFeatureExtractor</code>/<code>DeiTFeatureExtractor</code>]) &#x2014; An instance of [<code>ViTFeatureExtractor</code>/<code>DeiTFeatureExtractor</code>]. The feature extractor is a required input.`,name:"feature_extractor"},{anchor:"transformers.TrOCRProcessor.tokenizer",description:`<strong>tokenizer</strong> ([<code>RobertaTokenizer</code>/<code>XLMRobertaTokenizer</code>]) &#x2014; An instance of [<code>RobertaTokenizer</code>/<code>XLMRobertaTokenizer</code>]. The tokenizer is a required input.`,name:"tokenizer"}]}}),We=new J({props:{name:"__call__",anchor:"transformers.TrOCRProcessor.__call__",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/trocr/processing_trocr.py#L44"}}),He=new J({props:{name:"from_pretrained",anchor:"transformers.ProcessorMixin.from_pretrained",parameters:[{name:"pretrained_model_name_or_path",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/processing_utils.py#L157",parametersDescription:[{anchor:"transformers.ProcessorMixin.from_pretrained.pretrained_model_name_or_path",description:`<strong>pretrained_model_name_or_path</strong> (<code>str</code> or <code>os.PathLike</code>) &#x2014; This can be either:</p> <ul> <li>a string, the <em>model id</em> of a pretrained feature_extractor hosted inside a model repo on huggingface.co. 
Valid model ids can be located at the root-level, like <code>bert-base-uncased</code>, or namespaced under a user or organization name, like <code>dbmdz/bert-base-german-cased</code>.</li> <li>a path to a <em>directory</em> containing a feature extractor file saved using the <a href="/docs/transformers/pr_16143/en/main_classes/feature_extractor#transformers.FeatureExtractionMixin.save_pretrained">save_pretrained()</a> method, e.g., <code>./my_model_directory/</code>.</li> <li>a path or url to a saved feature extractor JSON <em>file</em>, e.g., <code>./my_model_directory/preprocessor_config.json</code>. **kwargs &#x2014; Additional keyword arguments passed along to both <a href="/docs/transformers/pr_16143/en/main_classes/feature_extractor#transformers.FeatureExtractionMixin.from_pretrained">from_pretrained()</a> and <code>from_pretrained</code>.</li> </ul>`,name:"pretrained_model_name_or_path"}]}}),he=new gi({props:{$$slots:{default:[wi]},$$scope:{ctx:be}}}),Be=new J({props:{name:"save_pretrained",anchor:"transformers.ProcessorMixin.save_pretrained",parameters:[{name:"save_directory",val:""},{name:"push_to_hub",val:": bool = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/processing_utils.py#L95",parametersDescription:[{anchor:"transformers.ProcessorMixin.save_pretrained.save_directory",description:`<strong>save_directory</strong> (<code>str</code> or <code>os.PathLike</code>) &#x2014; Directory where the feature extractor JSON file and the tokenizer files will be saved (directory will be created if it does not exist).`,name:"save_directory"},{anchor:"transformers.ProcessorMixin.save_pretrained.push_to_hub",description:`<strong>push_to_hub</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to push your processor to the Hugging Face model hub after saving it.</p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"> <p>Using <code>push_to_hub=True</code> will synchronize the repository you are pushing to with <code>save_directory</code>, which requires <code>save_directory</code> to be a local clone of the repo you are pushing to if it&#x2019;s an existing folder. 
Pass along <code>temp_dir=True</code> to use a temporary directory instead.</p> </div> <p>kwargs &#x2014; Additional key word arguments passed along to the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.file_utils.PushToHubMixin.push_to_hub">push_to_hub()</a> method.`,name:"push_to_hub"}]}}),fe=new gi({props:{$$slots:{default:[ki]},$$scope:{ctx:be}}}),Xe=new J({props:{name:"batch_decode",anchor:"transformers.TrOCRProcessor.batch_decode",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/trocr/processing_trocr.py#L53"}}),Ze=new J({props:{name:"decode",anchor:"transformers.TrOCRProcessor.decode",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/trocr/processing_trocr.py#L60"}}),Ye=new J({props:{name:"as_target_processor",anchor:"transformers.TrOCRProcessor.as_target_processor",parameters:[],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/trocr/processing_trocr.py#L67"}}),Ke=new zt({}),Qe=new J({props:{name:"class transformers.TrOCRForCausalLM",anchor:"transformers.TrOCRForCausalLM",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/trocr/modeling_trocr.py#L776",parametersDescription:[{anchor:"transformers.TrOCRForCausalLM.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/trocr#transformers.TrOCRConfig">TrOCRConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),rt=new J({props:{name:"forward",anchor:"transformers.TrOCRForCausalLM.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"encoder_hidden_states",val:" = None"},{name:"encoder_attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"cross_attn_head_mask",val:" = None"},{name:"past_key_values",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/trocr/modeling_trocr.py#L807",parametersDescription:[{anchor:"transformers.TrOCRForCausalLM.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <code>TrOCRTokenizer</code>. 
See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TrOCRForCausalLM.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TrOCRForCausalLM.forward.encoder_hidden_states",description:`<strong>encoder_hidden_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder.`,name:"encoder_hidden_states"},{anchor:"transformers.TrOCRForCausalLM.forward.encoder_attention_mask",description:`<strong>encoder_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in <code>[0, 1]</code>:`,name:"encoder_attention_mask"},{anchor:"transformers.TrOCRForCausalLM.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TrOCRForCausalLM.forward.cross_attn_head_mask",description:`<strong>cross_attn_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the cross-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"cross_attn_head_mask"},{anchor:"transformers.TrOCRForCausalLM.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>. 
The two additional tensors are only required when the model is used as a decoder in a Sequence to Sequence model.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.`,name:"past_key_values"},{anchor:"transformers.TrOCRForCausalLM.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. Indices should either be in <code>[0, ..., config.vocab_size]</code> or -100 (see <code>input_ids</code> docstring). Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>.`,name:"labels"},{anchor:"transformers.TrOCRForCausalLM.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul>`,name:"use_cache"},{anchor:"transformers.TrOCRForCausalLM.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.TrOCRForCausalLM.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.TrOCRForCausalLM.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.CausalLMOutputWithCrossAttentions" >transformers.modeling_outputs.CausalLMOutputWithCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/trocr#transformers.TrOCRConfig" >TrOCRConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Language modeling loss (for next-token prediction).</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Cross attentions weights after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> tuples of length <code>config.n_layers</code>, with each tuple containing the cached key, value states of the self-attention and the cross-attention layers if model is used in encoder-decoder setting. 
Only relevant if <code>config.is_decoder = True</code>.</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.CausalLMOutputWithCrossAttentions" >transformers.modeling_outputs.CausalLMOutputWithCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),ot=new Jn({props:{code:`from transformers import VisionEncoderDecoderModel, TrOCRForCausalLM, ViTModel, TrOCRConfig, ViTConfig encoder = ViTModel(ViTConfig()) decoder = TrOCRForCausalLM(TrOCRConfig()) model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> VisionEncoderDecoderModel, TrOCRForCausalLM, ViTModel, TrOCRConfig, ViTConfig <span class="hljs-meta">&gt;&gt;&gt; </span>encoder = ViTModel(ViTConfig()) <span class="hljs-meta">&gt;&gt;&gt; </span>decoder = TrOCRForCausalLM(TrOCRConfig()) <span class="hljs-comment"># init vision2text model</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)`}}),{c(){m=n("meta"),z=l(),p=n("h1"),E=n("a"),D=n("span"),v(y.$$.fragment),j=l(),A=n("span"),x=o("TrOCR"),_=l(),P=n("h2"),L=n("a"),Lt=n("span"),v(we.$$.fragment),ro=l(),Dt=n("span"),oo=o("Overview"),gr=l(),S=n("p"),ao=o("The TrOCR model was proposed in "),ke=n("a"),no=o(`TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models`),so=o(` by Minghao Li, Tengchao Lv, Lei Cui, Yijuan Lu, Dinei Florencio, Cha Zhang, Zhoujun Li, Furu Wei. TrOCR consists of an image Transformer encoder and an autoregressive text Transformer decoder to perform `),Oe=n("a"),io=o("optical character recognition (OCR)"),lo=o("."),_r=l(),nt=n("p"),co=o("The abstract from the paper is the following:"),vr=l(),st=n("p"),At=n("em"),ho=o(`Text recognition is a long-standing research problem for document digitalization. Existing approaches for text recognition are usually built based on CNN for image understanding and RNN for char-level text generation. In addition, another language model is usually needed to improve the overall accuracy as a post-processing step. In this paper, we propose an end-to-end text recognition approach with pre-trained image Transformer and text Transformer models, namely TrOCR, which leverages the Transformer architecture for both image understanding and wordpiece-level text generation. The TrOCR model is simple but effective, and can be pre-trained with large-scale synthetic data and fine-tuned with human-labeled datasets. Experiments show that the TrOCR model outperforms the current state-of-the-art models on both printed and handwritten text recognition tasks.`),Tr=l(),oe=n("img"),Cr=l(),ae=n("small"),fo=o("TrOCR architecture. Taken from the "),it=n("a"),mo=o("original paper"),po=o("."),br=l(),ne=n("p"),uo=o("Please refer to the "),It=n("code"),go=o("VisionEncoderDecoder"),_o=o(" class on how to use this model."),wr=l(),W=n("p"),vo=o("This model was contributed by "),Ee=n("a"),To=o("nielsr"),Co=o(`. 
The original code can be found `),ye=n("a"),bo=o("here"),wo=o("."),kr=l(),dt=n("p"),ko=o("Tips:"),Or=l(),H=n("ul"),Re=n("li"),Oo=o("The quickest way to get started with TrOCR is by checking the "),$e=n("a"),Eo=o(`tutorial notebooks`),yo=o(`, which show how to use the model at inference time as well as fine-tuning on custom data.`),Ro=l(),q=n("li"),$o=o(`TrOCR is pre-trained in 2 stages before being fine-tuned on downstream datasets. It achieves state-of-the-art results on both printed (e.g. the `),xe=n("a"),xo=o("SROIE dataset"),Po=o(" and handwritten (e.g. the "),Pe=n("a"),Mo=o(`IAM Handwriting dataset`),Fo=o(` text recognition tasks. For more information, see the `),Me=n("a"),zo=o("official models"),Lo=o("."),Do=l(),Fe=n("li"),Ao=o("TrOCR is always used within the "),lt=n("a"),Io=o("VisionEncoderDecoder"),jo=o(" framework."),Er=l(),Y=n("h2"),se=n("a"),jt=n("span"),v(ze.$$.fragment),qo=l(),qt=n("span"),Vo=o("Inference"),yr=l(),B=n("p"),No=o("TrOCR\u2019s "),Vt=n("code"),So=o("VisionEncoderDecoder"),Wo=o(` model accepts images as input and makes use of `),ct=n("a"),Ho=o("generate()"),Bo=o(" to autoregressively generate text given the input image."),Rr=l(),u=n("p"),Uo=o("The ["),Nt=n("code"),Xo=o("ViTFeatureExtractor"),Go=o("/"),St=n("code"),Zo=o("DeiTFeatureExtractor"),Jo=o(`] class is responsible for preprocessing the input image and [`),Wt=n("code"),Yo=o("RobertaTokenizer"),Ko=o("/"),Ht=n("code"),Qo=o("XLMRobertaTokenizer"),ea=o(`] decodes the generated target tokens to the target string. The `),ht=n("a"),ta=o("TrOCRProcessor"),ra=o(" wraps ["),Bt=n("code"),oa=o("ViTFeatureExtractor"),aa=o("/"),Ut=n("code"),na=o("DeiTFeatureExtractor"),sa=o("] and ["),Xt=n("code"),ia=o("RobertaTokenizer"),da=o("/"),Gt=n("code"),la=o("XLMRobertaTokenizer"),ca=o(`] into a single instance to both extract the input features and decode the predicted token ids.`),$r=l(),ft=n("ul"),Zt=n("li"),ha=o("Step-by-step Optical Character Recognition (OCR)"),xr=l(),v(Le.$$.fragment),Pr=l(),ie=n("p"),fa=o("See the "),De=n("a"),ma=o("model hub"),pa=o(" to look for TrOCR checkpoints."),Mr=l(),K=n("h2"),de=n("a"),Jt=n("span"),v(Ae.$$.fragment),ua=l(),Yt=n("span"),ga=o("TrOCRConfig"),Fr=l(),M=n("div"),v(Ie.$$.fragment),_a=l(),Q=n("p"),va=o("This is the configuration class to store the configuration of a "),mt=n("a"),Ta=o("TrOCRForCausalLM"),Ca=o(`. It is used to instantiate an TrOCR model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the TrOCR `),je=n("a"),ba=o("microsoft/trocr-base"),wa=o(" architecture."),ka=l(),ee=n("p"),Oa=o("Configuration objects inherit from "),pt=n("a"),Ea=o("PretrainedConfig"),ya=o(` and can be used to control the model outputs. Read the documentation from `),ut=n("a"),Ra=o("PretrainedConfig"),$a=o(" for more information."),xa=l(),Kt=n("p"),Pa=o("Example:"),Ma=l(),v(qe.$$.fragment),zr=l(),te=n("h2"),le=n("a"),Qt=n("span"),v(Ve.$$.fragment),Fa=l(),er=n("span"),za=o("TrOCRProcessor"),Lr=l(),g=n("div"),v(Ne.$$.fragment),La=l(),tr=n("p"),Da=o("Constructs a TrOCR processor which wraps a vision feature extractor and a TrOCR tokenizer into a single processor."),Aa=l(),R=n("p"),gt=n("a"),Ia=o("TrOCRProcessor"),ja=o(" offers all the functionalities of ["),rr=n("code"),qa=o("ViTFeatureExtractor"),Va=o("/"),or=n("code"),Na=o("DeiTFeatureExtractor"),Sa=o(`] and [`),ar=n("code"),Wa=o("RobertaTokenizer"),Ha=o("/"),nr=n("code"),Ba=o("XLMRobertaTokenizer"),Ua=o("]. 
See the "),Se=n("a"),sr=n("strong"),Xa=o("call"),Ga=o("()"),Za=o(" and "),_t=n("a"),Ja=o("decode()"),Ya=o(` for more information.`),Ka=l(),ce=n("div"),v(We.$$.fragment),Qa=l(),V=n("p"),en=o(`When used in normal mode, this method forwards all its arguments to AutoFeatureExtractor\u2019s `),ir=n("code"),tn=o("__call__()"),rn=o(`and returns its output. If used in the context `),vt=n("a"),on=o("as_target_processor()"),an=o(` this method forwards all its arguments to TrOCRTokenizer\u2019s `),dr=n("code"),nn=o("__call__"),sn=o(". Please refer to the doctsring of the above two methods for more information."),dn=l(),U=n("div"),v(He.$$.fragment),ln=l(),lr=n("p"),cn=o("Instantiate a processor associated with a pretrained model."),hn=l(),v(he.$$.fragment),fn=l(),X=n("div"),v(Be.$$.fragment),mn=l(),Ue=n("p"),pn=o(`Saves the attributes of this processor (feature extractor, tokenizer\u2026) in the specified directory so that it can be reloaded using the `),Tt=n("a"),un=o("from_pretrained()"),gn=o(" method."),_n=l(),v(fe.$$.fragment),vn=l(),me=n("div"),v(Xe.$$.fragment),Tn=l(),Ge=n("p"),Cn=o("This method forwards all its arguments to TrOCRTokenizer\u2019s "),Ct=n("a"),bn=o("batch_decode()"),wn=o(`. Please refer to the docstring of this method for more information.`),kn=l(),pe=n("div"),v(Ze.$$.fragment),On=l(),Je=n("p"),En=o("This method forwards all its arguments to TrOCRTokenizer\u2019s "),bt=n("a"),yn=o("decode()"),Rn=o(`. Please refer to the docstring of this method for more information.`),$n=l(),ue=n("div"),v(Ye.$$.fragment),xn=l(),cr=n("p"),Pn=o("Temporarily sets the tokenizer for processing the input. Useful for encoding the labels when fine-tuning TrOCR."),Dr=l(),re=n("h2"),ge=n("a"),hr=n("span"),v(Ke.$$.fragment),Mn=l(),fr=n("span"),Fn=o("TrOCRForCausalLM"),Ar=l(),I=n("div"),v(Qe.$$.fragment),zn=l(),N=n("p"),Ln=o("The TrOCR Decoder with a language modeling head. Can be used as the decoder part of "),wt=n("a"),Dn=o("EncoderDecoderModel"),An=o(" and "),mr=n("code"),In=o("VisionEncoderDecoder"),jn=o(`. This model inherits from `),kt=n("a"),qn=o("PreTrainedModel"),Vn=o(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Nn=l(),et=n("p"),Sn=o("This model is also a PyTorch "),tt=n("a"),Wn=o("torch.nn.Module"),Hn=o(` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Bn=l(),G=n("div"),v(rt.$$.fragment),Un=l(),pr=n("p"),Xn=o("Example:"),Gn=l(),v(ot.$$.fragment),this.h()},l(t){const h=Ci('[data-svelte="svelte-1phssyn"]',document.head);m=s(h,"META",{name:!0,content:!0}),h.forEach(r),z=c(t),p=s(t,"H1",{class:!0});var at=i(p);E=s(at,"A",{id:!0,class:!0,href:!0});var ur=i(E);D=s(ur,"SPAN",{});var Kn=i(D);T(y.$$.fragment,Kn),Kn.forEach(r),ur.forEach(r),j=c(at),A=s(at,"SPAN",{});var Qn=i(A);x=a(Qn,"TrOCR"),Qn.forEach(r),at.forEach(r),_=c(t),P=s(t,"H2",{class:!0});var jr=i(P);L=s(jr,"A",{id:!0,class:!0,href:!0});var es=i(L);Lt=s(es,"SPAN",{});var ts=i(Lt);T(we.$$.fragment,ts),ts.forEach(r),es.forEach(r),ro=c(jr),Dt=s(jr,"SPAN",{});var rs=i(Dt);oo=a(rs,"Overview"),rs.forEach(r),jr.forEach(r),gr=c(t),S=s(t,"P",{});var Ot=i(S);ao=a(Ot,"The TrOCR model was proposed in "),ke=s(Ot,"A",{href:!0,rel:!0});var os=i(ke);no=a(os,`TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models`),os.forEach(r),so=a(Ot,` by Minghao Li, Tengchao Lv, Lei Cui, Yijuan Lu, Dinei Florencio, Cha Zhang, Zhoujun Li, Furu Wei. TrOCR consists of an image Transformer encoder and an autoregressive text Transformer decoder to perform `),Oe=s(Ot,"A",{href:!0,rel:!0});var as=i(Oe);io=a(as,"optical character recognition (OCR)"),as.forEach(r),lo=a(Ot,"."),Ot.forEach(r),_r=c(t),nt=s(t,"P",{});var ns=i(nt);co=a(ns,"The abstract from the paper is the following:"),ns.forEach(r),vr=c(t),st=s(t,"P",{});var ss=i(st);At=s(ss,"EM",{});var is=i(At);ho=a(is,`Text recognition is a long-standing research problem for document digitalization. Existing approaches for text recognition are usually built based on CNN for image understanding and RNN for char-level text generation. In addition, another language model is usually needed to improve the overall accuracy as a post-processing step. In this paper, we propose an end-to-end text recognition approach with pre-trained image Transformer and text Transformer models, namely TrOCR, which leverages the Transformer architecture for both image understanding and wordpiece-level text generation. The TrOCR model is simple but effective, and can be pre-trained with large-scale synthetic data and fine-tuned with human-labeled datasets. Experiments show that the TrOCR model outperforms the current state-of-the-art models on both printed and handwritten text recognition tasks.`),is.forEach(r),ss.forEach(r),Tr=c(t),oe=s(t,"IMG",{src:!0,alt:!0,width:!0}),Cr=c(t),ae=s(t,"SMALL",{});var qr=i(ae);fo=a(qr,"TrOCR architecture. Taken from the "),it=s(qr,"A",{href:!0});var ds=i(it);mo=a(ds,"original paper"),ds.forEach(r),po=a(qr,"."),qr.forEach(r),br=c(t),ne=s(t,"P",{});var Vr=i(ne);uo=a(Vr,"Please refer to the "),It=s(Vr,"CODE",{});var ls=i(It);go=a(ls,"VisionEncoderDecoder"),ls.forEach(r),_o=a(Vr," class on how to use this model."),Vr.forEach(r),wr=c(t),W=s(t,"P",{});var Et=i(W);vo=a(Et,"This model was contributed by "),Ee=s(Et,"A",{href:!0,rel:!0});var cs=i(Ee);To=a(cs,"nielsr"),cs.forEach(r),Co=a(Et,`. 
The original code can be found `),ye=s(Et,"A",{href:!0,rel:!0});var hs=i(ye);bo=a(hs,"here"),hs.forEach(r),wo=a(Et,"."),Et.forEach(r),kr=c(t),dt=s(t,"P",{});var fs=i(dt);ko=a(fs,"Tips:"),fs.forEach(r),Or=c(t),H=s(t,"UL",{});var yt=i(H);Re=s(yt,"LI",{});var Nr=i(Re);Oo=a(Nr,"The quickest way to get started with TrOCR is by checking the "),$e=s(Nr,"A",{href:!0,rel:!0});var ms=i($e);Eo=a(ms,`tutorial notebooks`),ms.forEach(r),yo=a(Nr,`, which show how to use the model at inference time as well as fine-tuning on custom data.`),Nr.forEach(r),Ro=c(yt),q=s(yt,"LI",{});var _e=i(q);$o=a(_e,`TrOCR is pre-trained in 2 stages before being fine-tuned on downstream datasets. It achieves state-of-the-art results on both printed (e.g. the `),xe=s(_e,"A",{href:!0,rel:!0});var ps=i(xe);xo=a(ps,"SROIE dataset"),ps.forEach(r),Po=a(_e," and handwritten (e.g. the "),Pe=s(_e,"A",{href:!0,rel:!0});var us=i(Pe);Mo=a(us,`IAM Handwriting dataset`),us.forEach(r),Fo=a(_e,` text recognition tasks. For more information, see the `),Me=s(_e,"A",{href:!0,rel:!0});var gs=i(Me);zo=a(gs,"official models"),gs.forEach(r),Lo=a(_e,"."),_e.forEach(r),Do=c(yt),Fe=s(yt,"LI",{});var Sr=i(Fe);Ao=a(Sr,"TrOCR is always used within the "),lt=s(Sr,"A",{href:!0});var _s=i(lt);Io=a(_s,"VisionEncoderDecoder"),_s.forEach(r),jo=a(Sr," framework."),Sr.forEach(r),yt.forEach(r),Er=c(t),Y=s(t,"H2",{class:!0});var Wr=i(Y);se=s(Wr,"A",{id:!0,class:!0,href:!0});var vs=i(se);jt=s(vs,"SPAN",{});var Ts=i(jt);T(ze.$$.fragment,Ts),Ts.forEach(r),vs.forEach(r),qo=c(Wr),qt=s(Wr,"SPAN",{});var Cs=i(qt);Vo=a(Cs,"Inference"),Cs.forEach(r),Wr.forEach(r),yr=c(t),B=s(t,"P",{});var Rt=i(B);No=a(Rt,"TrOCR\u2019s "),Vt=s(Rt,"CODE",{});var bs=i(Vt);So=a(bs,"VisionEncoderDecoder"),bs.forEach(r),Wo=a(Rt,` model accepts images as input and makes use of `),ct=s(Rt,"A",{href:!0});var ws=i(ct);Ho=a(ws,"generate()"),ws.forEach(r),Bo=a(Rt," to autoregressively generate text given the input image."),Rt.forEach(r),Rr=c(t),u=s(t,"P",{});var O=i(u);Uo=a(O,"The ["),Nt=s(O,"CODE",{});var ks=i(Nt);Xo=a(ks,"ViTFeatureExtractor"),ks.forEach(r),Go=a(O,"/"),St=s(O,"CODE",{});var Os=i(St);Zo=a(Os,"DeiTFeatureExtractor"),Os.forEach(r),Jo=a(O,`] class is responsible for preprocessing the input image and [`),Wt=s(O,"CODE",{});var Es=i(Wt);Yo=a(Es,"RobertaTokenizer"),Es.forEach(r),Ko=a(O,"/"),Ht=s(O,"CODE",{});var ys=i(Ht);Qo=a(ys,"XLMRobertaTokenizer"),ys.forEach(r),ea=a(O,`] decodes the generated target tokens to the target string. 
The `),ht=s(O,"A",{href:!0});var Rs=i(ht);ta=a(Rs,"TrOCRProcessor"),Rs.forEach(r),ra=a(O," wraps ["),Bt=s(O,"CODE",{});var $s=i(Bt);oa=a($s,"ViTFeatureExtractor"),$s.forEach(r),aa=a(O,"/"),Ut=s(O,"CODE",{});var xs=i(Ut);na=a(xs,"DeiTFeatureExtractor"),xs.forEach(r),sa=a(O,"] and ["),Xt=s(O,"CODE",{});var Ps=i(Xt);ia=a(Ps,"RobertaTokenizer"),Ps.forEach(r),da=a(O,"/"),Gt=s(O,"CODE",{});var Ms=i(Gt);la=a(Ms,"XLMRobertaTokenizer"),Ms.forEach(r),ca=a(O,`] into a single instance to both extract the input features and decode the predicted token ids.`),O.forEach(r),$r=c(t),ft=s(t,"UL",{});var Fs=i(ft);Zt=s(Fs,"LI",{});var zs=i(Zt);ha=a(zs,"Step-by-step Optical Character Recognition (OCR)"),zs.forEach(r),Fs.forEach(r),xr=c(t),T(Le.$$.fragment,t),Pr=c(t),ie=s(t,"P",{});var Hr=i(ie);fa=a(Hr,"See the "),De=s(Hr,"A",{href:!0,rel:!0});var Ls=i(De);ma=a(Ls,"model hub"),Ls.forEach(r),pa=a(Hr," to look for TrOCR checkpoints."),Hr.forEach(r),Mr=c(t),K=s(t,"H2",{class:!0});var Br=i(K);de=s(Br,"A",{id:!0,class:!0,href:!0});var Ds=i(de);Jt=s(Ds,"SPAN",{});var As=i(Jt);T(Ae.$$.fragment,As),As.forEach(r),Ds.forEach(r),ua=c(Br),Yt=s(Br,"SPAN",{});var Is=i(Yt);ga=a(Is,"TrOCRConfig"),Is.forEach(r),Br.forEach(r),Fr=c(t),M=s(t,"DIV",{class:!0});var Z=i(M);T(Ie.$$.fragment,Z),_a=c(Z),Q=s(Z,"P",{});var $t=i(Q);va=a($t,"This is the configuration class to store the configuration of a "),mt=s($t,"A",{href:!0});var js=i(mt);Ta=a(js,"TrOCRForCausalLM"),js.forEach(r),Ca=a($t,`. It is used to instantiate an TrOCR model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the TrOCR `),je=s($t,"A",{href:!0,rel:!0});var qs=i(je);ba=a(qs,"microsoft/trocr-base"),qs.forEach(r),wa=a($t," architecture."),$t.forEach(r),ka=c(Z),ee=s(Z,"P",{});var xt=i(ee);Oa=a(xt,"Configuration objects inherit from "),pt=s(xt,"A",{href:!0});var Vs=i(pt);Ea=a(Vs,"PretrainedConfig"),Vs.forEach(r),ya=a(xt,` and can be used to control the model outputs. Read the documentation from `),ut=s(xt,"A",{href:!0});var Ns=i(ut);Ra=a(Ns,"PretrainedConfig"),Ns.forEach(r),$a=a(xt," for more information."),xt.forEach(r),xa=c(Z),Kt=s(Z,"P",{});var Ss=i(Kt);Pa=a(Ss,"Example:"),Ss.forEach(r),Ma=c(Z),T(qe.$$.fragment,Z),Z.forEach(r),zr=c(t),te=s(t,"H2",{class:!0});var Ur=i(te);le=s(Ur,"A",{id:!0,class:!0,href:!0});var Ws=i(le);Qt=s(Ws,"SPAN",{});var Hs=i(Qt);T(Ve.$$.fragment,Hs),Hs.forEach(r),Ws.forEach(r),Fa=c(Ur),er=s(Ur,"SPAN",{});var Bs=i(er);za=a(Bs,"TrOCRProcessor"),Bs.forEach(r),Ur.forEach(r),Lr=c(t),g=s(t,"DIV",{class:!0});var $=i(g);T(Ne.$$.fragment,$),La=c($),tr=s($,"P",{});var Us=i(tr);Da=a(Us,"Constructs a TrOCR processor which wraps a vision feature extractor and a TrOCR tokenizer into a single processor."),Us.forEach(r),Aa=c($),R=s($,"P",{});var F=i(R);gt=s(F,"A",{href:!0});var Xs=i(gt);Ia=a(Xs,"TrOCRProcessor"),Xs.forEach(r),ja=a(F," offers all the functionalities of ["),rr=s(F,"CODE",{});var Gs=i(rr);qa=a(Gs,"ViTFeatureExtractor"),Gs.forEach(r),Va=a(F,"/"),or=s(F,"CODE",{});var Zs=i(or);Na=a(Zs,"DeiTFeatureExtractor"),Zs.forEach(r),Sa=a(F,`] and [`),ar=s(F,"CODE",{});var Js=i(ar);Wa=a(Js,"RobertaTokenizer"),Js.forEach(r),Ha=a(F,"/"),nr=s(F,"CODE",{});var Ys=i(nr);Ba=a(Ys,"XLMRobertaTokenizer"),Ys.forEach(r),Ua=a(F,"]. 
See the "),Se=s(F,"A",{href:!0});var Zn=i(Se);sr=s(Zn,"STRONG",{});var Ks=i(sr);Xa=a(Ks,"call"),Ks.forEach(r),Ga=a(Zn,"()"),Zn.forEach(r),Za=a(F," and "),_t=s(F,"A",{href:!0});var Qs=i(_t);Ja=a(Qs,"decode()"),Qs.forEach(r),Ya=a(F,` for more information.`),F.forEach(r),Ka=c($),ce=s($,"DIV",{class:!0});var Xr=i(ce);T(We.$$.fragment,Xr),Qa=c(Xr),V=s(Xr,"P",{});var ve=i(V);en=a(ve,`When used in normal mode, this method forwards all its arguments to AutoFeatureExtractor\u2019s `),ir=s(ve,"CODE",{});var ei=i(ir);tn=a(ei,"__call__()"),ei.forEach(r),rn=a(ve,`and returns its output. If used in the context `),vt=s(ve,"A",{href:!0});var ti=i(vt);on=a(ti,"as_target_processor()"),ti.forEach(r),an=a(ve,` this method forwards all its arguments to TrOCRTokenizer\u2019s `),dr=s(ve,"CODE",{});var ri=i(dr);nn=a(ri,"__call__"),ri.forEach(r),sn=a(ve,". Please refer to the doctsring of the above two methods for more information."),ve.forEach(r),Xr.forEach(r),dn=c($),U=s($,"DIV",{class:!0});var Pt=i(U);T(He.$$.fragment,Pt),ln=c(Pt),lr=s(Pt,"P",{});var oi=i(lr);cn=a(oi,"Instantiate a processor associated with a pretrained model."),oi.forEach(r),hn=c(Pt),T(he.$$.fragment,Pt),Pt.forEach(r),fn=c($),X=s($,"DIV",{class:!0});var Mt=i(X);T(Be.$$.fragment,Mt),mn=c(Mt),Ue=s(Mt,"P",{});var Gr=i(Ue);pn=a(Gr,`Saves the attributes of this processor (feature extractor, tokenizer\u2026) in the specified directory so that it can be reloaded using the `),Tt=s(Gr,"A",{href:!0});var ai=i(Tt);un=a(ai,"from_pretrained()"),ai.forEach(r),gn=a(Gr," method."),Gr.forEach(r),_n=c(Mt),T(fe.$$.fragment,Mt),Mt.forEach(r),vn=c($),me=s($,"DIV",{class:!0});var Zr=i(me);T(Xe.$$.fragment,Zr),Tn=c(Zr),Ge=s(Zr,"P",{});var Jr=i(Ge);Cn=a(Jr,"This method forwards all its arguments to TrOCRTokenizer\u2019s "),Ct=s(Jr,"A",{href:!0});var ni=i(Ct);bn=a(ni,"batch_decode()"),ni.forEach(r),wn=a(Jr,`. Please refer to the docstring of this method for more information.`),Jr.forEach(r),Zr.forEach(r),kn=c($),pe=s($,"DIV",{class:!0});var Yr=i(pe);T(Ze.$$.fragment,Yr),On=c(Yr),Je=s(Yr,"P",{});var Kr=i(Je);En=a(Kr,"This method forwards all its arguments to TrOCRTokenizer\u2019s "),bt=s(Kr,"A",{href:!0});var si=i(bt);yn=a(si,"decode()"),si.forEach(r),Rn=a(Kr,`. Please refer to the docstring of this method for more information.`),Kr.forEach(r),Yr.forEach(r),$n=c($),ue=s($,"DIV",{class:!0});var Qr=i(ue);T(Ye.$$.fragment,Qr),xn=c(Qr),cr=s(Qr,"P",{});var ii=i(cr);Pn=a(ii,"Temporarily sets the tokenizer for processing the input. Useful for encoding the labels when fine-tuning TrOCR."),ii.forEach(r),Qr.forEach(r),$.forEach(r),Dr=c(t),re=s(t,"H2",{class:!0});var eo=i(re);ge=s(eo,"A",{id:!0,class:!0,href:!0});var di=i(ge);hr=s(di,"SPAN",{});var li=i(hr);T(Ke.$$.fragment,li),li.forEach(r),di.forEach(r),Mn=c(eo),fr=s(eo,"SPAN",{});var ci=i(fr);Fn=a(ci,"TrOCRForCausalLM"),ci.forEach(r),eo.forEach(r),Ar=c(t),I=s(t,"DIV",{class:!0});var Te=i(I);T(Qe.$$.fragment,Te),zn=c(Te),N=s(Te,"P",{});var Ce=i(N);Ln=a(Ce,"The TrOCR Decoder with a language modeling head. Can be used as the decoder part of "),wt=s(Ce,"A",{href:!0});var hi=i(wt);Dn=a(hi,"EncoderDecoderModel"),hi.forEach(r),An=a(Ce," and "),mr=s(Ce,"CODE",{});var fi=i(mr);In=a(fi,"VisionEncoderDecoder"),fi.forEach(r),jn=a(Ce,`. This model inherits from `),kt=s(Ce,"A",{href:!0});var mi=i(kt);qn=a(mi,"PreTrainedModel"),mi.forEach(r),Vn=a(Ce,`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Ce.forEach(r),Nn=c(Te),et=s(Te,"P",{});var to=i(et);Sn=a(to,"This model is also a PyTorch "),tt=s(to,"A",{href:!0,rel:!0});var pi=i(tt);Wn=a(pi,"torch.nn.Module"),pi.forEach(r),Hn=a(to,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),to.forEach(r),Bn=c(Te),G=s(Te,"DIV",{class:!0});var Ft=i(G);T(rt.$$.fragment,Ft),Un=c(Ft),pr=s(Ft,"P",{});var ui=i(pr);Xn=a(ui,"Example:"),ui.forEach(r),Gn=c(Ft),T(ot.$$.fragment,Ft),Ft.forEach(r),Te.forEach(r),this.h()},h(){d(m,"name","hf:doc:metadata"),d(m,"content",JSON.stringify(Ei)),d(E,"id","trocr"),d(E,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(E,"href","#trocr"),d(p,"class","relative group"),d(L,"id","overview"),d(L,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(L,"href","#overview"),d(P,"class","relative group"),d(ke,"href","https://arxiv.org/abs/2109.10282"),d(ke,"rel","nofollow"),d(Oe,"href","https://en.wikipedia.org/wiki/Optical_character_recognition"),d(Oe,"rel","nofollow"),bi(oe.src,Yn="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/trocr_architecture.jpg")||d(oe,"src",Yn),d(oe,"alt","drawing"),d(oe,"width","600"),d(it,"href","https://arxiv.org/abs/2109.10282"),d(Ee,"href","https://huggingface.co/nielsr"),d(Ee,"rel","nofollow"),d(ye,"href","https://github.com/microsoft/unilm/tree/6f60612e7cc86a2a1ae85c47231507a587ab4e01/trocr"),d(ye,"rel","nofollow"),d($e,"href","https://github.com/NielsRogge/Transformers-Tutorials/tree/master/TrOCR"),d($e,"rel","nofollow"),d(xe,"href","https://paperswithcode.com/dataset/sroie"),d(xe,"rel","nofollow"),d(Pe,"href","https://fki.tic.heia-fr.ch/databases/iam-handwriting-database%3E"),d(Pe,"rel","nofollow"),d(Me,"href","https://huggingface.co/models?other=trocr%3E"),d(Me,"rel","nofollow"),d(lt,"href","vision-encoder-decoder"),d(se,"id","inference"),d(se,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(se,"href","#inference"),d(Y,"class","relative group"),d(ct,"href","/docs/transformers/pr_16143/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.generate"),d(ht,"href","/docs/transformers/pr_16143/en/model_doc/trocr#transformers.TrOCRProcessor"),d(De,"href","https://huggingface.co/models?filter=trocr"),d(De,"rel","nofollow"),d(de,"id","transformers.TrOCRConfig"),d(de,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(de,"href","#transformers.TrOCRConfig"),d(K,"class","relative 
group"),d(mt,"href","/docs/transformers/pr_16143/en/model_doc/trocr#transformers.TrOCRForCausalLM"),d(je,"href","https://huggingface.co/microsoft/trocr-base"),d(je,"rel","nofollow"),d(pt,"href","/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig"),d(ut,"href","/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig"),d(M,"class","docstring"),d(le,"id","transformers.TrOCRProcessor"),d(le,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(le,"href","#transformers.TrOCRProcessor"),d(te,"class","relative group"),d(gt,"href","/docs/transformers/pr_16143/en/model_doc/trocr#transformers.TrOCRProcessor"),d(Se,"href","/docs/transformers/pr_16143/en/model_doc/trocr#transformers.TrOCRProcessor.__call__"),d(_t,"href","/docs/transformers/pr_16143/en/model_doc/trocr#transformers.TrOCRProcessor.decode"),d(vt,"href","/docs/transformers/pr_16143/en/model_doc/trocr#transformers.TrOCRProcessor.as_target_processor"),d(ce,"class","docstring"),d(U,"class","docstring"),d(Tt,"href","/docs/transformers/pr_16143/en/main_classes/processors#transformers.ProcessorMixin.from_pretrained"),d(X,"class","docstring"),d(Ct,"href","/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.batch_decode"),d(me,"class","docstring"),d(bt,"href","/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.decode"),d(pe,"class","docstring"),d(ue,"class","docstring"),d(g,"class","docstring"),d(ge,"id","transformers.TrOCRForCausalLM"),d(ge,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(ge,"href","#transformers.TrOCRForCausalLM"),d(re,"class","relative 
group"),d(wt,"href","/docs/transformers/pr_16143/en/model_doc/encoder-decoder#transformers.EncoderDecoderModel"),d(kt,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel"),d(tt,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),d(tt,"rel","nofollow"),d(G,"class","docstring"),d(I,"class","docstring")},m(t,h){e(document.head,m),f(t,z,h),f(t,p,h),e(p,E),e(E,D),C(y,D,null),e(p,j),e(p,A),e(A,x),f(t,_,h),f(t,P,h),e(P,L),e(L,Lt),C(we,Lt,null),e(P,ro),e(P,Dt),e(Dt,oo),f(t,gr,h),f(t,S,h),e(S,ao),e(S,ke),e(ke,no),e(S,so),e(S,Oe),e(Oe,io),e(S,lo),f(t,_r,h),f(t,nt,h),e(nt,co),f(t,vr,h),f(t,st,h),e(st,At),e(At,ho),f(t,Tr,h),f(t,oe,h),f(t,Cr,h),f(t,ae,h),e(ae,fo),e(ae,it),e(it,mo),e(ae,po),f(t,br,h),f(t,ne,h),e(ne,uo),e(ne,It),e(It,go),e(ne,_o),f(t,wr,h),f(t,W,h),e(W,vo),e(W,Ee),e(Ee,To),e(W,Co),e(W,ye),e(ye,bo),e(W,wo),f(t,kr,h),f(t,dt,h),e(dt,ko),f(t,Or,h),f(t,H,h),e(H,Re),e(Re,Oo),e(Re,$e),e($e,Eo),e(Re,yo),e(H,Ro),e(H,q),e(q,$o),e(q,xe),e(xe,xo),e(q,Po),e(q,Pe),e(Pe,Mo),e(q,Fo),e(q,Me),e(Me,zo),e(q,Lo),e(H,Do),e(H,Fe),e(Fe,Ao),e(Fe,lt),e(lt,Io),e(Fe,jo),f(t,Er,h),f(t,Y,h),e(Y,se),e(se,jt),C(ze,jt,null),e(Y,qo),e(Y,qt),e(qt,Vo),f(t,yr,h),f(t,B,h),e(B,No),e(B,Vt),e(Vt,So),e(B,Wo),e(B,ct),e(ct,Ho),e(B,Bo),f(t,Rr,h),f(t,u,h),e(u,Uo),e(u,Nt),e(Nt,Xo),e(u,Go),e(u,St),e(St,Zo),e(u,Jo),e(u,Wt),e(Wt,Yo),e(u,Ko),e(u,Ht),e(Ht,Qo),e(u,ea),e(u,ht),e(ht,ta),e(u,ra),e(u,Bt),e(Bt,oa),e(u,aa),e(u,Ut),e(Ut,na),e(u,sa),e(u,Xt),e(Xt,ia),e(u,da),e(u,Gt),e(Gt,la),e(u,ca),f(t,$r,h),f(t,ft,h),e(ft,Zt),e(Zt,ha),f(t,xr,h),C(Le,t,h),f(t,Pr,h),f(t,ie,h),e(ie,fa),e(ie,De),e(De,ma),e(ie,pa),f(t,Mr,h),f(t,K,h),e(K,de),e(de,Jt),C(Ae,Jt,null),e(K,ua),e(K,Yt),e(Yt,ga),f(t,Fr,h),f(t,M,h),C(Ie,M,null),e(M,_a),e(M,Q),e(Q,va),e(Q,mt),e(mt,Ta),e(Q,Ca),e(Q,je),e(je,ba),e(Q,wa),e(M,ka),e(M,ee),e(ee,Oa),e(ee,pt),e(pt,Ea),e(ee,ya),e(ee,ut),e(ut,Ra),e(ee,$a),e(M,xa),e(M,Kt),e(Kt,Pa),e(M,Ma),C(qe,M,null),f(t,zr,h),f(t,te,h),e(te,le),e(le,Qt),C(Ve,Qt,null),e(te,Fa),e(te,er),e(er,za),f(t,Lr,h),f(t,g,h),C(Ne,g,null),e(g,La),e(g,tr),e(tr,Da),e(g,Aa),e(g,R),e(R,gt),e(gt,Ia),e(R,ja),e(R,rr),e(rr,qa),e(R,Va),e(R,or),e(or,Na),e(R,Sa),e(R,ar),e(ar,Wa),e(R,Ha),e(R,nr),e(nr,Ba),e(R,Ua),e(R,Se),e(Se,sr),e(sr,Xa),e(Se,Ga),e(R,Za),e(R,_t),e(_t,Ja),e(R,Ya),e(g,Ka),e(g,ce),C(We,ce,null),e(ce,Qa),e(ce,V),e(V,en),e(V,ir),e(ir,tn),e(V,rn),e(V,vt),e(vt,on),e(V,an),e(V,dr),e(dr,nn),e(V,sn),e(g,dn),e(g,U),C(He,U,null),e(U,ln),e(U,lr),e(lr,cn),e(U,hn),C(he,U,null),e(g,fn),e(g,X),C(Be,X,null),e(X,mn),e(X,Ue),e(Ue,pn),e(Ue,Tt),e(Tt,un),e(Ue,gn),e(X,_n),C(fe,X,null),e(g,vn),e(g,me),C(Xe,me,null),e(me,Tn),e(me,Ge),e(Ge,Cn),e(Ge,Ct),e(Ct,bn),e(Ge,wn),e(g,kn),e(g,pe),C(Ze,pe,null),e(pe,On),e(pe,Je),e(Je,En),e(Je,bt),e(bt,yn),e(Je,Rn),e(g,$n),e(g,ue),C(Ye,ue,null),e(ue,xn),e(ue,cr),e(cr,Pn),f(t,Dr,h),f(t,re,h),e(re,ge),e(ge,hr),C(Ke,hr,null),e(re,Mn),e(re,fr),e(fr,Fn),f(t,Ar,h),f(t,I,h),C(Qe,I,null),e(I,zn),e(I,N),e(N,Ln),e(N,wt),e(wt,Dn),e(N,An),e(N,mr),e(mr,In),e(N,jn),e(N,kt),e(kt,qn),e(N,Vn),e(I,Nn),e(I,et),e(et,Sn),e(et,tt),e(tt,Wn),e(et,Hn),e(I,Bn),e(I,G),C(rt,G,null),e(G,Un),e(G,pr),e(pr,Xn),e(G,Gn),C(ot,G,null),Ir=!0},p(t,[h]){const at={};h&2&&(at.$$scope={dirty:h,ctx:t}),he.$set(at);const 
ur={};h&2&&(ur.$$scope={dirty:h,ctx:t}),fe.$set(ur)},i(t){Ir||(b(y.$$.fragment,t),b(we.$$.fragment,t),b(ze.$$.fragment,t),b(Le.$$.fragment,t),b(Ae.$$.fragment,t),b(Ie.$$.fragment,t),b(qe.$$.fragment,t),b(Ve.$$.fragment,t),b(Ne.$$.fragment,t),b(We.$$.fragment,t),b(He.$$.fragment,t),b(he.$$.fragment,t),b(Be.$$.fragment,t),b(fe.$$.fragment,t),b(Xe.$$.fragment,t),b(Ze.$$.fragment,t),b(Ye.$$.fragment,t),b(Ke.$$.fragment,t),b(Qe.$$.fragment,t),b(rt.$$.fragment,t),b(ot.$$.fragment,t),Ir=!0)},o(t){w(y.$$.fragment,t),w(we.$$.fragment,t),w(ze.$$.fragment,t),w(Le.$$.fragment,t),w(Ae.$$.fragment,t),w(Ie.$$.fragment,t),w(qe.$$.fragment,t),w(Ve.$$.fragment,t),w(Ne.$$.fragment,t),w(We.$$.fragment,t),w(He.$$.fragment,t),w(he.$$.fragment,t),w(Be.$$.fragment,t),w(fe.$$.fragment,t),w(Xe.$$.fragment,t),w(Ze.$$.fragment,t),w(Ye.$$.fragment,t),w(Ke.$$.fragment,t),w(Qe.$$.fragment,t),w(rt.$$.fragment,t),w(ot.$$.fragment,t),Ir=!1},d(t){r(m),t&&r(z),t&&r(p),k(y),t&&r(_),t&&r(P),k(we),t&&r(gr),t&&r(S),t&&r(_r),t&&r(nt),t&&r(vr),t&&r(st),t&&r(Tr),t&&r(oe),t&&r(Cr),t&&r(ae),t&&r(br),t&&r(ne),t&&r(wr),t&&r(W),t&&r(kr),t&&r(dt),t&&r(Or),t&&r(H),t&&r(Er),t&&r(Y),k(ze),t&&r(yr),t&&r(B),t&&r(Rr),t&&r(u),t&&r($r),t&&r(ft),t&&r(xr),k(Le,t),t&&r(Pr),t&&r(ie),t&&r(Mr),t&&r(K),k(Ae),t&&r(Fr),t&&r(M),k(Ie),k(qe),t&&r(zr),t&&r(te),k(Ve),t&&r(Lr),t&&r(g),k(Ne),k(We),k(He),k(he),k(Be),k(fe),k(Xe),k(Ze),k(Ye),t&&r(Dr),t&&r(re),k(Ke),t&&r(Ar),t&&r(I),k(Qe),k(rt),k(ot)}}}const Ei={local:"trocr",sections:[{local:"overview",title:"Overview"},{local:"inference",title:"Inference"},{local:"transformers.TrOCRConfig",title:"TrOCRConfig"},{local:"transformers.TrOCRProcessor",title:"TrOCRProcessor"},{local:"transformers.TrOCRForCausalLM",title:"TrOCRForCausalLM"}],title:"TrOCR"};function yi(be,m,z){let{fw:p}=m;return be.$$set=E=>{"fw"in E&&z(0,p=E.fw)},[p]}class zi extends _i{constructor(m){super();vi(this,m,yi,Oi,Ti,{fw:0})}}export{zi as default,Ei as metadata};
296
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages/model_doc/nystromformer.mdx-a520f5f1.js
import{S as vi,i as bi,s as wi,e as s,k as d,w as _,t as a,M as Ti,c as n,d as t,m as c,a as r,x as y,h as i,b as l,F as e,g as h,y as k,q as v,o as b,B as w}from"../../chunks/vendor-4833417e.js";import{T as Ko}from"../../chunks/Tip-fffd6df1.js";import{D as O}from"../../chunks/Docstring-4f315ed9.js";import{C as Ee}from"../../chunks/CodeBlock-6a3d1b46.js";import{I as he}from"../../chunks/IconCopyLink-4b81c553.js";import"../../chunks/CopyButton-dacfbfaf.js";function Ni(q){let m,N,f,T,$;return{c(){m=s("p"),N=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s("code"),T=a("Module"),$=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){m=n(g,"P",{});var u=r(m);N=i(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=n(u,"CODE",{});var M=r(f);T=i(M,"Module"),M.forEach(t),$=i(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(t)},m(g,u){h(g,m,u),e(m,N),e(m,f),e(f,T),e(m,$)},d(g){g&&t(m)}}}function $i(q){let m,N,f,T,$;return{c(){m=s("p"),N=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s("code"),T=a("Module"),$=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){m=n(g,"P",{});var u=r(m);N=i(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=n(u,"CODE",{});var M=r(f);T=i(M,"Module"),M.forEach(t),$=i(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(t)},m(g,u){h(g,m,u),e(m,N),e(m,f),e(f,T),e(m,$)},d(g){g&&t(m)}}}function Mi(q){let m,N,f,T,$;return{c(){m=s("p"),N=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s("code"),T=a("Module"),$=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){m=n(g,"P",{});var u=r(m);N=i(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=n(u,"CODE",{});var M=r(f);T=i(M,"Module"),M.forEach(t),$=i(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(t)},m(g,u){h(g,m,u),e(m,N),e(m,f),e(f,T),e(m,$)},d(g){g&&t(m)}}}function zi(q){let m,N,f,T,$;return{c(){m=s("p"),N=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s("code"),T=a("Module"),$=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){m=n(g,"P",{});var u=r(m);N=i(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=n(u,"CODE",{});var M=r(f);T=i(M,"Module"),M.forEach(t),$=i(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(t)},m(g,u){h(g,m,u),e(m,N),e(m,f),e(f,T),e(m,$)},d(g){g&&t(m)}}}function qi(q){let 
m,N,f,T,$;return{c(){m=s("p"),N=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s("code"),T=a("Module"),$=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){m=n(g,"P",{});var u=r(m);N=i(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=n(u,"CODE",{});var M=r(f);T=i(M,"Module"),M.forEach(t),$=i(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(t)},m(g,u){h(g,m,u),e(m,N),e(m,f),e(f,T),e(m,$)},d(g){g&&t(m)}}}function Fi(q){let m,N,f,T,$;return{c(){m=s("p"),N=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s("code"),T=a("Module"),$=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(g){m=n(g,"P",{});var u=r(m);N=i(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=n(u,"CODE",{});var M=r(f);T=i(M,"Module"),M.forEach(t),$=i(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(t)},m(g,u){h(g,m,u),e(m,N),e(m,f),e(f,T),e(m,$)},d(g){g&&t(m)}}}function Ci(q){let m,N,f,T,$,g,u,M,vs,Wt,Y,ue,Xo,Pe,bs,et,ws,Bt,ge,Ts,Se,ot,Ns,$s,Qt,qo,Ms,Ht,Fo,tt,zs,Ut,Q,qs,Le,Fs,Cs,Ie,js,As,Rt,Z,_e,st,De,xs,nt,Es,Vt,F,Oe,Ps,K,Ss,Co,Ls,Is,We,Ds,Os,Ws,X,Bs,jo,Qs,Hs,Ao,Us,Rs,Vs,rt,Gs,Js,Be,Gt,ee,ye,at,Qe,Ys,it,Zs,Jt,W,He,Ks,Ue,Xs,Re,en,on,tn,C,Ve,sn,oe,nn,xo,rn,an,lt,ln,dn,cn,ke,mn,dt,pn,fn,Ge,Yt,te,ve,ct,Je,hn,mt,un,Zt,B,Ye,gn,se,_n,pt,yn,kn,Ze,vn,bn,wn,j,Ke,Tn,ne,Nn,Eo,$n,Mn,ft,zn,qn,Fn,be,Cn,ht,jn,An,Xe,Kt,re,we,ut,eo,xn,gt,En,Xt,S,oo,Pn,_t,Sn,Ln,to,In,so,Dn,On,Wn,z,no,Bn,ae,Qn,Po,Hn,Un,yt,Rn,Vn,Gn,Te,Jn,kt,Yn,Zn,ro,Kn,vt,Xn,er,ao,es,ie,Ne,bt,io,or,wt,tr,os,L,lo,sr,Tt,nr,rr,co,ar,mo,ir,lr,dr,A,po,cr,le,mr,So,pr,fr,Nt,hr,ur,gr,$e,_r,$t,yr,kr,fo,ts,de,Me,Mt,ho,vr,zt,br,ss,I,uo,wr,qt,Tr,Nr,go,$r,_o,Mr,zr,qr,x,yo,Fr,ce,Cr,Lo,jr,Ar,Ft,xr,Er,Pr,ze,Sr,Ct,Lr,Ir,ko,ns,me,qe,jt,vo,Dr,At,Or,rs,D,bo,Wr,pe,Br,xt,Qr,Hr,Et,Ur,Rr,Vr,wo,Gr,To,Jr,Yr,Zr,E,No,Kr,fe,Xr,Io,ea,oa,Pt,ta,sa,na,Fe,ra,St,aa,ia,$o,as;return g=new he({}),Pe=new he({}),De=new he({}),Oe=new O({props:{name:"class transformers.NystromformerConfig",anchor:"transformers.NystromformerConfig",parameters:[{name:"vocab_size",val:" = 30000"},{name:"hidden_size",val:" = 768"},{name:"num_hidden_layers",val:" = 12"},{name:"num_attention_heads",val:" = 12"},{name:"intermediate_size",val:" = 3072"},{name:"hidden_act",val:" = 'gelu_new'"},{name:"hidden_dropout_prob",val:" = 0.1"},{name:"attention_probs_dropout_prob",val:" = 0.1"},{name:"max_position_embeddings",val:" = 510"},{name:"type_vocab_size",val:" = 2"},{name:"segment_means_seq_len",val:" = 64"},{name:"num_landmarks",val:" = 64"},{name:"conv_kernel_size",val:" = 65"},{name:"inv_coeff_init_option",val:" = False"},{name:"initializer_range",val:" = 0.02"},{name:"layer_norm_eps",val:" = 1e-05"},{name:"pad_token_id",val:" = 1"},{name:"bos_token_id",val:" = 0"},{name:"eos_token_id",val:" = 
2"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/nystromformer/configuration_nystromformer.py#L29",parametersDescription:[{anchor:"transformers.NystromformerConfig.vocab_size",description:`<strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 30000) &#x2014; Vocabulary size of the Nystromformer model. Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed when calling <a href="/docs/transformers/pr_16143/en/model_doc/nystromformer#transformers.NystromformerModel">NystromformerModel</a>.`,name:"vocab_size"},{anchor:"transformers.NystromformerConfig.hidden_size",description:`<strong>hidden_size</strong> (<code>int</code>, <em>optional</em>, defaults to 768) &#x2014; Dimension of the encoder layers and the pooler layer.`,name:"hidden_size"},{anchor:"transformers.NystromformerConfig.num_hidden_layers",description:`<strong>num_hidden_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of hidden layers in the Transformer encoder.`,name:"num_hidden_layers"},{anchor:"transformers.NystromformerConfig.num_attention_heads",description:`<strong>num_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.`,name:"num_attention_heads"},{anchor:"transformers.NystromformerConfig.intermediate_size",description:`<strong>intermediate_size</strong> (<code>int</code>, <em>optional</em>, defaults to 3072) &#x2014; Dimension of the &#x201C;intermediate&#x201D; (i.e., feed-forward) layer in the Transformer encoder.`,name:"intermediate_size"},{anchor:"transformers.NystromformerConfig.hidden_act",description:`<strong>hidden_act</strong> (<code>str</code> or <code>function</code>, <em>optional</em>, defaults to <code>&quot;gelu&quot;</code>) &#x2014; The non-linear activation function (function or string) in the encoder and pooler. If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;selu&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.`,name:"hidden_act"},{anchor:"transformers.NystromformerConfig.hidden_dropout_prob",description:`<strong>hidden_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler.`,name:"hidden_dropout_prob"},{anchor:"transformers.NystromformerConfig.attention_probs_dropout_prob",description:`<strong>attention_probs_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout ratio for the attention probabilities.`,name:"attention_probs_dropout_prob"},{anchor:"transformers.NystromformerConfig.max_position_embeddings",description:`<strong>max_position_embeddings</strong> (<code>int</code>, <em>optional</em>, defaults to 512) &#x2014; The maximum sequence length that this model might ever be used with. 
Typically set this to something large just in case (e.g., 512 or 1024 or 2048).`,name:"max_position_embeddings"},{anchor:"transformers.NystromformerConfig.type_vocab_size",description:`<strong>type_vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 2) &#x2014; The vocabulary size of the <code>token_type_ids</code> passed when calling <a href="/docs/transformers/pr_16143/en/model_doc/nystromformer#transformers.NystromformerModel">NystromformerModel</a>.`,name:"type_vocab_size"},{anchor:"transformers.NystromformerConfig.segment_means_seq_len",description:`<strong>segment_means_seq_len</strong> (<code>int</code>, <em>optional</em>, defaults to 64) &#x2014; Sequence length used in segment-means.`,name:"segment_means_seq_len"},{anchor:"transformers.NystromformerConfig.num_landmarks",description:`<strong>num_landmarks</strong> (<code>int</code>, <em>optional</em>, defaults to 64) &#x2014; The number of landmark (or Nystrom) points to use in Nystrom approximation of the softmax self-attention matrix.`,name:"num_landmarks"},{anchor:"transformers.NystromformerConfig.conv_kernel_size",description:`<strong>conv_kernel_size</strong> (<code>int</code>, <em>optional</em>, defaults to 65) &#x2014; The kernel size of depthwise convolution used in Nystrom approximation.`,name:"conv_kernel_size"},{anchor:"transformers.NystromformerConfig.inv_coeff_init_option",description:`<strong>inv_coeff_init_option</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use exact coefficient computation for the initial values for the iterative method of calculating the Moore-Penrose inverse of a matrix.`,name:"inv_coeff_init_option"},{anchor:"transformers.NystromformerConfig.initializer_range",description:`<strong>initializer_range</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices.`,name:"initializer_range"},{anchor:"transformers.NystromformerConfig.layer_norm_eps",description:`<strong>layer_norm_eps</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-12) &#x2014; The epsilon used by the layer normalization layers.`,name:"layer_norm_eps"}]}}),Be=new Ee({props:{code:`from transformers import NystromformerModel, NystromformerConfig # Initializing a Nystromformer uw-madison/nystromformer-512 style configuration configuration = NystromformerConfig() # Initializing a model from the uw-madison/nystromformer-512 style configuration model = NystromformerModel(configuration) # Accessing the model configuration configuration = model.config`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> NystromformerModel, NystromformerConfig <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a Nystromformer uw-madison/nystromformer-512 style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = NystromformerConfig() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a model from the uw-madison/nystromformer-512 style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = NystromformerModel(configuration) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = model.config`}}),Qe=new he({}),He=new 
O({props:{name:"class transformers.NystromformerModel",anchor:"transformers.NystromformerModel",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/nystromformer/modeling_nystromformer.py#L556",parametersDescription:[{anchor:"transformers.NystromformerModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/nystromformer#transformers.NystromformerConfig">NystromformerConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Ve=new O({props:{name:"forward",anchor:"transformers.NystromformerModel.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/nystromformer/modeling_nystromformer.py#L581",parametersDescription:[{anchor:"transformers.NystromformerModel.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/auto#transformers.AutoTokenizer">AutoTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.NystromformerModel.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.NystromformerModel.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.NystromformerModel.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.NystromformerModel.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.NystromformerModel.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.NystromformerModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.NystromformerModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.NystromformerModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPastAndCrossAttentions" >transformers.modeling_outputs.BaseModelOutputWithPastAndCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/nystromformer#transformers.NystromformerConfig" >NystromformerConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and optionally if <code>config.is_encoder_decoder=True</code> 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if <code>config.is_encoder_decoder=True</code> in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> and <code>config.add_cross_attention=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of 
<code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPastAndCrossAttentions" >transformers.modeling_outputs.BaseModelOutputWithPastAndCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),ke=new Ko({props:{$$slots:{default:[Ni]},$$scope:{ctx:q}}}),Ge=new Ee({props:{code:`from transformers import AutoTokenizer, NystromformerModel import torch tokenizer = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512") model = NystromformerModel.from_pretrained("uw-madison/nystromformer-512") inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer, NystromformerModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;uw-madison/nystromformer-512&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = NystromformerModel.from_pretrained(<span class="hljs-string">&quot;uw-madison/nystromformer-512&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),Je=new he({}),Ye=new O({props:{name:"class transformers.NystromformerForMaskedLM",anchor:"transformers.NystromformerForMaskedLM",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/nystromformer/modeling_nystromformer.py#L668",parametersDescription:[{anchor:"transformers.NystromformerForMaskedLM.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/nystromformer#transformers.NystromformerConfig">NystromformerConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Ke=new O({props:{name:"forward",anchor:"transformers.NystromformerForMaskedLM.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/nystromformer/modeling_nystromformer.py#L684",parametersDescription:[{anchor:"transformers.NystromformerForMaskedLM.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/auto#transformers.AutoTokenizer">AutoTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.NystromformerForMaskedLM.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.NystromformerForMaskedLM.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.NystromformerForMaskedLM.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.NystromformerForMaskedLM.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.NystromformerForMaskedLM.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.NystromformerForMaskedLM.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.NystromformerForMaskedLM.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.NystromformerForMaskedLM.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.NystromformerForMaskedLM.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. 
Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.MaskedLMOutput" >transformers.modeling_outputs.MaskedLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/nystromformer#transformers.NystromformerConfig" >NystromformerConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Masked language modeling (MLM) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.MaskedLMOutput" >transformers.modeling_outputs.MaskedLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),be=new Ko({props:{$$slots:{default:[$i]},$$scope:{ctx:q}}}),Xe=new Ee({props:{code:`from transformers import AutoTokenizer, NystromformerForMaskedLM import torch tokenizer = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512") model = NystromformerForMaskedLM.from_pretrained("uw-madison/nystromformer-512") inputs = tokenizer("The capital of France is [MASK].", return_tensors="pt") labels = tokenizer("The capital of France is Paris.", return_tensors="pt")["input_ids"] outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer, NystromformerForMaskedLM <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;uw-madison/nystromformer-512&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; 
</span>model = NystromformerForMaskedLM.from_pretrained(<span class="hljs-string">&quot;uw-madison/nystromformer-512&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;The capital of France is [MASK].&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = tokenizer(<span class="hljs-string">&quot;The capital of France is Paris.&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>)[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),eo=new he({}),oo=new O({props:{name:"class transformers.NystromformerForSequenceClassification",anchor:"transformers.NystromformerForSequenceClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/nystromformer/modeling_nystromformer.py#L772",parametersDescription:[{anchor:"transformers.NystromformerForSequenceClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/nystromformer#transformers.NystromformerConfig">NystromformerConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),no=new O({props:{name:"forward",anchor:"transformers.NystromformerForSequenceClassification.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/nystromformer/modeling_nystromformer.py#L782",parametersDescription:[{anchor:"transformers.NystromformerForSequenceClassification.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/auto#transformers.AutoTokenizer">AutoTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.NystromformerForSequenceClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.NystromformerForSequenceClassification.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.NystromformerForSequenceClassification.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.NystromformerForSequenceClassification.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.NystromformerForSequenceClassification.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.NystromformerForSequenceClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.NystromformerForSequenceClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.NystromformerForSequenceClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.NystromformerForSequenceClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/nystromformer#transformers.NystromformerConfig" >NystromformerConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Te=new Ko({props:{$$slots:{default:[Mi]},$$scope:{ctx:q}}}),ro=new Ee({props:{code:`import torch from transformers import AutoTokenizer, NystromformerForSequenceClassification torch.manual_seed(0) tokenizer = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512") model = 
NystromformerForSequenceClassification.from_pretrained("uw-madison/nystromformer-512", num_labels=2) inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([1]).unsqueeze(0) # Batch size 1 outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits list(logits.shape) `,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer, NystromformerForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span>torch.manual_seed(<span class="hljs-number">0</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;uw-madison/nystromformer-512&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = NystromformerForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;uw-madison/nystromformer-512&quot;</span>, num_labels=<span class="hljs-number">2</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([<span class="hljs-number">1</span>]).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">list</span>(logits.shape) `}}),ao=new Ee({props:{code:`import torch from transformers import AutoTokenizer, NystromformerForSequenceClassification torch.manual_seed(0) tokenizer = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512") model = NystromformerForSequenceClassification.from_pretrained("uw-madison/nystromformer-512", problem_type="multi_label_classification", num_labels=2) inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([[1, 1]], dtype=torch.float) # need dtype=float for BCEWithLogitsLoss outputs = model(**inputs, labels=labels) loss = outputs.loss list(logits.shape) `,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer, NystromformerForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span>torch.manual_seed(<span class="hljs-number">0</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;uw-madison/nystromformer-512&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = NystromformerForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;uw-madison/nystromformer-512&quot;</span>, problem_type=<span class="hljs-string">&quot;multi_label_classification&quot;</span>, num_labels=<span class="hljs-number">2</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = 
torch.tensor([[<span class="hljs-number">1</span>, <span class="hljs-number">1</span>]], dtype=torch.<span class="hljs-built_in">float</span>) <span class="hljs-comment"># need dtype=float for BCEWithLogitsLoss</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">list</span>(logits.shape) `}}),io=new he({}),lo=new O({props:{name:"class transformers.NystromformerForMultipleChoice",anchor:"transformers.NystromformerForMultipleChoice",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/nystromformer/modeling_nystromformer.py#L866",parametersDescription:[{anchor:"transformers.NystromformerForMultipleChoice.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/nystromformer#transformers.NystromformerConfig">NystromformerConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),po=new O({props:{name:"forward",anchor:"transformers.NystromformerForMultipleChoice.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/nystromformer/modeling_nystromformer.py#L877",parametersDescription:[{anchor:"transformers.NystromformerForMultipleChoice.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/auto#transformers.AutoTokenizer">AutoTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.NystromformerForMultipleChoice.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.NystromformerForMultipleChoice.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.NystromformerForMultipleChoice.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.NystromformerForMultipleChoice.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.NystromformerForMultipleChoice.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.NystromformerForMultipleChoice.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.NystromformerForMultipleChoice.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.NystromformerForMultipleChoice.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.NystromformerForMultipleChoice.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the multiple choice classification loss. Indices should be in <code>[0, ..., num_choices-1]</code> where <code>num_choices</code> is the size of the second dimension of the input tensors. (See <code>input_ids</code> above)`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.MultipleChoiceModelOutput" >transformers.modeling_outputs.MultipleChoiceModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/nystromformer#transformers.NystromformerConfig" >NystromformerConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <em>(1,)</em>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices)</code>) \u2014 <em>num_choices</em> is the second dimension of the input tensors. 
(see <em>input_ids</em> above).</p> <p>Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.MultipleChoiceModelOutput" >transformers.modeling_outputs.MultipleChoiceModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),$e=new Ko({props:{$$slots:{default:[zi]},$$scope:{ctx:q}}}),fo=new Ee({props:{code:`from transformers import AutoTokenizer, NystromformerForMultipleChoice import torch tokenizer = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512") model = NystromformerForMultipleChoice.from_pretrained("uw-madison/nystromformer-512") prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." choice0 = "It is eaten with a fork and a knife." choice1 = "It is eaten while held in the hand." 
labels = torch.tensor(0).unsqueeze(0) # choice0 is correct (according to Wikipedia ;)), batch size 1 encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors="pt", padding=True) outputs = model(**{k: v.unsqueeze(0) for k, v in encoding.items()}, labels=labels) # batch size is 1 # the linear classifier still needs to be trained loss = outputs.loss logits = outputs.logits`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer, NystromformerForMultipleChoice <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;uw-madison/nystromformer-512&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = NystromformerForMultipleChoice.from_pretrained(<span class="hljs-string">&quot;uw-madison/nystromformer-512&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice0 = <span class="hljs-string">&quot;It is eaten with a fork and a knife.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice1 = <span class="hljs-string">&quot;It is eaten while held in the hand.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor(<span class="hljs-number">0</span>).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># choice0 is correct (according to Wikipedia ;)), batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors=<span class="hljs-string">&quot;pt&quot;</span>, padding=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**{k: v.unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-keyword">for</span> k, v <span class="hljs-keyword">in</span> encoding.items()}, labels=labels) <span class="hljs-comment"># batch size is 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># the linear classifier still needs to be trained</span> <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),ho=new he({}),uo=new O({props:{name:"class transformers.NystromformerForTokenClassification",anchor:"transformers.NystromformerForTokenClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/nystromformer/modeling_nystromformer.py#L962",parametersDescription:[{anchor:"transformers.NystromformerForTokenClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/nystromformer#transformers.NystromformerConfig">NystromformerConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),yo=new O({props:{name:"forward",anchor:"transformers.NystromformerForTokenClassification.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/nystromformer/modeling_nystromformer.py#L974",parametersDescription:[{anchor:"transformers.NystromformerForTokenClassification.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/auto#transformers.AutoTokenizer">AutoTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.NystromformerForTokenClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.NystromformerForTokenClassification.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.NystromformerForTokenClassification.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.NystromformerForTokenClassification.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.NystromformerForTokenClassification.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.NystromformerForTokenClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.NystromformerForTokenClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.NystromformerForTokenClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.NystromformerForTokenClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the token classification loss. 
Indices should be in <code>[0, ..., config.num_labels - 1]</code>.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/nystromformer#transformers.NystromformerConfig" >NystromformerConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.num_labels)</code>) \u2014 Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),ze=new Ko({props:{$$slots:{default:[qi]},$$scope:{ctx:q}}}),ko=new Ee({props:{code:`from transformers import AutoTokenizer, NystromformerForTokenClassification import torch tokenizer = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512") model = NystromformerForTokenClassification.from_pretrained("uw-madison/nystromformer-512") inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([1] * inputs["input_ids"].size(1)).unsqueeze(0) # Batch size 1 outputs = model(**inputs, labels=labels) loss = outputs.loss logits = outputs.logits`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer, NystromformerForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;uw-madison/nystromformer-512&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = NystromformerForTokenClassification.from_pretrained(<span class="hljs-string">&quot;uw-madison/nystromformer-512&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span 
class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([<span class="hljs-number">1</span>] * inputs[<span class="hljs-string">&quot;input_ids&quot;</span>].size(<span class="hljs-number">1</span>)).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),vo=new he({}),bo=new O({props:{name:"class transformers.NystromformerForQuestionAnswering",anchor:"transformers.NystromformerForQuestionAnswering",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/nystromformer/modeling_nystromformer.py#L1041",parametersDescription:[{anchor:"transformers.NystromformerForQuestionAnswering.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/nystromformer#transformers.NystromformerConfig">NystromformerConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),No=new O({props:{name:"forward",anchor:"transformers.NystromformerForQuestionAnswering.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"token_type_ids",val:" = None"},{name:"position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"start_positions",val:" = None"},{name:"end_positions",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/nystromformer/modeling_nystromformer.py#L1054",parametersDescription:[{anchor:"transformers.NystromformerForQuestionAnswering.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/auto#transformers.AutoTokenizer">AutoTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.NystromformerForQuestionAnswering.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.NystromformerForQuestionAnswering.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.NystromformerForQuestionAnswering.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.NystromformerForQuestionAnswering.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.NystromformerForQuestionAnswering.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <em>input_ids</em> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.NystromformerForQuestionAnswering.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.NystromformerForQuestionAnswering.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.NystromformerForQuestionAnswering.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.NystromformerForQuestionAnswering.forward.start_positions",description:`<strong>start_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). Position outside of the sequence are not taken into account for computing the loss.`,name:"start_positions"},{anchor:"transformers.NystromformerForQuestionAnswering.forward.end_positions",description:`<strong>end_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). Position outside of the sequence are not taken into account for computing the loss.`,name:"end_positions"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.QuestionAnsweringModelOutput" >transformers.modeling_outputs.QuestionAnsweringModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/nystromformer#transformers.NystromformerConfig" >NystromformerConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.</p> </li> <li> <p><strong>start_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-start scores (before SoftMax).</p> </li> <li> <p><strong>end_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-end scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after 
the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.QuestionAnsweringModelOutput" >transformers.modeling_outputs.QuestionAnsweringModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Fe=new Ko({props:{$$slots:{default:[Fi]},$$scope:{ctx:q}}}),$o=new Ee({props:{code:`from transformers import AutoTokenizer, NystromformerForQuestionAnswering import torch torch.manual_seed(0) tokenizer = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512") model = NystromformerForQuestionAnswering.from_pretrained("uw-madison/nystromformer-512") question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" inputs = tokenizer(question, text, return_tensors="pt") start_positions = torch.tensor([1]) end_positions = torch.tensor([3]) outputs = model(**inputs, start_positions=start_positions, end_positions=end_positions) loss = outputs.loss round(loss.item(), 2) start_scores = outputs.start_logits list(start_scores.shape) end_scores = outputs.end_logits list(end_scores.shape) `,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer, NystromformerForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>torch.manual_seed(<span class="hljs-number">0</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;uw-madison/nystromformer-512&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = NystromformerForQuestionAnswering.from_pretrained(<span class="hljs-string">&quot;uw-madison/nystromformer-512&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>question, text = <span class="hljs-string">&quot;Who was Jim Henson?&quot;</span>, <span class="hljs-string">&quot;Jim Henson was a nice puppet&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(question, text, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>start_positions = torch.tensor([<span class="hljs-number">1</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>end_positions = torch.tensor([<span class="hljs-number">3</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, start_positions=start_positions, end_positions=end_positions) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">round</span>(loss.item(), <span class="hljs-number">2</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>start_scores = outputs.start_logits <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">list</span>(start_scores.shape) <span class="hljs-meta">&gt;&gt;&gt; </span>end_scores = outputs.end_logits <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">list</span>(end_scores.shape) `}}),{c(){m=s("meta"),N=d(),f=s("h1"),T=s("a"),$=s("span"),_(g.$$.fragment),u=d(),M=s("span"),vs=a("Nystr\xF6mformer"),Wt=d(),Y=s("h2"),ue=s("a"),Xo=s("span"),_(Pe.$$.fragment),bs=d(),et=s("span"),ws=a("Overview"),Bt=d(),ge=s("p"),Ts=a("The Nystr\xF6mformer model was proposed in "),Se=s("a"),ot=s("em"),Ns=a("Nystr\xF6mformer: A Nystr\xF6m-Based Algorithm for Approximating Self-Attention"),$s=a(` by Yunyang 
Xiong, Zhanpeng Zeng, Rudrasis Chakraborty, Mingxing Tan, Glenn Fung, Yin Li, and Vikas Singh.`),Qt=d(),qo=s("p"),Ms=a("The abstract from the paper is the following:"),Ht=d(),Fo=s("p"),tt=s("em"),zs=a(`Transformers have emerged as a powerful tool for a broad range of natural language processing tasks. A key component that drives the impressive performance of Transformers is the self-attention mechanism that encodes the influence or dependence of other tokens on each specific token. While beneficial, the quadratic complexity of self-attention on the input sequence length has limited its application to longer sequences \u2014 a topic being actively studied in the community. To address this limitation, we propose Nystr\xF6mformer \u2014 a model that exhibits favorable scalability as a function of sequence length. Our idea is based on adapting the Nystr\xF6m method to approximate standard self-attention with O(n) complexity. The scalability of Nystr\xF6mformer enables application to longer sequences with thousands of tokens. We perform evaluations on multiple downstream tasks on the GLUE benchmark and IMDB reviews with standard sequence length, and find that our Nystr\xF6mformer performs comparably, or in a few cases, even slightly better, than standard self-attention. On longer sequence tasks in the Long Range Arena (LRA) benchmark, Nystr\xF6mformer performs favorably relative to other efficient self-attention methods. Our code is available at this https URL.`),Ut=d(),Q=s("p"),qs=a("This model was contributed by "),Le=s("a"),Fs=a("novice03"),Cs=a(". The original code can be found "),Ie=s("a"),js=a("here"),As=a("."),Rt=d(),Z=s("h2"),_e=s("a"),st=s("span"),_(De.$$.fragment),xs=d(),nt=s("span"),Es=a("NystromformerConfig"),Vt=d(),F=s("div"),_(Oe.$$.fragment),Ps=d(),K=s("p"),Ss=a("This is the configuration class to store the configuration of a "),Co=s("a"),Ls=a("NystromformerModel"),Is=a(`. It is used to instantiate an Nystromformer model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Nystromformer `),We=s("a"),Ds=a("uw-madison/nystromformer-512"),Os=a(" architecture."),Ws=d(),X=s("p"),Bs=a("Configuration objects inherit from "),jo=s("a"),Qs=a("PretrainedConfig"),Hs=a(` and can be used to control the model outputs. Read the documentation from `),Ao=s("a"),Us=a("PretrainedConfig"),Rs=a(" for more information."),Vs=d(),rt=s("p"),Gs=a("Example:"),Js=d(),_(Be.$$.fragment),Gt=d(),ee=s("h2"),ye=s("a"),at=s("span"),_(Qe.$$.fragment),Ys=d(),it=s("span"),Zs=a("NystromformerModel"),Jt=d(),W=s("div"),_(He.$$.fragment),Ks=d(),Ue=s("p"),Xs=a(`The bare Nystr\xF6mformer Model transformer outputting raw hidden-states without any specific head on top. This model is a PyTorch `),Re=s("a"),en=a("torch.nn.Module"),on=a(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),tn=d(),C=s("div"),_(Ve.$$.fragment),sn=d(),oe=s("p"),nn=a("The "),xo=s("a"),rn=a("NystromformerModel"),an=a(" forward method, overrides the "),lt=s("code"),ln=a("__call__"),dn=a(" special method."),cn=d(),_(ke.$$.fragment),mn=d(),dt=s("p"),pn=a("Example:"),fn=d(),_(Ge.$$.fragment),Yt=d(),te=s("h2"),ve=s("a"),ct=s("span"),_(Je.$$.fragment),hn=d(),mt=s("span"),un=a("NystromformerForMaskedLM"),Zt=d(),B=s("div"),_(Ye.$$.fragment),gn=d(),se=s("p"),_n=a("Nystr\xF6mformer Model with a "),pt=s("code"),yn=a("language modeling"),kn=a(` head on top. 
This model is a PyTorch `),Ze=s("a"),vn=a("torch.nn.Module"),bn=a(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),wn=d(),j=s("div"),_(Ke.$$.fragment),Tn=d(),ne=s("p"),Nn=a("The "),Eo=s("a"),$n=a("NystromformerForMaskedLM"),Mn=a(" forward method, overrides the "),ft=s("code"),zn=a("__call__"),qn=a(" special method."),Fn=d(),_(be.$$.fragment),Cn=d(),ht=s("p"),jn=a("Example:"),An=d(),_(Xe.$$.fragment),Kt=d(),re=s("h2"),we=s("a"),ut=s("span"),_(eo.$$.fragment),xn=d(),gt=s("span"),En=a("NystromformerForSequenceClassification"),Xt=d(),S=s("div"),_(oo.$$.fragment),Pn=d(),_t=s("p"),Sn=a(`Nystr\xF6mformer Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),Ln=d(),to=s("p"),In=a("This model is a PyTorch "),so=s("a"),Dn=a("torch.nn.Module"),On=a(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Wn=d(),z=s("div"),_(no.$$.fragment),Bn=d(),ae=s("p"),Qn=a("The "),Po=s("a"),Hn=a("NystromformerForSequenceClassification"),Un=a(" forward method, overrides the "),yt=s("code"),Rn=a("__call__"),Vn=a(" special method."),Gn=d(),_(Te.$$.fragment),Jn=d(),kt=s("p"),Yn=a("Example of single-label classification:"),Zn=d(),_(ro.$$.fragment),Kn=d(),vt=s("p"),Xn=a("Example of multi-label classification:"),er=d(),_(ao.$$.fragment),es=d(),ie=s("h2"),Ne=s("a"),bt=s("span"),_(io.$$.fragment),or=d(),wt=s("span"),tr=a("NystromformerForMultipleChoice"),os=d(),L=s("div"),_(lo.$$.fragment),sr=d(),Tt=s("p"),nr=a(`Nystr\xF6mformer Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.`),rr=d(),co=s("p"),ar=a("This model is a PyTorch "),mo=s("a"),ir=a("torch.nn.Module"),lr=a(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),dr=d(),A=s("div"),_(po.$$.fragment),cr=d(),le=s("p"),mr=a("The "),So=s("a"),pr=a("NystromformerForMultipleChoice"),fr=a(" forward method, overrides the "),Nt=s("code"),hr=a("__call__"),ur=a(" special method."),gr=d(),_($e.$$.fragment),_r=d(),$t=s("p"),yr=a("Example:"),kr=d(),_(fo.$$.fragment),ts=d(),de=s("h2"),Me=s("a"),Mt=s("span"),_(ho.$$.fragment),vr=d(),zt=s("span"),br=a("NystromformerForTokenClassification"),ss=d(),I=s("div"),_(uo.$$.fragment),wr=d(),qt=s("p"),Tr=a(`Nystr\xF6mformer Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.`),Nr=d(),go=s("p"),$r=a("This model is a PyTorch "),_o=s("a"),Mr=a("torch.nn.Module"),zr=a(` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),qr=d(),x=s("div"),_(yo.$$.fragment),Fr=d(),ce=s("p"),Cr=a("The "),Lo=s("a"),jr=a("NystromformerForTokenClassification"),Ar=a(" forward method, overrides the "),Ft=s("code"),xr=a("__call__"),Er=a(" special method."),Pr=d(),_(ze.$$.fragment),Sr=d(),Ct=s("p"),Lr=a("Example:"),Ir=d(),_(ko.$$.fragment),ns=d(),me=s("h2"),qe=s("a"),jt=s("span"),_(vo.$$.fragment),Dr=d(),At=s("span"),Or=a("NystromformerForQuestionAnswering"),rs=d(),D=s("div"),_(bo.$$.fragment),Wr=d(),pe=s("p"),Br=a(`Nystr\xF6mformer Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),xt=s("code"),Qr=a("span start logits"),Hr=a(" and "),Et=s("code"),Ur=a("span end logits"),Rr=a(")."),Vr=d(),wo=s("p"),Gr=a("This model is a PyTorch "),To=s("a"),Jr=a("torch.nn.Module"),Yr=a(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Zr=d(),E=s("div"),_(No.$$.fragment),Kr=d(),fe=s("p"),Xr=a("The "),Io=s("a"),ea=a("NystromformerForQuestionAnswering"),oa=a(" forward method, overrides the "),Pt=s("code"),ta=a("__call__"),sa=a(" special method."),na=d(),_(Fe.$$.fragment),ra=d(),St=s("p"),aa=a("Example:"),ia=d(),_($o.$$.fragment),this.h()},l(o){const p=Ti('[data-svelte="svelte-1phssyn"]',document.head);m=n(p,"META",{name:!0,content:!0}),p.forEach(t),N=c(o),f=n(o,"H1",{class:!0});var Mo=r(f);T=n(Mo,"A",{id:!0,class:!0,href:!0});var Lt=r(T);$=n(Lt,"SPAN",{});var It=r($);y(g.$$.fragment,It),It.forEach(t),Lt.forEach(t),u=c(Mo),M=n(Mo,"SPAN",{});var Dt=r(M);vs=i(Dt,"Nystr\xF6mformer"),Dt.forEach(t),Mo.forEach(t),Wt=c(o),Y=n(o,"H2",{class:!0});var zo=r(Y);ue=n(zo,"A",{id:!0,class:!0,href:!0});var Ot=r(ue);Xo=n(Ot,"SPAN",{});var la=r(Xo);y(Pe.$$.fragment,la),la.forEach(t),Ot.forEach(t),bs=c(zo),et=n(zo,"SPAN",{});var da=r(et);ws=i(da,"Overview"),da.forEach(t),zo.forEach(t),Bt=c(o),ge=n(o,"P",{});var is=r(ge);Ts=i(is,"The Nystr\xF6mformer model was proposed in "),Se=n(is,"A",{href:!0,rel:!0});var ca=r(Se);ot=n(ca,"EM",{});var ma=r(ot);Ns=i(ma,"Nystr\xF6mformer: A Nystr\xF6m-Based Algorithm for Approximating Self-Attention"),ma.forEach(t),ca.forEach(t),$s=i(is,` by Yunyang Xiong, Zhanpeng Zeng, Rudrasis Chakraborty, Mingxing Tan, Glenn Fung, Yin Li, and Vikas Singh.`),is.forEach(t),Qt=c(o),qo=n(o,"P",{});var pa=r(qo);Ms=i(pa,"The abstract from the paper is the following:"),pa.forEach(t),Ht=c(o),Fo=n(o,"P",{});var fa=r(Fo);tt=n(fa,"EM",{});var ha=r(tt);zs=i(ha,`Transformers have emerged as a powerful tool for a broad range of natural language processing tasks. A key component that drives the impressive performance of Transformers is the self-attention mechanism that encodes the influence or dependence of other tokens on each specific token. While beneficial, the quadratic complexity of self-attention on the input sequence length has limited its application to longer sequences \u2014 a topic being actively studied in the community. To address this limitation, we propose Nystr\xF6mformer \u2014 a model that exhibits favorable scalability as a function of sequence length. Our idea is based on adapting the Nystr\xF6m method to approximate standard self-attention with O(n) complexity. The scalability of Nystr\xF6mformer enables application to longer sequences with thousands of tokens. 
We perform evaluations on multiple downstream tasks on the GLUE benchmark and IMDB reviews with standard sequence length, and find that our Nystr\xF6mformer performs comparably, or in a few cases, even slightly better, than standard self-attention. On longer sequence tasks in the Long Range Arena (LRA) benchmark, Nystr\xF6mformer performs favorably relative to other efficient self-attention methods. Our code is available at this https URL.`),ha.forEach(t),fa.forEach(t),Ut=c(o),Q=n(o,"P",{});var Do=r(Q);qs=i(Do,"This model was contributed by "),Le=n(Do,"A",{href:!0,rel:!0});var ua=r(Le);Fs=i(ua,"novice03"),ua.forEach(t),Cs=i(Do,". The original code can be found "),Ie=n(Do,"A",{href:!0,rel:!0});var ga=r(Ie);js=i(ga,"here"),ga.forEach(t),As=i(Do,"."),Do.forEach(t),Rt=c(o),Z=n(o,"H2",{class:!0});var ls=r(Z);_e=n(ls,"A",{id:!0,class:!0,href:!0});var _a=r(_e);st=n(_a,"SPAN",{});var ya=r(st);y(De.$$.fragment,ya),ya.forEach(t),_a.forEach(t),xs=c(ls),nt=n(ls,"SPAN",{});var ka=r(nt);Es=i(ka,"NystromformerConfig"),ka.forEach(t),ls.forEach(t),Vt=c(o),F=n(o,"DIV",{class:!0});var H=r(F);y(Oe.$$.fragment,H),Ps=c(H),K=n(H,"P",{});var Oo=r(K);Ss=i(Oo,"This is the configuration class to store the configuration of a "),Co=n(Oo,"A",{href:!0});var va=r(Co);Ls=i(va,"NystromformerModel"),va.forEach(t),Is=i(Oo,`. It is used to instantiate an Nystromformer model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Nystromformer `),We=n(Oo,"A",{href:!0,rel:!0});var ba=r(We);Ds=i(ba,"uw-madison/nystromformer-512"),ba.forEach(t),Os=i(Oo," architecture."),Oo.forEach(t),Ws=c(H),X=n(H,"P",{});var Wo=r(X);Bs=i(Wo,"Configuration objects inherit from "),jo=n(Wo,"A",{href:!0});var wa=r(jo);Qs=i(wa,"PretrainedConfig"),wa.forEach(t),Hs=i(Wo,` and can be used to control the model outputs. Read the documentation from `),Ao=n(Wo,"A",{href:!0});var Ta=r(Ao);Us=i(Ta,"PretrainedConfig"),Ta.forEach(t),Rs=i(Wo," for more information."),Wo.forEach(t),Vs=c(H),rt=n(H,"P",{});var Na=r(rt);Gs=i(Na,"Example:"),Na.forEach(t),Js=c(H),y(Be.$$.fragment,H),H.forEach(t),Gt=c(o),ee=n(o,"H2",{class:!0});var ds=r(ee);ye=n(ds,"A",{id:!0,class:!0,href:!0});var $a=r(ye);at=n($a,"SPAN",{});var Ma=r(at);y(Qe.$$.fragment,Ma),Ma.forEach(t),$a.forEach(t),Ys=c(ds),it=n(ds,"SPAN",{});var za=r(it);Zs=i(za,"NystromformerModel"),za.forEach(t),ds.forEach(t),Jt=c(o),W=n(o,"DIV",{class:!0});var Bo=r(W);y(He.$$.fragment,Bo),Ks=c(Bo),Ue=n(Bo,"P",{});var cs=r(Ue);Xs=i(cs,`The bare Nystr\xF6mformer Model transformer outputting raw hidden-states without any specific head on top. This model is a PyTorch `),Re=n(cs,"A",{href:!0,rel:!0});var qa=r(Re);en=i(qa,"torch.nn.Module"),qa.forEach(t),on=i(cs,` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),cs.forEach(t),tn=c(Bo),C=n(Bo,"DIV",{class:!0});var U=r(C);y(Ve.$$.fragment,U),sn=c(U),oe=n(U,"P",{});var Qo=r(oe);nn=i(Qo,"The "),xo=n(Qo,"A",{href:!0});var Fa=r(xo);rn=i(Fa,"NystromformerModel"),Fa.forEach(t),an=i(Qo," forward method, overrides the "),lt=n(Qo,"CODE",{});var Ca=r(lt);ln=i(Ca,"__call__"),Ca.forEach(t),dn=i(Qo," special method."),Qo.forEach(t),cn=c(U),y(ke.$$.fragment,U),mn=c(U),dt=n(U,"P",{});var ja=r(dt);pn=i(ja,"Example:"),ja.forEach(t),fn=c(U),y(Ge.$$.fragment,U),U.forEach(t),Bo.forEach(t),Yt=c(o),te=n(o,"H2",{class:!0});var ms=r(te);ve=n(ms,"A",{id:!0,class:!0,href:!0});var Aa=r(ve);ct=n(Aa,"SPAN",{});var xa=r(ct);y(Je.$$.fragment,xa),xa.forEach(t),Aa.forEach(t),hn=c(ms),mt=n(ms,"SPAN",{});var Ea=r(mt);un=i(Ea,"NystromformerForMaskedLM"),Ea.forEach(t),ms.forEach(t),Zt=c(o),B=n(o,"DIV",{class:!0});var Ho=r(B);y(Ye.$$.fragment,Ho),gn=c(Ho),se=n(Ho,"P",{});var Uo=r(se);_n=i(Uo,"Nystr\xF6mformer Model with a "),pt=n(Uo,"CODE",{});var Pa=r(pt);yn=i(Pa,"language modeling"),Pa.forEach(t),kn=i(Uo,` head on top. This model is a PyTorch `),Ze=n(Uo,"A",{href:!0,rel:!0});var Sa=r(Ze);vn=i(Sa,"torch.nn.Module"),Sa.forEach(t),bn=i(Uo,` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Uo.forEach(t),wn=c(Ho),j=n(Ho,"DIV",{class:!0});var R=r(j);y(Ke.$$.fragment,R),Tn=c(R),ne=n(R,"P",{});var Ro=r(ne);Nn=i(Ro,"The "),Eo=n(Ro,"A",{href:!0});var La=r(Eo);$n=i(La,"NystromformerForMaskedLM"),La.forEach(t),Mn=i(Ro," forward method, overrides the "),ft=n(Ro,"CODE",{});var Ia=r(ft);zn=i(Ia,"__call__"),Ia.forEach(t),qn=i(Ro," special method."),Ro.forEach(t),Fn=c(R),y(be.$$.fragment,R),Cn=c(R),ht=n(R,"P",{});var Da=r(ht);jn=i(Da,"Example:"),Da.forEach(t),An=c(R),y(Xe.$$.fragment,R),R.forEach(t),Ho.forEach(t),Kt=c(o),re=n(o,"H2",{class:!0});var ps=r(re);we=n(ps,"A",{id:!0,class:!0,href:!0});var Oa=r(we);ut=n(Oa,"SPAN",{});var Wa=r(ut);y(eo.$$.fragment,Wa),Wa.forEach(t),Oa.forEach(t),xn=c(ps),gt=n(ps,"SPAN",{});var Ba=r(gt);En=i(Ba,"NystromformerForSequenceClassification"),Ba.forEach(t),ps.forEach(t),Xt=c(o),S=n(o,"DIV",{class:!0});var Ce=r(S);y(oo.$$.fragment,Ce),Pn=c(Ce),_t=n(Ce,"P",{});var Qa=r(_t);Sn=i(Qa,`Nystr\xF6mformer Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),Qa.forEach(t),Ln=c(Ce),to=n(Ce,"P",{});var fs=r(to);In=i(fs,"This model is a PyTorch "),so=n(fs,"A",{href:!0,rel:!0});var Ha=r(so);Dn=i(Ha,"torch.nn.Module"),Ha.forEach(t),On=i(fs,` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),fs.forEach(t),Wn=c(Ce),z=n(Ce,"DIV",{class:!0});var P=r(z);y(no.$$.fragment,P),Bn=c(P),ae=n(P,"P",{});var Vo=r(ae);Qn=i(Vo,"The "),Po=n(Vo,"A",{href:!0});var Ua=r(Po);Hn=i(Ua,"NystromformerForSequenceClassification"),Ua.forEach(t),Un=i(Vo," forward method, overrides the "),yt=n(Vo,"CODE",{});var Ra=r(yt);Rn=i(Ra,"__call__"),Ra.forEach(t),Vn=i(Vo," special method."),Vo.forEach(t),Gn=c(P),y(Te.$$.fragment,P),Jn=c(P),kt=n(P,"P",{});var Va=r(kt);Yn=i(Va,"Example of single-label classification:"),Va.forEach(t),Zn=c(P),y(ro.$$.fragment,P),Kn=c(P),vt=n(P,"P",{});var Ga=r(vt);Xn=i(Ga,"Example of multi-label classification:"),Ga.forEach(t),er=c(P),y(ao.$$.fragment,P),P.forEach(t),Ce.forEach(t),es=c(o),ie=n(o,"H2",{class:!0});var hs=r(ie);Ne=n(hs,"A",{id:!0,class:!0,href:!0});var Ja=r(Ne);bt=n(Ja,"SPAN",{});var Ya=r(bt);y(io.$$.fragment,Ya),Ya.forEach(t),Ja.forEach(t),or=c(hs),wt=n(hs,"SPAN",{});var Za=r(wt);tr=i(Za,"NystromformerForMultipleChoice"),Za.forEach(t),hs.forEach(t),os=c(o),L=n(o,"DIV",{class:!0});var je=r(L);y(lo.$$.fragment,je),sr=c(je),Tt=n(je,"P",{});var Ka=r(Tt);nr=i(Ka,`Nystr\xF6mformer Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.`),Ka.forEach(t),rr=c(je),co=n(je,"P",{});var us=r(co);ar=i(us,"This model is a PyTorch "),mo=n(us,"A",{href:!0,rel:!0});var Xa=r(mo);ir=i(Xa,"torch.nn.Module"),Xa.forEach(t),lr=i(us,` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),us.forEach(t),dr=c(je),A=n(je,"DIV",{class:!0});var V=r(A);y(po.$$.fragment,V),cr=c(V),le=n(V,"P",{});var Go=r(le);mr=i(Go,"The "),So=n(Go,"A",{href:!0});var ei=r(So);pr=i(ei,"NystromformerForMultipleChoice"),ei.forEach(t),fr=i(Go," forward method, overrides the "),Nt=n(Go,"CODE",{});var oi=r(Nt);hr=i(oi,"__call__"),oi.forEach(t),ur=i(Go," special method."),Go.forEach(t),gr=c(V),y($e.$$.fragment,V),_r=c(V),$t=n(V,"P",{});var ti=r($t);yr=i(ti,"Example:"),ti.forEach(t),kr=c(V),y(fo.$$.fragment,V),V.forEach(t),je.forEach(t),ts=c(o),de=n(o,"H2",{class:!0});var gs=r(de);Me=n(gs,"A",{id:!0,class:!0,href:!0});var si=r(Me);Mt=n(si,"SPAN",{});var ni=r(Mt);y(ho.$$.fragment,ni),ni.forEach(t),si.forEach(t),vr=c(gs),zt=n(gs,"SPAN",{});var ri=r(zt);br=i(ri,"NystromformerForTokenClassification"),ri.forEach(t),gs.forEach(t),ss=c(o),I=n(o,"DIV",{class:!0});var Ae=r(I);y(uo.$$.fragment,Ae),wr=c(Ae),qt=n(Ae,"P",{});var ai=r(qt);Tr=i(ai,`Nystr\xF6mformer Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.`),ai.forEach(t),Nr=c(Ae),go=n(Ae,"P",{});var _s=r(go);$r=i(_s,"This model is a PyTorch "),_o=n(_s,"A",{href:!0,rel:!0});var ii=r(_o);Mr=i(ii,"torch.nn.Module"),ii.forEach(t),zr=i(_s,` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),_s.forEach(t),qr=c(Ae),x=n(Ae,"DIV",{class:!0});var G=r(x);y(yo.$$.fragment,G),Fr=c(G),ce=n(G,"P",{});var Jo=r(ce);Cr=i(Jo,"The "),Lo=n(Jo,"A",{href:!0});var li=r(Lo);jr=i(li,"NystromformerForTokenClassification"),li.forEach(t),Ar=i(Jo," forward method, overrides the "),Ft=n(Jo,"CODE",{});var di=r(Ft);xr=i(di,"__call__"),di.forEach(t),Er=i(Jo," special method."),Jo.forEach(t),Pr=c(G),y(ze.$$.fragment,G),Sr=c(G),Ct=n(G,"P",{});var ci=r(Ct);Lr=i(ci,"Example:"),ci.forEach(t),Ir=c(G),y(ko.$$.fragment,G),G.forEach(t),Ae.forEach(t),ns=c(o),me=n(o,"H2",{class:!0});var ys=r(me);qe=n(ys,"A",{id:!0,class:!0,href:!0});var mi=r(qe);jt=n(mi,"SPAN",{});var pi=r(jt);y(vo.$$.fragment,pi),pi.forEach(t),mi.forEach(t),Dr=c(ys),At=n(ys,"SPAN",{});var fi=r(At);Or=i(fi,"NystromformerForQuestionAnswering"),fi.forEach(t),ys.forEach(t),rs=c(o),D=n(o,"DIV",{class:!0});var xe=r(D);y(bo.$$.fragment,xe),Wr=c(xe),pe=n(xe,"P",{});var Yo=r(pe);Br=i(Yo,`Nystr\xF6mformer Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),xt=n(Yo,"CODE",{});var hi=r(xt);Qr=i(hi,"span start logits"),hi.forEach(t),Hr=i(Yo," and "),Et=n(Yo,"CODE",{});var ui=r(Et);Ur=i(ui,"span end logits"),ui.forEach(t),Rr=i(Yo,")."),Yo.forEach(t),Vr=c(xe),wo=n(xe,"P",{});var ks=r(wo);Gr=i(ks,"This model is a PyTorch "),To=n(ks,"A",{href:!0,rel:!0});var gi=r(To);Jr=i(gi,"torch.nn.Module"),gi.forEach(t),Yr=i(ks,` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),ks.forEach(t),Zr=c(xe),E=n(xe,"DIV",{class:!0});var J=r(E);y(No.$$.fragment,J),Kr=c(J),fe=n(J,"P",{});var Zo=r(fe);Xr=i(Zo,"The "),Io=n(Zo,"A",{href:!0});var _i=r(Io);ea=i(_i,"NystromformerForQuestionAnswering"),_i.forEach(t),oa=i(Zo," forward method, overrides the "),Pt=n(Zo,"CODE",{});var yi=r(Pt);ta=i(yi,"__call__"),yi.forEach(t),sa=i(Zo," special method."),Zo.forEach(t),na=c(J),y(Fe.$$.fragment,J),ra=c(J),St=n(J,"P",{});var ki=r(St);aa=i(ki,"Example:"),ki.forEach(t),ia=c(J),y($o.$$.fragment,J),J.forEach(t),xe.forEach(t),this.h()},h(){l(m,"name","hf:doc:metadata"),l(m,"content",JSON.stringify(ji)),l(T,"id","nystrmformer"),l(T,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(T,"href","#nystrmformer"),l(f,"class","relative group"),l(ue,"id","overview"),l(ue,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(ue,"href","#overview"),l(Y,"class","relative group"),l(Se,"href","https://arxiv.org/abs/2102.03902"),l(Se,"rel","nofollow"),l(Le,"href","https://huggingface.co/novice03"),l(Le,"rel","nofollow"),l(Ie,"href","https://github.com/mlpen/Nystromformer"),l(Ie,"rel","nofollow"),l(_e,"id","transformers.NystromformerConfig"),l(_e,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(_e,"href","#transformers.NystromformerConfig"),l(Z,"class","relative 
group"),l(Co,"href","/docs/transformers/pr_16143/en/model_doc/nystromformer#transformers.NystromformerModel"),l(We,"href","https://huggingface.co/uw-madison/nystromformer-512"),l(We,"rel","nofollow"),l(jo,"href","/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig"),l(Ao,"href","/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig"),l(F,"class","docstring"),l(ye,"id","transformers.NystromformerModel"),l(ye,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(ye,"href","#transformers.NystromformerModel"),l(ee,"class","relative group"),l(Re,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(Re,"rel","nofollow"),l(xo,"href","/docs/transformers/pr_16143/en/model_doc/nystromformer#transformers.NystromformerModel"),l(C,"class","docstring"),l(W,"class","docstring"),l(ve,"id","transformers.NystromformerForMaskedLM"),l(ve,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(ve,"href","#transformers.NystromformerForMaskedLM"),l(te,"class","relative group"),l(Ze,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(Ze,"rel","nofollow"),l(Eo,"href","/docs/transformers/pr_16143/en/model_doc/nystromformer#transformers.NystromformerForMaskedLM"),l(j,"class","docstring"),l(B,"class","docstring"),l(we,"id","transformers.NystromformerForSequenceClassification"),l(we,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(we,"href","#transformers.NystromformerForSequenceClassification"),l(re,"class","relative group"),l(so,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(so,"rel","nofollow"),l(Po,"href","/docs/transformers/pr_16143/en/model_doc/nystromformer#transformers.NystromformerForSequenceClassification"),l(z,"class","docstring"),l(S,"class","docstring"),l(Ne,"id","transformers.NystromformerForMultipleChoice"),l(Ne,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(Ne,"href","#transformers.NystromformerForMultipleChoice"),l(ie,"class","relative group"),l(mo,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(mo,"rel","nofollow"),l(So,"href","/docs/transformers/pr_16143/en/model_doc/nystromformer#transformers.NystromformerForMultipleChoice"),l(A,"class","docstring"),l(L,"class","docstring"),l(Me,"id","transformers.NystromformerForTokenClassification"),l(Me,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(Me,"href","#transformers.NystromformerForTokenClassification"),l(de,"class","relative group"),l(_o,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(_o,"rel","nofollow"),l(Lo,"href","/docs/transformers/pr_16143/en/model_doc/nystromformer#transformers.NystromformerForTokenClassification"),l(x,"class","docstring"),l(I,"class","docstring"),l(qe,"id","transformers.NystromformerForQuestionAnswering"),l(qe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 
with-hover:group-hover:opacity-100 with-hover:right-full"),l(qe,"href","#transformers.NystromformerForQuestionAnswering"),l(me,"class","relative group"),l(To,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),l(To,"rel","nofollow"),l(Io,"href","/docs/transformers/pr_16143/en/model_doc/nystromformer#transformers.NystromformerForQuestionAnswering"),l(E,"class","docstring"),l(D,"class","docstring")},m(o,p){e(document.head,m),h(o,N,p),h(o,f,p),e(f,T),e(T,$),k(g,$,null),e(f,u),e(f,M),e(M,vs),h(o,Wt,p),h(o,Y,p),e(Y,ue),e(ue,Xo),k(Pe,Xo,null),e(Y,bs),e(Y,et),e(et,ws),h(o,Bt,p),h(o,ge,p),e(ge,Ts),e(ge,Se),e(Se,ot),e(ot,Ns),e(ge,$s),h(o,Qt,p),h(o,qo,p),e(qo,Ms),h(o,Ht,p),h(o,Fo,p),e(Fo,tt),e(tt,zs),h(o,Ut,p),h(o,Q,p),e(Q,qs),e(Q,Le),e(Le,Fs),e(Q,Cs),e(Q,Ie),e(Ie,js),e(Q,As),h(o,Rt,p),h(o,Z,p),e(Z,_e),e(_e,st),k(De,st,null),e(Z,xs),e(Z,nt),e(nt,Es),h(o,Vt,p),h(o,F,p),k(Oe,F,null),e(F,Ps),e(F,K),e(K,Ss),e(K,Co),e(Co,Ls),e(K,Is),e(K,We),e(We,Ds),e(K,Os),e(F,Ws),e(F,X),e(X,Bs),e(X,jo),e(jo,Qs),e(X,Hs),e(X,Ao),e(Ao,Us),e(X,Rs),e(F,Vs),e(F,rt),e(rt,Gs),e(F,Js),k(Be,F,null),h(o,Gt,p),h(o,ee,p),e(ee,ye),e(ye,at),k(Qe,at,null),e(ee,Ys),e(ee,it),e(it,Zs),h(o,Jt,p),h(o,W,p),k(He,W,null),e(W,Ks),e(W,Ue),e(Ue,Xs),e(Ue,Re),e(Re,en),e(Ue,on),e(W,tn),e(W,C),k(Ve,C,null),e(C,sn),e(C,oe),e(oe,nn),e(oe,xo),e(xo,rn),e(oe,an),e(oe,lt),e(lt,ln),e(oe,dn),e(C,cn),k(ke,C,null),e(C,mn),e(C,dt),e(dt,pn),e(C,fn),k(Ge,C,null),h(o,Yt,p),h(o,te,p),e(te,ve),e(ve,ct),k(Je,ct,null),e(te,hn),e(te,mt),e(mt,un),h(o,Zt,p),h(o,B,p),k(Ye,B,null),e(B,gn),e(B,se),e(se,_n),e(se,pt),e(pt,yn),e(se,kn),e(se,Ze),e(Ze,vn),e(se,bn),e(B,wn),e(B,j),k(Ke,j,null),e(j,Tn),e(j,ne),e(ne,Nn),e(ne,Eo),e(Eo,$n),e(ne,Mn),e(ne,ft),e(ft,zn),e(ne,qn),e(j,Fn),k(be,j,null),e(j,Cn),e(j,ht),e(ht,jn),e(j,An),k(Xe,j,null),h(o,Kt,p),h(o,re,p),e(re,we),e(we,ut),k(eo,ut,null),e(re,xn),e(re,gt),e(gt,En),h(o,Xt,p),h(o,S,p),k(oo,S,null),e(S,Pn),e(S,_t),e(_t,Sn),e(S,Ln),e(S,to),e(to,In),e(to,so),e(so,Dn),e(to,On),e(S,Wn),e(S,z),k(no,z,null),e(z,Bn),e(z,ae),e(ae,Qn),e(ae,Po),e(Po,Hn),e(ae,Un),e(ae,yt),e(yt,Rn),e(ae,Vn),e(z,Gn),k(Te,z,null),e(z,Jn),e(z,kt),e(kt,Yn),e(z,Zn),k(ro,z,null),e(z,Kn),e(z,vt),e(vt,Xn),e(z,er),k(ao,z,null),h(o,es,p),h(o,ie,p),e(ie,Ne),e(Ne,bt),k(io,bt,null),e(ie,or),e(ie,wt),e(wt,tr),h(o,os,p),h(o,L,p),k(lo,L,null),e(L,sr),e(L,Tt),e(Tt,nr),e(L,rr),e(L,co),e(co,ar),e(co,mo),e(mo,ir),e(co,lr),e(L,dr),e(L,A),k(po,A,null),e(A,cr),e(A,le),e(le,mr),e(le,So),e(So,pr),e(le,fr),e(le,Nt),e(Nt,hr),e(le,ur),e(A,gr),k($e,A,null),e(A,_r),e(A,$t),e($t,yr),e(A,kr),k(fo,A,null),h(o,ts,p),h(o,de,p),e(de,Me),e(Me,Mt),k(ho,Mt,null),e(de,vr),e(de,zt),e(zt,br),h(o,ss,p),h(o,I,p),k(uo,I,null),e(I,wr),e(I,qt),e(qt,Tr),e(I,Nr),e(I,go),e(go,$r),e(go,_o),e(_o,Mr),e(go,zr),e(I,qr),e(I,x),k(yo,x,null),e(x,Fr),e(x,ce),e(ce,Cr),e(ce,Lo),e(Lo,jr),e(ce,Ar),e(ce,Ft),e(Ft,xr),e(ce,Er),e(x,Pr),k(ze,x,null),e(x,Sr),e(x,Ct),e(Ct,Lr),e(x,Ir),k(ko,x,null),h(o,ns,p),h(o,me,p),e(me,qe),e(qe,jt),k(vo,jt,null),e(me,Dr),e(me,At),e(At,Or),h(o,rs,p),h(o,D,p),k(bo,D,null),e(D,Wr),e(D,pe),e(pe,Br),e(pe,xt),e(xt,Qr),e(pe,Hr),e(pe,Et),e(Et,Ur),e(pe,Rr),e(D,Vr),e(D,wo),e(wo,Gr),e(wo,To),e(To,Jr),e(wo,Yr),e(D,Zr),e(D,E),k(No,E,null),e(E,Kr),e(E,fe),e(fe,Xr),e(fe,Io),e(Io,ea),e(fe,oa),e(fe,Pt),e(Pt,ta),e(fe,sa),e(E,na),k(Fe,E,null),e(E,ra),e(E,St),e(St,aa),e(E,ia),k($o,E,null),as=!0},p(o,[p]){const Mo={};p&2&&(Mo.$$scope={dirty:p,ctx:o}),ke.$set(Mo);const Lt={};p&2&&(Lt.$$scope={dirty:p,ctx:o}),be.$set(Lt);const It={};p&2&&(It.$$scope={dirty:p,ctx:o}),Te.$set(It);const 
Dt={};p&2&&(Dt.$$scope={dirty:p,ctx:o}),$e.$set(Dt);const zo={};p&2&&(zo.$$scope={dirty:p,ctx:o}),ze.$set(zo);const Ot={};p&2&&(Ot.$$scope={dirty:p,ctx:o}),Fe.$set(Ot)},i(o){as||(v(g.$$.fragment,o),v(Pe.$$.fragment,o),v(De.$$.fragment,o),v(Oe.$$.fragment,o),v(Be.$$.fragment,o),v(Qe.$$.fragment,o),v(He.$$.fragment,o),v(Ve.$$.fragment,o),v(ke.$$.fragment,o),v(Ge.$$.fragment,o),v(Je.$$.fragment,o),v(Ye.$$.fragment,o),v(Ke.$$.fragment,o),v(be.$$.fragment,o),v(Xe.$$.fragment,o),v(eo.$$.fragment,o),v(oo.$$.fragment,o),v(no.$$.fragment,o),v(Te.$$.fragment,o),v(ro.$$.fragment,o),v(ao.$$.fragment,o),v(io.$$.fragment,o),v(lo.$$.fragment,o),v(po.$$.fragment,o),v($e.$$.fragment,o),v(fo.$$.fragment,o),v(ho.$$.fragment,o),v(uo.$$.fragment,o),v(yo.$$.fragment,o),v(ze.$$.fragment,o),v(ko.$$.fragment,o),v(vo.$$.fragment,o),v(bo.$$.fragment,o),v(No.$$.fragment,o),v(Fe.$$.fragment,o),v($o.$$.fragment,o),as=!0)},o(o){b(g.$$.fragment,o),b(Pe.$$.fragment,o),b(De.$$.fragment,o),b(Oe.$$.fragment,o),b(Be.$$.fragment,o),b(Qe.$$.fragment,o),b(He.$$.fragment,o),b(Ve.$$.fragment,o),b(ke.$$.fragment,o),b(Ge.$$.fragment,o),b(Je.$$.fragment,o),b(Ye.$$.fragment,o),b(Ke.$$.fragment,o),b(be.$$.fragment,o),b(Xe.$$.fragment,o),b(eo.$$.fragment,o),b(oo.$$.fragment,o),b(no.$$.fragment,o),b(Te.$$.fragment,o),b(ro.$$.fragment,o),b(ao.$$.fragment,o),b(io.$$.fragment,o),b(lo.$$.fragment,o),b(po.$$.fragment,o),b($e.$$.fragment,o),b(fo.$$.fragment,o),b(ho.$$.fragment,o),b(uo.$$.fragment,o),b(yo.$$.fragment,o),b(ze.$$.fragment,o),b(ko.$$.fragment,o),b(vo.$$.fragment,o),b(bo.$$.fragment,o),b(No.$$.fragment,o),b(Fe.$$.fragment,o),b($o.$$.fragment,o),as=!1},d(o){t(m),o&&t(N),o&&t(f),w(g),o&&t(Wt),o&&t(Y),w(Pe),o&&t(Bt),o&&t(ge),o&&t(Qt),o&&t(qo),o&&t(Ht),o&&t(Fo),o&&t(Ut),o&&t(Q),o&&t(Rt),o&&t(Z),w(De),o&&t(Vt),o&&t(F),w(Oe),w(Be),o&&t(Gt),o&&t(ee),w(Qe),o&&t(Jt),o&&t(W),w(He),w(Ve),w(ke),w(Ge),o&&t(Yt),o&&t(te),w(Je),o&&t(Zt),o&&t(B),w(Ye),w(Ke),w(be),w(Xe),o&&t(Kt),o&&t(re),w(eo),o&&t(Xt),o&&t(S),w(oo),w(no),w(Te),w(ro),w(ao),o&&t(es),o&&t(ie),w(io),o&&t(os),o&&t(L),w(lo),w(po),w($e),w(fo),o&&t(ts),o&&t(de),w(ho),o&&t(ss),o&&t(I),w(uo),w(yo),w(ze),w(ko),o&&t(ns),o&&t(me),w(vo),o&&t(rs),o&&t(D),w(bo),w(No),w(Fe),w($o)}}}const ji={local:"nystrmformer",sections:[{local:"overview",title:"Overview"},{local:"transformers.NystromformerConfig",title:"NystromformerConfig"},{local:"transformers.NystromformerModel",title:"NystromformerModel"},{local:"transformers.NystromformerForMaskedLM",title:"NystromformerForMaskedLM"},{local:"transformers.NystromformerForSequenceClassification",title:"NystromformerForSequenceClassification"},{local:"transformers.NystromformerForMultipleChoice",title:"NystromformerForMultipleChoice"},{local:"transformers.NystromformerForTokenClassification",title:"NystromformerForTokenClassification"},{local:"transformers.NystromformerForQuestionAnswering",title:"NystromformerForQuestionAnswering"}],title:"Nystr\xF6mformer"};function Ai(q,m,N){let{fw:f}=m;return q.$$set=T=>{"fw"in T&&N(0,f=T.fw)},[f]}class Di extends vi{constructor(m){super();bi(this,m,Ai,Ci,wi,{fw:0})}}export{Di as default,ji as metadata};
297
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages/model_doc/wav2vec2.mdx-30b5c1d2.js
import{S as _T,i as vT,s as wT,e as a,k as l,w as u,t as r,M as bT,c as s,d as t,m as c,a as n,x as g,h as i,b as d,F as e,g as h,y as _,q as v,o as w,B as b}from"../../chunks/vendor-4833417e.js";import{T as fe}from"../../chunks/Tip-fffd6df1.js";import{D as V}from"../../chunks/Docstring-4f315ed9.js";import{C as ue}from"../../chunks/CodeBlock-6a3d1b46.js";import{I as ee}from"../../chunks/IconCopyLink-4b81c553.js";import"../../chunks/CopyButton-dacfbfaf.js";function yT(E){let p,x,f,T,W,k,y,j;return{c(){p=a("p"),x=r("This class method is simply calling "),f=a("a"),T=r("save_pretrained()"),W=r(` and `),k=a("code"),y=r("save_pretrained"),j=r(`. Please refer to the docstrings of the methods above for more information.`),this.h()},l(O){p=s(O,"P",{});var q=n(p);x=i(q,"This class method is simply calling "),f=s(q,"A",{href:!0});var F=n(f);T=i(F,"save_pretrained()"),F.forEach(t),W=i(q,` and `),k=s(q,"CODE",{});var A=n(k);y=i(A,"save_pretrained"),A.forEach(t),j=i(q,`. Please refer to the docstrings of the methods above for more information.`),q.forEach(t),this.h()},h(){d(f,"href","/docs/transformers/pr_16143/en/main_classes/feature_extractor#transformers.FeatureExtractionMixin.save_pretrained")},m(O,q){h(O,p,q),e(p,x),e(p,f),e(f,T),e(p,W),e(p,k),e(k,y),e(p,j)},d(O){O&&t(p)}}}function kT(E){let p,x,f,T,W,k,y,j,O,q,F,A,L,X;return{c(){p=a("p"),x=r(`This class method is simply calling Wav2Vec2FeatureExtractor\u2019s `),f=a("a"),T=r("from_pretrained()"),W=r(`, Wav2Vec2CTCTokenizer\u2019s `),k=a("code"),y=r("from_pretrained"),j=r(`, and `),O=a("code"),q=r("pyctcdecode.BeamSearchDecoderCTC.load_from_hf_hub"),F=r("."),A=l(),L=a("p"),X=r("Please refer to the docstrings of the methods above for more information."),this.h()},l(S){p=s(S,"P",{});var C=n(p);x=i(C,`This class method is simply calling Wav2Vec2FeatureExtractor\u2019s `),f=s(C,"A",{href:!0});var Ve=n(f);T=i(Ve,"from_pretrained()"),Ve.forEach(t),W=i(C,`, Wav2Vec2CTCTokenizer\u2019s `),k=s(C,"CODE",{});var ge=n(k);y=i(ge,"from_pretrained"),ge.forEach(t),j=i(C,`, and `),O=s(C,"CODE",{});var B=n(O);q=i(B,"pyctcdecode.BeamSearchDecoderCTC.load_from_hf_hub"),B.forEach(t),F=i(C,"."),C.forEach(t),A=c(S),L=s(S,"P",{});var H=n(L);X=i(H,"Please refer to the docstrings of the methods above for more information."),H.forEach(t),this.h()},h(){d(f,"href","/docs/transformers/pr_16143/en/main_classes/feature_extractor#transformers.FeatureExtractionMixin.from_pretrained")},m(S,C){h(S,p,C),e(p,x),e(p,f),e(f,T),e(p,W),e(p,k),e(k,y),e(p,j),e(p,O),e(O,q),e(p,F),h(S,A,C),h(S,L,C),e(L,X)},d(S){S&&t(p),S&&t(A),S&&t(L)}}}function TT(E){let p,x;return{c(){p=a("p"),x=r("This function makes use of Python\u2019s multiprocessing.")},l(f){p=s(f,"P",{});var T=n(p);x=i(T,"This function makes use of Python\u2019s multiprocessing."),T.forEach(t)},m(f,T){h(f,p,T),e(p,x)},d(f){f&&t(p)}}}function xT(E){let p,x,f,T,W;return{c(){p=a("p"),x=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a("code"),T=r("Module"),W=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(k){p=s(k,"P",{});var y=n(p);x=i(y,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s(y,"CODE",{});var j=n(f);T=i(j,"Module"),j.forEach(t),W=i(y,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores 
them.`),y.forEach(t)},m(k,y){h(k,p,y),e(p,x),e(p,f),e(f,T),e(p,W)},d(k){k&&t(p)}}}function WT(E){let p,x,f,T,W;return{c(){p=a("p"),x=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a("code"),T=r("Module"),W=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(k){p=s(k,"P",{});var y=n(p);x=i(y,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s(y,"CODE",{});var j=n(f);T=i(j,"Module"),j.forEach(t),W=i(y,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),y.forEach(t)},m(k,y){h(k,p,y),e(p,x),e(p,f),e(f,T),e(p,W)},d(k){k&&t(p)}}}function $T(E){let p,x,f,T,W;return{c(){p=a("p"),x=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a("code"),T=r("Module"),W=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(k){p=s(k,"P",{});var y=n(p);x=i(y,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s(y,"CODE",{});var j=n(f);T=i(j,"Module"),j.forEach(t),W=i(y,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),y.forEach(t)},m(k,y){h(k,p,y),e(p,x),e(p,f),e(f,T),e(p,W)},d(k){k&&t(p)}}}function jT(E){let p,x,f,T,W;return{c(){p=a("p"),x=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a("code"),T=r("Module"),W=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(k){p=s(k,"P",{});var y=n(p);x=i(y,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s(y,"CODE",{});var j=n(f);T=i(j,"Module"),j.forEach(t),W=i(y,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),y.forEach(t)},m(k,y){h(k,p,y),e(p,x),e(p,f),e(f,T),e(p,W)},d(k){k&&t(p)}}}function VT(E){let p,x,f,T,W;return{c(){p=a("p"),x=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a("code"),T=r("Module"),W=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(k){p=s(k,"P",{});var y=n(p);x=i(y,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s(y,"CODE",{});var j=n(f);T=i(j,"Module"),j.forEach(t),W=i(y,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),y.forEach(t)},m(k,y){h(k,p,y),e(p,x),e(p,f),e(f,T),e(p,W)},d(k){k&&t(p)}}}function FT(E){let p,x,f,T,W;return{c(){p=a("p"),x=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a("code"),T=r("Module"),W=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(k){p=s(k,"P",{});var y=n(p);x=i(y,"Although the recipe 
for forward pass needs to be defined within this function, one should call the "),f=s(y,"CODE",{});var j=n(f);T=i(j,"Module"),j.forEach(t),W=i(y,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),y.forEach(t)},m(k,y){h(k,p,y),e(p,x),e(p,f),e(f,T),e(p,W)},d(k){k&&t(p)}}}function ET(E){let p,x,f,T,W,k,y,j,O,q,F,A,L,X,S,C,Ve,ge,B,H,_e,Ge,M,I,Fe,ve,Ye,Ee,oe,ct,et,D,dt,de,pe,Je,we,pt,be,ae,Pe,ye,mt;return{c(){p=a("p"),x=r("TF 2.0 models accepts two formats as inputs:"),f=l(),T=a("ul"),W=a("li"),k=r("having all inputs as keyword arguments (like PyTorch models), or"),y=l(),j=a("li"),O=r("having all inputs as a list, tuple or dict in the first positional arguments."),q=l(),F=a("p"),A=r("This second option is useful when using "),L=a("code"),X=r("tf.keras.Model.fit"),S=r(` method which currently requires having all the tensors in the first argument of the model call function: `),C=a("code"),Ve=r("model(inputs)"),ge=r("."),B=l(),H=a("p"),_e=r(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Ge=l(),M=a("ul"),I=a("li"),Fe=r("a single Tensor with "),ve=a("code"),Ye=r("input_values"),Ee=r(" only and nothing else: "),oe=a("code"),ct=r("model(inputs_ids)"),et=l(),D=a("li"),dt=r(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),de=a("code"),pe=r("model([input_values, attention_mask])"),Je=r(" or "),we=a("code"),pt=r("model([input_values, attention_mask, token_type_ids])"),be=l(),ae=a("li"),Pe=r(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),ye=a("code"),mt=r('model({"input_values": input_values, "token_type_ids": token_type_ids})')},l($){p=s($,"P",{});var P=n(p);x=i(P,"TF 2.0 models accepts two formats as inputs:"),P.forEach(t),f=c($),T=s($,"UL",{});var Ze=n(T);W=s(Ze,"LI",{});var Ke=n(W);k=i(Ke,"having all inputs as keyword arguments (like PyTorch models), or"),Ke.forEach(t),y=c(Ze),j=s(Ze,"LI",{});var Bt=n(j);O=i(Bt,"having all inputs as a list, tuple or dict in the first positional arguments."),Bt.forEach(t),Ze.forEach(t),q=c($),F=s($,"P",{});var se=n(F);A=i(se,"This second option is useful when using "),L=s(se,"CODE",{});var xt=n(L);X=i(xt,"tf.keras.Model.fit"),xt.forEach(t),S=i(se,` method which currently requires having all the tensors in the first argument of the model call function: `),C=s(se,"CODE",{});var ke=n(C);Ve=i(ke,"model(inputs)"),ke.forEach(t),ge=i(se,"."),se.forEach(t),B=c($),H=s($,"P",{});var Ce=n(H);_e=i(Ce,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Ce.forEach(t),Ge=c($),M=s($,"UL",{});var G=n(M);I=s(G,"LI",{});var J=n(I);Fe=i(J,"a single Tensor with "),ve=s(J,"CODE",{});var Ut=n(ve);Ye=i(Ut,"input_values"),Ut.forEach(t),Ee=i(J," only and nothing else: "),oe=s(J,"CODE",{});var ht=n(oe);ct=i(ht,"model(inputs_ids)"),ht.forEach(t),J.forEach(t),et=c(G),D=s(G,"LI",{});var Te=n(D);dt=i(Te,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),de=s(Te,"CODE",{});var Wt=n(de);pe=i(Wt,"model([input_values, attention_mask])"),Wt.forEach(t),Je=i(Te," or "),we=s(Te,"CODE",{});var U=n(we);pt=i(U,"model([input_values, attention_mask, token_type_ids])"),U.forEach(t),Te.forEach(t),be=c(G),ae=s(G,"LI",{});var xe=n(ae);Pe=i(xe,`a dictionary with one or 
several input Tensors associated to the input names given in the docstring: `),ye=s(xe,"CODE",{});var Ht=n(ye);mt=i(Ht,'model({"input_values": input_values, "token_type_ids": token_type_ids})'),Ht.forEach(t),xe.forEach(t),G.forEach(t)},m($,P){h($,p,P),e(p,x),h($,f,P),h($,T,P),e(T,W),e(W,k),e(T,y),e(T,j),e(j,O),h($,q,P),h($,F,P),e(F,A),e(F,L),e(L,X),e(F,S),e(F,C),e(C,Ve),e(F,ge),h($,B,P),h($,H,P),e(H,_e),h($,Ge,P),h($,M,P),e(M,I),e(I,Fe),e(I,ve),e(ve,Ye),e(I,Ee),e(I,oe),e(oe,ct),e(M,et),e(M,D),e(D,dt),e(D,de),e(de,pe),e(D,Je),e(D,we),e(we,pt),e(M,be),e(M,ae),e(ae,Pe),e(ae,ye),e(ye,mt)},d($){$&&t(p),$&&t(f),$&&t(T),$&&t(q),$&&t(F),$&&t(B),$&&t(H),$&&t(Ge),$&&t(M)}}}function PT(E){let p,x,f,T,W;return{c(){p=a("p"),x=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a("code"),T=r("Module"),W=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(k){p=s(k,"P",{});var y=n(p);x=i(y,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s(y,"CODE",{});var j=n(f);T=i(j,"Module"),j.forEach(t),W=i(y,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),y.forEach(t)},m(k,y){h(k,p,y),e(p,x),e(p,f),e(f,T),e(p,W)},d(k){k&&t(p)}}}function CT(E){let p,x,f,T,W,k,y,j,O,q,F,A,L,X,S,C,Ve,ge,B,H,_e,Ge,M,I,Fe,ve,Ye,Ee,oe,ct,et,D,dt,de,pe,Je,we,pt,be,ae,Pe,ye,mt;return{c(){p=a("p"),x=r("TF 2.0 models accepts two formats as inputs:"),f=l(),T=a("ul"),W=a("li"),k=r("having all inputs as keyword arguments (like PyTorch models), or"),y=l(),j=a("li"),O=r("having all inputs as a list, tuple or dict in the first positional arguments."),q=l(),F=a("p"),A=r("This second option is useful when using "),L=a("code"),X=r("tf.keras.Model.fit"),S=r(` method which currently requires having all the tensors in the first argument of the model call function: `),C=a("code"),Ve=r("model(inputs)"),ge=r("."),B=l(),H=a("p"),_e=r(`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Ge=l(),M=a("ul"),I=a("li"),Fe=r("a single Tensor with "),ve=a("code"),Ye=r("input_values"),Ee=r(" only and nothing else: "),oe=a("code"),ct=r("model(inputs_ids)"),et=l(),D=a("li"),dt=r(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),de=a("code"),pe=r("model([input_values, attention_mask])"),Je=r(" or "),we=a("code"),pt=r("model([input_values, attention_mask, token_type_ids])"),be=l(),ae=a("li"),Pe=r(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),ye=a("code"),mt=r('model({"input_values": input_values, "token_type_ids": token_type_ids})')},l($){p=s($,"P",{});var P=n(p);x=i(P,"TF 2.0 models accepts two formats as inputs:"),P.forEach(t),f=c($),T=s($,"UL",{});var Ze=n(T);W=s(Ze,"LI",{});var Ke=n(W);k=i(Ke,"having all inputs as keyword arguments (like PyTorch models), or"),Ke.forEach(t),y=c(Ze),j=s(Ze,"LI",{});var Bt=n(j);O=i(Bt,"having all inputs as a list, tuple or dict in the first positional arguments."),Bt.forEach(t),Ze.forEach(t),q=c($),F=s($,"P",{});var se=n(F);A=i(se,"This second option is useful when using "),L=s(se,"CODE",{});var xt=n(L);X=i(xt,"tf.keras.Model.fit"),xt.forEach(t),S=i(se,` method which currently requires having all the tensors in the first 
argument of the model call function: `),C=s(se,"CODE",{});var ke=n(C);Ve=i(ke,"model(inputs)"),ke.forEach(t),ge=i(se,"."),se.forEach(t),B=c($),H=s($,"P",{});var Ce=n(H);_e=i(Ce,`If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :`),Ce.forEach(t),Ge=c($),M=s($,"UL",{});var G=n(M);I=s(G,"LI",{});var J=n(I);Fe=i(J,"a single Tensor with "),ve=s(J,"CODE",{});var Ut=n(ve);Ye=i(Ut,"input_values"),Ut.forEach(t),Ee=i(J," only and nothing else: "),oe=s(J,"CODE",{});var ht=n(oe);ct=i(ht,"model(inputs_ids)"),ht.forEach(t),J.forEach(t),et=c(G),D=s(G,"LI",{});var Te=n(D);dt=i(Te,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),de=s(Te,"CODE",{});var Wt=n(de);pe=i(Wt,"model([input_values, attention_mask])"),Wt.forEach(t),Je=i(Te," or "),we=s(Te,"CODE",{});var U=n(we);pt=i(U,"model([input_values, attention_mask, token_type_ids])"),U.forEach(t),Te.forEach(t),be=c(G),ae=s(G,"LI",{});var xe=n(ae);Pe=i(xe,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),ye=s(xe,"CODE",{});var Ht=n(ye);mt=i(Ht,'model({"input_values": input_values, "token_type_ids": token_type_ids})'),Ht.forEach(t),xe.forEach(t),G.forEach(t)},m($,P){h($,p,P),e(p,x),h($,f,P),h($,T,P),e(T,W),e(W,k),e(T,y),e(T,j),e(j,O),h($,q,P),h($,F,P),e(F,A),e(F,L),e(L,X),e(F,S),e(F,C),e(C,Ve),e(F,ge),h($,B,P),h($,H,P),e(H,_e),h($,Ge,P),h($,M,P),e(M,I),e(I,Fe),e(I,ve),e(ve,Ye),e(I,Ee),e(I,oe),e(oe,ct),e(M,et),e(M,D),e(D,dt),e(D,de),e(de,pe),e(D,Je),e(D,we),e(we,pt),e(M,be),e(M,ae),e(ae,Pe),e(ae,ye),e(ye,mt)},d($){$&&t(p),$&&t(f),$&&t(T),$&&t(q),$&&t(F),$&&t(B),$&&t(H),$&&t(Ge),$&&t(M)}}}function qT(E){let p,x,f,T,W;return{c(){p=a("p"),x=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a("code"),T=r("Module"),W=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(k){p=s(k,"P",{});var y=n(p);x=i(y,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s(y,"CODE",{});var j=n(f);T=i(j,"Module"),j.forEach(t),W=i(y,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),y.forEach(t)},m(k,y){h(k,p,y),e(p,x),e(p,f),e(f,T),e(p,W)},d(k){k&&t(p)}}}function MT(E){let p,x,f,T,W;return{c(){p=a("p"),x=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a("code"),T=r("Module"),W=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(k){p=s(k,"P",{});var y=n(p);x=i(y,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s(y,"CODE",{});var j=n(f);T=i(j,"Module"),j.forEach(t),W=i(y,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),y.forEach(t)},m(k,y){h(k,p,y),e(p,x),e(p,f),e(f,T),e(p,W)},d(k){k&&t(p)}}}function zT(E){let p,x,f,T,W;return{c(){p=a("p"),x=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a("code"),T=r("Module"),W=r(` instance afterwards instead of this since the former takes care of running the pre and 
post processing steps while the latter silently ignores them.`)},l(k){p=s(k,"P",{});var y=n(p);x=i(y,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s(y,"CODE",{});var j=n(f);T=i(j,"Module"),j.forEach(t),W=i(y,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),y.forEach(t)},m(k,y){h(k,p,y),e(p,x),e(p,f),e(f,T),e(p,W)},d(k){k&&t(p)}}}function AT(E){let p,x,f,T,W;return{c(){p=a("p"),x=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),f=a("code"),T=r("Module"),W=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(k){p=s(k,"P",{});var y=n(p);x=i(y,"Although the recipe for forward pass needs to be defined within this function, one should call the "),f=s(y,"CODE",{});var j=n(f);T=i(j,"Module"),j.forEach(t),W=i(y,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),y.forEach(t)},m(k,y){h(k,p,y),e(p,x),e(p,f),e(f,T),e(p,W)},d(k){k&&t(p)}}}function LT(E){let p,x,f,T,W,k,y,j,O,q,F,A,L,X,S,C,Ve,ge,B,H,_e,Ge,M,I,Fe,ve,Ye,Ee,oe,ct,et,D,dt,de,pe,Je,we,pt,be,ae,Pe,ye,mt,$,P,Ze,Ke,Bt,se,xt,ke,Ce,G,J,Ut,ht,Te,Wt,U,xe,Ht,Rt,lm,Mr,cm,dm,Fa,pm,mm,hm,Xt,fm,zr,um,gm,Ar,_m,vm,wm,Xi,bm,ym,Ea,sd,Gt,Fo,Gi,Pa,km,Ji,Tm,nd,Z,Ca,xm,Zi,Wm,$m,qa,jm,Lr,Vm,Fm,Em,Eo,Ma,Pm,Ki,Cm,qm,Dr,za,Mm,qe,Aa,zm,Qi,Am,Lm,La,Dm,Yi,Om,Sm,Nm,el,Im,Bm,Da,Um,Po,Oa,Hm,tl,Rm,rd,Jt,Co,ol,Sa,Xm,al,Gm,id,Qe,Na,Jm,sl,Zm,Km,Ia,Qm,Or,Ym,eh,th,qo,Ba,oh,nl,ah,ld,Zt,Mo,rl,Ua,sh,il,nh,cd,z,Ha,rh,ll,ih,lh,Me,Sr,ch,dh,Nr,ph,mh,Ir,hh,fh,Ra,cl,uh,gh,_h,Br,vh,wh,bh,zo,Xa,yh,ft,kh,Ga,dl,Th,xh,Wh,Ur,$h,jh,Ja,pl,Vh,Fh,Eh,Ph,Ao,Za,Ch,ut,qh,Hr,Mh,zh,Rr,Ah,Lh,Xr,Dh,Oh,Sh,Gr,Ka,Nh,$t,Qa,Ih,Ya,Bh,Jr,Uh,Hh,Rh,Lo,Xh,Do,es,Gh,ts,Jh,Zr,Zh,Kh,Qh,Oo,os,Yh,as,ef,Kr,tf,of,af,So,ss,sf,ml,nf,dd,Kt,No,hl,ns,rf,fl,lf,pd,N,rs,cf,ul,df,pf,Io,is,mf,gt,hf,ls,gl,ff,uf,gf,Qr,_f,vf,cs,_l,wf,bf,yf,kf,Bo,ds,Tf,_t,xf,Yr,Wf,$f,ei,jf,Vf,ti,Ff,Ef,Pf,jt,ps,Cf,ms,qf,oi,Mf,zf,Af,Uo,Lf,ai,hs,Df,Vt,fs,Of,vl,Sf,Nf,Ho,If,tt,us,Bf,wl,Uf,Hf,bl,Rf,Xf,gs,Gf,Ro,_s,Jf,yl,Zf,md,Qt,Xo,kl,vs,Kf,Tl,Qf,hd,Yt,ws,Yf,bs,eu,xl,tu,ou,fd,eo,ys,au,ks,su,Wl,nu,ru,ud,to,Ts,iu,xs,lu,si,cu,du,gd,vt,Ws,pu,$s,mu,$l,hu,fu,uu,Go,js,gu,jl,_u,_d,wt,Vs,vu,Fs,wu,Vl,bu,yu,ku,Jo,Es,Tu,Fl,xu,vd,oo,Zo,El,Ps,Wu,Pl,$u,wd,We,Cs,ju,qs,Vu,Ms,Fu,Eu,Pu,zs,Cu,ni,qu,Mu,zu,As,Au,Ls,Lu,Du,Ou,ze,Ds,Su,ao,Nu,ri,Iu,Bu,Cl,Uu,Hu,Ru,Ko,Xu,ql,Gu,Ju,Os,bd,so,Qo,Ml,Ss,Zu,zl,Ku,yd,$e,Ns,Qu,no,Yu,Al,eg,tg,Is,og,ag,sg,Bs,ng,ii,rg,ig,lg,Us,cg,Hs,dg,pg,mg,me,Rs,hg,ro,fg,li,ug,gg,Ll,_g,vg,wg,Yo,bg,Dl,yg,kg,Xs,Tg,Gs,kd,io,ea,Ol,Js,xg,Sl,Wg,Td,ne,Zs,$g,Nl,jg,Vg,Ks,Fg,Qs,Eg,Pg,Cg,Ys,qg,ci,Mg,zg,Ag,en,Lg,tn,Dg,Og,Sg,he,on,Ng,lo,Ig,di,Bg,Ug,Il,Hg,Rg,Xg,ta,Gg,Bl,Jg,Zg,an,Kg,sn,xd,co,oa,Ul,nn,Qg,Hl,Yg,Wd,re,rn,e_,Rl,t_,o_,ln,a_,cn,s_,n_,r_,dn,i_,pi,l_,c_,d_,pn,p_,mn,m_,h_,f_,Ae,hn,u_,po,g_,mi,__,v_,Xl,w_,b_,y_,aa,k_,Gl,T_,x_,fn,$d,mo,sa,Jl,un,W_,Zl,$_,jd,ie,gn,j_,Kl,V_,F_,_n,E_,vn,P_,C_,q_,wn,M_,hi,z_,A_,L_,bn,D_,yn,O_,S_,N_,Le,kn,I_,ho,B_,fi,U_,H_,Ql,R_,X_,G_,na,J_,Yl,Z_,K_,Tn,Vd,fo,ra,ec,xn,Q_,tc,Y_,Fd,je,Wn,ev,uo,tv,oc,ov,av,$n,sv,nv,rv,jn,iv,ui,lv,cv,dv,Vn,pv,Fn,mv,hv,fv,De,En,uv,go,gv,gi,_v,vv,ac,wv,bv,yv,ia,kv,sc,Tv,xv,Pn,Ed,_o,la,nc,Cn,Wv,rc,$v,Pd,le,qn,jv,ic,Vv,Fv,Mn,Ev,_i,Pv,Cv,qv,zn,Mv,An,zv,Av,Lv,ca,Dv,Oe,Ln,Ov,vo,Sv,vi,Nv,Iv,lc,Bv,Uv,Hv,da,R
v,cc,Xv,Gv,Dn,Cd,wo,pa,dc,On,Jv,pc,Zv,qd,ce,Sn,Kv,Nn,Qv,mc,Yv,e2,t2,In,o2,wi,a2,s2,n2,Bn,r2,Un,i2,l2,c2,ma,d2,Se,Hn,p2,bo,m2,bi,h2,f2,hc,u2,g2,_2,ha,v2,fc,w2,b2,Rn,Md,yo,fa,uc,Xn,y2,gc,k2,zd,K,Gn,T2,Jn,x2,Zn,W2,$2,j2,Kn,V2,yi,F2,E2,P2,Qn,C2,Yn,q2,M2,z2,_c,A2,L2,bt,vc,er,D2,O2,wc,tr,S2,N2,bc,or,I2,B2,yc,ar,U2,H2,Ne,sr,R2,ko,X2,kc,G2,J2,Tc,Z2,K2,Q2,ua,Y2,xc,ew,tw,nr,Ad,To,ga,Wc,rr,ow,$c,aw,Ld,Q,ir,sw,xo,nw,jc,rw,iw,lr,lw,cw,dw,cr,pw,ki,mw,hw,fw,dr,uw,pr,gw,_w,vw,Vc,ww,bw,yt,Fc,mr,yw,kw,Ec,hr,Tw,xw,Pc,fr,Ww,$w,Cc,ur,jw,Vw,Ie,gr,Fw,Wo,Ew,qc,Pw,Cw,Mc,qw,Mw,zw,_a,Aw,zc,Lw,Dw,_r,Dd,$o,va,Ac,vr,Ow,Lc,Sw,Od,Y,wr,Nw,jo,Iw,Dc,Bw,Uw,br,Hw,Rw,Xw,yr,Gw,Ti,Jw,Zw,Kw,kr,Qw,Tr,Yw,eb,tb,Oc,ob,ab,kt,Sc,xr,sb,nb,Nc,Wr,rb,ib,Ic,$r,lb,cb,Bc,jr,db,pb,Be,Vr,mb,Vo,hb,xi,fb,ub,Uc,gb,_b,vb,wa,wb,Hc,bb,yb,Fr,Sd;return k=new ee({}),X=new ee({}),J=new ee({}),xe=new V({props:{name:"class transformers.Wav2Vec2Config",anchor:"transformers.Wav2Vec2Config",parameters:[{name:"vocab_size",val:" = 32"},{name:"hidden_size",val:" = 768"},{name:"num_hidden_layers",val:" = 12"},{name:"num_attention_heads",val:" = 12"},{name:"intermediate_size",val:" = 3072"},{name:"hidden_act",val:" = 'gelu'"},{name:"hidden_dropout",val:" = 0.1"},{name:"activation_dropout",val:" = 0.1"},{name:"attention_dropout",val:" = 0.1"},{name:"feat_proj_dropout",val:" = 0.0"},{name:"feat_quantizer_dropout",val:" = 0.0"},{name:"final_dropout",val:" = 0.1"},{name:"layerdrop",val:" = 0.1"},{name:"initializer_range",val:" = 0.02"},{name:"layer_norm_eps",val:" = 1e-05"},{name:"feat_extract_norm",val:" = 'group'"},{name:"feat_extract_activation",val:" = 'gelu'"},{name:"conv_dim",val:" = (512, 512, 512, 512, 512, 512, 512)"},{name:"conv_stride",val:" = (5, 2, 2, 2, 2, 2, 2)"},{name:"conv_kernel",val:" = (10, 3, 3, 3, 3, 2, 2)"},{name:"conv_bias",val:" = False"},{name:"num_conv_pos_embeddings",val:" = 128"},{name:"num_conv_pos_embedding_groups",val:" = 16"},{name:"do_stable_layer_norm",val:" = False"},{name:"apply_spec_augment",val:" = True"},{name:"mask_time_prob",val:" = 0.05"},{name:"mask_time_length",val:" = 10"},{name:"mask_time_min_masks",val:" = 2"},{name:"mask_feature_prob",val:" = 0.0"},{name:"mask_feature_length",val:" = 10"},{name:"mask_feature_min_masks",val:" = 0"},{name:"num_codevectors_per_group",val:" = 320"},{name:"num_codevector_groups",val:" = 2"},{name:"contrastive_logits_temperature",val:" = 0.1"},{name:"num_negatives",val:" = 100"},{name:"codevector_dim",val:" = 256"},{name:"proj_codevector_dim",val:" = 256"},{name:"diversity_loss_weight",val:" = 0.1"},{name:"ctc_loss_reduction",val:" = 'sum'"},{name:"ctc_zero_infinity",val:" = False"},{name:"use_weighted_layer_sum",val:" = False"},{name:"classifier_proj_size",val:" = 256"},{name:"tdnn_dim",val:" = (512, 512, 512, 512, 1500)"},{name:"tdnn_kernel",val:" = (5, 3, 3, 1, 1)"},{name:"tdnn_dilation",val:" = (1, 2, 3, 1, 1)"},{name:"xvector_output_dim",val:" = 512"},{name:"pad_token_id",val:" = 0"},{name:"bos_token_id",val:" = 1"},{name:"eos_token_id",val:" = 2"},{name:"add_adapter",val:" = False"},{name:"adapter_kernel_size",val:" = 3"},{name:"adapter_stride",val:" = 2"},{name:"num_adapter_layers",val:" = 3"},{name:"output_hidden_size",val:" = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/wav2vec2/configuration_wav2vec2.py#L32",parametersDescription:[{anchor:"transformers.Wav2Vec2Config.vocab_size",description:`<strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 32) &#x2014; Vocabulary size of the 
Wav2Vec2 model. Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed when calling <a href="/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2Model">Wav2Vec2Model</a> or <a href="/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.TFWav2Vec2Model">TFWav2Vec2Model</a>. Vocabulary size of the model. Defines the different tokens that can be represented by the <em>inputs_ids</em> passed to the forward method of <a href="/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2Model">Wav2Vec2Model</a>.`,name:"vocab_size"},{anchor:"transformers.Wav2Vec2Config.hidden_size",description:`<strong>hidden_size</strong> (<code>int</code>, <em>optional</em>, defaults to 768) &#x2014; Dimensionality of the encoder layers and the pooler layer.`,name:"hidden_size"},{anchor:"transformers.Wav2Vec2Config.num_hidden_layers",description:`<strong>num_hidden_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of hidden layers in the Transformer encoder.`,name:"num_hidden_layers"},{anchor:"transformers.Wav2Vec2Config.num_attention_heads",description:`<strong>num_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.`,name:"num_attention_heads"},{anchor:"transformers.Wav2Vec2Config.intermediate_size",description:`<strong>intermediate_size</strong> (<code>int</code>, <em>optional</em>, defaults to 3072) &#x2014; Dimensionality of the &#x201C;intermediate&#x201D; (i.e., feed-forward) layer in the Transformer encoder.`,name:"intermediate_size"},{anchor:"transformers.Wav2Vec2Config.hidden_act",description:`<strong>hidden_act</strong> (<code>str</code> or <code>function</code>, <em>optional</em>, defaults to <code>&quot;gelu&quot;</code>) &#x2014; The non-linear activation function (function or string) in the encoder and pooler. 
If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;selu&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.`,name:"hidden_act"},{anchor:"transformers.Wav2Vec2Config.hidden_dropout",description:`<strong>hidden_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.`,name:"hidden_dropout"},{anchor:"transformers.Wav2Vec2Config.attention_dropout",description:`<strong>attention_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout ratio for the attention probabilities.`,name:"attention_dropout"},{anchor:"transformers.Wav2Vec2Config.final_dropout",description:`<strong>final_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability for the final projection layer of <a href="/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2ForCTC">Wav2Vec2ForCTC</a>.`,name:"final_dropout"},{anchor:"transformers.Wav2Vec2Config.initializer_range",description:`<strong>initializer_range</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices.`,name:"initializer_range"},{anchor:"transformers.Wav2Vec2Config.layer_norm_eps",description:`<strong>layer_norm_eps</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-12) &#x2014; The epsilon used by the layer normalization layers.`,name:"layer_norm_eps"},{anchor:"transformers.Wav2Vec2Config.feat_extract_norm",description:`<strong>feat_extract_norm</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;group&quot;</code>) &#x2014; The norm to be applied to 1D convolutional layers in feature encoder. One of <code>&quot;group&quot;</code> for group normalization of only the first 1D convolutional layer or <code>&quot;layer&quot;</code> for layer normalization of all 1D convolutional layers.`,name:"feat_extract_norm"},{anchor:"transformers.Wav2Vec2Config.feat_proj_dropout",description:`<strong>feat_proj_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout probability for output of the feature encoder.`,name:"feat_proj_dropout"},{anchor:"transformers.Wav2Vec2Config.feat_extract_activation",description:"<strong>feat_extract_activation</strong> (<code>str, </code>optional<code>, defaults to </code>&#x201C;gelu&#x201D;<code>) -- The non-linear activation function (function or string) in the 1D convolutional layers of the feature extractor. If string, </code>&#x201C;gelu&#x201D;<code>, </code>&#x201C;relu&#x201D;<code>, </code>&#x201C;selu&#x201D;<code>and</code>&#x201C;gelu_new&#x201D;` are supported.",name:"feat_extract_activation"},{anchor:"transformers.Wav2Vec2Config.feat_quantizer_dropout",description:`<strong>feat_quantizer_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout probabilitiy for quantized feature encoder states.`,name:"feat_quantizer_dropout"},{anchor:"transformers.Wav2Vec2Config.conv_dim",description:`<strong>conv_dim</strong> (<code>Tuple[int]</code>, <em>optional</em>, defaults to <code>(512, 512, 512, 512, 512, 512, 512)</code>) &#x2014; A tuple of integers defining the number of input and output channels of each 1D convolutional layer in the feature encoder. 
The length of <em>conv_dim</em> defines the number of 1D convolutional layers.`,name:"conv_dim"},{anchor:"transformers.Wav2Vec2Config.conv_stride",description:`<strong>conv_stride</strong> (<code>Tuple[int]</code>, <em>optional</em>, defaults to <code>(5, 2, 2, 2, 2, 2, 2)</code>) &#x2014; A tuple of integers defining the stride of each 1D convolutional layer in the feature encoder. The length of <em>conv_stride</em> defines the number of convolutional layers and has to match the length of <em>conv_dim</em>.`,name:"conv_stride"},{anchor:"transformers.Wav2Vec2Config.conv_kernel",description:`<strong>conv_kernel</strong> (<code>Tuple[int]</code>, <em>optional</em>, defaults to <code>(10, 3, 3, 3, 3, 3, 3)</code>) &#x2014; A tuple of integers defining the kernel size of each 1D convolutional layer in the feature encoder. The length of <em>conv_kernel</em> defines the number of convolutional layers and has to match the length of <em>conv_dim</em>.`,name:"conv_kernel"},{anchor:"transformers.Wav2Vec2Config.conv_bias",description:`<strong>conv_bias</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether the 1D convolutional layers have a bias.`,name:"conv_bias"},{anchor:"transformers.Wav2Vec2Config.num_conv_pos_embeddings",description:`<strong>num_conv_pos_embeddings</strong> (<code>int</code>, <em>optional</em>, defaults to 128) &#x2014; Number of convolutional positional embeddings. Defines the kernel size of 1D convolutional positional embeddings layer.`,name:"num_conv_pos_embeddings"},{anchor:"transformers.Wav2Vec2Config.num_conv_pos_embedding_groups",description:`<strong>num_conv_pos_embedding_groups</strong> (<code>int</code>, <em>optional</em>, defaults to 16) &#x2014; Number of groups of 1D convolutional positional embeddings layer.`,name:"num_conv_pos_embedding_groups"},{anchor:"transformers.Wav2Vec2Config.do_stable_layer_norm",description:`<strong>do_stable_layer_norm</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to apply <em>stable</em> layer norm architecture of the Transformer encoder. <code>do_stable_layer_norm is True</code> corresponds to applying layer norm before the attention layer, whereas <code>do_stable_layer_norm is False</code> corresponds to applying layer norm after the attention layer.`,name:"do_stable_layer_norm"},{anchor:"transformers.Wav2Vec2Config.apply_spec_augment",description:`<strong>apply_spec_augment</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether to apply <em>SpecAugment</em> data augmentation to the outputs of the feature encoder. For reference see <a href="https://arxiv.org/abs/1904.08779" rel="nofollow">SpecAugment: A Simple Data Augmentation Method for Automatic Speech Recognition</a>.`,name:"apply_spec_augment"},{anchor:"transformers.Wav2Vec2Config.mask_time_prob",description:`<strong>mask_time_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.05) &#x2014; Percentage (between 0 and 1) of all feature vectors along the time axis which will be masked. The masking procecure generates &#x201D;mask_time_prob<em>len(time_axis)/mask_time_length&#x201D; independent masks over the axis. If reasoning from the propability of each feature vector to be chosen as the start of the vector span to be masked, </em>mask_time_prob<em> should be \`prob_vector_start</em>mask_time_length<code>. Note that overlap may decrease the actual percentage of masked vectors. 
This is only relevant if </code>apply_spec_augment is True\`.`,name:"mask_time_prob"},{anchor:"transformers.Wav2Vec2Config.mask_time_length",description:`<strong>mask_time_length</strong> (<code>int</code>, <em>optional</em>, defaults to 10) &#x2014; Length of vector span along the time axis.`,name:"mask_time_length"},{anchor:"transformers.Wav2Vec2Config.mask_time_min_masks",description:`<strong>mask_time_min_masks</strong> (<code>int</code>, <em>optional</em>, defaults to 2), &#x2014; The minimum number of masks of length <code>mask_feature_length</code> generated along the time axis, each time step, irrespectively of <code>mask_feature_prob</code>. Only relevant if &#x201D;mask_time_prob*len(time_axis)/mask_time_length &lt; mask_time_min_masks&#x201D;`,name:"mask_time_min_masks"},{anchor:"transformers.Wav2Vec2Config.mask_feature_prob",description:`<strong>mask_feature_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; Percentage (between 0 and 1) of all feature vectors along the feature axis which will be masked. The masking procecure generates &#x201D;mask_feature_prob<em>len(feature_axis)/mask_time_length&#x201D; independent masks over the axis. If reasoning from the propability of each feature vector to be chosen as the start of the vector span to be masked, </em>mask_feature_prob<em> should be \`prob_vector_start</em>mask_feature_length<code>. Note that overlap may decrease the actual percentage of masked vectors. This is only relevant if </code>apply_spec_augment is True\`.`,name:"mask_feature_prob"},{anchor:"transformers.Wav2Vec2Config.mask_feature_length",description:`<strong>mask_feature_length</strong> (<code>int</code>, <em>optional</em>, defaults to 10) &#x2014; Length of vector span along the feature axis.`,name:"mask_feature_length"},{anchor:"transformers.Wav2Vec2Config.mask_feature_min_masks",description:`<strong>mask_feature_min_masks</strong> (<code>int</code>, <em>optional</em>, defaults to 0), &#x2014; The minimum number of masks of length <code>mask_feature_length</code> generated along the feature axis, each time step, irrespectively of <code>mask_feature_prob</code>. 
Only relevant if &#x201D;mask_feature_prob*len(feature_axis)/mask_feature_length &lt; mask_feature_min_masks&#x201D;`,name:"mask_feature_min_masks"},{anchor:"transformers.Wav2Vec2Config.num_codevectors_per_group",description:`<strong>num_codevectors_per_group</strong> (<code>int</code>, <em>optional</em>, defaults to 320) &#x2014; Number of entries in each quantization codebook (group).`,name:"num_codevectors_per_group"},{anchor:"transformers.Wav2Vec2Config.num_codevector_groups",description:`<strong>num_codevector_groups</strong> (<code>int</code>, <em>optional</em>, defaults to 2) &#x2014; Number of codevector groups for product codevector quantization.`,name:"num_codevector_groups"},{anchor:"transformers.Wav2Vec2Config.contrastive_logits_temperature",description:`<strong>contrastive_logits_temperature</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The temperature <em>kappa</em> in the contrastive loss.`,name:"contrastive_logits_temperature"},{anchor:"transformers.Wav2Vec2Config.feat_quantizer_dropout",description:`<strong>feat_quantizer_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout probabilitiy for the output of the feature encoder that&#x2019;s used by the quantizer.`,name:"feat_quantizer_dropout"},{anchor:"transformers.Wav2Vec2Config.num_negatives",description:`<strong>num_negatives</strong> (<code>int</code>, <em>optional</em>, defaults to 100) &#x2014; Number of negative samples for the contrastive loss.`,name:"num_negatives"},{anchor:"transformers.Wav2Vec2Config.codevector_dim",description:`<strong>codevector_dim</strong> (<code>int</code>, <em>optional</em>, defaults to 256) &#x2014; Dimensionality of the quantized feature vectors.`,name:"codevector_dim"},{anchor:"transformers.Wav2Vec2Config.proj_codevector_dim",description:`<strong>proj_codevector_dim</strong> (<code>int</code>, <em>optional</em>, defaults to 256) &#x2014; Dimensionality of the final projection of both the quantized and the transformer features.`,name:"proj_codevector_dim"},{anchor:"transformers.Wav2Vec2Config.diversity_loss_weight",description:`<strong>diversity_loss_weight</strong> (<code>int</code>, <em>optional</em>, defaults to 0.1) &#x2014; The weight of the codebook diversity loss component.`,name:"diversity_loss_weight"},{anchor:"transformers.Wav2Vec2Config.ctc_loss_reduction",description:`<strong>ctc_loss_reduction</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;sum&quot;</code>) &#x2014; Specifies the reduction to apply to the output of <code>torch.nn.CTCLoss</code>. Only relevant when training an instance of <a href="/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2ForCTC">Wav2Vec2ForCTC</a>.`,name:"ctc_loss_reduction"},{anchor:"transformers.Wav2Vec2Config.ctc_zero_infinity",description:`<strong>ctc_zero_infinity</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to zero infinite losses and the associated gradients of <code>torch.nn.CTCLoss</code>. Infinite losses mainly occur when the inputs are too short to be aligned to the targets. 
Only relevant when training an instance of <a href="/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2ForCTC">Wav2Vec2ForCTC</a>.`,name:"ctc_zero_infinity"},{anchor:"transformers.Wav2Vec2Config.use_weighted_layer_sum",description:`<strong>use_weighted_layer_sum</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to use a weighted average of layer outputs with learned weights. Only relevant when using an instance of <a href="/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2ForSequenceClassification">Wav2Vec2ForSequenceClassification</a>.`,name:"use_weighted_layer_sum"},{anchor:"transformers.Wav2Vec2Config.classifier_proj_size",description:`<strong>classifier_proj_size</strong> (<code>int</code>, <em>optional</em>, defaults to 256) &#x2014; Dimensionality of the projection before token mean-pooling for classification.`,name:"classifier_proj_size"},{anchor:"transformers.Wav2Vec2Config.tdnn_dim",description:`<strong>tdnn_dim</strong> (<code>Tuple[int]</code>, <em>optional</em>, defaults to <code>(512, 512, 512, 512, 1500)</code>) &#x2014; A tuple of integers defining the number of output channels of each 1D convolutional layer in the <em>TDNN</em> module of the <em>XVector</em> model. The length of <em>tdnn_dim</em> defines the number of <em>TDNN</em> layers.`,name:"tdnn_dim"},{anchor:"transformers.Wav2Vec2Config.tdnn_kernel",description:`<strong>tdnn_kernel</strong> (<code>Tuple[int]</code>, <em>optional</em>, defaults to <code>(5, 3, 3, 1, 1)</code>) &#x2014; A tuple of integers defining the kernel size of each 1D convolutional layer in the <em>TDNN</em> module of the <em>XVector</em> model. The length of <em>tdnn_kernel</em> has to match the length of <em>tdnn_dim</em>.`,name:"tdnn_kernel"},{anchor:"transformers.Wav2Vec2Config.tdnn_dilation",description:`<strong>tdnn_dilation</strong> (<code>Tuple[int]</code>, <em>optional</em>, defaults to <code>(1, 2, 3, 1, 1)</code>) &#x2014; A tuple of integers defining the dilation factor of each 1D convolutional layer in <em>TDNN</em> module of the <em>XVector</em> model. The length of <em>tdnn_dilation</em> has to match the length of <em>tdnn_dim</em>.`,name:"tdnn_dilation"},{anchor:"transformers.Wav2Vec2Config.xvector_output_dim",description:`<strong>xvector_output_dim</strong> (<code>int</code>, <em>optional</em>, defaults to 512) &#x2014; Dimensionality of the <em>XVector</em> embedding vectors.`,name:"xvector_output_dim"},{anchor:"transformers.Wav2Vec2Config.add_adapter",description:`<strong>add_adapter</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether a convolutional network should be stacked on top of the Wav2Vec2 Encoder. Can be very useful for warm-starting Wav2Vec2 for SpeechEncoderDecoder models.`,name:"add_adapter"},{anchor:"transformers.Wav2Vec2Config.adapter_kernel_size",description:`<strong>adapter_kernel_size</strong> (<code>int</code>, <em>optional</em>, defaults to 3) &#x2014; Kernel size of the convolutional layers in the adapter network. Only relevant if <code>add_adapter is True</code>.`,name:"adapter_kernel_size"},{anchor:"transformers.Wav2Vec2Config.adapter_stride",description:`<strong>adapter_stride</strong> (<code>int</code>, <em>optional</em>, defaults to 2) &#x2014; Stride of the convolutional layers in the adapter network. 
Only relevant if <code>add_adapter is True</code>.`,name:"adapter_stride"},{anchor:"transformers.Wav2Vec2Config.num_adapter_layers",description:`<strong>num_adapter_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 3) &#x2014; Number of convolutional layers that should be used in the adapter network. Only relevant if <code>add_adapter is True</code>.`,name:"num_adapter_layers"},{anchor:"transformers.Wav2Vec2Config.output_hidden_size",description:`<strong>output_hidden_size</strong> (<code>int</code>, <em>optional</em>) &#x2014; Dimensionality of the encoder output layer. If not defined, this defaults to <em>hidden-size</em>. Only relevant if <code>add_adapter is True</code>.`,name:"output_hidden_size"}]}}),Ea=new ue({props:{code:`from transformers import Wav2Vec2Model, Wav2Vec2Config # Initializing a Wav2Vec2 facebook/wav2vec2-base-960h style configuration configuration = Wav2Vec2Config() # Initializing a model from the facebook/wav2vec2-base-960h style configuration model = Wav2Vec2Model(configuration) # Accessing the model configuration configuration = model.config`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Wav2Vec2Model, Wav2Vec2Config <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a Wav2Vec2 facebook/wav2vec2-base-960h style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = Wav2Vec2Config() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a model from the facebook/wav2vec2-base-960h style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = Wav2Vec2Model(configuration) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = model.config`}}),Pa=new ee({}),Ca=new V({props:{name:"class transformers.Wav2Vec2CTCTokenizer",anchor:"transformers.Wav2Vec2CTCTokenizer",parameters:[{name:"vocab_file",val:""},{name:"bos_token",val:" = '<s>'"},{name:"eos_token",val:" = '</s>'"},{name:"unk_token",val:" = '<unk>'"},{name:"pad_token",val:" = '<pad>'"},{name:"word_delimiter_token",val:" = '|'"},{name:"replace_word_delimiter_char",val:" = ' '"},{name:"do_lower_case",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/wav2vec2/tokenization_wav2vec2.py#L125",parametersDescription:[{anchor:"transformers.Wav2Vec2CTCTokenizer.vocab_file",description:`<strong>vocab_file</strong> (<code>str</code>) &#x2014; File containing the vocabulary.`,name:"vocab_file"},{anchor:"transformers.Wav2Vec2CTCTokenizer.bos_token",description:`<strong>bos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;s&gt;&quot;</code>) &#x2014; The beginning of sentence token.`,name:"bos_token"},{anchor:"transformers.Wav2Vec2CTCTokenizer.eos_token",description:`<strong>eos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;/s&gt;&quot;</code>) &#x2014; The end of sentence token.`,name:"eos_token"},{anchor:"transformers.Wav2Vec2CTCTokenizer.unk_token",description:`<strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;unk&gt;&quot;</code>) &#x2014; The unknown token. 
A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.`,name:"unk_token"},{anchor:"transformers.Wav2Vec2CTCTokenizer.pad_token",description:`<strong>pad_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;pad&gt;&quot;</code>) &#x2014; The token used for padding, for example when batching sequences of different lengths.`,name:"pad_token"},{anchor:"transformers.Wav2Vec2CTCTokenizer.word_delimiter_token",description:`<strong>word_delimiter_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;|&quot;</code>) &#x2014; The token used for defining the end of a word.`,name:"word_delimiter_token"},{anchor:"transformers.Wav2Vec2CTCTokenizer.do_lower_case",description:`<strong>do_lower_case</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to accept lowercase input and lowercase the output when decoding.</p> <p>**kwargs &#x2014; Additional keyword arguments passed along to <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>`,name:"do_lower_case"}]}}),Ma=new V({props:{name:"__call__",anchor:"transformers.PreTrainedTokenizerBase.__call__",parameters:[{name:"text",val:": typing.Union[str, typing.List[str], typing.List[typing.List[str]]]"},{name:"text_pair",val:": typing.Union[str, typing.List[str], typing.List[typing.List[str]], NoneType] = None"},{name:"add_special_tokens",val:": bool = True"},{name:"padding",val:": typing.Union[bool, str, transformers.file_utils.PaddingStrategy] = False"},{name:"truncation",val:": typing.Union[bool, str, transformers.tokenization_utils_base.TruncationStrategy] = False"},{name:"max_length",val:": typing.Optional[int] = None"},{name:"stride",val:": int = 0"},{name:"is_split_into_words",val:": bool = False"},{name:"pad_to_multiple_of",val:": typing.Optional[int] = None"},{name:"return_tensors",val:": typing.Union[str, transformers.file_utils.TensorType, NoneType] = None"},{name:"return_token_type_ids",val:": typing.Optional[bool] = None"},{name:"return_attention_mask",val:": typing.Optional[bool] = None"},{name:"return_overflowing_tokens",val:": bool = False"},{name:"return_special_tokens_mask",val:": bool = False"},{name:"return_offsets_mapping",val:": bool = False"},{name:"return_length",val:": bool = False"},{name:"verbose",val:": bool = True"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils_base.py#L2379",parametersDescription:[{anchor:"transformers.PreTrainedTokenizerBase.__call__.text",description:`<strong>text</strong> (<code>str</code>, <code>List[str]</code>, <code>List[List[str]]</code>) &#x2014; The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set <code>is_split_into_words=True</code> (to lift the ambiguity with a batch of sequences).`,name:"text"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.text_pair",description:`<strong>text_pair</strong> (<code>str</code>, <code>List[str]</code>, <code>List[List[str]]</code>) &#x2014; The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). 
If the sequences are provided as list of strings (pretokenized), you must set <code>is_split_into_words=True</code> (to lift the ambiguity with a batch of sequences).`,name:"text_pair"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.add_special_tokens",description:`<strong>add_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to encode the sequences with the special tokens relative to their model.`,name:"add_special_tokens"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.padding",description:`<strong>padding</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/file_utils#transformers.file_utils.PaddingStrategy">PaddingStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls padding. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest&apos;</code>: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided).</li> <li><code>&apos;max_length&apos;</code>: Pad to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided.</li> <li><code>False</code> or <code>&apos;do_not_pad&apos;</code> (default): No padding (i.e., can output a batch with sequences of different lengths).</li> </ul>`,name:"padding"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.truncation",description:`<strong>truncation</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.tokenization_utils_base.TruncationStrategy">TruncationStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls truncation. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_second&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. 
This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>False</code> or <code>&apos;do_not_truncate&apos;</code> (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size).</li> </ul>`,name:"truncation"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.max_length",description:`<strong>max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; Controls the maximum length to use by one of the truncation/padding parameters.</p> <p>If left unset or set to <code>None</code>, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated.`,name:"max_length"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.stride",description:`<strong>stride</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If set to a number along with <code>max_length</code>, the overflowing tokens returned when <code>return_overflowing_tokens=True</code> will contain some tokens from the end of the truncated sequence returned to provide some overlap between truncated and overflowing sequences. The value of this argument defines the number of overlapping tokens.`,name:"stride"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.is_split_into_words",description:`<strong>is_split_into_words</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the input is already pre-tokenized (e.g., split into words). If set to <code>True</code>, the tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace) which it will tokenize. This is useful for NER or token classification.`,name:"is_split_into_words"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.pad_to_multiple_of",description:`<strong>pad_to_multiple_of</strong> (<code>int</code>, <em>optional</em>) &#x2014; If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability &gt;= 7.5 (Volta).`,name:"pad_to_multiple_of"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.return_tensors",description:`<strong>return_tensors</strong> (<code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>) &#x2014; If set, will return tensors instead of list of python integers. Acceptable values are:</p> <ul> <li><code>&apos;tf&apos;</code>: Return TensorFlow <code>tf.constant</code> objects.</li> <li><code>&apos;pt&apos;</code>: Return PyTorch <code>torch.Tensor</code> objects.</li> <li><code>&apos;np&apos;</code>: Return Numpy <code>np.ndarray</code> objects.</li> </ul>`,name:"return_tensors"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.return_token_type_ids",description:`<strong>return_token_type_ids</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to return token type IDs. 
If left to the default, will return the token type IDs according to the specific tokenizer&#x2019;s default, defined by the <code>return_outputs</code> attribute.</p> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"return_token_type_ids"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.return_attention_mask",description:`<strong>return_attention_mask</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to return the attention mask. If left to the default, will return the attention mask according to the specific tokenizer&#x2019;s default, defined by the <code>return_outputs</code> attribute.</p> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"return_attention_mask"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.return_overflowing_tokens",description:`<strong>return_overflowing_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return overflowing token sequences. If a pair of sequences of input ids (or a batch of pairs) is provided with <code>truncation_strategy = longest_first</code> or <code>True</code>, an error is raised instead of returning overflowing tokens.`,name:"return_overflowing_tokens"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.return_special_tokens_mask",description:`<strong>return_special_tokens_mask</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return special tokens mask information.`,name:"return_special_tokens_mask"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.return_offsets_mapping",description:`<strong>return_offsets_mapping</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return <code>(char_start, char_end)</code> for each token.</p> <p>This is only available on fast tokenizers inheriting from <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast">PreTrainedTokenizerFast</a>, if using Python&#x2019;s tokenizer, this method will raise <code>NotImplementedError</code>.`,name:"return_offsets_mapping"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.return_length",description:`<strong>return_length</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the lengths of the encoded inputs.`,name:"return_length"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.verbose",description:`<strong>verbose</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to print more information and warnings. 
**kwargs &#x2014; passed to the <code>self.tokenize()</code> method`,name:"verbose"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a> with the following fields:</p> <ul> <li> <p><strong>input_ids</strong> \u2014 List of token ids to be fed to a model.</p> <p><a href="../glossary#input-ids">What are input IDs?</a></p> </li> <li> <p><strong>token_type_ids</strong> \u2014 List of token type ids to be fed to a model (when <code>return_token_type_ids=True</code> or if <em>\u201Ctoken_type_ids\u201D</em> is in <code>self.model_input_names</code>).</p> <p><a href="../glossary#token-type-ids">What are token type IDs?</a></p> </li> <li> <p><strong>attention_mask</strong> \u2014 List of indices specifying which tokens should be attended to by the model (when <code>return_attention_mask=True</code> or if <em>\u201Cattention_mask\u201D</em> is in <code>self.model_input_names</code>).</p> <p><a href="../glossary#attention-mask">What are attention masks?</a></p> </li> <li> <p><strong>overflowing_tokens</strong> \u2014 List of overflowing tokens sequences (when a <code>max_length</code> is specified and <code>return_overflowing_tokens=True</code>).</p> </li> <li> <p><strong>num_truncated_tokens</strong> \u2014 Number of tokens truncated (when a <code>max_length</code> is specified and <code>return_overflowing_tokens=True</code>).</p> </li> <li> <p><strong>special_tokens_mask</strong> \u2014 List of 0s and 1s, with 1 specifying added special tokens and 0 specifying regular sequence tokens (when <code>add_special_tokens=True</code> and <code>return_special_tokens_mask=True</code>).</p> </li> <li> <p><strong>length</strong> \u2014 The length of the inputs (when <code>return_length=True</code>)</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a></p> `}}),za=new V({props:{name:"save_vocabulary",anchor:"transformers.Wav2Vec2CTCTokenizer.save_vocabulary",parameters:[{name:"save_directory",val:": str"},{name:"filename_prefix",val:": typing.Optional[str] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/wav2vec2/tokenization_wav2vec2.py#L591"}}),Aa=new V({props:{name:"decode",anchor:"transformers.Wav2Vec2CTCTokenizer.decode",parameters:[{name:"token_ids",val:": typing.Union[int, typing.List[int], ForwardRef('np.ndarray'), ForwardRef('torch.Tensor'), ForwardRef('tf.Tensor')]"},{name:"skip_special_tokens",val:": bool = False"},{name:"clean_up_tokenization_spaces",val:": bool = True"},{name:"output_char_offsets",val:": bool = False"},{name:"output_word_offsets",val:": bool = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/wav2vec2/tokenization_wav2vec2.py#L484",parametersDescription:[{anchor:"transformers.Wav2Vec2CTCTokenizer.decode.token_ids",description:`<strong>token_ids</strong> (<code>Union[int, List[int], np.ndarray, torch.Tensor, tf.Tensor]</code>) &#x2014; List of tokenized input ids. 
Can be obtained using the <code>__call__</code> method.`,name:"token_ids"},{anchor:"transformers.Wav2Vec2CTCTokenizer.decode.skip_special_tokens",description:`<strong>skip_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to remove special tokens in the decoding.`,name:"skip_special_tokens"},{anchor:"transformers.Wav2Vec2CTCTokenizer.decode.clean_up_tokenization_spaces",description:`<strong>clean_up_tokenization_spaces</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to clean up the tokenization spaces.`,name:"clean_up_tokenization_spaces"},{anchor:"transformers.Wav2Vec2CTCTokenizer.decode.output_char_offsets",description:`<strong>output_char_offsets</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to output character offsets. Character offsets can be used in combination with the sampling rate and model downsampling rate to compute the time-stamps of transcribed characters.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>Please take a look at the example of <code>decode</code> to better understand how to make use of <code>output_word_offsets</code>.</p> </div>`,name:"output_char_offsets"},{anchor:"transformers.Wav2Vec2CTCTokenizer.decode.output_word_offsets",description:`<strong>output_word_offsets</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to output word offsets. Word offsets can be used in combination with the sampling rate and model downsampling rate to compute the time-stamps of transcribed words.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>Please take a look at the example of <code>decode</code> to better understand how to make use of <code>output_word_offsets</code>.</p> </div>`,name:"output_word_offsets"},{anchor:"transformers.Wav2Vec2CTCTokenizer.decode.kwargs",description:`<strong>kwargs</strong> (additional keyword arguments, <em>optional</em>) &#x2014; Will be passed to the underlying model specific decode method.`,name:"kwargs"}],returnDescription:` <p>The list of decoded sentences. 
Will be a <code>Wav2Vec2CTCTokenizerOutput</code>when <code>output_char_offsets == True</code> or <code>output_word_offsets == True</code>.</p> `}}),Da=new ue({props:{code:`# Let's see how to retrieve time steps for a model from transformers import AutoTokenizer, AutoFeatureExtractor, AutoModelForCTC from datasets import load_dataset import datasets import torch # import model, feature extractor, tokenizer model = AutoModelForCTC.from_pretrained("facebook/wav2vec2-base-960h") tokenizer = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h") feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h") # load first sample of English common_voice dataset = load_dataset("common_voice", "en", split="train", streaming=True) dataset = dataset.cast_column("audio", datasets.Audio(sampling_rate=16_000)) dataset_iter = iter(dataset) sample = next(dataset_iter) # forward sample through model to get greedily predicted transcription ids input_values = feature_extractor(sample["audio"]["array"], return_tensors="pt").input_values logits = model(input_values).logits[0] pred_ids = torch.argmax(logits, axis=-1) # retrieve word stamps (analogous commands for \`output_char_offsets\`) outputs = tokenizer.decode(pred_ids, output_word_offsets=True) # compute \`time_offset\` in seconds as product of downsampling ratio and sampling_rate time_offset = model.config.inputs_to_logits_ratio / feature_extractor.sampling_rate word_offsets = [ { "word": d["word"], "start_time": round(d["start_offset"] * time_offset, 2), "end_time": round(d["end_offset"] * time_offset, 2), } for d in outputs.word_offsets ] # compare word offsets with audio \`common_voice_en_100038.mp3\` online on the dataset viewer: # https://huggingface.co/datasets/common_voice/viewer/en/train word_offsets[:3]`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Let&#x27;s see how to retrieve time steps for a model</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer, AutoFeatureExtractor, AutoModelForCTC <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> datasets <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># import model, feature extractor, tokenizer</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForCTC.from_pretrained(<span class="hljs-string">&quot;facebook/wav2vec2-base-960h&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/wav2vec2-base-960h&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = AutoFeatureExtractor.from_pretrained(<span class="hljs-string">&quot;facebook/wav2vec2-base-960h&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># load first sample of English common_voice</span> <span class="hljs-meta">&gt;&gt;&gt; </span>dataset = load_dataset(<span class="hljs-string">&quot;common_voice&quot;</span>, <span class="hljs-string">&quot;en&quot;</span>, split=<span class="hljs-string">&quot;train&quot;</span>, streaming=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>dataset = 
dataset.cast_column(<span class="hljs-string">&quot;audio&quot;</span>, datasets.Audio(sampling_rate=<span class="hljs-number">16_000</span>)) <span class="hljs-meta">&gt;&gt;&gt; </span>dataset_iter = <span class="hljs-built_in">iter</span>(dataset) <span class="hljs-meta">&gt;&gt;&gt; </span>sample = <span class="hljs-built_in">next</span>(dataset_iter) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># forward sample through model to get greedily predicted transcription ids</span> <span class="hljs-meta">&gt;&gt;&gt; </span>input_values = feature_extractor(sample[<span class="hljs-string">&quot;audio&quot;</span>][<span class="hljs-string">&quot;array&quot;</span>], return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_values <span class="hljs-meta">&gt;&gt;&gt; </span>logits = model(input_values).logits[<span class="hljs-number">0</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>pred_ids = torch.argmax(logits, axis=-<span class="hljs-number">1</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># retrieve word stamps (analogous commands for \`output_char_offsets\`)</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = tokenizer.decode(pred_ids, output_word_offsets=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># compute \`time_offset\` in seconds as product of downsampling ratio and sampling_rate</span> <span class="hljs-meta">&gt;&gt;&gt; </span>time_offset = model.config.inputs_to_logits_ratio / feature_extractor.sampling_rate <span class="hljs-meta">&gt;&gt;&gt; </span>word_offsets = [ <span class="hljs-meta">... </span> { <span class="hljs-meta">... </span> <span class="hljs-string">&quot;word&quot;</span>: d[<span class="hljs-string">&quot;word&quot;</span>], <span class="hljs-meta">... </span> <span class="hljs-string">&quot;start_time&quot;</span>: <span class="hljs-built_in">round</span>(d[<span class="hljs-string">&quot;start_offset&quot;</span>] * time_offset, <span class="hljs-number">2</span>), <span class="hljs-meta">... </span> <span class="hljs-string">&quot;end_time&quot;</span>: <span class="hljs-built_in">round</span>(d[<span class="hljs-string">&quot;end_offset&quot;</span>] * time_offset, <span class="hljs-number">2</span>), <span class="hljs-meta">... </span> } <span class="hljs-meta">... </span> <span class="hljs-keyword">for</span> d <span class="hljs-keyword">in</span> outputs.word_offsets <span class="hljs-meta">... 
</span>] <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># compare word offsets with audio \`common_voice_en_100038.mp3\` online on the dataset viewer:</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># https://huggingface.co/datasets/common_voice/viewer/en/train</span> <span class="hljs-meta">&gt;&gt;&gt; </span>word_offsets[:<span class="hljs-number">3</span>] [{<span class="hljs-string">&#x27;word&#x27;</span>: <span class="hljs-string">&#x27;WHY&#x27;</span>, <span class="hljs-string">&#x27;start_time&#x27;</span>: <span class="hljs-number">1.42</span>, <span class="hljs-string">&#x27;end_time&#x27;</span>: <span class="hljs-number">1.54</span>}, {<span class="hljs-string">&#x27;word&#x27;</span>: <span class="hljs-string">&#x27;DOES&#x27;</span>, <span class="hljs-string">&#x27;start_time&#x27;</span>: <span class="hljs-number">1.64</span>, <span class="hljs-string">&#x27;end_time&#x27;</span>: <span class="hljs-number">1.9</span>}, {<span class="hljs-string">&#x27;word&#x27;</span>: <span class="hljs-string">&#x27;MILISANDRA&#x27;</span>, <span class="hljs-string">&#x27;start_time&#x27;</span>: <span class="hljs-number">2.26</span>, <span class="hljs-string">&#x27;end_time&#x27;</span>: <span class="hljs-number">2.9</span>}]`}}),Oa=new V({props:{name:"batch_decode",anchor:"transformers.Wav2Vec2CTCTokenizer.batch_decode",parameters:[{name:"sequences",val:": typing.Union[typing.List[int], typing.List[typing.List[int]], ForwardRef('np.ndarray'), ForwardRef('torch.Tensor'), ForwardRef('tf.Tensor')]"},{name:"skip_special_tokens",val:": bool = False"},{name:"clean_up_tokenization_spaces",val:": bool = True"},{name:"output_char_offsets",val:": bool = False"},{name:"output_word_offsets",val:": bool = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/wav2vec2/tokenization_wav2vec2.py#L414",parametersDescription:[{anchor:"transformers.Wav2Vec2CTCTokenizer.batch_decode.sequences",description:`<strong>sequences</strong> (<code>Union[List[int], List[List[int]], np.ndarray, torch.Tensor, tf.Tensor]</code>) &#x2014; List of tokenized input ids. Can be obtained using the <code>__call__</code> method.`,name:"sequences"},{anchor:"transformers.Wav2Vec2CTCTokenizer.batch_decode.skip_special_tokens",description:`<strong>skip_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to remove special tokens in the decoding.`,name:"skip_special_tokens"},{anchor:"transformers.Wav2Vec2CTCTokenizer.batch_decode.clean_up_tokenization_spaces",description:`<strong>clean_up_tokenization_spaces</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to clean up the tokenization spaces.`,name:"clean_up_tokenization_spaces"},{anchor:"transformers.Wav2Vec2CTCTokenizer.batch_decode.output_char_offsets",description:`<strong>output_char_offsets</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to output character offsets. 
Character offsets can be used in combination with the sampling rate and model downsampling rate to compute the time-stamps of transcribed characters.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>Please take a look at the example of <code>decode</code> to better understand how to make use of <code>output_word_offsets</code>. <code>batch_decode</code> works the same way with batched output.</p> </div>`,name:"output_char_offsets"},{anchor:"transformers.Wav2Vec2CTCTokenizer.batch_decode.output_word_offsets",description:`<strong>output_word_offsets</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to output word offsets. Word offsets can be used in combination with the sampling rate and model downsampling rate to compute the time-stamps of transcribed words.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>Please take a look at the example of <code>decode</code> to better understand how to make use of <code>output_word_offsets</code>. <code>batch_decode</code> works the same way with batched output.</p> </div>`,name:"output_word_offsets"},{anchor:"transformers.Wav2Vec2CTCTokenizer.batch_decode.kwargs",description:`<strong>kwargs</strong> (additional keyword arguments, <em>optional</em>) &#x2014; Will be passed to the underlying model specific decode method.`,name:"kwargs"}],returnDescription:` <p>The list of decoded sentences. Will be a <code>Wav2Vec2CTCTokenizerOutput</code> when <code>output_char_offsets == True</code> or <code>output_word_offsets == True</code>.</p> `}}),Sa=new ee({}),Na=new V({props:{name:"class transformers.Wav2Vec2FeatureExtractor",anchor:"transformers.Wav2Vec2FeatureExtractor",parameters:[{name:"feature_size",val:" = 1"},{name:"sampling_rate",val:" = 16000"},{name:"padding_value",val:" = 0.0"},{name:"return_attention_mask",val:" = False"},{name:"do_normalize",val:" = True"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/wav2vec2/feature_extraction_wav2vec2.py#L32",parametersDescription:[{anchor:"transformers.Wav2Vec2FeatureExtractor.feature_size",description:`<strong>feature_size</strong> (<code>int</code>, defaults to 1) &#x2014; The feature dimension of the extracted features.`,name:"feature_size"},{anchor:"transformers.Wav2Vec2FeatureExtractor.sampling_rate",description:`<strong>sampling_rate</strong> (<code>int</code>, defaults to 16000) &#x2014; The sampling rate at which the audio files should be digitized, expressed in hertz (Hz).`,name:"sampling_rate"},{anchor:"transformers.Wav2Vec2FeatureExtractor.padding_value",description:`<strong>padding_value</strong> (<code>float</code>, defaults to 0.0) &#x2014; The value that is used to fill the padding values.`,name:"padding_value"},{anchor:"transformers.Wav2Vec2FeatureExtractor.do_normalize",description:`<strong>do_normalize</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to zero-mean unit-variance normalize the input.
Normalizing can help to significantly improve the performance for some models, <em>e.g.</em>, <a href="https://huggingface.co/models?search=lv60" rel="nofollow">wav2vec2-lv60</a>.`,name:"do_normalize"},{anchor:"transformers.Wav2Vec2FeatureExtractor.return_attention_mask",description:`<strong>return_attention_mask</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not <a href="/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2FeatureExtractor.__call__"><strong>call</strong>()</a> should return <code>attention_mask</code>.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>Wav2Vec2 models that have set <code>config.feat_extract_norm == &quot;group&quot;</code>, such as <a href="https://huggingface.co/facebook/wav2vec2-base-960h" rel="nofollow">wav2vec2-base</a>, have <strong>not</strong> been trained using <code>attention_mask</code>. For such models, <code>input_values</code> should simply be padded with 0 and no <code>attention_mask</code> should be passed.</p> <p>For Wav2Vec2 models that have set <code>config.feat_extract_norm == &quot;layer&quot;</code>, such as <a href="https://huggingface.co/facebook/wav2vec2-large-960h-lv60-self" rel="nofollow">wav2vec2-lv60</a>, <code>attention_mask</code> should be passed for batched inference.</p> </div>`,name:"return_attention_mask"}]}}),Ba=new V({props:{name:"__call__",anchor:"transformers.Wav2Vec2FeatureExtractor.__call__",parameters:[{name:"raw_speech",val:": typing.Union[numpy.ndarray, typing.List[float], typing.List[numpy.ndarray], typing.List[typing.List[float]]]"},{name:"padding",val:": typing.Union[bool, str, transformers.file_utils.PaddingStrategy] = False"},{name:"max_length",val:": typing.Optional[int] = None"},{name:"truncation",val:": bool = False"},{name:"pad_to_multiple_of",val:": typing.Optional[int] = None"},{name:"return_attention_mask",val:": typing.Optional[bool] = None"},{name:"return_tensors",val:": typing.Union[str, transformers.file_utils.TensorType, NoneType] = None"},{name:"sampling_rate",val:": typing.Optional[int] = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/wav2vec2/feature_extraction_wav2vec2.py#L103",parametersDescription:[{anchor:"transformers.Wav2Vec2FeatureExtractor.__call__.raw_speech",description:`<strong>raw_speech</strong> (<code>np.ndarray</code>, <code>List[float]</code>, <code>List[np.ndarray]</code>, <code>List[List[float]]</code>) &#x2014; The sequence or batch of sequences to be padded. 
Each sequence can be a numpy array, a list of float values, a list of numpy arrays or a list of lists of float values.`,name:"raw_speech"},{anchor:"transformers.Wav2Vec2FeatureExtractor.__call__.padding",description:`<strong>padding</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/file_utils#transformers.file_utils.PaddingStrategy">PaddingStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Select a strategy to pad the returned sequences (according to the model&#x2019;s padding side and padding index) among:</p> <ul> <li><code>True</code> or <code>&apos;longest&apos;</code>: Pad to the longest sequence in the batch (or no padding if only a single sequence is provided).</li> <li><code>&apos;max_length&apos;</code>: Pad to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided.</li> <li><code>False</code> or <code>&apos;do_not_pad&apos;</code> (default): No padding (i.e., can output a batch with sequences of different lengths).</li> </ul>`,name:"padding"},{anchor:"transformers.Wav2Vec2FeatureExtractor.__call__.max_length",description:`<strong>max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; Maximum length of the returned list and optionally padding length (see above).`,name:"max_length"},{anchor:"transformers.Wav2Vec2FeatureExtractor.__call__.truncation",description:`<strong>truncation</strong> (<code>bool</code>) &#x2014; Activates truncation to cut input sequences longer than <em>max_length</em> to <em>max_length</em>.`,name:"truncation"},{anchor:"transformers.Wav2Vec2FeatureExtractor.__call__.pad_to_multiple_of",description:`<strong>pad_to_multiple_of</strong> (<code>int</code>, <em>optional</em>) &#x2014; If set, will pad the sequence to a multiple of the provided value.</p> <p>This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability &gt;= 7.5 (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128.</p>`,name:"pad_to_multiple_of"},{anchor:"transformers.Wav2Vec2FeatureExtractor.__call__.return_attention_mask",description:`<strong>return_attention_mask</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to return the attention mask. If left to the default, will return the attention mask according to the specific feature_extractor&#x2019;s default.</p> <p><a href="../glossary#attention-mask">What are attention masks?</a></p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>Wav2Vec2 models that have set <code>config.feat_extract_norm == &quot;group&quot;</code>, such as <a href="https://huggingface.co/facebook/wav2vec2-base-960h" rel="nofollow">wav2vec2-base</a>, have <strong>not</strong> been trained using <code>attention_mask</code>.
For such models, <code>input_values</code> should simply be padded with 0 and no <code>attention_mask</code> should be passed.</p> <p>For Wav2Vec2 models that have set <code>config.feat_extract_norm == &quot;layer&quot;</code>, such as <a href="https://huggingface.co/facebook/wav2vec2-large-960h-lv60-self" rel="nofollow">wav2vec2-lv60</a>, <code>attention_mask</code> should be passed for batched inference.</p> </div>`,name:"return_attention_mask"},{anchor:"transformers.Wav2Vec2FeatureExtractor.__call__.return_tensors",description:`<strong>return_tensors</strong> (<code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>) &#x2014; If set, will return tensors instead of list of python integers. Acceptable values are:</p> <ul> <li><code>&apos;tf&apos;</code>: Return TensorFlow <code>tf.constant</code> objects.</li> <li><code>&apos;pt&apos;</code>: Return PyTorch <code>torch.Tensor</code> objects.</li> <li><code>&apos;np&apos;</code>: Return Numpy <code>np.ndarray</code> objects.</li> </ul>`,name:"return_tensors"},{anchor:"transformers.Wav2Vec2FeatureExtractor.__call__.sampling_rate",description:`<strong>sampling_rate</strong> (<code>int</code>, <em>optional</em>) &#x2014; The sampling rate at which the <code>raw_speech</code> input was sampled. It is strongly recommended to pass <code>sampling_rate</code> at the forward call to prevent silent errors.`,name:"sampling_rate"},{anchor:"transformers.Wav2Vec2FeatureExtractor.__call__.padding_value",description:"<strong>padding_value</strong> (<code>float</code>, defaults to 0.0) &#x2014;",name:"padding_value"}]}}),Ua=new ee({}),Ha=new V({props:{name:"class transformers.Wav2Vec2Processor",anchor:"transformers.Wav2Vec2Processor",parameters:[{name:"feature_extractor",val:""},{name:"tokenizer",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/wav2vec2/processing_wav2vec2.py#L26",parametersDescription:[{anchor:"transformers.Wav2Vec2Processor.feature_extractor",description:`<strong>feature_extractor</strong> (<code>Wav2Vec2FeatureExtractor</code>) &#x2014; An instance of <a href="/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2FeatureExtractor">Wav2Vec2FeatureExtractor</a>. The feature extractor is a required input.`,name:"feature_extractor"},{anchor:"transformers.Wav2Vec2Processor.tokenizer",description:`<strong>tokenizer</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; An instance of <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>. 
The tokenizer is a required input.`,name:"tokenizer"}]}}),Xa=new V({props:{name:"__call__",anchor:"transformers.Wav2Vec2Processor.__call__",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/wav2vec2/processing_wav2vec2.py#L66"}}),Za=new V({props:{name:"pad",anchor:"transformers.Wav2Vec2Processor.pad",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/wav2vec2/processing_wav2vec2.py#L75"}}),Ka=new V({props:{name:"from_pretrained",anchor:"transformers.Wav2Vec2Processor.from_pretrained",parameters:[{name:"pretrained_model_name_or_path",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/wav2vec2/processing_wav2vec2.py#L47"}}),Qa=new V({props:{name:"save_pretrained",anchor:"transformers.ProcessorMixin.save_pretrained",parameters:[{name:"save_directory",val:""},{name:"push_to_hub",val:": bool = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/processing_utils.py#L95",parametersDescription:[{anchor:"transformers.ProcessorMixin.save_pretrained.save_directory",description:`<strong>save_directory</strong> (<code>str</code> or <code>os.PathLike</code>) &#x2014; Directory where the feature extractor JSON file and the tokenizer files will be saved (directory will be created if it does not exist).`,name:"save_directory"},{anchor:"transformers.ProcessorMixin.save_pretrained.push_to_hub",description:`<strong>push_to_hub</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to push your processor to the Hugging Face model hub after saving it.</p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"> <p>Using <code>push_to_hub=True</code> will synchronize the repository you are pushing to with <code>save_directory</code>, which requires <code>save_directory</code> to be a local clone of the repo you are pushing to if it&#x2019;s an existing folder. 
Pass along <code>temp_dir=True</code> to use a temporary directory instead.</p> </div> <p>kwargs &#x2014; Additional keyword arguments passed along to the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.file_utils.PushToHubMixin.push_to_hub">push_to_hub()</a> method.`,name:"push_to_hub"}]}}),Lo=new fe({props:{$$slots:{default:[yT]},$$scope:{ctx:E}}}),es=new V({props:{name:"batch_decode",anchor:"transformers.Wav2Vec2Processor.batch_decode",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/wav2vec2/processing_wav2vec2.py#L84"}}),os=new V({props:{name:"decode",anchor:"transformers.Wav2Vec2Processor.decode",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/wav2vec2/processing_wav2vec2.py#L91"}}),ss=new V({props:{name:"as_target_processor",anchor:"transformers.Wav2Vec2Processor.as_target_processor",parameters:[],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/wav2vec2/processing_wav2vec2.py#L98"}}),ns=new ee({}),rs=new V({props:{name:"class transformers.Wav2Vec2ProcessorWithLM",anchor:"transformers.Wav2Vec2ProcessorWithLM",parameters:[{name:"feature_extractor",val:": FeatureExtractionMixin"},{name:"tokenizer",val:": PreTrainedTokenizerBase"},{name:"decoder",val:": BeamSearchDecoderCTC"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/wav2vec2_with_lm/processing_wav2vec2_with_lm.py#L63",parametersDescription:[{anchor:"transformers.Wav2Vec2ProcessorWithLM.feature_extractor",description:`<strong>feature_extractor</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2FeatureExtractor">Wav2Vec2FeatureExtractor</a>) &#x2014; An instance of <a href="/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2FeatureExtractor">Wav2Vec2FeatureExtractor</a>. The feature extractor is a required input.`,name:"feature_extractor"},{anchor:"transformers.Wav2Vec2ProcessorWithLM.tokenizer",description:`<strong>tokenizer</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2CTCTokenizer">Wav2Vec2CTCTokenizer</a>) &#x2014; An instance of <a href="/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2CTCTokenizer">Wav2Vec2CTCTokenizer</a>. The tokenizer is a required input.`,name:"tokenizer"},{anchor:"transformers.Wav2Vec2ProcessorWithLM.decoder",description:`<strong>decoder</strong> (<code>pyctcdecode.BeamSearchDecoderCTC</code>) &#x2014; An instance of <code>pyctcdecode.BeamSearchDecoderCTC</code>.
The decoder is a required input.`,name:"decoder"}]}}),is=new V({props:{name:"__call__",anchor:"transformers.Wav2Vec2ProcessorWithLM.__call__",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/wav2vec2_with_lm/processing_wav2vec2_with_lm.py#L209"}}),ds=new V({props:{name:"pad",anchor:"transformers.Wav2Vec2ProcessorWithLM.pad",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/wav2vec2_with_lm/processing_wav2vec2_with_lm.py#L219"}}),ps=new V({props:{name:"from_pretrained",anchor:"transformers.Wav2Vec2ProcessorWithLM.from_pretrained",parameters:[{name:"pretrained_model_name_or_path",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/wav2vec2_with_lm/processing_wav2vec2_with_lm.py#L107",parametersDescription:[{anchor:"transformers.Wav2Vec2ProcessorWithLM.from_pretrained.pretrained_model_name_or_path",description:`<strong>pretrained_model_name_or_path</strong> (<code>str</code> or <code>os.PathLike</code>) &#x2014; This can be either:</p> <ul> <li>a string, the <em>model id</em> of a pretrained feature_extractor hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like <code>bert-base-uncased</code>, or namespaced under a user or organization name, like <code>dbmdz/bert-base-german-cased</code>.</li> <li>a path to a <em>directory</em> containing a feature extractor file saved using the <a href="/docs/transformers/pr_16143/en/main_classes/feature_extractor#transformers.FeatureExtractionMixin.save_pretrained">save_pretrained()</a> method, e.g., <code>./my_model_directory/</code>.</li> <li>a path or url to a saved feature extractor JSON <em>file</em>, e.g., <code>./my_model_directory/preprocessor_config.json</code>. 
**kwargs &#x2014; Additional keyword arguments passed along to both <a href="/docs/transformers/pr_16143/en/main_classes/feature_extractor#transformers.SequenceFeatureExtractor">SequenceFeatureExtractor</a> and <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a></li> </ul>`,name:"pretrained_model_name_or_path"}]}}),Uo=new fe({props:{$$slots:{default:[kT]},$$scope:{ctx:E}}}),hs=new V({props:{name:"save_pretrained",anchor:"transformers.Wav2Vec2ProcessorWithLM.save_pretrained",parameters:[{name:"save_directory",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/wav2vec2_with_lm/processing_wav2vec2_with_lm.py#L103"}}),fs=new V({props:{name:"batch_decode",anchor:"transformers.Wav2Vec2ProcessorWithLM.batch_decode",parameters:[{name:"logits",val:": ndarray"},{name:"num_processes",val:": typing.Optional[int] = None"},{name:"beam_width",val:": typing.Optional[int] = None"},{name:"beam_prune_logp",val:": typing.Optional[float] = None"},{name:"token_min_logp",val:": typing.Optional[float] = None"},{name:"hotwords",val:": typing.Optional[typing.Iterable[str]] = None"},{name:"hotword_weight",val:": typing.Optional[float] = None"},{name:"alpha",val:": typing.Optional[float] = None"},{name:"beta",val:": typing.Optional[float] = None"},{name:"unk_score_offset",val:": typing.Optional[float] = None"},{name:"lm_score_boundary",val:": typing.Optional[bool] = None"},{name:"output_word_offsets",val:": bool = False"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/wav2vec2_with_lm/processing_wav2vec2_with_lm.py#L229",parametersDescription:[{anchor:"transformers.Wav2Vec2ProcessorWithLM.batch_decode.logits",description:`<strong>logits</strong> (<code>np.ndarray</code>) &#x2014; The logits output vector of the model representing the log probabilities for each token.`,name:"logits"},{anchor:"transformers.Wav2Vec2ProcessorWithLM.batch_decode.num_processes",description:`<strong>num_processes</strong> (<code>int</code>, <em>optional</em>) &#x2014; Number of processes on which the function should be parallelized over. Defaults to the number of available CPUs.`,name:"num_processes"},{anchor:"transformers.Wav2Vec2ProcessorWithLM.batch_decode.beam_width",description:`<strong>beam_width</strong> (<code>int</code>, <em>optional</em>) &#x2014; Maximum number of beams at each step in decoding. 
Defaults to pyctcdecode&#x2019;s DEFAULT_BEAM_WIDTH.`,name:"beam_width"},{anchor:"transformers.Wav2Vec2ProcessorWithLM.batch_decode.beam_prune_logp",description:`<strong>beam_prune_logp</strong> (<code>int</code>, <em>optional</em>) &#x2014; Beams that are much worse than the best beam will be pruned. Defaults to pyctcdecode&#x2019;s DEFAULT_PRUNE_LOGP.`,name:"beam_prune_logp"},{anchor:"transformers.Wav2Vec2ProcessorWithLM.batch_decode.token_min_logp",description:`<strong>token_min_logp</strong> (<code>int</code>, <em>optional</em>) &#x2014; Tokens below this logp are skipped unless they are the argmax of the frame. Defaults to pyctcdecode&#x2019;s DEFAULT_MIN_TOKEN_LOGP.`,name:"token_min_logp"},{anchor:"transformers.Wav2Vec2ProcessorWithLM.batch_decode.hotwords",description:`<strong>hotwords</strong> (<code>List[str]</code>, <em>optional</em>) &#x2014; List of words with extra importance, which can be OOV for the LM.`,name:"hotwords"},{anchor:"transformers.Wav2Vec2ProcessorWithLM.batch_decode.hotword_weight",description:`<strong>hotword_weight</strong> (<code>int</code>, <em>optional</em>) &#x2014; Weight factor for hotword importance. Defaults to pyctcdecode&#x2019;s DEFAULT_HOTWORD_WEIGHT.`,name:"hotword_weight"},{anchor:"transformers.Wav2Vec2ProcessorWithLM.batch_decode.alpha",description:`<strong>alpha</strong> (<code>float</code>, <em>optional</em>) &#x2014; Weight for the language model during shallow fusion.`,name:"alpha"},{anchor:"transformers.Wav2Vec2ProcessorWithLM.batch_decode.beta",description:`<strong>beta</strong> (<code>float</code>, <em>optional</em>) &#x2014; Weight for length score adjustment during scoring.`,name:"beta"},{anchor:"transformers.Wav2Vec2ProcessorWithLM.batch_decode.unk_score_offset",description:`<strong>unk_score_offset</strong> (<code>float</code>, <em>optional</em>) &#x2014; Amount of log score offset for unknown tokens.`,name:"unk_score_offset"},{anchor:"transformers.Wav2Vec2ProcessorWithLM.batch_decode.lm_score_boundary",description:`<strong>lm_score_boundary</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to have kenlm respect boundaries when scoring.`,name:"lm_score_boundary"},{anchor:"transformers.Wav2Vec2ProcessorWithLM.batch_decode.output_word_offsets",description:`<strong>output_word_offsets</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to output word offsets. Word offsets can be used in combination with the sampling rate and model downsampling rate to compute the time-stamps of transcribed words.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>Please take a look at the example of <code>decode</code> to better understand how to make use of <code>output_word_offsets</code>.
<code>batch_decode</code> works the same way with batched output.</p> </div>`,name:"output_word_offsets"}],returnDescription:` <p><code>Wav2Vec2DecoderWithLMOutput</code> or <code>tuple</code>.</p> `}}),Ho=new fe({props:{$$slots:{default:[TT]},$$scope:{ctx:E}}}),us=new V({props:{name:"decode",anchor:"transformers.Wav2Vec2ProcessorWithLM.decode",parameters:[{name:"logits",val:": ndarray"},{name:"beam_width",val:": typing.Optional[int] = None"},{name:"beam_prune_logp",val:": typing.Optional[float] = None"},{name:"token_min_logp",val:": typing.Optional[float] = None"},{name:"hotwords",val:": typing.Optional[typing.Iterable[str]] = None"},{name:"hotword_weight",val:": typing.Optional[float] = None"},{name:"alpha",val:": typing.Optional[float] = None"},{name:"beta",val:": typing.Optional[float] = None"},{name:"unk_score_offset",val:": typing.Optional[float] = None"},{name:"lm_score_boundary",val:": typing.Optional[bool] = None"},{name:"output_word_offsets",val:": bool = False"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/wav2vec2_with_lm/processing_wav2vec2_with_lm.py#L346",parametersDescription:[{anchor:"transformers.Wav2Vec2ProcessorWithLM.decode.logits",description:`<strong>logits</strong> (<code>np.ndarray</code>) &#x2014; The logits output vector of the model representing the log probabilities for each token.`,name:"logits"},{anchor:"transformers.Wav2Vec2ProcessorWithLM.decode.beam_width",description:`<strong>beam_width</strong> (<code>int</code>, <em>optional</em>) &#x2014; Maximum number of beams at each step in decoding. Defaults to pyctcdecode&#x2019;s DEFAULT_BEAM_WIDTH.`,name:"beam_width"},{anchor:"transformers.Wav2Vec2ProcessorWithLM.decode.beam_prune_logp",description:`<strong>beam_prune_logp</strong> (<code>int</code>, <em>optional</em>) &#x2014; A threshold to prune beams with log-probs less than best_beam_logp + beam_prune_logp. The value should be &lt;= 0. Defaults to pyctcdecode&#x2019;s DEFAULT_PRUNE_LOGP.`,name:"beam_prune_logp"},{anchor:"transformers.Wav2Vec2ProcessorWithLM.decode.token_min_logp",description:`<strong>token_min_logp</strong> (<code>int</code>, <em>optional</em>) &#x2014; Tokens with log-probs below token_min_logp are skipped unless they have the maximum log-prob for an utterance. Defaults to pyctcdecode&#x2019;s DEFAULT_MIN_TOKEN_LOGP.`,name:"token_min_logp"},{anchor:"transformers.Wav2Vec2ProcessorWithLM.decode.hotwords",description:`<strong>hotwords</strong> (<code>List[str]</code>, <em>optional</em>) &#x2014; List of words with extra importance which can be missing from the LM&#x2019;s vocabulary, e.g. [&#x201C;huggingface&#x201D;].`,name:"hotwords"},{anchor:"transformers.Wav2Vec2ProcessorWithLM.decode.hotword_weight",description:`<strong>hotword_weight</strong> (<code>int</code>, <em>optional</em>) &#x2014; Weight multiplier that boosts hotword scores.
Defaults to pyctcdecode&#x2019;s DEFAULT_HOTWORD_WEIGHT.`,name:"hotword_weight"},{anchor:"transformers.Wav2Vec2ProcessorWithLM.decode.alpha",description:`<strong>alpha</strong> (<code>float</code>, <em>optional</em>) &#x2014; Weight for language model during shallow fusion`,name:"alpha"},{anchor:"transformers.Wav2Vec2ProcessorWithLM.decode.beta",description:`<strong>beta</strong> (<code>float</code>, <em>optional</em>) &#x2014; Weight for length score adjustment of during scoring`,name:"beta"},{anchor:"transformers.Wav2Vec2ProcessorWithLM.decode.unk_score_offset",description:`<strong>unk_score_offset</strong> (<code>float</code>, <em>optional</em>) &#x2014; Amount of log score offset for unknown tokens`,name:"unk_score_offset"},{anchor:"transformers.Wav2Vec2ProcessorWithLM.decode.lm_score_boundary",description:`<strong>lm_score_boundary</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to have kenlm respect boundaries when scoring`,name:"lm_score_boundary"},{anchor:"transformers.Wav2Vec2ProcessorWithLM.decode.output_word_offsets",description:`<strong>output_word_offsets</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to output word offsets. Word offsets can be used in combination with the sampling rate and model downsampling rate to compute the time-stamps of transcribed words.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>Please take a look at the example of <code>decode</code> to better understand how to make use of <code>output_word_offsets</code>.</p> </div>`,name:"output_word_offsets"}],returnDescription:` <p><code>Wav2Vec2DecoderWithLMOutput</code> or <code>tuple</code>.</p> `}}),gs=new ue({props:{code:`# Let's see how to retrieve time steps for a model from transformers import AutoTokenizer, AutoProcessor, AutoModelForCTC from datasets import load_dataset import datasets import torch # import model, feature extractor, tokenizer model = AutoModelForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm") processor = AutoProcessor.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm") # load first sample of English common_voice dataset = load_dataset("common_voice", "en", split="train", streaming=True) dataset = dataset.cast_column("audio", datasets.Audio(sampling_rate=16_000)) dataset_iter = iter(dataset) sample = next(dataset_iter) # forward sample through model to get greedily predicted transcription ids input_values = processor(sample["audio"]["array"], return_tensors="pt").input_values with torch.no_grad(): logits = model(input_values).logits[0].cpu().numpy() # retrieve word stamps (analogous commands for \`output_char_offsets\`) outputs = processor.decode(logits, output_word_offsets=True) # compute \`time_offset\` in seconds as product of downsampling ratio and sampling_rate time_offset = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate word_offsets = [ { "word": d["word"], "start_time": round(d["start_offset"] * time_offset, 2), "end_time": round(d["end_offset"] * time_offset, 2), } for d in outputs.word_offsets ] # compare word offsets with audio \`common_voice_en_100038.mp3\` online on the dataset viewer: # https://huggingface.co/datasets/common_voice/viewer/en/train word_offsets[:4]`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span 
class="hljs-comment"># Let&#x27;s see how to retrieve time steps for a model</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer, AutoProcessor, AutoModelForCTC <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> datasets <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># import model, feature extractor, tokenizer</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForCTC.from_pretrained(<span class="hljs-string">&quot;patrickvonplaten/wav2vec2-base-100h-with-lm&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>processor = AutoProcessor.from_pretrained(<span class="hljs-string">&quot;patrickvonplaten/wav2vec2-base-100h-with-lm&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># load first sample of English common_voice</span> <span class="hljs-meta">&gt;&gt;&gt; </span>dataset = load_dataset(<span class="hljs-string">&quot;common_voice&quot;</span>, <span class="hljs-string">&quot;en&quot;</span>, split=<span class="hljs-string">&quot;train&quot;</span>, streaming=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>dataset = dataset.cast_column(<span class="hljs-string">&quot;audio&quot;</span>, datasets.Audio(sampling_rate=<span class="hljs-number">16_000</span>)) <span class="hljs-meta">&gt;&gt;&gt; </span>dataset_iter = <span class="hljs-built_in">iter</span>(dataset) <span class="hljs-meta">&gt;&gt;&gt; </span>sample = <span class="hljs-built_in">next</span>(dataset_iter) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># forward sample through model to get greedily predicted transcription ids</span> <span class="hljs-meta">&gt;&gt;&gt; </span>input_values = processor(sample[<span class="hljs-string">&quot;audio&quot;</span>][<span class="hljs-string">&quot;array&quot;</span>], return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_values <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">with</span> torch.no_grad(): <span class="hljs-meta">... </span> logits = model(input_values).logits[<span class="hljs-number">0</span>].cpu().numpy() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># retrieve word stamps (analogous commands for \`output_char_offsets\`)</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = processor.decode(logits, output_word_offsets=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># compute \`time_offset\` in seconds as product of downsampling ratio and sampling_rate</span> <span class="hljs-meta">&gt;&gt;&gt; </span>time_offset = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate <span class="hljs-meta">&gt;&gt;&gt; </span>word_offsets = [ <span class="hljs-meta">... </span> { <span class="hljs-meta">... </span> <span class="hljs-string">&quot;word&quot;</span>: d[<span class="hljs-string">&quot;word&quot;</span>], <span class="hljs-meta">... 
</span> <span class="hljs-string">&quot;start_time&quot;</span>: <span class="hljs-built_in">round</span>(d[<span class="hljs-string">&quot;start_offset&quot;</span>] * time_offset, <span class="hljs-number">2</span>), <span class="hljs-meta">... </span> <span class="hljs-string">&quot;end_time&quot;</span>: <span class="hljs-built_in">round</span>(d[<span class="hljs-string">&quot;end_offset&quot;</span>] * time_offset, <span class="hljs-number">2</span>), <span class="hljs-meta">... </span> } <span class="hljs-meta">... </span> <span class="hljs-keyword">for</span> d <span class="hljs-keyword">in</span> outputs.word_offsets <span class="hljs-meta">... </span>] <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># compare word offsets with audio \`common_voice_en_100038.mp3\` online on the dataset viewer:</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># https://huggingface.co/datasets/common_voice/viewer/en/train</span> <span class="hljs-meta">&gt;&gt;&gt; </span>word_offsets[:<span class="hljs-number">4</span>] [{<span class="hljs-string">&#x27;word&#x27;</span>: <span class="hljs-string">&#x27;WHY&#x27;</span>, <span class="hljs-string">&#x27;start_time&#x27;</span>: <span class="hljs-number">1.42</span>, <span class="hljs-string">&#x27;end_time&#x27;</span>: <span class="hljs-number">1.54</span>}, {<span class="hljs-string">&#x27;word&#x27;</span>: <span class="hljs-string">&#x27;DOES&#x27;</span>, <span class="hljs-string">&#x27;start_time&#x27;</span>: <span class="hljs-number">1.64</span>, <span class="hljs-string">&#x27;end_time&#x27;</span>: <span class="hljs-number">1.88</span>}, {<span class="hljs-string">&#x27;word&#x27;</span>: <span class="hljs-string">&#x27;A&#x27;</span>, <span class="hljs-string">&#x27;start_time&#x27;</span>: <span class="hljs-number">2.12</span>, <span class="hljs-string">&#x27;end_time&#x27;</span>: <span class="hljs-number">2.14</span>}, {<span class="hljs-string">&#x27;word&#x27;</span>: <span class="hljs-string">&#x27;MILE&#x27;</span>, <span class="hljs-string">&#x27;start_time&#x27;</span>: <span class="hljs-number">2.26</span>, <span class="hljs-string">&#x27;end_time&#x27;</span>: <span class="hljs-number">2.46</span>}]`}}),_s=new V({props:{name:"as_target_processor",anchor:"transformers.Wav2Vec2ProcessorWithLM.as_target_processor",parameters:[],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/wav2vec2_with_lm/processing_wav2vec2_with_lm.py#L486"}}),vs=new ee({}),ws=new V({props:{name:"class transformers.models.wav2vec2_with_lm.processing_wav2vec2_with_lm.Wav2Vec2DecoderWithLMOutput",anchor:"transformers.models.wav2vec2_with_lm.processing_wav2vec2_with_lm.Wav2Vec2DecoderWithLMOutput",parameters:[{name:"text",val:": typing.Union[typing.List[str], str]"},{name:"logit_score",val:": typing.Union[typing.List[float], float] = None"},{name:"lm_score",val:": typing.Union[typing.List[float], float] = None"},{name:"word_offsets",val:": typing.Union[typing.List[typing.List[typing.Dict[str, typing.Union[int, str]]]], typing.List[typing.Dict[str, typing.Union[int, str]]]] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/wav2vec2_with_lm/processing_wav2vec2_with_lm.py#L41",parametersDescription:[{anchor:"transformers.models.wav2vec2_with_lm.processing_wav2vec2_with_lm.Wav2Vec2DecoderWithLMOutput.text",description:`<strong>text</strong> (list of <code>str</code> or <code>str</code>) &#x2014; Decoded logits in text from. 
Usually the speech transcription.`,name:"text"},{anchor:"transformers.models.wav2vec2_with_lm.processing_wav2vec2_with_lm.Wav2Vec2DecoderWithLMOutput.logit_score",description:`<strong>logit_score</strong> (list of <code>float</code> or <code>float</code>) &#x2014; Total logit score of the beam associated with produced text.`,name:"logit_score"},{anchor:"transformers.models.wav2vec2_with_lm.processing_wav2vec2_with_lm.Wav2Vec2DecoderWithLMOutput.lm_score",description:`<strong>lm_score</strong> (list of <code>float</code>) &#x2014; Fused lm_score of the beam associated with produced text.`,name:"lm_score"},{anchor:"transformers.models.wav2vec2_with_lm.processing_wav2vec2_with_lm.Wav2Vec2DecoderWithLMOutput.word_offsets",description:`<strong>word_offsets</strong> (list of <code>List[Dict[str, Union[int, str]]]</code> or <code>List[Dict[str, Union[int, str]]]</code>) &#x2014; Offsets of the decoded words. In combination with sampling rate and model downsampling rate word offsets can be used to compute time stamps for each word.`,name:"word_offsets"}]}}),ys=new V({props:{name:"class transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2BaseModelOutput",anchor:"transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2BaseModelOutput",parameters:[{name:"last_hidden_state",val:": FloatTensor = None"},{name:"extract_features",val:": FloatTensor = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/wav2vec2/modeling_wav2vec2.py#L92",parametersDescription:[{anchor:"transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2BaseModelOutput.last_hidden_state",description:`<strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) &#x2014; Sequence of hidden-states at the output of the last layer of the model.`,name:"last_hidden_state"},{anchor:"transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2BaseModelOutput.extract_features",description:`<strong>extract_features</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, conv_dim[-1])</code>) &#x2014; Sequence of extracted feature vectors of the last convolutional layer of the model.`,name:"extract_features"},{anchor:"transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2BaseModelOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2BaseModelOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute 
the weighted average in the self-attention heads.`,name:"attentions"}]}}),Ts=new V({props:{name:"class transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForPreTrainingOutput",anchor:"transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForPreTrainingOutput",parameters:[{name:"loss",val:": typing.Optional[torch.FloatTensor] = None"},{name:"projected_states",val:": FloatTensor = None"},{name:"projected_quantized_states",val:": FloatTensor = None"},{name:"codevector_perplexity",val:": FloatTensor = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"contrastive_loss",val:": typing.Optional[torch.FloatTensor] = None"},{name:"diversity_loss",val:": typing.Optional[torch.FloatTensor] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/wav2vec2/modeling_wav2vec2.py#L121",parametersDescription:[{anchor:"transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForPreTrainingOutput.loss",description:`<strong>loss</strong> (<em>optional</em>, returned when <code>sample_negative_indices</code> are passed, <code>torch.FloatTensor</code> of shape <code>(1,)</code>) &#x2014; Total loss as the sum of the contrastive loss (L_m) and the diversity loss (L_d) as stated in the <a href="https://arxiv.org/pdf/2006.11477.pdf" rel="nofollow">official paper</a> . (classification) loss.`,name:"loss"},{anchor:"transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForPreTrainingOutput.projected_states",description:`<strong>projected_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.proj_codevector_dim)</code>) &#x2014; Hidden-states of the model projected to <em>config.proj_codevector_dim</em> that can be used to predict the masked projected quantized states.`,name:"projected_states"},{anchor:"transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForPreTrainingOutput.projected_quantized_states",description:`<strong>projected_quantized_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.proj_codevector_dim)</code>) &#x2014; Quantized extracted feature vectors projected to <em>config.proj_codevector_dim</em> representing the positive target vectors for contrastive loss.`,name:"projected_quantized_states"},{anchor:"transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForPreTrainingOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForPreTrainingOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to 
compute the weighted average in the self-attention heads.`,name:"attentions"},{anchor:"transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForPreTrainingOutput.contrastive_loss",description:`<strong>contrastive_loss</strong> (<em>optional</em>, returned when <code>sample_negative_indices</code> are passed, <code>torch.FloatTensor</code> of shape <code>(1,)</code>) &#x2014; The contrastive loss (L_m) as stated in the <a href="https://arxiv.org/pdf/2006.11477.pdf" rel="nofollow">official paper</a> .`,name:"contrastive_loss"},{anchor:"transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForPreTrainingOutput.diversity_loss",description:`<strong>diversity_loss</strong> (<em>optional</em>, returned when <code>sample_negative_indices</code> are passed, <code>torch.FloatTensor</code> of shape <code>(1,)</code>) &#x2014; The diversity loss (L_d) as stated in the <a href="https://arxiv.org/pdf/2006.11477.pdf" rel="nofollow">official paper</a> .`,name:"diversity_loss"}]}}),Ws=new V({props:{name:"class transformers.models.wav2vec2.modeling_flax_wav2vec2.FlaxWav2Vec2BaseModelOutput",anchor:"transformers.models.wav2vec2.modeling_flax_wav2vec2.FlaxWav2Vec2BaseModelOutput",parameters:[{name:"last_hidden_state",val:": ndarray = None"},{name:"extract_features",val:": ndarray = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[jax._src.numpy.lax_numpy.ndarray]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[jax._src.numpy.lax_numpy.ndarray]] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/wav2vec2/modeling_flax_wav2vec2.py#L46",parametersDescription:[{anchor:"transformers.models.wav2vec2.modeling_flax_wav2vec2.FlaxWav2Vec2BaseModelOutput.last_hidden_state",description:`<strong>last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) &#x2014; Sequence of hidden-states at the output of the last layer of the model.`,name:"last_hidden_state"},{anchor:"transformers.models.wav2vec2.modeling_flax_wav2vec2.FlaxWav2Vec2BaseModelOutput.extract_features",description:`<strong>extract_features</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, last_conv_dim)</code>) &#x2014; Sequence of extracted feature vectors of the last convolutional layer of the model with <code>last_conv_dim</code> being the dimension of the last convolutional layer.`,name:"extract_features"},{anchor:"transformers.models.wav2vec2.modeling_flax_wav2vec2.FlaxWav2Vec2BaseModelOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.models.wav2vec2.modeling_flax_wav2vec2.FlaxWav2Vec2BaseModelOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights 
after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"}]}}),js=new V({props:{name:"replace",anchor:"None",parameters:[{name:"**updates",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/flax/struct.py#L120"}}),Vs=new V({props:{name:"class transformers.models.wav2vec2.modeling_flax_wav2vec2.FlaxWav2Vec2ForPreTrainingOutput",anchor:"transformers.models.wav2vec2.modeling_flax_wav2vec2.FlaxWav2Vec2ForPreTrainingOutput",parameters:[{name:"projected_states",val:": ndarray = None"},{name:"projected_quantized_states",val:": ndarray = None"},{name:"codevector_perplexity",val:": ndarray = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[jax._src.numpy.lax_numpy.ndarray]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[jax._src.numpy.lax_numpy.ndarray]] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/wav2vec2/modeling_flax_wav2vec2.py#L76",parametersDescription:[{anchor:"transformers.models.wav2vec2.modeling_flax_wav2vec2.FlaxWav2Vec2ForPreTrainingOutput.loss",description:`<strong>loss</strong> (<em>optional</em>, returned when model is in train mode, <code>jnp.ndarray</code> of shape <code>(1,)</code>) &#x2014; Total loss as the sum of the contrastive loss (L_m) and the diversity loss (L_d) as stated in the <a href="https://arxiv.org/pdf/2006.11477.pdf" rel="nofollow">official paper</a> . (classification) loss.`,name:"loss"},{anchor:"transformers.models.wav2vec2.modeling_flax_wav2vec2.FlaxWav2Vec2ForPreTrainingOutput.projected_states",description:`<strong>projected_states</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, config.proj_codevector_dim)</code>) &#x2014; Hidden-states of the model projected to <em>config.proj_codevector_dim</em> that can be used to predict the masked projected quantized states.`,name:"projected_states"},{anchor:"transformers.models.wav2vec2.modeling_flax_wav2vec2.FlaxWav2Vec2ForPreTrainingOutput.projected_quantized_states",description:`<strong>projected_quantized_states</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, config.proj_codevector_dim)</code>) &#x2014; Quantized extracted feature vectors projected to <em>config.proj_codevector_dim</em> representing the positive target vectors for contrastive loss.`,name:"projected_quantized_states"},{anchor:"transformers.models.wav2vec2.modeling_flax_wav2vec2.FlaxWav2Vec2ForPreTrainingOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.models.wav2vec2.modeling_flax_wav2vec2.FlaxWav2Vec2ForPreTrainingOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the 
attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"}]}}),Es=new V({props:{name:"replace",anchor:"None",parameters:[{name:"**updates",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/flax/struct.py#L120"}}),Ps=new ee({}),Cs=new V({props:{name:"class transformers.Wav2Vec2Model",anchor:"transformers.Wav2Vec2Model",parameters:[{name:"config",val:": Wav2Vec2Config"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/wav2vec2/modeling_wav2vec2.py#L1237",parametersDescription:[{anchor:"transformers.Wav2Vec2Model.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2Config">Wav2Vec2Config</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Ds=new V({props:{name:"forward",anchor:"transformers.Wav2Vec2Model.forward",parameters:[{name:"input_values",val:""},{name:"attention_mask",val:" = None"},{name:"mask_time_indices",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/wav2vec2/modeling_wav2vec2.py#L1323",parametersDescription:[{anchor:"transformers.Wav2Vec2Model.forward.input_values",description:`<strong>input_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Float values of input raw speech waveform. Values can be obtained by loading a <em>.flac</em> or <em>.wav</em> audio file into an array of type <em>List[float]</em> or a <em>numpy.ndarray</em>, <em>e.g.</em> via the soundfile library (<em>pip install soundfile</em>). To prepare the array into <em>input_values</em>, the <a href="/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor">Wav2Vec2Processor</a> should be used for padding and conversion into a tensor of type <em>torch.FloatTensor</em>. See <a href="/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor.__call__">Wav2Vec2Processor.<strong>call</strong>()</a> for details.`,name:"input_values"},{anchor:"transformers.Wav2Vec2Model.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing convolution and attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a></p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"> <p><code>attention_mask</code> should only be passed if the corresponding processor has <code>config.return_attention_mask == True</code>. 
For all models whose processor has <code>config.return_attention_mask == False</code>, such as <a href="https://huggingface.co/facebook/wav2vec2-base-960h" rel="nofollow">wav2vec2-base</a>, <code>attention_mask</code> should <strong>not</strong> be passed to avoid degraded performance when doing batched inference. For such models <code>input_values</code> should simply be padded with 0 and passed without <code>attention_mask</code>. Be aware that these models also yield slightly different results depending on whether <code>input_values</code> is padded or not.</p> </div>`,name:"attention_mask"},{anchor:"transformers.Wav2Vec2Model.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.Wav2Vec2Model.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.Wav2Vec2Model.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2BaseModelOutput" >transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2BaseModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2Config" >Wav2Vec2Config</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>extract_features</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, conv_dim[-1])</code>) \u2014 Sequence of extracted feature vectors of the last convolutional layer of the model.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to 
compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2BaseModelOutput" >transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2BaseModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Ko=new fe({props:{$$slots:{default:[xT]},$$scope:{ctx:E}}}),Os=new ue({props:{code:`from transformers import Wav2Vec2Processor, Wav2Vec2Model import torch from datasets import load_dataset dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") dataset = dataset.sort("id") sampling_rate = dataset.features["audio"].sampling_rate processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h") model = Wav2Vec2Model.from_pretrained("facebook/wav2vec2-base-960h") # audio file is decoded on the fly inputs = processor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="pt") with torch.no_grad(): outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state list(last_hidden_states.shape)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Wav2Vec2Processor, Wav2Vec2Model <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span>dataset = load_dataset(<span class="hljs-string">&quot;hf-internal-testing/librispeech_asr_demo&quot;</span>, <span class="hljs-string">&quot;clean&quot;</span>, split=<span class="hljs-string">&quot;validation&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>dataset = dataset.sort(<span class="hljs-string">&quot;id&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>sampling_rate = dataset.features[<span class="hljs-string">&quot;audio&quot;</span>].sampling_rate <span class="hljs-meta">&gt;&gt;&gt; </span>processor = Wav2Vec2Processor.from_pretrained(<span class="hljs-string">&quot;facebook/wav2vec2-base-960h&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = Wav2Vec2Model.from_pretrained(<span class="hljs-string">&quot;facebook/wav2vec2-base-960h&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># audio file is decoded on the fly</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = processor(dataset[<span class="hljs-number">0</span>][<span class="hljs-string">&quot;audio&quot;</span>][<span class="hljs-string">&quot;array&quot;</span>], sampling_rate=sampling_rate, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">with</span> torch.no_grad(): <span class="hljs-meta">... 
</span> outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">list</span>(last_hidden_states.shape) [<span class="hljs-number">1</span>, <span class="hljs-number">292</span>, <span class="hljs-number">768</span>]`}}),Ss=new ee({}),Ns=new V({props:{name:"class transformers.Wav2Vec2ForCTC",anchor:"transformers.Wav2Vec2ForCTC",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/wav2vec2/modeling_wav2vec2.py#L1647",parametersDescription:[{anchor:"transformers.Wav2Vec2ForCTC.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2Config">Wav2Vec2Config</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),Rs=new V({props:{name:"forward",anchor:"transformers.Wav2Vec2ForCTC.forward",parameters:[{name:"input_values",val:""},{name:"attention_mask",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"labels",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/wav2vec2/modeling_wav2vec2.py#L1688",parametersDescription:[{anchor:"transformers.Wav2Vec2ForCTC.forward.input_values",description:`<strong>input_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Float values of input raw speech waveform. Values can be obtained by loading a <em>.flac</em> or <em>.wav</em> audio file into an array of type <em>List[float]</em> or a <em>numpy.ndarray</em>, <em>e.g.</em> via the soundfile library (<em>pip install soundfile</em>). To prepare the array into <em>input_values</em>, the <a href="/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor">Wav2Vec2Processor</a> should be used for padding and conversion into a tensor of type <em>torch.FloatTensor</em>. See <a href="/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor.__call__">Wav2Vec2Processor.<strong>call</strong>()</a> for details.`,name:"input_values"},{anchor:"transformers.Wav2Vec2ForCTC.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing convolution and attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a></p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"> <p><code>attention_mask</code> should only be passed if the corresponding processor has <code>config.return_attention_mask == True</code>. 
For all models whose processor has <code>config.return_attention_mask == False</code>, such as <a href="https://huggingface.co/facebook/wav2vec2-base-960h" rel="nofollow">wav2vec2-base</a>, <code>attention_mask</code> should <strong>not</strong> be passed to avoid degraded performance when doing batched inference. For such models <code>input_values</code> should simply be padded with 0 and passed without <code>attention_mask</code>. Be aware that these models also yield slightly different results depending on whether <code>input_values</code> is padded or not.</p> </div>`,name:"attention_mask"},{anchor:"transformers.Wav2Vec2ForCTC.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.Wav2Vec2ForCTC.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.Wav2Vec2ForCTC.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.Wav2Vec2ForCTC.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_length)</code>, <em>optional</em>) &#x2014; Labels for connectionist temporal classification. Note that <code>target_length</code> has to be smaller or equal to the sequence length of the output logits. Indices are selected in <code>[-100, 0, ..., config.vocab_size - 1]</code>. 
All labels set to <code>-100</code> are ignored (masked), the loss is only computed for labels in <code>[0, ..., config.vocab_size - 1]</code>.`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.CausalLMOutput" >transformers.modeling_outputs.CausalLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2Config" >Wav2Vec2Config</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Language modeling loss (for next-token prediction).</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.CausalLMOutput" >transformers.modeling_outputs.CausalLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Yo=new fe({props:{$$slots:{default:[WT]},$$scope:{ctx:E}}}),Xs=new ue({props:{code:`from transformers import Wav2Vec2Processor, Wav2Vec2ForCTC from datasets import load_dataset import torch dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") dataset = dataset.sort("id") sampling_rate = dataset.features["audio"].sampling_rate processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h") model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h") # audio file is decoded on the fly inputs = processor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="pt") with torch.no_grad(): logits = model(**inputs).logits predicted_ids = torch.argmax(logits, dim=-1) # transcribe speech transcription = processor.batch_decode(predicted_ids) transcription[0]`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Wav2Vec2Processor, Wav2Vec2ForCTC <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span 
class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>dataset = load_dataset(<span class="hljs-string">&quot;hf-internal-testing/librispeech_asr_demo&quot;</span>, <span class="hljs-string">&quot;clean&quot;</span>, split=<span class="hljs-string">&quot;validation&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>dataset = dataset.sort(<span class="hljs-string">&quot;id&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>sampling_rate = dataset.features[<span class="hljs-string">&quot;audio&quot;</span>].sampling_rate <span class="hljs-meta">&gt;&gt;&gt; </span>processor = Wav2Vec2Processor.from_pretrained(<span class="hljs-string">&quot;facebook/wav2vec2-base-960h&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = Wav2Vec2ForCTC.from_pretrained(<span class="hljs-string">&quot;facebook/wav2vec2-base-960h&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># audio file is decoded on the fly</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = processor(dataset[<span class="hljs-number">0</span>][<span class="hljs-string">&quot;audio&quot;</span>][<span class="hljs-string">&quot;array&quot;</span>], sampling_rate=sampling_rate, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">with</span> torch.no_grad(): <span class="hljs-meta">... </span> logits = model(**inputs).logits <span class="hljs-meta">&gt;&gt;&gt; </span>predicted_ids = torch.argmax(logits, dim=-<span class="hljs-number">1</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># transcribe speech</span> <span class="hljs-meta">&gt;&gt;&gt; </span>transcription = processor.batch_decode(predicted_ids) <span class="hljs-meta">&gt;&gt;&gt; </span>transcription[<span class="hljs-number">0</span>] <span class="hljs-string">&#x27;MISTER QUILTER IS THE APOSTLE OF THE MIDDLE CLASSES AND WE ARE GLAD TO WELCOME HIS GOSPEL&#x27;</span>`}}),Gs=new ue({props:{code:`with processor.as_target_processor(): inputs["labels"] = processor(dataset[0]["text"], return_tensors="pt").input_ids # compute loss loss = model(**inputs).loss round(loss.item(), 2)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">with</span> processor.as_target_processor(): <span class="hljs-meta">... 
</span> inputs[<span class="hljs-string">&quot;labels&quot;</span>] = processor(dataset[<span class="hljs-number">0</span>][<span class="hljs-string">&quot;text&quot;</span>], return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># compute loss</span> <span class="hljs-meta">&gt;&gt;&gt; </span>loss = model(**inputs).loss <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">round</span>(loss.item(), <span class="hljs-number">2</span>) <span class="hljs-number">53.48</span>`}}),Js=new ee({}),Zs=new V({props:{name:"class transformers.Wav2Vec2ForSequenceClassification",anchor:"transformers.Wav2Vec2ForSequenceClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/wav2vec2/modeling_wav2vec2.py#L1777",parametersDescription:[{anchor:"transformers.Wav2Vec2ForSequenceClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2Config">Wav2Vec2Config</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),on=new V({props:{name:"forward",anchor:"transformers.Wav2Vec2ForSequenceClassification.forward",parameters:[{name:"input_values",val:""},{name:"attention_mask",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"labels",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/wav2vec2/modeling_wav2vec2.py#L1822",parametersDescription:[{anchor:"transformers.Wav2Vec2ForSequenceClassification.forward.input_values",description:`<strong>input_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Float values of input raw speech waveform. Values can be obtained by loading a <em>.flac</em> or <em>.wav</em> audio file into an array of type <em>List[float]</em> or a <em>numpy.ndarray</em>, <em>e.g.</em> via the soundfile library (<em>pip install soundfile</em>). To prepare the array into <em>input_values</em>, the <a href="/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor">Wav2Vec2Processor</a> should be used for padding and conversion into a tensor of type <em>torch.FloatTensor</em>. See <a href="/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor.__call__">Wav2Vec2Processor.<strong>call</strong>()</a> for details.`,name:"input_values"},{anchor:"transformers.Wav2Vec2ForSequenceClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing convolution and attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a></p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"> <p><code>attention_mask</code> should only be passed if the corresponding processor has <code>config.return_attention_mask == True</code>. For all models whose processor has <code>config.return_attention_mask == False</code>, such as <a href="https://huggingface.co/facebook/wav2vec2-base-960h" rel="nofollow">wav2vec2-base</a>, <code>attention_mask</code> should <strong>not</strong> be passed to avoid degraded performance when doing batched inference. For such models <code>input_values</code> should simply be padded with 0 and passed without <code>attention_mask</code>. Be aware that these models also yield slightly different results depending on whether <code>input_values</code> is padded or not.</p> </div>`,name:"attention_mask"},{anchor:"transformers.Wav2Vec2ForSequenceClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.Wav2Vec2ForSequenceClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.Wav2Vec2ForSequenceClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.Wav2Vec2ForSequenceClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. 
If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2Config" >Wav2Vec2Config</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),ta=new fe({props:{$$slots:{default:[$T]},$$scope:{ctx:E}}}),an=new ue({props:{code:`from transformers import Wav2Vec2FeatureExtractor, Wav2Vec2ForSequenceClassification from datasets import load_dataset import torch dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") dataset = dataset.sort("id") sampling_rate = dataset.features["audio"].sampling_rate feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("superb/wav2vec2-base-superb-ks") model = Wav2Vec2ForSequenceClassification.from_pretrained("superb/wav2vec2-base-superb-ks") # audio file is decoded on the fly inputs = feature_extractor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="pt") with torch.no_grad(): logits = model(**inputs).logits predicted_class_ids = torch.argmax(logits, dim=-1).item() predicted_label = model.config.id2label[predicted_class_ids] predicted_label`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Wav2Vec2FeatureExtractor, Wav2Vec2ForSequenceClassification 
<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>dataset = load_dataset(<span class="hljs-string">&quot;hf-internal-testing/librispeech_asr_demo&quot;</span>, <span class="hljs-string">&quot;clean&quot;</span>, split=<span class="hljs-string">&quot;validation&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>dataset = dataset.sort(<span class="hljs-string">&quot;id&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>sampling_rate = dataset.features[<span class="hljs-string">&quot;audio&quot;</span>].sampling_rate <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(<span class="hljs-string">&quot;superb/wav2vec2-base-superb-ks&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = Wav2Vec2ForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;superb/wav2vec2-base-superb-ks&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># audio file is decoded on the fly</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = feature_extractor(dataset[<span class="hljs-number">0</span>][<span class="hljs-string">&quot;audio&quot;</span>][<span class="hljs-string">&quot;array&quot;</span>], sampling_rate=sampling_rate, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">with</span> torch.no_grad(): <span class="hljs-meta">... </span> logits = model(**inputs).logits <span class="hljs-meta">&gt;&gt;&gt; </span>predicted_class_ids = torch.argmax(logits, dim=-<span class="hljs-number">1</span>).item() <span class="hljs-meta">&gt;&gt;&gt; </span>predicted_label = model.config.id2label[predicted_class_ids] <span class="hljs-meta">&gt;&gt;&gt; </span>predicted_label <span class="hljs-string">&#x27;_unknown_&#x27;</span>`}}),sn=new ue({props:{code:`# compute loss - target_label is e.g. "down" target_label = model.config.id2label[0] inputs["labels"] = torch.tensor([model.config.label2id[target_label]]) loss = model(**inputs).loss round(loss.item(), 2)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># compute loss - target_label is e.g. 
&quot;down&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>target_label = model.config.id2label[<span class="hljs-number">0</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>inputs[<span class="hljs-string">&quot;labels&quot;</span>] = torch.tensor([model.config.label2id[target_label]]) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = model(**inputs).loss <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">round</span>(loss.item(), <span class="hljs-number">2</span>) <span class="hljs-number">6.54</span>`}}),nn=new ee({}),rn=new V({props:{name:"class transformers.Wav2Vec2ForAudioFrameClassification",anchor:"transformers.Wav2Vec2ForAudioFrameClassification",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/wav2vec2/modeling_wav2vec2.py#L1900",parametersDescription:[{anchor:"transformers.Wav2Vec2ForAudioFrameClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2Config">Wav2Vec2Config</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),hn=new V({props:{name:"forward",anchor:"transformers.Wav2Vec2ForAudioFrameClassification.forward",parameters:[{name:"input_values",val:""},{name:"attention_mask",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/wav2vec2/modeling_wav2vec2.py#L1943",parametersDescription:[{anchor:"transformers.Wav2Vec2ForAudioFrameClassification.forward.input_values",description:`<strong>input_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Float values of input raw speech waveform. Values can be obtained by loading a <em>.flac</em> or <em>.wav</em> audio file into an array of type <em>List[float]</em> or a <em>numpy.ndarray</em>, <em>e.g.</em> via the soundfile library (<em>pip install soundfile</em>). To prepare the array into <em>input_values</em>, the <a href="/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor">Wav2Vec2Processor</a> should be used for padding and conversion into a tensor of type <em>torch.FloatTensor</em>. See <a href="/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor.__call__">Wav2Vec2Processor.<strong>call</strong>()</a> for details.`,name:"input_values"},{anchor:"transformers.Wav2Vec2ForAudioFrameClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing convolution and attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a></p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"> <p><code>attention_mask</code> should only be passed if the corresponding processor has <code>config.return_attention_mask == True</code>. For all models whose processor has <code>config.return_attention_mask == False</code>, such as <a href="https://huggingface.co/facebook/wav2vec2-base-960h" rel="nofollow">wav2vec2-base</a>, <code>attention_mask</code> should <strong>not</strong> be passed to avoid degraded performance when doing batched inference. For such models <code>input_values</code> should simply be padded with 0 and passed without <code>attention_mask</code>. Be aware that these models also yield slightly different results depending on whether <code>input_values</code> is padded or not.</p> </div>`,name:"attention_mask"},{anchor:"transformers.Wav2Vec2ForAudioFrameClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.Wav2Vec2ForAudioFrameClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.Wav2Vec2ForAudioFrameClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.Wav2Vec2ForAudioFrameClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. 
If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2Config" >Wav2Vec2Config</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.num_labels)</code>) \u2014 Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),aa=new fe({props:{$$slots:{default:[jT]},$$scope:{ctx:E}}}),fn=new ue({props:{code:`from transformers import Wav2Vec2FeatureExtractor, Wav2Vec2ForAudioFrameClassification from datasets import load_dataset import torch dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") dataset = dataset.sort("id") sampling_rate = dataset.features["audio"].sampling_rate feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("anton-l/wav2vec2-base-superb-sd") model = Wav2Vec2ForAudioFrameClassification.from_pretrained("anton-l/wav2vec2-base-superb-sd") # audio file is decoded on the fly inputs = feature_extractor(dataset[0]["audio"]["array"], return_tensors="pt", sampling_rate=sampling_rate) with torch.no_grad(): logits = model(**inputs).logits probabilities = torch.sigmoid(logits[0]) # labels is a one-hot array of shape (num_frames, num_speakers) labels = (probabilities > 0.5).long() labels[0].tolist()`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Wav2Vec2FeatureExtractor, Wav2Vec2ForAudioFrameClassification <span class="hljs-meta">&gt;&gt;&gt; 
</span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>dataset = load_dataset(<span class="hljs-string">&quot;hf-internal-testing/librispeech_asr_demo&quot;</span>, <span class="hljs-string">&quot;clean&quot;</span>, split=<span class="hljs-string">&quot;validation&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>dataset = dataset.sort(<span class="hljs-string">&quot;id&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>sampling_rate = dataset.features[<span class="hljs-string">&quot;audio&quot;</span>].sampling_rate <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(<span class="hljs-string">&quot;anton-l/wav2vec2-base-superb-sd&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = Wav2Vec2ForAudioFrameClassification.from_pretrained(<span class="hljs-string">&quot;anton-l/wav2vec2-base-superb-sd&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># audio file is decoded on the fly</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = feature_extractor(dataset[<span class="hljs-number">0</span>][<span class="hljs-string">&quot;audio&quot;</span>][<span class="hljs-string">&quot;array&quot;</span>], return_tensors=<span class="hljs-string">&quot;pt&quot;</span>, sampling_rate=sampling_rate) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">with</span> torch.no_grad(): <span class="hljs-meta">... </span> logits = model(**inputs).logits <span class="hljs-meta">&gt;&gt;&gt; </span>probabilities = torch.sigmoid(logits[<span class="hljs-number">0</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># labels is a one-hot array of shape (num_frames, num_speakers)</span> <span class="hljs-meta">&gt;&gt;&gt; </span>labels = (probabilities &gt; <span class="hljs-number">0.5</span>).long() <span class="hljs-meta">&gt;&gt;&gt; </span>labels[<span class="hljs-number">0</span>].tolist() [<span class="hljs-number">0</span>, <span class="hljs-number">0</span>]`}}),un=new ee({}),gn=new V({props:{name:"class transformers.Wav2Vec2ForXVector",anchor:"transformers.Wav2Vec2ForXVector",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/wav2vec2/modeling_wav2vec2.py#L2055",parametersDescription:[{anchor:"transformers.Wav2Vec2ForXVector.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2Config">Wav2Vec2Config</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),kn=new V({props:{name:"forward",anchor:"transformers.Wav2Vec2ForXVector.forward",parameters:[{name:"input_values",val:""},{name:"attention_mask",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"labels",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/wav2vec2/modeling_wav2vec2.py#L2117",parametersDescription:[{anchor:"transformers.Wav2Vec2ForXVector.forward.input_values",description:`<strong>input_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Float values of input raw speech waveform. Values can be obtained by loading a <em>.flac</em> or <em>.wav</em> audio file into an array of type <em>List[float]</em> or a <em>numpy.ndarray</em>, <em>e.g.</em> via the soundfile library (<em>pip install soundfile</em>). To prepare the array into <em>input_values</em>, the <a href="/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor">Wav2Vec2Processor</a> should be used for padding and conversion into a tensor of type <em>torch.FloatTensor</em>. See <a href="/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor.__call__">Wav2Vec2Processor.<strong>call</strong>()</a> for details.`,name:"input_values"},{anchor:"transformers.Wav2Vec2ForXVector.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing convolution and attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a></p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"> <p><code>attention_mask</code> should only be passed if the corresponding processor has <code>config.return_attention_mask == True</code>. For all models whose processor has <code>config.return_attention_mask == False</code>, such as <a href="https://huggingface.co/facebook/wav2vec2-base-960h" rel="nofollow">wav2vec2-base</a>, <code>attention_mask</code> should <strong>not</strong> be passed to avoid degraded performance when doing batched inference. For such models <code>input_values</code> should simply be padded with 0 and passed without <code>attention_mask</code>. Be aware that these models also yield slightly different results depending on whether <code>input_values</code> is padded or not.</p> </div>`,name:"attention_mask"},{anchor:"transformers.Wav2Vec2ForXVector.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.Wav2Vec2ForXVector.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.Wav2Vec2ForXVector.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.Wav2Vec2ForXVector.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).`,name:"labels"}],returnDescription:` <p>A <code>transformers.models.wav2vec2.modeling_wav2vec2.XVectorOutput</code>or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2Config" >Wav2Vec2Config</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.xvector_output_dim)</code>) \u2014 Classification hidden states before AMSoftmax.</p> </li> <li> <p><strong>embeddings</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.xvector_output_dim)</code>) \u2014 Utterance embeddings used for vector similarity-based retrieval.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><code>transformers.models.wav2vec2.modeling_wav2vec2.XVectorOutput</code>or <code>tuple(torch.FloatTensor)</code></p> `}}),na=new fe({props:{$$slots:{default:[VT]},$$scope:{ctx:E}}}),Tn=new ue({props:{code:`from transformers 
import Wav2Vec2FeatureExtractor, Wav2Vec2ForXVector from datasets import load_dataset import torch dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") dataset = dataset.sort("id") sampling_rate = dataset.features["audio"].sampling_rate feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("anton-l/wav2vec2-base-superb-sv") model = Wav2Vec2ForXVector.from_pretrained("anton-l/wav2vec2-base-superb-sv") # audio file is decoded on the fly inputs = feature_extractor( [d["array"] for d in dataset[:2]["audio"]], sampling_rate=sampling_rate, return_tensors="pt", padding=True ) with torch.no_grad(): embeddings = model(**inputs).embeddings embeddings = torch.nn.functional.normalize(embeddings, dim=-1).cpu() # the resulting embeddings can be used for cosine similarity-based retrieval cosine_sim = torch.nn.CosineSimilarity(dim=-1) similarity = cosine_sim(embeddings[0], embeddings[1]) threshold = 0.7 # the optimal threshold is dataset-dependent if similarity < threshold: print("Speakers are not the same!") round(similarity.item(), 2)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Wav2Vec2FeatureExtractor, Wav2Vec2ForXVector <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>dataset = load_dataset(<span class="hljs-string">&quot;hf-internal-testing/librispeech_asr_demo&quot;</span>, <span class="hljs-string">&quot;clean&quot;</span>, split=<span class="hljs-string">&quot;validation&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>dataset = dataset.sort(<span class="hljs-string">&quot;id&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>sampling_rate = dataset.features[<span class="hljs-string">&quot;audio&quot;</span>].sampling_rate <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(<span class="hljs-string">&quot;anton-l/wav2vec2-base-superb-sv&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = Wav2Vec2ForXVector.from_pretrained(<span class="hljs-string">&quot;anton-l/wav2vec2-base-superb-sv&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># audio file is decoded on the fly</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = feature_extractor( <span class="hljs-meta">... </span> [d[<span class="hljs-string">&quot;array&quot;</span>] <span class="hljs-keyword">for</span> d <span class="hljs-keyword">in</span> dataset[:<span class="hljs-number">2</span>][<span class="hljs-string">&quot;audio&quot;</span>]], sampling_rate=sampling_rate, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>, padding=<span class="hljs-literal">True</span> <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">with</span> torch.no_grad(): <span class="hljs-meta">... 
</span> embeddings = model(**inputs).embeddings <span class="hljs-meta">&gt;&gt;&gt; </span>embeddings = torch.nn.functional.normalize(embeddings, dim=-<span class="hljs-number">1</span>).cpu() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># the resulting embeddings can be used for cosine similarity-based retrieval</span> <span class="hljs-meta">&gt;&gt;&gt; </span>cosine_sim = torch.nn.CosineSimilarity(dim=-<span class="hljs-number">1</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>similarity = cosine_sim(embeddings[<span class="hljs-number">0</span>], embeddings[<span class="hljs-number">1</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>threshold = <span class="hljs-number">0.7</span> <span class="hljs-comment"># the optimal threshold is dataset-dependent</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">if</span> similarity &lt; threshold: <span class="hljs-meta">... </span> <span class="hljs-built_in">print</span>(<span class="hljs-string">&quot;Speakers are not the same!&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">round</span>(similarity.item(), <span class="hljs-number">2</span>) <span class="hljs-number">0.98</span>`}}),xn=new ee({}),Wn=new V({props:{name:"class transformers.Wav2Vec2ForPreTraining",anchor:"transformers.Wav2Vec2ForPreTraining",parameters:[{name:"config",val:": Wav2Vec2Config"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/wav2vec2/modeling_wav2vec2.py#L1386",parametersDescription:[{anchor:"transformers.Wav2Vec2ForPreTraining.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2Config">Wav2Vec2Config</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),En=new V({props:{name:"forward",anchor:"transformers.Wav2Vec2ForPreTraining.forward",parameters:[{name:"input_values",val:""},{name:"attention_mask",val:" = None"},{name:"mask_time_indices",val:" = None"},{name:"sampled_negative_indices",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/wav2vec2/modeling_wav2vec2.py#L1447",parametersDescription:[{anchor:"transformers.Wav2Vec2ForPreTraining.forward.input_values",description:`<strong>input_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Float values of input raw speech waveform. Values can be obtained by loading a <em>.flac</em> or <em>.wav</em> audio file into an array of type <em>List[float]</em> or a <em>numpy.ndarray</em>, <em>e.g.</em> via the soundfile library (<em>pip install soundfile</em>). To prepare the array into <em>input_values</em>, the <a href="/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor">Wav2Vec2Processor</a> should be used for padding and conversion into a tensor of type <em>torch.FloatTensor</em>. 
See <a href="/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor.__call__">Wav2Vec2Processor.<strong>call</strong>()</a> for details.`,name:"input_values"},{anchor:"transformers.Wav2Vec2ForPreTraining.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing convolution and attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a></p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"> <p><code>attention_mask</code> should only be passed if the corresponding processor has <code>config.return_attention_mask == True</code>. For all models whose processor has <code>config.return_attention_mask == False</code>, such as <a href="https://huggingface.co/facebook/wav2vec2-base-960h" rel="nofollow">wav2vec2-base</a>, <code>attention_mask</code> should <strong>not</strong> be passed to avoid degraded performance when doing batched inference. For such models <code>input_values</code> should simply be padded with 0 and passed without <code>attention_mask</code>. Be aware that these models also yield slightly different results depending on whether <code>input_values</code> is padded or not.</p> </div>`,name:"attention_mask"},{anchor:"transformers.Wav2Vec2ForPreTraining.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.Wav2Vec2ForPreTraining.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.Wav2Vec2ForPreTraining.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.Wav2Vec2ForPreTraining.forward.mask_time_indices",description:`<strong>mask_time_indices</strong> (<code>torch.BoolTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices to mask extracted features for contrastive loss. When in training mode, model learns to predict masked extracted features in <em>config.proj_codevector_dim</em> space.`,name:"mask_time_indices"},{anchor:"transformers.Wav2Vec2ForPreTraining.forward.sampled_negative_indices",description:`<strong>sampled_negative_indices</strong> (<code>torch.BoolTensor</code> of shape <code>(batch_size, sequence_length, num_negatives)</code>, <em>optional</em>) &#x2014; Indices indicating which quantized target vectors are used as negative sampled vectors in contrastive loss. 
Required input for pre-training.`,name:"sampled_negative_indices"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForPreTrainingOutput" >transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForPreTrainingOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2Config" >Wav2Vec2Config</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<em>optional</em>, returned when <code>sample_negative_indices</code> are passed, <code>torch.FloatTensor</code> of shape <code>(1,)</code>) \u2014 Total loss as the sum of the contrastive loss (L_m) and the diversity loss (L_d) as stated in the <a href="https://arxiv.org/pdf/2006.11477.pdf" rel="nofollow" >official paper</a> . (classification) loss.</p> </li> <li> <p><strong>projected_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.proj_codevector_dim)</code>) \u2014 Hidden-states of the model projected to <em>config.proj_codevector_dim</em> that can be used to predict the masked projected quantized states.</p> </li> <li> <p><strong>projected_quantized_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.proj_codevector_dim)</code>) \u2014 Quantized extracted feature vectors projected to <em>config.proj_codevector_dim</em> representing the positive target vectors for contrastive loss.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>contrastive_loss</strong> (<em>optional</em>, returned when <code>sample_negative_indices</code> are passed, <code>torch.FloatTensor</code> of shape <code>(1,)</code>) \u2014 The contrastive loss (L_m) as stated in the <a href="https://arxiv.org/pdf/2006.11477.pdf" rel="nofollow" >official paper</a> .</p> </li> <li> <p><strong>diversity_loss</strong> (<em>optional</em>, returned when <code>sample_negative_indices</code> are passed, <code>torch.FloatTensor</code> of shape <code>(1,)</code>) \u2014 The diversity loss (L_d) as stated in the <a href="https://arxiv.org/pdf/2006.11477.pdf" rel="nofollow" >official paper</a> .</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForPreTrainingOutput" 
>transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForPreTrainingOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),ia=new fe({props:{$$slots:{default:[FT]},$$scope:{ctx:E}}}),Pn=new ue({props:{code:`import torch from transformers import Wav2Vec2FeatureExtractor, Wav2Vec2ForPreTraining from transformers.models.wav2vec2.modeling_wav2vec2 import _compute_mask_indices from datasets import load_dataset import soundfile as sf feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("patrickvonplaten/wav2vec2-base") model = Wav2Vec2ForPreTraining.from_pretrained("patrickvonplaten/wav2vec2-base") ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") input_values = feature_extractor(ds[0]["audio"]["array"], return_tensors="pt").input_values # Batch size 1 # compute masked indices batch_size, raw_sequence_length = input_values.shape sequence_length = model._get_feat_extract_output_lengths(raw_sequence_length) mask_time_indices = _compute_mask_indices((batch_size, sequence_length), mask_prob=0.2, mask_length=2) mask_time_indices = torch.tensor(mask_time_indices, device=input_values.device, dtype=torch.long) with torch.no_grad(): outputs = model(input_values, mask_time_indices=mask_time_indices) # compute cosine similarity between predicted (=projected_states) and target (=projected_quantized_states) cosine_sim = torch.cosine_similarity(outputs.projected_states, outputs.projected_quantized_states, dim=-1) # show that cosine similarity is much higher than random cosine_sim[mask_time_indices.to(torch.bool)].mean() > 0.5 # for contrastive loss training model should be put into train mode model = model.train() loss = model(input_values, mask_time_indices=mask_time_indices).loss`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Wav2Vec2FeatureExtractor, Wav2Vec2ForPreTraining <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers.models.wav2vec2.modeling_wav2vec2 <span class="hljs-keyword">import</span> _compute_mask_indices <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> soundfile <span class="hljs-keyword">as</span> sf <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(<span class="hljs-string">&quot;patrickvonplaten/wav2vec2-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = Wav2Vec2ForPreTraining.from_pretrained(<span class="hljs-string">&quot;patrickvonplaten/wav2vec2-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>ds = load_dataset(<span class="hljs-string">&quot;hf-internal-testing/librispeech_asr_dummy&quot;</span>, <span class="hljs-string">&quot;clean&quot;</span>, split=<span class="hljs-string">&quot;validation&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_values = feature_extractor(ds[<span class="hljs-number">0</span>][<span class="hljs-string">&quot;audio&quot;</span>][<span class="hljs-string">&quot;array&quot;</span>], return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_values <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># 
compute masked indices</span> <span class="hljs-meta">&gt;&gt;&gt; </span>batch_size, raw_sequence_length = input_values.shape <span class="hljs-meta">&gt;&gt;&gt; </span>sequence_length = model._get_feat_extract_output_lengths(raw_sequence_length) <span class="hljs-meta">&gt;&gt;&gt; </span>mask_time_indices = _compute_mask_indices((batch_size, sequence_length), mask_prob=<span class="hljs-number">0.2</span>, mask_length=<span class="hljs-number">2</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>mask_time_indices = torch.tensor(mask_time_indices, device=input_values.device, dtype=torch.long) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">with</span> torch.no_grad(): <span class="hljs-meta">... </span> outputs = model(input_values, mask_time_indices=mask_time_indices) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># compute cosine similarity between predicted (=projected_states) and target (=projected_quantized_states)</span> <span class="hljs-meta">&gt;&gt;&gt; </span>cosine_sim = torch.cosine_similarity(outputs.projected_states, outputs.projected_quantized_states, dim=-<span class="hljs-number">1</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># show that cosine similarity is much higher than random</span> <span class="hljs-meta">&gt;&gt;&gt; </span>cosine_sim[mask_time_indices.to(torch.<span class="hljs-built_in">bool</span>)].mean() &gt; <span class="hljs-number">0.5</span> tensor(<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># for contrastive loss training model should be put into train mode</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = model.train() <span class="hljs-meta">&gt;&gt;&gt; </span>loss = model(input_values, mask_time_indices=mask_time_indices).loss`}}),Cn=new ee({}),qn=new V({props:{name:"class transformers.TFWav2Vec2Model",anchor:"transformers.TFWav2Vec2Model",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/wav2vec2/modeling_tf_wav2vec2.py#L1428",parametersDescription:[{anchor:"transformers.TFWav2Vec2Model.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2Config">Wav2Vec2Config</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),ca=new fe({props:{$$slots:{default:[ET]},$$scope:{ctx:E}}}),Ln=new V({props:{name:"call",anchor:"transformers.TFWav2Vec2Model.call",parameters:[{name:"input_values",val:": Tensor"},{name:"attention_mask",val:": typing.Optional[tensorflow.python.framework.ops.Tensor] = None"},{name:"token_type_ids",val:": typing.Optional[tensorflow.python.framework.ops.Tensor] = None"},{name:"position_ids",val:": typing.Optional[tensorflow.python.framework.ops.Tensor] = None"},{name:"head_mask",val:": typing.Optional[tensorflow.python.framework.ops.Tensor] = None"},{name:"inputs_embeds",val:": typing.Optional[tensorflow.python.framework.ops.Tensor] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"training",val:": bool = False"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/wav2vec2/modeling_tf_wav2vec2.py#L1434",parametersDescription:[{anchor:"transformers.TFWav2Vec2Model.call.input_values",description:`<strong>input_values</strong> (<code>np.ndarray</code>, <code>tf.Tensor</code>, <code>List[tf.Tensor]</code> \`<code>Dict[str, tf.Tensor]</code> or <code>Dict[str, np.ndarray]</code> and each example must have the shape <code>({0})</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_values"},{anchor:"transformers.TFWav2Vec2Model.call.attention_mask",description:`<strong>attention_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>({0})</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFWav2Vec2Model.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>({0})</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFWav2Vec2Model.call.position_ids",description:`<strong>position_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>({0})</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFWav2Vec2Model.call.head_mask",description:`<strong>head_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFWav2Vec2Model.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>({0}, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_values</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_values</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFWav2Vec2Model.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFWav2Vec2Model.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFWav2Vec2Model.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. 
This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFWav2Vec2Model.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to \`False&#x201C;) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFBaseModelOutput" >transformers.modeling_tf_outputs.TFBaseModelOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2Config" >Wav2Vec2Config</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFBaseModelOutput" >transformers.modeling_tf_outputs.TFBaseModelOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),da=new fe({props:{$$slots:{default:[PT]},$$scope:{ctx:E}}}),Dn=new ue({props:{code:`from transformers import Wav2Vec2Processor, TFWav2Vec2Model from datasets import load_dataset import soundfile as sf processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h") model = TFWav2Vec2Model.from_pretrained("facebook/wav2vec2-base-960h") def map_to_array(batch): speech, _ = sf.read(batch["file"]) batch["speech"] = speech return batch ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") ds = ds.map(map_to_array) input_values = processor(ds["speech"][0], return_tensors="tf").input_values # Batch size 1 hidden_states = model(input_values).last_hidden_state`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Wav2Vec2Processor, TFWav2Vec2Model <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> soundfile <span 
class="hljs-keyword">as</span> sf <span class="hljs-meta">&gt;&gt;&gt; </span>processor = Wav2Vec2Processor.from_pretrained(<span class="hljs-string">&quot;facebook/wav2vec2-base-960h&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFWav2Vec2Model.from_pretrained(<span class="hljs-string">&quot;facebook/wav2vec2-base-960h&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">def</span> <span class="hljs-title function_">map_to_array</span>(<span class="hljs-params">batch</span>): <span class="hljs-meta">... </span> speech, _ = sf.read(batch[<span class="hljs-string">&quot;file&quot;</span>]) <span class="hljs-meta">... </span> batch[<span class="hljs-string">&quot;speech&quot;</span>] = speech <span class="hljs-meta">... </span> <span class="hljs-keyword">return</span> batch <span class="hljs-meta">&gt;&gt;&gt; </span>ds = load_dataset(<span class="hljs-string">&quot;hf-internal-testing/librispeech_asr_dummy&quot;</span>, <span class="hljs-string">&quot;clean&quot;</span>, split=<span class="hljs-string">&quot;validation&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>ds = ds.<span class="hljs-built_in">map</span>(map_to_array) <span class="hljs-meta">&gt;&gt;&gt; </span>input_values = processor(ds[<span class="hljs-string">&quot;speech&quot;</span>][<span class="hljs-number">0</span>], return_tensors=<span class="hljs-string">&quot;tf&quot;</span>).input_values <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>hidden_states = model(input_values).last_hidden_state`}}),On=new ee({}),Sn=new V({props:{name:"class transformers.TFWav2Vec2ForCTC",anchor:"transformers.TFWav2Vec2ForCTC",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/wav2vec2/modeling_tf_wav2vec2.py#L1531",parametersDescription:[{anchor:"transformers.TFWav2Vec2ForCTC.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2Config">Wav2Vec2Config</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}]}}),ma=new fe({props:{$$slots:{default:[CT]},$$scope:{ctx:E}}}),Hn=new V({props:{name:"call",anchor:"transformers.TFWav2Vec2ForCTC.call",parameters:[{name:"input_values",val:": Tensor"},{name:"attention_mask",val:": typing.Optional[tensorflow.python.framework.ops.Tensor] = None"},{name:"token_type_ids",val:": typing.Optional[tensorflow.python.framework.ops.Tensor] = None"},{name:"position_ids",val:": typing.Optional[tensorflow.python.framework.ops.Tensor] = None"},{name:"head_mask",val:": typing.Optional[tensorflow.python.framework.ops.Tensor] = None"},{name:"inputs_embeds",val:": typing.Optional[tensorflow.python.framework.ops.Tensor] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"labels",val:": typing.Optional[tensorflow.python.framework.ops.Tensor] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"training",val:": typing.Optional[bool] = False"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/wav2vec2/modeling_tf_wav2vec2.py#L1558",parametersDescription:[{anchor:"transformers.TFWav2Vec2ForCTC.call.input_values",description:`<strong>input_values</strong> (<code>np.ndarray</code>, <code>tf.Tensor</code>, <code>List[tf.Tensor]</code> \`<code>Dict[str, tf.Tensor]</code> or <code>Dict[str, np.ndarray]</code> and each example must have the shape <code>({0})</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_values"},{anchor:"transformers.TFWav2Vec2ForCTC.call.attention_mask",description:`<strong>attention_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>({0})</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFWav2Vec2ForCTC.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>({0})</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFWav2Vec2ForCTC.call.position_ids",description:`<strong>position_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>({0})</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFWav2Vec2ForCTC.call.head_mask",description:`<strong>head_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFWav2Vec2ForCTC.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>({0}, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_values</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_values</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFWav2Vec2ForCTC.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFWav2Vec2ForCTC.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFWav2Vec2ForCTC.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple. 
This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFWav2Vec2ForCTC.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to \`False&#x201C;) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFWav2Vec2ForCTC.call.labels",description:`<strong>labels</strong> (<code>tf.Tensor</code> or <code>np.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_values</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>`,name:"labels"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFCausalLMOutput" >transformers.modeling_tf_outputs.TFCausalLMOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2Config" >Wav2Vec2Config</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(n,)</code>, <em>optional</em>, where n is the number of non-masked labels, returned when <code>labels</code> is provided) \u2014 Language modeling loss (for next-token prediction).</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_tf_outputs.TFCausalLMOutput" >transformers.modeling_tf_outputs.TFCausalLMOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),ha=new fe({props:{$$slots:{default:[qT]},$$scope:{ctx:E}}}),Rn=new ue({props:{code:`import tensorflow as tf from transformers import Wav2Vec2Processor, TFWav2Vec2ForCTC from datasets import load_dataset import soundfile as sf processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h") model = 
TFWav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h") def map_to_array(batch): speech, _ = sf.read(batch["file"]) batch["speech"] = speech return batch ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") ds = ds.map(map_to_array) input_values = processor(ds["speech"][0], return_tensors="tf").input_values # Batch size 1 logits = model(input_values).logits predicted_ids = tf.argmax(logits, axis=-1) transcription = processor.decode(predicted_ids[0]) # compute loss target_transcription = "A MAN SAID TO THE UNIVERSE SIR I EXIST" # wrap processor as target processor to encode labels with processor.as_target_processor(): labels = processor(transcription, return_tensors="tf").input_ids loss = model(input_values, labels=labels).loss`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Wav2Vec2Processor, TFWav2Vec2ForCTC <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> soundfile <span class="hljs-keyword">as</span> sf <span class="hljs-meta">&gt;&gt;&gt; </span>processor = Wav2Vec2Processor.from_pretrained(<span class="hljs-string">&quot;facebook/wav2vec2-base-960h&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFWav2Vec2ForCTC.from_pretrained(<span class="hljs-string">&quot;facebook/wav2vec2-base-960h&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">def</span> <span class="hljs-title function_">map_to_array</span>(<span class="hljs-params">batch</span>): <span class="hljs-meta">... </span> speech, _ = sf.read(batch[<span class="hljs-string">&quot;file&quot;</span>]) <span class="hljs-meta">... </span> batch[<span class="hljs-string">&quot;speech&quot;</span>] = speech <span class="hljs-meta">... 
</span> <span class="hljs-keyword">return</span> batch <span class="hljs-meta">&gt;&gt;&gt; </span>ds = load_dataset(<span class="hljs-string">&quot;hf-internal-testing/librispeech_asr_dummy&quot;</span>, <span class="hljs-string">&quot;clean&quot;</span>, split=<span class="hljs-string">&quot;validation&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>ds = ds.<span class="hljs-built_in">map</span>(map_to_array) <span class="hljs-meta">&gt;&gt;&gt; </span>input_values = processor(ds[<span class="hljs-string">&quot;speech&quot;</span>][<span class="hljs-number">0</span>], return_tensors=<span class="hljs-string">&quot;tf&quot;</span>).input_values <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>logits = model(input_values).logits <span class="hljs-meta">&gt;&gt;&gt; </span>predicted_ids = tf.argmax(logits, axis=-<span class="hljs-number">1</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>transcription = processor.decode(predicted_ids[<span class="hljs-number">0</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># compute loss</span> <span class="hljs-meta">&gt;&gt;&gt; </span>target_transcription = <span class="hljs-string">&quot;A MAN SAID TO THE UNIVERSE SIR I EXIST&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># wrap processor as target processor to encode labels</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">with</span> processor.as_target_processor(): <span class="hljs-meta">... </span> labels = processor(transcription, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span>loss = model(input_values, labels=labels).loss`}}),Xn=new ee({}),Gn=new V({props:{name:"class transformers.FlaxWav2Vec2Model",anchor:"transformers.FlaxWav2Vec2Model",parameters:[{name:"config",val:": Wav2Vec2Config"},{name:"input_shape",val:": typing.Tuple = (1, 1024)"},{name:"seed",val:": int = 0"},{name:"dtype",val:": dtype = <class 'jax._src.numpy.lax_numpy.float32'>"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/wav2vec2/modeling_flax_wav2vec2.py#L1045",parametersDescription:[{anchor:"transformers.FlaxWav2Vec2Model.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2Config">Wav2Vec2Config</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"},{anchor:"transformers.FlaxWav2Vec2Model.dtype",description:`<strong>dtype</strong> (<code>jax.numpy.dtype</code>, <em>optional</em>, defaults to <code>jax.numpy.float32</code>) &#x2014; The data type of the computation. Can be one of <code>jax.numpy.float32</code>, <code>jax.numpy.float16</code> (on GPUs) and <code>jax.numpy.bfloat16</code> (on TPUs).</p> <p>This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. 
If specified all the computation will be performed with the given <code>dtype</code>.</p> <p><strong>Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.</strong></p> <p>If you wish to change the dtype of the model parameters, see <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.to_fp16">to_fp16()</a> and <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.to_bf16">to_bf16()</a>.`,name:"dtype"}]}}),sr=new V({props:{name:"__call__",anchor:"transformers.FlaxWav2Vec2PreTrainedModel.__call__",parameters:[{name:"input_values",val:""},{name:"attention_mask",val:" = None"},{name:"mask_time_indices",val:" = None"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"},{name:"train",val:": bool = False"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"freeze_feature_encoder",val:": bool = False"},{name:"return_dict",val:": typing.Optional[bool] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/wav2vec2/modeling_flax_wav2vec2.py#L869",parametersDescription:[{anchor:"transformers.FlaxWav2Vec2PreTrainedModel.__call__.input_values",description:`<strong>input_values</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Float values of input raw speech waveform. Values can be obtained by loading a <em>.flac</em> or <em>.wav</em> audio file into an array of type <em>List[float]</em> or a <em>numpy.ndarray</em>, <em>e.g.</em> via the soundfile library (<em>pip install soundfile</em>). To prepare the array into <em>input_values</em>, the <a href="/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor">Wav2Vec2Processor</a> should be used for padding and conversion into a tensor of type <em>jnp.ndarray</em>. See <a href="/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor.__call__">Wav2Vec2Processor.<strong>call</strong>()</a> for details.`,name:"input_values"},{anchor:"transformers.FlaxWav2Vec2PreTrainedModel.__call__.attention_mask",description:`<strong>attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing convolution and attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a> .. warning:: <code>attention_mask</code> should only be passed if the corresponding processor has <code>config.return_attention_mask == True</code>. For all models whose processor has <code>config.return_attention_mask == False</code>, such as <a href="https://huggingface.co/facebook/wav2vec2-base-960h" rel="nofollow">wav2vec2-base</a>, <code>attention_mask</code> should <strong>not</strong> be passed to avoid degraded performance when doing batched inference. For such models <code>input_values</code> should simply be padded with 0 and passed without <code>attention_mask</code>. 
Be aware that these models also yield slightly different results depending on whether <code>input_values</code> is padded or not.`,name:"attention_mask"},{anchor:"transformers.FlaxWav2Vec2PreTrainedModel.__call__.mask_time_indices",description:`<strong>mask_time_indices</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices to mask extracted features for contrastive loss. When in training mode, model learns to predict masked extracted features in <em>config.proj_codevector_dim</em> space.`,name:"mask_time_indices"},{anchor:"transformers.FlaxWav2Vec2PreTrainedModel.__call__.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.FlaxWav2Vec2PreTrainedModel.__call__.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FlaxWav2Vec2PreTrainedModel.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.models.wav2vec2.modeling_flax_wav2vec2.FlaxWav2Vec2BaseModelOutput" >transformers.models.wav2vec2.modeling_flax_wav2vec2.FlaxWav2Vec2BaseModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<code>&lt;class 'transformers.models.wav2vec2.configuration_wav2vec2.Wav2Vec2Config'&gt;</code>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>extract_features</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, last_conv_dim)</code>) \u2014 Sequence of extracted feature vectors of the last convolutional layer of the model with <code>last_conv_dim</code> being the dimension of the last convolutional layer.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, 
sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.models.wav2vec2.modeling_flax_wav2vec2.FlaxWav2Vec2BaseModelOutput" >transformers.models.wav2vec2.modeling_flax_wav2vec2.FlaxWav2Vec2BaseModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),ua=new fe({props:{$$slots:{default:[MT]},$$scope:{ctx:E}}}),nr=new ue({props:{code:`from transformers import Wav2Vec2Processor, FlaxWav2Vec2Model from datasets import load_dataset import soundfile as sf processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-large-lv60") model = FlaxWav2Vec2Model.from_pretrained("facebook/wav2vec2-large-lv60") def map_to_array(batch): speech, _ = sf.read(batch["file"]) batch["speech"] = speech return batch ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") ds = ds.map(map_to_array) input_values = processor( ds["speech"][0], sampling_rate=16_000, return_tensors="np" ).input_values # Batch size 1 hidden_states = model(input_values).last_hidden_state`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Wav2Vec2Processor, FlaxWav2Vec2Model <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> soundfile <span class="hljs-keyword">as</span> sf <span class="hljs-meta">&gt;&gt;&gt; </span>processor = Wav2Vec2Processor.from_pretrained(<span class="hljs-string">&quot;facebook/wav2vec2-large-lv60&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxWav2Vec2Model.from_pretrained(<span class="hljs-string">&quot;facebook/wav2vec2-large-lv60&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">def</span> <span class="hljs-title function_">map_to_array</span>(<span class="hljs-params">batch</span>): <span class="hljs-meta">... </span> speech, _ = sf.read(batch[<span class="hljs-string">&quot;file&quot;</span>]) <span class="hljs-meta">... </span> batch[<span class="hljs-string">&quot;speech&quot;</span>] = speech <span class="hljs-meta">... </span> <span class="hljs-keyword">return</span> batch <span class="hljs-meta">&gt;&gt;&gt; </span>ds = load_dataset(<span class="hljs-string">&quot;hf-internal-testing/librispeech_asr_dummy&quot;</span>, <span class="hljs-string">&quot;clean&quot;</span>, split=<span class="hljs-string">&quot;validation&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>ds = ds.<span class="hljs-built_in">map</span>(map_to_array) <span class="hljs-meta">&gt;&gt;&gt; </span>input_values = processor( <span class="hljs-meta">... 
</span> ds[<span class="hljs-string">&quot;speech&quot;</span>][<span class="hljs-number">0</span>], sampling_rate=<span class="hljs-number">16_000</span>, return_tensors=<span class="hljs-string">&quot;np&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>).input_values <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>hidden_states = model(input_values).last_hidden_state`}}),rr=new ee({}),ir=new V({props:{name:"class transformers.FlaxWav2Vec2ForCTC",anchor:"transformers.FlaxWav2Vec2ForCTC",parameters:[{name:"config",val:": Wav2Vec2Config"},{name:"input_shape",val:": typing.Tuple = (1, 1024)"},{name:"seed",val:": int = 0"},{name:"dtype",val:": dtype = <class 'jax._src.numpy.lax_numpy.float32'>"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/wav2vec2/modeling_flax_wav2vec2.py#L1163",parametersDescription:[{anchor:"transformers.FlaxWav2Vec2ForCTC.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2Config">Wav2Vec2Config</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"},{anchor:"transformers.FlaxWav2Vec2ForCTC.dtype",description:`<strong>dtype</strong> (<code>jax.numpy.dtype</code>, <em>optional</em>, defaults to <code>jax.numpy.float32</code>) &#x2014; The data type of the computation. Can be one of <code>jax.numpy.float32</code>, <code>jax.numpy.float16</code> (on GPUs) and <code>jax.numpy.bfloat16</code> (on TPUs).</p> <p>This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If specified all the computation will be performed with the given <code>dtype</code>.</p> <p><strong>Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.</strong></p> <p>If you wish to change the dtype of the model parameters, see <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.to_fp16">to_fp16()</a> and <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.to_bf16">to_bf16()</a>.`,name:"dtype"}]}}),gr=new V({props:{name:"__call__",anchor:"transformers.FlaxWav2Vec2PreTrainedModel.__call__",parameters:[{name:"input_values",val:""},{name:"attention_mask",val:" = None"},{name:"mask_time_indices",val:" = None"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"},{name:"train",val:": bool = False"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"freeze_feature_encoder",val:": bool = False"},{name:"return_dict",val:": typing.Optional[bool] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/wav2vec2/modeling_flax_wav2vec2.py#L869",parametersDescription:[{anchor:"transformers.FlaxWav2Vec2PreTrainedModel.__call__.input_values",description:`<strong>input_values</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Float values of input raw speech waveform. 
Values can be obtained by loading a <em>.flac</em> or <em>.wav</em> audio file into an array of type <em>List[float]</em> or a <em>numpy.ndarray</em>, <em>e.g.</em> via the soundfile library (<em>pip install soundfile</em>). To prepare the array into <em>input_values</em>, the <a href="/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor">Wav2Vec2Processor</a> should be used for padding and conversion into a tensor of type <em>jnp.ndarray</em>. See <a href="/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor.__call__">Wav2Vec2Processor.<strong>call</strong>()</a> for details.`,name:"input_values"},{anchor:"transformers.FlaxWav2Vec2PreTrainedModel.__call__.attention_mask",description:`<strong>attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing convolution and attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a> .. warning:: <code>attention_mask</code> should only be passed if the corresponding processor has <code>config.return_attention_mask == True</code>. For all models whose processor has <code>config.return_attention_mask == False</code>, such as <a href="https://huggingface.co/facebook/wav2vec2-base-960h" rel="nofollow">wav2vec2-base</a>, <code>attention_mask</code> should <strong>not</strong> be passed to avoid degraded performance when doing batched inference. For such models <code>input_values</code> should simply be padded with 0 and passed without <code>attention_mask</code>. Be aware that these models also yield slightly different results depending on whether <code>input_values</code> is padded or not.`,name:"attention_mask"},{anchor:"transformers.FlaxWav2Vec2PreTrainedModel.__call__.mask_time_indices",description:`<strong>mask_time_indices</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices to mask extracted features for contrastive loss. When in training mode, model learns to predict masked extracted features in <em>config.proj_codevector_dim</em> space.`,name:"mask_time_indices"},{anchor:"transformers.FlaxWav2Vec2PreTrainedModel.__call__.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.FlaxWav2Vec2PreTrainedModel.__call__.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FlaxWav2Vec2PreTrainedModel.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxMaskedLMOutput" >transformers.modeling_flax_outputs.FlaxMaskedLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<code>&lt;class 'transformers.models.wav2vec2.configuration_wav2vec2.Wav2Vec2Config'&gt;</code>) and inputs.</p> <ul> <li> <p><strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_flax_outputs.FlaxMaskedLMOutput" >transformers.modeling_flax_outputs.FlaxMaskedLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),_a=new fe({props:{$$slots:{default:[zT]},$$scope:{ctx:E}}}),_r=new ue({props:{code:`import jax.numpy as jnp from transformers import Wav2Vec2Processor, FlaxWav2Vec2ForCTC from datasets import load_dataset import soundfile as sf processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-large-960h-lv60") model = FlaxWav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-large-960h-lv60") def map_to_array(batch): speech, _ = sf.read(batch["file"]) batch["speech"] = speech return batch ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") ds = ds.map(map_to_array) input_values = processor( ds["speech"][0], sampling_rate=16_000, return_tensors="np" ).input_values # Batch size 1 logits = model(input_values).logits predicted_ids = jnp.argmax(logits, axis=-1) transcription = processor.decode(predicted_ids[0]) # should give: "A MAN SAID TO THE UNIVERSE SIR I EXIST"`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> jax.numpy <span class="hljs-keyword">as</span> jnp <span class="hljs-meta">&gt;&gt;&gt; </span><span 
class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Wav2Vec2Processor, FlaxWav2Vec2ForCTC <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> soundfile <span class="hljs-keyword">as</span> sf <span class="hljs-meta">&gt;&gt;&gt; </span>processor = Wav2Vec2Processor.from_pretrained(<span class="hljs-string">&quot;facebook/wav2vec2-large-960h-lv60&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxWav2Vec2ForCTC.from_pretrained(<span class="hljs-string">&quot;facebook/wav2vec2-large-960h-lv60&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">def</span> <span class="hljs-title function_">map_to_array</span>(<span class="hljs-params">batch</span>): <span class="hljs-meta">... </span> speech, _ = sf.read(batch[<span class="hljs-string">&quot;file&quot;</span>]) <span class="hljs-meta">... </span> batch[<span class="hljs-string">&quot;speech&quot;</span>] = speech <span class="hljs-meta">... </span> <span class="hljs-keyword">return</span> batch <span class="hljs-meta">&gt;&gt;&gt; </span>ds = load_dataset(<span class="hljs-string">&quot;hf-internal-testing/librispeech_asr_dummy&quot;</span>, <span class="hljs-string">&quot;clean&quot;</span>, split=<span class="hljs-string">&quot;validation&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>ds = ds.<span class="hljs-built_in">map</span>(map_to_array) <span class="hljs-meta">&gt;&gt;&gt; </span>input_values = processor( <span class="hljs-meta">... </span> ds[<span class="hljs-string">&quot;speech&quot;</span>][<span class="hljs-number">0</span>], sampling_rate=<span class="hljs-number">16_000</span>, return_tensors=<span class="hljs-string">&quot;np&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>).input_values <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>logits = model(input_values).logits <span class="hljs-meta">&gt;&gt;&gt; </span>predicted_ids = jnp.argmax(logits, axis=-<span class="hljs-number">1</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>transcription = processor.decode(predicted_ids[<span class="hljs-number">0</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># should give: &quot;A MAN SAID TO THE UNIVERSE SIR I EXIST&quot;</span>`}}),vr=new ee({}),wr=new V({props:{name:"class transformers.FlaxWav2Vec2ForPreTraining",anchor:"transformers.FlaxWav2Vec2ForPreTraining",parameters:[{name:"config",val:": Wav2Vec2Config"},{name:"input_shape",val:": typing.Tuple = (1, 1024)"},{name:"seed",val:": int = 0"},{name:"dtype",val:": dtype = <class 'jax._src.numpy.lax_numpy.float32'>"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/wav2vec2/modeling_flax_wav2vec2.py#L1301",parametersDescription:[{anchor:"transformers.FlaxWav2Vec2ForPreTraining.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2Config">Wav2Vec2Config</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"},{anchor:"transformers.FlaxWav2Vec2ForPreTraining.dtype",description:`<strong>dtype</strong> (<code>jax.numpy.dtype</code>, <em>optional</em>, defaults to <code>jax.numpy.float32</code>) &#x2014; The data type of the computation. Can be one of <code>jax.numpy.float32</code>, <code>jax.numpy.float16</code> (on GPUs) and <code>jax.numpy.bfloat16</code> (on TPUs).</p> <p>This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If specified all the computation will be performed with the given <code>dtype</code>.</p> <p><strong>Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.</strong></p> <p>If you wish to change the dtype of the model parameters, see <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.to_fp16">to_fp16()</a> and <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.to_bf16">to_bf16()</a>.`,name:"dtype"}]}}),Vr=new V({props:{name:"__call__",anchor:"transformers.FlaxWav2Vec2ForPreTraining.__call__",parameters:[{name:"input_values",val:""},{name:"attention_mask",val:" = None"},{name:"mask_time_indices",val:" = None"},{name:"gumbel_temperature",val:": int = 1"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"},{name:"gumbel_rng",val:": PRNGKey = None"},{name:"train",val:": bool = False"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"freeze_feature_encoder",val:": bool = False"},{name:"return_dict",val:": typing.Optional[bool] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/wav2vec2/modeling_flax_wav2vec2.py#L1304",parametersDescription:[{anchor:"transformers.FlaxWav2Vec2ForPreTraining.__call__.input_values",description:`<strong>input_values</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Float values of input raw speech waveform. Values can be obtained by loading a <em>.flac</em> or <em>.wav</em> audio file into an array of type <em>List[float]</em> or a <em>numpy.ndarray</em>, <em>e.g.</em> via the soundfile library (<em>pip install soundfile</em>). To prepare the array into <em>input_values</em>, the <a href="/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor">Wav2Vec2Processor</a> should be used for padding and conversion into a tensor of type <em>jnp.ndarray</em>. See <a href="/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor.__call__">Wav2Vec2Processor.<strong>call</strong>()</a> for details.`,name:"input_values"},{anchor:"transformers.FlaxWav2Vec2ForPreTraining.__call__.attention_mask",description:`<strong>attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing convolution and attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a> .. 
warning:: <code>attention_mask</code> should only be passed if the corresponding processor has <code>config.return_attention_mask == True</code>. For all models whose processor has <code>config.return_attention_mask == False</code>, such as <a href="https://huggingface.co/facebook/wav2vec2-base-960h" rel="nofollow">wav2vec2-base</a>, <code>attention_mask</code> should <strong>not</strong> be passed to avoid degraded performance when doing batched inference. For such models <code>input_values</code> should simply be padded with 0 and passed without <code>attention_mask</code>. Be aware that these models also yield slightly different results depending on whether <code>input_values</code> is padded or not.`,name:"attention_mask"},{anchor:"transformers.FlaxWav2Vec2ForPreTraining.__call__.mask_time_indices",description:`<strong>mask_time_indices</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices to mask extracted features for contrastive loss. When in training mode, model learns to predict masked extracted features in <em>config.proj_codevector_dim</em> space.`,name:"mask_time_indices"},{anchor:"transformers.FlaxWav2Vec2ForPreTraining.__call__.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.FlaxWav2Vec2ForPreTraining.__call__.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FlaxWav2Vec2ForPreTraining.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.models.wav2vec2.modeling_flax_wav2vec2.FlaxWav2Vec2ForPreTrainingOutput" >transformers.models.wav2vec2.modeling_flax_wav2vec2.FlaxWav2Vec2ForPreTrainingOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<code>&lt;class 'transformers.models.wav2vec2.configuration_wav2vec2.Wav2Vec2Config'&gt;</code>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<em>optional</em>, returned when model is in train mode, <code>jnp.ndarray</code> of shape <code>(1,)</code>) \u2014 Total loss as the sum of the contrastive loss (L_m) and the diversity loss (L_d) as stated in the <a href="https://arxiv.org/pdf/2006.11477.pdf" rel="nofollow" >official paper</a> . 
(classification) loss.</p> </li> <li> <p><strong>projected_states</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, config.proj_codevector_dim)</code>) \u2014 Hidden-states of the model projected to <em>config.proj_codevector_dim</em> that can be used to predict the masked projected quantized states.</p> </li> <li> <p><strong>projected_quantized_states</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, config.proj_codevector_dim)</code>) \u2014 Quantized extracted feature vectors projected to <em>config.proj_codevector_dim</em> representing the positive target vectors for contrastive loss.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.models.wav2vec2.modeling_flax_wav2vec2.FlaxWav2Vec2ForPreTrainingOutput" >transformers.models.wav2vec2.modeling_flax_wav2vec2.FlaxWav2Vec2ForPreTrainingOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),wa=new fe({props:{$$slots:{default:[AT]},$$scope:{ctx:E}}}),Fr=new ue({props:{code:`import optax import numpy as np import jax.numpy as jnp from transformers import Wav2Vec2FeatureExtractor, FlaxWav2Vec2ForPreTraining from transformers.models.wav2vec2.modeling_flax_wav2vec2 import _compute_mask_indices from datasets import load_dataset import soundfile as sf feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("facebook/wav2vec2-large-lv60") model = FlaxWav2Vec2ForPreTraining.from_pretrained("facebook/wav2vec2-large-lv60") def map_to_array(batch): speech, _ = sf.read(batch["file"]) batch["speech"] = speech return batch ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") ds = ds.map(map_to_array) input_values = feature_extractor(ds["speech"][0], return_tensors="np").input_values # Batch size 1 # compute masked indices batch_size, raw_sequence_length = input_values.shape sequence_length = model._get_feat_extract_output_lengths(raw_sequence_length) mask_time_indices = _compute_mask_indices((batch_size, sequence_length), mask_prob=0.2, mask_length=2) outputs = model(input_values, mask_time_indices=mask_time_indices) # compute cosine similarity between predicted (=projected_states) and target (=projected_quantized_states) cosine_sim = optax.cosine_similarity(outputs.projected_states, outputs.projected_quantized_states) # show that cosine similarity is much higher than random assert np.asarray(cosine_sim)[mask_time_indices].mean() > 0.5`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> 
optax <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> numpy <span class="hljs-keyword">as</span> np <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> jax.numpy <span class="hljs-keyword">as</span> jnp <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Wav2Vec2FeatureExtractor, FlaxWav2Vec2ForPreTraining <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers.models.wav2vec2.modeling_flax_wav2vec2 <span class="hljs-keyword">import</span> _compute_mask_indices <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> soundfile <span class="hljs-keyword">as</span> sf <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(<span class="hljs-string">&quot;facebook/wav2vec2-large-lv60&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxWav2Vec2ForPreTraining.from_pretrained(<span class="hljs-string">&quot;facebook/wav2vec2-large-lv60&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">def</span> <span class="hljs-title function_">map_to_array</span>(<span class="hljs-params">batch</span>): <span class="hljs-meta">... </span> speech, _ = sf.read(batch[<span class="hljs-string">&quot;file&quot;</span>]) <span class="hljs-meta">... </span> batch[<span class="hljs-string">&quot;speech&quot;</span>] = speech <span class="hljs-meta">... </span> <span class="hljs-keyword">return</span> batch <span class="hljs-meta">&gt;&gt;&gt; </span>ds = load_dataset(<span class="hljs-string">&quot;hf-internal-testing/librispeech_asr_dummy&quot;</span>, <span class="hljs-string">&quot;clean&quot;</span>, split=<span class="hljs-string">&quot;validation&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>ds = ds.<span class="hljs-built_in">map</span>(map_to_array) <span class="hljs-meta">&gt;&gt;&gt; </span>input_values = feature_extractor(ds[<span class="hljs-string">&quot;speech&quot;</span>][<span class="hljs-number">0</span>], return_tensors=<span class="hljs-string">&quot;np&quot;</span>).input_values <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># compute masked indices</span> <span class="hljs-meta">&gt;&gt;&gt; </span>batch_size, raw_sequence_length = input_values.shape <span class="hljs-meta">&gt;&gt;&gt; </span>sequence_length = model._get_feat_extract_output_lengths(raw_sequence_length) <span class="hljs-meta">&gt;&gt;&gt; </span>mask_time_indices = _compute_mask_indices((batch_size, sequence_length), mask_prob=<span class="hljs-number">0.2</span>, mask_length=<span class="hljs-number">2</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_values, mask_time_indices=mask_time_indices) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># compute cosine similarity between predicted (=projected_states) and target (=projected_quantized_states)</span> <span class="hljs-meta">&gt;&gt;&gt; </span>cosine_sim = optax.cosine_similarity(outputs.projected_states, outputs.projected_quantized_states) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># show that cosine similarity is much higher than random</span> <span 
class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">assert</span> np.asarray(cosine_sim)[mask_time_indices].mean() &gt; <span class="hljs-number">0.5</span>`}}),{c(){p=a("meta"),x=l(),f=a("h1"),T=a("a"),W=a("span"),u(k.$$.fragment),y=l(),j=a("span"),O=r("Wav2Vec2"),q=l(),F=a("h2"),A=a("a"),L=a("span"),u(X.$$.fragment),S=l(),C=a("span"),Ve=r("Overview"),ge=l(),B=a("p"),H=r("The Wav2Vec2 model was proposed in "),_e=a("a"),Ge=r("wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations"),M=r(" by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli."),I=l(),Fe=a("p"),ve=r("The abstract from the paper is the following:"),Ye=l(),Ee=a("p"),oe=a("em"),ct=r(`We show for the first time that learning powerful representations from speech audio alone followed by fine-tuning on transcribed speech can outperform the best semi-supervised methods while being conceptually simpler. wav2vec 2.0 masks the speech input in the latent space and solves a contrastive task defined over a quantization of the latent representations which are jointly learned. Experiments using all labeled data of Librispeech achieve 1.8/3.3 WER on the clean/other test sets. When lowering the amount of labeled data to one hour, wav2vec 2.0 outperforms the previous state of the art on the 100 hour subset while using 100 times less labeled data. Using just ten minutes of labeled data and pre-training on 53k hours of unlabeled data still achieves 4.8/8.2 WER. This demonstrates the feasibility of speech recognition with limited amounts of labeled data.`),et=l(),D=a("p"),dt=r("Tips:"),de=l(),pe=a("ul"),Je=a("li"),we=r("Wav2Vec2 is a speech model that accepts a float array corresponding to the raw waveform of the speech signal."),pt=l(),be=a("li"),ae=r(`Wav2Vec2 model was trained using connectionist temporal classification (CTC) so the model output has to be decoded using `),Pe=a("a"),ye=r("Wav2Vec2CTCTokenizer"),mt=r("."),$=l(),P=a("p"),Ze=r("This model was contributed by "),Ke=a("a"),Bt=r("patrickvonplaten"),se=r("."),xt=l(),ke=a("h2"),Ce=a("a"),G=a("span"),u(J.$$.fragment),Ut=l(),ht=a("span"),Te=r("Wav2Vec2Config"),Wt=l(),U=a("div"),u(xe.$$.fragment),Ht=l(),Rt=a("p"),lm=r("This is the configuration class to store the configuration of a "),Mr=a("a"),cm=r("Wav2Vec2Model"),dm=r(`. It is used to instantiate an Wav2Vec2 model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Wav2Vec2 `),Fa=a("a"),pm=r("facebook/wav2vec2-base-960h"),mm=r(" architecture."),hm=l(),Xt=a("p"),fm=r("Configuration objects inherit from "),zr=a("a"),um=r("PretrainedConfig"),gm=r(` and can be used to control the model outputs. Read the documentation from `),Ar=a("a"),_m=r("PretrainedConfig"),vm=r(" for more information."),wm=l(),Xi=a("p"),bm=r("Example:"),ym=l(),u(Ea.$$.fragment),sd=l(),Gt=a("h2"),Fo=a("a"),Gi=a("span"),u(Pa.$$.fragment),km=l(),Ji=a("span"),Tm=r("Wav2Vec2CTCTokenizer"),nd=l(),Z=a("div"),u(Ca.$$.fragment),xm=l(),Zi=a("p"),Wm=r("Constructs a Wav2Vec2CTC tokenizer."),$m=l(),qa=a("p"),jm=r("This tokenizer inherits from "),Lr=a("a"),Vm=r("PreTrainedTokenizer"),Fm=r(` which contains some of the main methods. 
Users should refer to the superclass for more information regarding such methods.`),Em=l(),Eo=a("div"),u(Ma.$$.fragment),Pm=l(),Ki=a("p"),Cm=r(`Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of sequences.`),qm=l(),Dr=a("div"),u(za.$$.fragment),Mm=l(),qe=a("div"),u(Aa.$$.fragment),zm=l(),Qi=a("p"),Am=r(`Converts a sequence of ids in a string, using the tokenizer and vocabulary with options to remove special tokens and clean up tokenization spaces.`),Lm=l(),La=a("p"),Dm=r("Similar to doing "),Yi=a("code"),Om=r("self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))"),Sm=r("."),Nm=l(),el=a("p"),Im=r("Example:"),Bm=l(),u(Da.$$.fragment),Um=l(),Po=a("div"),u(Oa.$$.fragment),Hm=l(),tl=a("p"),Rm=r("Convert a list of lists of token ids into a list of strings by calling decode."),rd=l(),Jt=a("h2"),Co=a("a"),ol=a("span"),u(Sa.$$.fragment),Xm=l(),al=a("span"),Gm=r("Wav2Vec2FeatureExtractor"),id=l(),Qe=a("div"),u(Na.$$.fragment),Jm=l(),sl=a("p"),Zm=r("Constructs a Wav2Vec2 feature extractor."),Km=l(),Ia=a("p"),Qm=r("This feature extractor inherits from "),Or=a("a"),Ym=r("SequenceFeatureExtractor"),eh=r(` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),th=l(),qo=a("div"),u(Ba.$$.fragment),oh=l(),nl=a("p"),ah=r("Main method to featurize and prepare for the model one or several sequence(s). sequences."),ld=l(),Zt=a("h2"),Mo=a("a"),rl=a("span"),u(Ua.$$.fragment),sh=l(),il=a("span"),nh=r("Wav2Vec2Processor"),cd=l(),z=a("div"),u(Ha.$$.fragment),rh=l(),ll=a("p"),ih=r(`Constructs a Wav2Vec2 processor which wraps a Wav2Vec2 feature extractor and a Wav2Vec2 CTC tokenizer into a single processor.`),lh=l(),Me=a("p"),Sr=a("a"),ch=r("Wav2Vec2Processor"),dh=r(" offers all the functionalities of "),Nr=a("a"),ph=r("Wav2Vec2FeatureExtractor"),mh=r(" and "),Ir=a("a"),hh=r("PreTrainedTokenizer"),fh=r(`. See the docstring of `),Ra=a("a"),cl=a("strong"),uh=r("call"),gh=r("()"),_h=r(" and "),Br=a("a"),vh=r("decode()"),wh=r(" for more information."),bh=l(),zo=a("div"),u(Xa.$$.fragment),yh=l(),ft=a("p"),kh=r(`When used in normal mode, this method forwards all its arguments to Wav2Vec2FeatureExtractor\u2019s `),Ga=a("a"),dl=a("strong"),Th=r("call"),xh=r("()"),Wh=r(` and returns its output. If used in the context `),Ur=a("a"),$h=r("as_target_processor()"),jh=r(` this method forwards all its arguments to PreTrainedTokenizer\u2019s `),Ja=a("a"),pl=a("strong"),Vh=r("call"),Fh=r("()"),Eh=r(". Please refer to the docstring of the above two methods for more information."),Ph=l(),Ao=a("div"),u(Za.$$.fragment),Ch=l(),ut=a("p"),qh=r(`When used in normal mode, this method forwards all its arguments to Wav2Vec2FeatureExtractor\u2019s `),Hr=a("a"),Mh=r("pad()"),zh=r(` and returns its output. If used in the context `),Rr=a("a"),Ah=r("as_target_processor()"),Lh=r(` this method forwards all its arguments to PreTrainedTokenizer\u2019s `),Xr=a("a"),Dh=r("pad()"),Oh=r(". 
Please refer to the docstring of the above two methods for more information."),Sh=l(),Gr=a("div"),u(Ka.$$.fragment),Nh=l(),$t=a("div"),u(Qa.$$.fragment),Ih=l(),Ya=a("p"),Bh=r(`Saves the attributes of this processor (feature extractor, tokenizer\u2026) in the specified directory so that it can be reloaded using the `),Jr=a("a"),Uh=r("from_pretrained()"),Hh=r(" method."),Rh=l(),u(Lo.$$.fragment),Xh=l(),Do=a("div"),u(es.$$.fragment),Gh=l(),ts=a("p"),Jh=r("This method forwards all its arguments to PreTrainedTokenizer\u2019s "),Zr=a("a"),Zh=r("batch_decode()"),Kh=r(`. Please refer to the docstring of this method for more information.`),Qh=l(),Oo=a("div"),u(os.$$.fragment),Yh=l(),as=a("p"),ef=r("This method forwards all its arguments to PreTrainedTokenizer\u2019s "),Kr=a("a"),tf=r("decode()"),of=r(`. Please refer to the docstring of this method for more information.`),af=l(),So=a("div"),u(ss.$$.fragment),sf=l(),ml=a("p"),nf=r(`Temporarily sets the tokenizer for processing the input. Useful for encoding the labels when fine-tuning Wav2Vec2.`),dd=l(),Kt=a("h2"),No=a("a"),hl=a("span"),u(ns.$$.fragment),rf=l(),fl=a("span"),lf=r("Wav2Vec2ProcessorWithLM"),pd=l(),N=a("div"),u(rs.$$.fragment),cf=l(),ul=a("p"),df=r(`Constructs a Wav2Vec2 processor which wraps a Wav2Vec2 feature extractor, a Wav2Vec2 CTC tokenizer and a decoder with language model support into a single processor for language model boosted speech recognition decoding.`),pf=l(),Io=a("div"),u(is.$$.fragment),mf=l(),gt=a("p"),hf=r(`When used in normal mode, this method forwards all its arguments to Wav2Vec2FeatureExtractor\u2019s `),ls=a("a"),gl=a("strong"),ff=r("call"),uf=r("()"),gf=r(` and returns its output. If used in the context `),Qr=a("a"),_f=r("as_target_processor()"),vf=r(` this method forwards all its arguments to Wav2Vec2CTCTokenizer\u2019s `),cs=a("a"),_l=a("strong"),wf=r("call"),bf=r("()"),yf=r(`. Please refer to the docstring of the above two methods for more information.`),kf=l(),Bo=a("div"),u(ds.$$.fragment),Tf=l(),_t=a("p"),xf=r(`When used in normal mode, this method forwards all its arguments to Wav2Vec2FeatureExtractor\u2019s `),Yr=a("a"),Wf=r("pad()"),$f=r(` and returns its output. If used in the context `),ei=a("a"),jf=r("as_target_processor()"),Vf=r(` this method forwards all its arguments to Wav2Vec2CTCTokenizer\u2019s `),ti=a("a"),Ff=r("pad()"),Ef=r(`. Please refer to the docstring of the above two methods for more information.`),Pf=l(),jt=a("div"),u(ps.$$.fragment),Cf=l(),ms=a("p"),qf=r("Instantiate a "),oi=a("a"),Mf=r("Wav2Vec2ProcessorWithLM"),zf=r(" from a pretrained Wav2Vec2 processor."),Af=l(),u(Uo.$$.fragment),Lf=l(),ai=a("div"),u(hs.$$.fragment),Df=l(),Vt=a("div"),u(fs.$$.fragment),Of=l(),vl=a("p"),Sf=r("Batch decode output logits to audio transcription with language model support."),Nf=l(),u(Ho.$$.fragment),If=l(),tt=a("div"),u(us.$$.fragment),Bf=l(),wl=a("p"),Uf=r("Decode output logits to audio transcription with language model support."),Hf=l(),bl=a("p"),Rf=r("Example:"),Xf=l(),u(gs.$$.fragment),Gf=l(),Ro=a("div"),u(_s.$$.fragment),Jf=l(),yl=a("p"),Zf=r(`Temporarily sets the tokenizer for processing the input. 
Useful for encoding the labels when fine-tuning Wav2Vec2.`),md=l(),Qt=a("h2"),Xo=a("a"),kl=a("span"),u(vs.$$.fragment),Kf=l(),Tl=a("span"),Qf=r("Wav2Vec2 specific outputs"),hd=l(),Yt=a("div"),u(ws.$$.fragment),Yf=l(),bs=a("p"),eu=r("Output type of "),xl=a("code"),tu=r("Wav2Vec2DecoderWithLM"),ou=r(", with transcription."),fd=l(),eo=a("div"),u(ys.$$.fragment),au=l(),ks=a("p"),su=r("Output type of "),Wl=a("code"),nu=r("Wav2Vec2BaseModelOutput"),ru=r(", with potential hidden states and attentions."),ud=l(),to=a("div"),u(Ts.$$.fragment),iu=l(),xs=a("p"),lu=r("Output type of "),si=a("a"),cu=r("Wav2Vec2ForPreTraining"),du=r(", with potential hidden states and attentions."),gd=l(),vt=a("div"),u(Ws.$$.fragment),pu=l(),$s=a("p"),mu=r("Output type of "),$l=a("code"),hu=r("FlaxWav2Vec2BaseModelOutput"),fu=r(", with potential hidden states and attentions."),uu=l(),Go=a("div"),u(js.$$.fragment),gu=l(),jl=a("p"),_u=r("\u201CReturns a new object replacing the specified fields with new values."),_d=l(),wt=a("div"),u(Vs.$$.fragment),vu=l(),Fs=a("p"),wu=r("Output type of "),Vl=a("code"),bu=r("FlaxWav2Vec2ForPreTrainingOutput"),yu=r(", with potential hidden states and attentions."),ku=l(),Jo=a("div"),u(Es.$$.fragment),Tu=l(),Fl=a("p"),xu=r("\u201CReturns a new object replacing the specified fields with new values."),vd=l(),oo=a("h2"),Zo=a("a"),El=a("span"),u(Ps.$$.fragment),Wu=l(),Pl=a("span"),$u=r("Wav2Vec2Model"),wd=l(),We=a("div"),u(Cs.$$.fragment),ju=l(),qs=a("p"),Vu=r(`The bare Wav2Vec2 Model transformer outputting raw hidden-states without any specific head on top. Wav2Vec2 was proposed in `),Ms=a("a"),Fu=r(`wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations`),Eu=r(` by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli.`),Pu=l(),zs=a("p"),Cu=r("This model inherits from "),ni=a("a"),qu=r("PreTrainedModel"),Mu=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.).`),zu=l(),As=a("p"),Au=r("This model is a PyTorch "),Ls=a("a"),Lu=r("torch.nn.Module"),Du=r(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Ou=l(),ze=a("div"),u(Ds.$$.fragment),Su=l(),ao=a("p"),Nu=r("The "),ri=a("a"),Iu=r("Wav2Vec2Model"),Bu=r(" forward method, overrides the "),Cl=a("code"),Uu=r("__call__"),Hu=r(" special method."),Ru=l(),u(Ko.$$.fragment),Xu=l(),ql=a("p"),Gu=r("Example:"),Ju=l(),u(Os.$$.fragment),bd=l(),so=a("h2"),Qo=a("a"),Ml=a("span"),u(Ss.$$.fragment),Zu=l(),zl=a("span"),Ku=r("Wav2Vec2ForCTC"),yd=l(),$e=a("div"),u(Ns.$$.fragment),Qu=l(),no=a("p"),Yu=r("Wav2Vec2 Model with a "),Al=a("code"),eg=r("language modeling"),tg=r(` head on top for Connectionist Temporal Classification (CTC). Wav2Vec2 was proposed in `),Is=a("a"),og=r(`wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations`),ag=r(` by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli.`),sg=l(),Bs=a("p"),ng=r("This model inherits from "),ii=a("a"),rg=r("PreTrainedModel"),ig=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.).`),lg=l(),Us=a("p"),cg=r("This model is a PyTorch "),Hs=a("a"),dg=r("torch.nn.Module"),pg=r(` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),mg=l(),me=a("div"),u(Rs.$$.fragment),hg=l(),ro=a("p"),fg=r("The "),li=a("a"),ug=r("Wav2Vec2ForCTC"),gg=r(" forward method, overrides the "),Ll=a("code"),_g=r("__call__"),vg=r(" special method."),wg=l(),u(Yo.$$.fragment),bg=l(),Dl=a("p"),yg=r("Example:"),kg=l(),u(Xs.$$.fragment),Tg=l(),u(Gs.$$.fragment),kd=l(),io=a("h2"),ea=a("a"),Ol=a("span"),u(Js.$$.fragment),xg=l(),Sl=a("span"),Wg=r("Wav2Vec2ForSequenceClassification"),Td=l(),ne=a("div"),u(Zs.$$.fragment),$g=l(),Nl=a("p"),jg=r(`Wav2Vec2 Model with a sequence classification head on top (a linear layer over the pooled output) for tasks like SUPERB Keyword Spotting.`),Vg=l(),Ks=a("p"),Fg=r("Wav2Vec2 was proposed in "),Qs=a("a"),Eg=r(`wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations`),Pg=r(` by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli.`),Cg=l(),Ys=a("p"),qg=r("This model inherits from "),ci=a("a"),Mg=r("PreTrainedModel"),zg=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.).`),Ag=l(),en=a("p"),Lg=r("This model is a PyTorch "),tn=a("a"),Dg=r("torch.nn.Module"),Og=r(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Sg=l(),he=a("div"),u(on.$$.fragment),Ng=l(),lo=a("p"),Ig=r("The "),di=a("a"),Bg=r("Wav2Vec2ForSequenceClassification"),Ug=r(" forward method, overrides the "),Il=a("code"),Hg=r("__call__"),Rg=r(" special method."),Xg=l(),u(ta.$$.fragment),Gg=l(),Bl=a("p"),Jg=r("Example:"),Zg=l(),u(an.$$.fragment),Kg=l(),u(sn.$$.fragment),xd=l(),co=a("h2"),oa=a("a"),Ul=a("span"),u(nn.$$.fragment),Qg=l(),Hl=a("span"),Yg=r("Wav2Vec2ForAudioFrameClassification"),Wd=l(),re=a("div"),u(rn.$$.fragment),e_=l(),Rl=a("p"),t_=r("Wav2Vec2 Model with a frame classification head on top for tasks like Speaker Diarization."),o_=l(),ln=a("p"),a_=r("Wav2Vec2 was proposed in "),cn=a("a"),s_=r(`wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations`),n_=r(` by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli.`),r_=l(),dn=a("p"),i_=r("This model inherits from "),pi=a("a"),l_=r("PreTrainedModel"),c_=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.).`),d_=l(),pn=a("p"),p_=r("This model is a PyTorch "),mn=a("a"),m_=r("torch.nn.Module"),h_=r(` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),f_=l(),Ae=a("div"),u(hn.$$.fragment),u_=l(),po=a("p"),g_=r("The "),mi=a("a"),__=r("Wav2Vec2ForAudioFrameClassification"),v_=r(" forward method, overrides the "),Xl=a("code"),w_=r("__call__"),b_=r(" special method."),y_=l(),u(aa.$$.fragment),k_=l(),Gl=a("p"),T_=r("Example:"),x_=l(),u(fn.$$.fragment),$d=l(),mo=a("h2"),sa=a("a"),Jl=a("span"),u(un.$$.fragment),W_=l(),Zl=a("span"),$_=r("Wav2Vec2ForXVector"),jd=l(),ie=a("div"),u(gn.$$.fragment),j_=l(),Kl=a("p"),V_=r("Wav2Vec2 Model with an XVector feature extraction head on top for tasks like Speaker Verification."),F_=l(),_n=a("p"),E_=r("Wav2Vec2 was proposed in "),vn=a("a"),P_=r(`wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations`),C_=r(` by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli.`),q_=l(),wn=a("p"),M_=r("This model inherits from "),hi=a("a"),z_=r("PreTrainedModel"),A_=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.).`),L_=l(),bn=a("p"),D_=r("This model is a PyTorch "),yn=a("a"),O_=r("torch.nn.Module"),S_=r(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),N_=l(),Le=a("div"),u(kn.$$.fragment),I_=l(),ho=a("p"),B_=r("The "),fi=a("a"),U_=r("Wav2Vec2ForXVector"),H_=r(" forward method, overrides the "),Ql=a("code"),R_=r("__call__"),X_=r(" special method."),G_=l(),u(na.$$.fragment),J_=l(),Yl=a("p"),Z_=r("Example:"),K_=l(),u(Tn.$$.fragment),Vd=l(),fo=a("h2"),ra=a("a"),ec=a("span"),u(xn.$$.fragment),Q_=l(),tc=a("span"),Y_=r("Wav2Vec2ForPreTraining"),Fd=l(),je=a("div"),u(Wn.$$.fragment),ev=l(),uo=a("p"),tv=r("Wav2Vec2 Model with a quantizer and "),oc=a("code"),ov=r("VQ"),av=r(` head on top. Wav2Vec2 was proposed in `),$n=a("a"),sv=r(`wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations`),nv=r(` by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli.`),rv=l(),jn=a("p"),iv=r("This model inherits from "),ui=a("a"),lv=r("PreTrainedModel"),cv=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.).`),dv=l(),Vn=a("p"),pv=r("This model is a PyTorch "),Fn=a("a"),mv=r("torch.nn.Module"),hv=r(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),fv=l(),De=a("div"),u(En.$$.fragment),uv=l(),go=a("p"),gv=r("The "),gi=a("a"),_v=r("Wav2Vec2ForPreTraining"),vv=r(" forward method, overrides the "),ac=a("code"),wv=r("__call__"),bv=r(" special method."),yv=l(),u(ia.$$.fragment),kv=l(),sc=a("p"),Tv=r("Example:"),xv=l(),u(Pn.$$.fragment),Ed=l(),_o=a("h2"),la=a("a"),nc=a("span"),u(Cn.$$.fragment),Wv=l(),rc=a("span"),$v=r("TFWav2Vec2Model"),Pd=l(),le=a("div"),u(qn.$$.fragment),jv=l(),ic=a("p"),Vv=r("The bare TFWav2Vec2 Model transformer outputing raw hidden-states without any specific head on top."),Fv=l(),Mn=a("p"),Ev=r("This model inherits from "),_i=a("a"),Pv=r("TFPreTrainedModel"),Cv=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),qv=l(),zn=a("p"),Mv=r("This model is also a "),An=a("a"),zv=r("tf.keras.Model"),Av=r(` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Lv=l(),u(ca.$$.fragment),Dv=l(),Oe=a("div"),u(Ln.$$.fragment),Ov=l(),vo=a("p"),Sv=r("The "),vi=a("a"),Nv=r("TFWav2Vec2Model"),Iv=r(" forward method, overrides the "),lc=a("code"),Bv=r("__call__"),Uv=r(" special method."),Hv=l(),u(da.$$.fragment),Rv=l(),cc=a("p"),Xv=r("Example:"),Gv=l(),u(Dn.$$.fragment),Cd=l(),wo=a("h2"),pa=a("a"),dc=a("span"),u(On.$$.fragment),Jv=l(),pc=a("span"),Zv=r("TFWav2Vec2ForCTC"),qd=l(),ce=a("div"),u(Sn.$$.fragment),Kv=l(),Nn=a("p"),Qv=r("TFWav2Vec2 Model with a "),mc=a("code"),Yv=r("language modeling"),e2=r(" head on top for Connectionist Temporal Classification (CTC)."),t2=l(),In=a("p"),o2=r("This model inherits from "),wi=a("a"),a2=r("TFPreTrainedModel"),s2=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),n2=l(),Bn=a("p"),r2=r("This model is also a "),Un=a("a"),i2=r("tf.keras.Model"),l2=r(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),c2=l(),u(ma.$$.fragment),d2=l(),Se=a("div"),u(Hn.$$.fragment),p2=l(),bo=a("p"),m2=r("The "),bi=a("a"),h2=r("TFWav2Vec2ForCTC"),f2=r(" forward method, overrides the "),hc=a("code"),u2=r("__call__"),g2=r(" special method."),_2=l(),u(ha.$$.fragment),v2=l(),fc=a("p"),w2=r("Example:"),b2=l(),u(Rn.$$.fragment),Md=l(),yo=a("h2"),fa=a("a"),uc=a("span"),u(Xn.$$.fragment),y2=l(),gc=a("span"),k2=r("FlaxWav2Vec2Model"),zd=l(),K=a("div"),u(Gn.$$.fragment),T2=l(),Jn=a("p"),x2=r(`The bare Wav2Vec2 Model transformer outputting raw hidden-states without any specific head on top. Wav2Vec2 was proposed in `),Zn=a("a"),W2=r(`wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations`),$2=r(` by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli.`),j2=l(),Kn=a("p"),V2=r("This model inherits from "),yi=a("a"),F2=r("FlaxPreTrainedModel"),E2=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),P2=l(),Qn=a("p"),C2=r(`This model is also a Flax Linen `),Yn=a("a"),q2=r("flax.nn.Module"),M2=r(` subclass. Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.`),z2=l(),_c=a("p"),A2=r("Finally, this model supports inherent JAX features such as:"),L2=l(),bt=a("ul"),vc=a("li"),er=a("a"),D2=r("Just-In-Time (JIT) compilation"),O2=l(),wc=a("li"),tr=a("a"),S2=r("Automatic Differentiation"),N2=l(),bc=a("li"),or=a("a"),I2=r("Vectorization"),B2=l(),yc=a("li"),ar=a("a"),U2=r("Parallelization"),H2=l(),Ne=a("div"),u(sr.$$.fragment),R2=l(),ko=a("p"),X2=r("The "),kc=a("code"),G2=r("FlaxWav2Vec2PreTrainedModel"),J2=r("forward method, overrides the "),Tc=a("code"),Z2=r("__call__"),K2=r(" special method."),Q2=l(),u(ua.$$.fragment),Y2=l(),xc=a("p"),ew=r("Example:"),tw=l(),u(nr.$$.fragment),Ad=l(),To=a("h2"),ga=a("a"),Wc=a("span"),u(rr.$$.fragment),ow=l(),$c=a("span"),aw=r("FlaxWav2Vec2ForCTC"),Ld=l(),Q=a("div"),u(ir.$$.fragment),sw=l(),xo=a("p"),nw=r("Wav2Vec2 Model with a "),jc=a("code"),rw=r("language modeling"),iw=r(` head on top for Connectionist Temporal Classification (CTC). 
Wav2Vec2 was proposed in `),lr=a("a"),lw=r(`wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations`),cw=r(` by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli.`),dw=l(),cr=a("p"),pw=r("This model inherits from "),ki=a("a"),mw=r("FlaxPreTrainedModel"),hw=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),fw=l(),dr=a("p"),uw=r(`This model is also a Flax Linen `),pr=a("a"),gw=r("flax.nn.Module"),_w=r(` subclass. Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.`),vw=l(),Vc=a("p"),ww=r("Finally, this model supports inherent JAX features such as:"),bw=l(),yt=a("ul"),Fc=a("li"),mr=a("a"),yw=r("Just-In-Time (JIT) compilation"),kw=l(),Ec=a("li"),hr=a("a"),Tw=r("Automatic Differentiation"),xw=l(),Pc=a("li"),fr=a("a"),Ww=r("Vectorization"),$w=l(),Cc=a("li"),ur=a("a"),jw=r("Parallelization"),Vw=l(),Ie=a("div"),u(gr.$$.fragment),Fw=l(),Wo=a("p"),Ew=r("The "),qc=a("code"),Pw=r("FlaxWav2Vec2PreTrainedModel"),Cw=r("forward method, overrides the "),Mc=a("code"),qw=r("__call__"),Mw=r(" special method."),zw=l(),u(_a.$$.fragment),Aw=l(),zc=a("p"),Lw=r("Example:"),Dw=l(),u(_r.$$.fragment),Dd=l(),$o=a("h2"),va=a("a"),Ac=a("span"),u(vr.$$.fragment),Ow=l(),Lc=a("span"),Sw=r("FlaxWav2Vec2ForPreTraining"),Od=l(),Y=a("div"),u(wr.$$.fragment),Nw=l(),jo=a("p"),Iw=r("Wav2Vec2 Model with a quantizer and "),Dc=a("code"),Bw=r("VQ"),Uw=r(` head on top. Wav2Vec2 was proposed in `),br=a("a"),Hw=r(`wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations`),Rw=r(` by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli.`),Xw=l(),yr=a("p"),Gw=r("This model inherits from "),Ti=a("a"),Jw=r("FlaxPreTrainedModel"),Zw=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Kw=l(),kr=a("p"),Qw=r(`This model is also a Flax Linen `),Tr=a("a"),Yw=r("flax.nn.Module"),eb=r(` subclass. 
Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.`),tb=l(),Oc=a("p"),ob=r("Finally, this model supports inherent JAX features such as:"),ab=l(),kt=a("ul"),Sc=a("li"),xr=a("a"),sb=r("Just-In-Time (JIT) compilation"),nb=l(),Nc=a("li"),Wr=a("a"),rb=r("Automatic Differentiation"),ib=l(),Ic=a("li"),$r=a("a"),lb=r("Vectorization"),cb=l(),Bc=a("li"),jr=a("a"),db=r("Parallelization"),pb=l(),Be=a("div"),u(Vr.$$.fragment),mb=l(),Vo=a("p"),hb=r("The "),xi=a("a"),fb=r("FlaxWav2Vec2ForPreTraining"),ub=r(" forward method, overrides the "),Uc=a("code"),gb=r("__call__"),_b=r(" special method."),vb=l(),u(wa.$$.fragment),wb=l(),Hc=a("p"),bb=r("Example:"),yb=l(),u(Fr.$$.fragment),this.h()},l(o){const m=bT('[data-svelte="svelte-1phssyn"]',document.head);p=s(m,"META",{name:!0,content:!0}),m.forEach(t),x=c(o),f=s(o,"H1",{class:!0});var Er=n(f);T=s(Er,"A",{id:!0,class:!0,href:!0});var Rc=n(T);W=s(Rc,"SPAN",{});var Xc=n(W);g(k.$$.fragment,Xc),Xc.forEach(t),Rc.forEach(t),y=c(Er),j=s(Er,"SPAN",{});var Gc=n(j);O=i(Gc,"Wav2Vec2"),Gc.forEach(t),Er.forEach(t),q=c(o),F=s(o,"H2",{class:!0});var Pr=n(F);A=s(Pr,"A",{id:!0,class:!0,href:!0});var Jc=n(A);L=s(Jc,"SPAN",{});var Zc=n(L);g(X.$$.fragment,Zc),Zc.forEach(t),Jc.forEach(t),S=c(Pr),C=s(Pr,"SPAN",{});var Kc=n(C);Ve=i(Kc,"Overview"),Kc.forEach(t),Pr.forEach(t),ge=c(o),B=s(o,"P",{});var Cr=n(B);H=i(Cr,"The Wav2Vec2 model was proposed in "),_e=s(Cr,"A",{href:!0,rel:!0});var Qc=n(_e);Ge=i(Qc,"wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations"),Qc.forEach(t),M=i(Cr," by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli."),Cr.forEach(t),I=c(o),Fe=s(o,"P",{});var Yc=n(Fe);ve=i(Yc,"The abstract from the paper is the following:"),Yc.forEach(t),Ye=c(o),Ee=s(o,"P",{});var ed=n(Ee);oe=s(ed,"EM",{});var td=n(oe);ct=i(td,`We show for the first time that learning powerful representations from speech audio alone followed by fine-tuning on transcribed speech can outperform the best semi-supervised methods while being conceptually simpler. wav2vec 2.0 masks the speech input in the latent space and solves a contrastive task defined over a quantization of the latent representations which are jointly learned. Experiments using all labeled data of Librispeech achieve 1.8/3.3 WER on the clean/other test sets. When lowering the amount of labeled data to one hour, wav2vec 2.0 outperforms the previous state of the art on the 100 hour subset while using 100 times less labeled data. Using just ten minutes of labeled data and pre-training on 53k hours of unlabeled data still achieves 4.8/8.2 WER. 
This demonstrates the feasibility of speech recognition with limited amounts of labeled data.`),td.forEach(t),ed.forEach(t),et=c(o),D=s(o,"P",{});var od=n(D);dt=i(od,"Tips:"),od.forEach(t),de=c(o),pe=s(o,"UL",{});var qr=n(pe);Je=s(qr,"LI",{});var ad=n(Je);we=i(ad,"Wav2Vec2 is a speech model that accepts a float array corresponding to the raw waveform of the speech signal."),ad.forEach(t),pt=c(qr),be=s(qr,"LI",{});var Nd=n(be);ae=i(Nd,`Wav2Vec2 model was trained using connectionist temporal classification (CTC) so the model output has to be decoded using `),Pe=s(Nd,"A",{href:!0});var jb=n(Pe);ye=i(jb,"Wav2Vec2CTCTokenizer"),jb.forEach(t),mt=i(Nd,"."),Nd.forEach(t),qr.forEach(t),$=c(o),P=s(o,"P",{});var Id=n(P);Ze=i(Id,"This model was contributed by "),Ke=s(Id,"A",{href:!0,rel:!0});var Vb=n(Ke);Bt=i(Vb,"patrickvonplaten"),Vb.forEach(t),se=i(Id,"."),Id.forEach(t),xt=c(o),ke=s(o,"H2",{class:!0});var Bd=n(ke);Ce=s(Bd,"A",{id:!0,class:!0,href:!0});var Fb=n(Ce);G=s(Fb,"SPAN",{});var Eb=n(G);g(J.$$.fragment,Eb),Eb.forEach(t),Fb.forEach(t),Ut=c(Bd),ht=s(Bd,"SPAN",{});var Pb=n(ht);Te=i(Pb,"Wav2Vec2Config"),Pb.forEach(t),Bd.forEach(t),Wt=c(o),U=s(o,"DIV",{class:!0});var Ft=n(U);g(xe.$$.fragment,Ft),Ht=c(Ft),Rt=s(Ft,"P",{});var Wi=n(Rt);lm=i(Wi,"This is the configuration class to store the configuration of a "),Mr=s(Wi,"A",{href:!0});var Cb=n(Mr);cm=i(Cb,"Wav2Vec2Model"),Cb.forEach(t),dm=i(Wi,`. It is used to instantiate an Wav2Vec2 model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Wav2Vec2 `),Fa=s(Wi,"A",{href:!0,rel:!0});var qb=n(Fa);pm=i(qb,"facebook/wav2vec2-base-960h"),qb.forEach(t),mm=i(Wi," architecture."),Wi.forEach(t),hm=c(Ft),Xt=s(Ft,"P",{});var $i=n(Xt);fm=i($i,"Configuration objects inherit from "),zr=s($i,"A",{href:!0});var Mb=n(zr);um=i(Mb,"PretrainedConfig"),Mb.forEach(t),gm=i($i,` and can be used to control the model outputs. Read the documentation from `),Ar=s($i,"A",{href:!0});var zb=n(Ar);_m=i(zb,"PretrainedConfig"),zb.forEach(t),vm=i($i," for more information."),$i.forEach(t),wm=c(Ft),Xi=s(Ft,"P",{});var Ab=n(Xi);bm=i(Ab,"Example:"),Ab.forEach(t),ym=c(Ft),g(Ea.$$.fragment,Ft),Ft.forEach(t),sd=c(o),Gt=s(o,"H2",{class:!0});var Ud=n(Gt);Fo=s(Ud,"A",{id:!0,class:!0,href:!0});var Lb=n(Fo);Gi=s(Lb,"SPAN",{});var Db=n(Gi);g(Pa.$$.fragment,Db),Db.forEach(t),Lb.forEach(t),km=c(Ud),Ji=s(Ud,"SPAN",{});var Ob=n(Ji);Tm=i(Ob,"Wav2Vec2CTCTokenizer"),Ob.forEach(t),Ud.forEach(t),nd=c(o),Z=s(o,"DIV",{class:!0});var Ue=n(Z);g(Ca.$$.fragment,Ue),xm=c(Ue),Zi=s(Ue,"P",{});var Sb=n(Zi);Wm=i(Sb,"Constructs a Wav2Vec2CTC tokenizer."),Sb.forEach(t),$m=c(Ue),qa=s(Ue,"P",{});var Hd=n(qa);jm=i(Hd,"This tokenizer inherits from "),Lr=s(Hd,"A",{href:!0});var Nb=n(Lr);Vm=i(Nb,"PreTrainedTokenizer"),Nb.forEach(t),Fm=i(Hd,` which contains some of the main methods. 
Users should refer to the superclass for more information regarding such methods.`),Hd.forEach(t),Em=c(Ue),Eo=s(Ue,"DIV",{class:!0});var Rd=n(Eo);g(Ma.$$.fragment,Rd),Pm=c(Rd),Ki=s(Rd,"P",{});var Ib=n(Ki);Cm=i(Ib,`Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of sequences.`),Ib.forEach(t),Rd.forEach(t),qm=c(Ue),Dr=s(Ue,"DIV",{class:!0});var Bb=n(Dr);g(za.$$.fragment,Bb),Bb.forEach(t),Mm=c(Ue),qe=s(Ue,"DIV",{class:!0});var Et=n(qe);g(Aa.$$.fragment,Et),zm=c(Et),Qi=s(Et,"P",{});var Ub=n(Qi);Am=i(Ub,`Converts a sequence of ids in a string, using the tokenizer and vocabulary with options to remove special tokens and clean up tokenization spaces.`),Ub.forEach(t),Lm=c(Et),La=s(Et,"P",{});var Xd=n(La);Dm=i(Xd,"Similar to doing "),Yi=s(Xd,"CODE",{});var Hb=n(Yi);Om=i(Hb,"self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))"),Hb.forEach(t),Sm=i(Xd,"."),Xd.forEach(t),Nm=c(Et),el=s(Et,"P",{});var Rb=n(el);Im=i(Rb,"Example:"),Rb.forEach(t),Bm=c(Et),g(Da.$$.fragment,Et),Et.forEach(t),Um=c(Ue),Po=s(Ue,"DIV",{class:!0});var Gd=n(Po);g(Oa.$$.fragment,Gd),Hm=c(Gd),tl=s(Gd,"P",{});var Xb=n(tl);Rm=i(Xb,"Convert a list of lists of token ids into a list of strings by calling decode."),Xb.forEach(t),Gd.forEach(t),Ue.forEach(t),rd=c(o),Jt=s(o,"H2",{class:!0});var Jd=n(Jt);Co=s(Jd,"A",{id:!0,class:!0,href:!0});var Gb=n(Co);ol=s(Gb,"SPAN",{});var Jb=n(ol);g(Sa.$$.fragment,Jb),Jb.forEach(t),Gb.forEach(t),Xm=c(Jd),al=s(Jd,"SPAN",{});var Zb=n(al);Gm=i(Zb,"Wav2Vec2FeatureExtractor"),Zb.forEach(t),Jd.forEach(t),id=c(o),Qe=s(o,"DIV",{class:!0});var ba=n(Qe);g(Na.$$.fragment,ba),Jm=c(ba),sl=s(ba,"P",{});var Kb=n(sl);Zm=i(Kb,"Constructs a Wav2Vec2 feature extractor."),Kb.forEach(t),Km=c(ba),Ia=s(ba,"P",{});var Zd=n(Ia);Qm=i(Zd,"This feature extractor inherits from "),Or=s(Zd,"A",{href:!0});var Qb=n(Or);Ym=i(Qb,"SequenceFeatureExtractor"),Qb.forEach(t),eh=i(Zd,` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),Zd.forEach(t),th=c(ba),qo=s(ba,"DIV",{class:!0});var Kd=n(qo);g(Ba.$$.fragment,Kd),oh=c(Kd),nl=s(Kd,"P",{});var Yb=n(nl);ah=i(Yb,"Main method to featurize and prepare for the model one or several sequence(s). sequences."),Yb.forEach(t),Kd.forEach(t),ba.forEach(t),ld=c(o),Zt=s(o,"H2",{class:!0});var Qd=n(Zt);Mo=s(Qd,"A",{id:!0,class:!0,href:!0});var e1=n(Mo);rl=s(e1,"SPAN",{});var t1=n(rl);g(Ua.$$.fragment,t1),t1.forEach(t),e1.forEach(t),sh=c(Qd),il=s(Qd,"SPAN",{});var o1=n(il);nh=i(o1,"Wav2Vec2Processor"),o1.forEach(t),Qd.forEach(t),cd=c(o),z=s(o,"DIV",{class:!0});var R=n(z);g(Ha.$$.fragment,R),rh=c(R),ll=s(R,"P",{});var a1=n(ll);ih=i(a1,`Constructs a Wav2Vec2 processor which wraps a Wav2Vec2 feature extractor and a Wav2Vec2 CTC tokenizer into a single processor.`),a1.forEach(t),lh=c(R),Me=s(R,"P",{});var Tt=n(Me);Sr=s(Tt,"A",{href:!0});var s1=n(Sr);ch=i(s1,"Wav2Vec2Processor"),s1.forEach(t),dh=i(Tt," offers all the functionalities of "),Nr=s(Tt,"A",{href:!0});var n1=n(Nr);ph=i(n1,"Wav2Vec2FeatureExtractor"),n1.forEach(t),mh=i(Tt," and "),Ir=s(Tt,"A",{href:!0});var r1=n(Ir);hh=i(r1,"PreTrainedTokenizer"),r1.forEach(t),fh=i(Tt,`. 
See the docstring of `),Ra=s(Tt,"A",{href:!0});var kb=n(Ra);cl=s(kb,"STRONG",{});var i1=n(cl);uh=i(i1,"call"),i1.forEach(t),gh=i(kb,"()"),kb.forEach(t),_h=i(Tt," and "),Br=s(Tt,"A",{href:!0});var l1=n(Br);vh=i(l1,"decode()"),l1.forEach(t),wh=i(Tt," for more information."),Tt.forEach(t),bh=c(R),zo=s(R,"DIV",{class:!0});var Yd=n(zo);g(Xa.$$.fragment,Yd),yh=c(Yd),ft=s(Yd,"P",{});var ya=n(ft);kh=i(ya,`When used in normal mode, this method forwards all its arguments to Wav2Vec2FeatureExtractor\u2019s `),Ga=s(ya,"A",{href:!0});var Tb=n(Ga);dl=s(Tb,"STRONG",{});var c1=n(dl);Th=i(c1,"call"),c1.forEach(t),xh=i(Tb,"()"),Tb.forEach(t),Wh=i(ya,` and returns its output. If used in the context `),Ur=s(ya,"A",{href:!0});var d1=n(Ur);$h=i(d1,"as_target_processor()"),d1.forEach(t),jh=i(ya,` this method forwards all its arguments to PreTrainedTokenizer\u2019s `),Ja=s(ya,"A",{href:!0});var xb=n(Ja);pl=s(xb,"STRONG",{});var p1=n(pl);Vh=i(p1,"call"),p1.forEach(t),Fh=i(xb,"()"),xb.forEach(t),Eh=i(ya,". Please refer to the docstring of the above two methods for more information."),ya.forEach(t),Yd.forEach(t),Ph=c(R),Ao=s(R,"DIV",{class:!0});var ep=n(Ao);g(Za.$$.fragment,ep),Ch=c(ep),ut=s(ep,"P",{});var ka=n(ut);qh=i(ka,`When used in normal mode, this method forwards all its arguments to Wav2Vec2FeatureExtractor\u2019s `),Hr=s(ka,"A",{href:!0});var m1=n(Hr);Mh=i(m1,"pad()"),m1.forEach(t),zh=i(ka,` and returns its output. If used in the context `),Rr=s(ka,"A",{href:!0});var h1=n(Rr);Ah=i(h1,"as_target_processor()"),h1.forEach(t),Lh=i(ka,` this method forwards all its arguments to PreTrainedTokenizer\u2019s `),Xr=s(ka,"A",{href:!0});var f1=n(Xr);Dh=i(f1,"pad()"),f1.forEach(t),Oh=i(ka,". Please refer to the docstring of the above two methods for more information."),ka.forEach(t),ep.forEach(t),Sh=c(R),Gr=s(R,"DIV",{class:!0});var u1=n(Gr);g(Ka.$$.fragment,u1),u1.forEach(t),Nh=c(R),$t=s(R,"DIV",{class:!0});var ji=n($t);g(Qa.$$.fragment,ji),Ih=c(ji),Ya=s(ji,"P",{});var tp=n(Ya);Bh=i(tp,`Saves the attributes of this processor (feature extractor, tokenizer\u2026) in the specified directory so that it can be reloaded using the `),Jr=s(tp,"A",{href:!0});var g1=n(Jr);Uh=i(g1,"from_pretrained()"),g1.forEach(t),Hh=i(tp," method."),tp.forEach(t),Rh=c(ji),g(Lo.$$.fragment,ji),ji.forEach(t),Xh=c(R),Do=s(R,"DIV",{class:!0});var op=n(Do);g(es.$$.fragment,op),Gh=c(op),ts=s(op,"P",{});var ap=n(ts);Jh=i(ap,"This method forwards all its arguments to PreTrainedTokenizer\u2019s "),Zr=s(ap,"A",{href:!0});var _1=n(Zr);Zh=i(_1,"batch_decode()"),_1.forEach(t),Kh=i(ap,`. Please refer to the docstring of this method for more information.`),ap.forEach(t),op.forEach(t),Qh=c(R),Oo=s(R,"DIV",{class:!0});var sp=n(Oo);g(os.$$.fragment,sp),Yh=c(sp),as=s(sp,"P",{});var np=n(as);ef=i(np,"This method forwards all its arguments to PreTrainedTokenizer\u2019s "),Kr=s(np,"A",{href:!0});var v1=n(Kr);tf=i(v1,"decode()"),v1.forEach(t),of=i(np,`. Please refer to the docstring of this method for more information.`),np.forEach(t),sp.forEach(t),af=c(R),So=s(R,"DIV",{class:!0});var rp=n(So);g(ss.$$.fragment,rp),sf=c(rp),ml=s(rp,"P",{});var w1=n(ml);nf=i(w1,`Temporarily sets the tokenizer for processing the input. 
Useful for encoding the labels when fine-tuning Wav2Vec2.`),w1.forEach(t),rp.forEach(t),R.forEach(t),dd=c(o),Kt=s(o,"H2",{class:!0});var ip=n(Kt);No=s(ip,"A",{id:!0,class:!0,href:!0});var b1=n(No);hl=s(b1,"SPAN",{});var y1=n(hl);g(ns.$$.fragment,y1),y1.forEach(t),b1.forEach(t),rf=c(ip),fl=s(ip,"SPAN",{});var k1=n(fl);lf=i(k1,"Wav2Vec2ProcessorWithLM"),k1.forEach(t),ip.forEach(t),pd=c(o),N=s(o,"DIV",{class:!0});var te=n(N);g(rs.$$.fragment,te),cf=c(te),ul=s(te,"P",{});var T1=n(ul);df=i(T1,`Constructs a Wav2Vec2 processor which wraps a Wav2Vec2 feature extractor, a Wav2Vec2 CTC tokenizer and a decoder with language model support into a single processor for language model boosted speech recognition decoding.`),T1.forEach(t),pf=c(te),Io=s(te,"DIV",{class:!0});var lp=n(Io);g(is.$$.fragment,lp),mf=c(lp),gt=s(lp,"P",{});var Ta=n(gt);hf=i(Ta,`When used in normal mode, this method forwards all its arguments to Wav2Vec2FeatureExtractor\u2019s `),ls=s(Ta,"A",{href:!0});var Wb=n(ls);gl=s(Wb,"STRONG",{});var x1=n(gl);ff=i(x1,"call"),x1.forEach(t),uf=i(Wb,"()"),Wb.forEach(t),gf=i(Ta,` and returns its output. If used in the context `),Qr=s(Ta,"A",{href:!0});var W1=n(Qr);_f=i(W1,"as_target_processor()"),W1.forEach(t),vf=i(Ta,` this method forwards all its arguments to Wav2Vec2CTCTokenizer\u2019s `),cs=s(Ta,"A",{href:!0});var $b=n(cs);_l=s($b,"STRONG",{});var $1=n(_l);wf=i($1,"call"),$1.forEach(t),bf=i($b,"()"),$b.forEach(t),yf=i(Ta,`. Please refer to the docstring of the above two methods for more information.`),Ta.forEach(t),lp.forEach(t),kf=c(te),Bo=s(te,"DIV",{class:!0});var cp=n(Bo);g(ds.$$.fragment,cp),Tf=c(cp),_t=s(cp,"P",{});var xa=n(_t);xf=i(xa,`When used in normal mode, this method forwards all its arguments to Wav2Vec2FeatureExtractor\u2019s `),Yr=s(xa,"A",{href:!0});var j1=n(Yr);Wf=i(j1,"pad()"),j1.forEach(t),$f=i(xa,` and returns its output. If used in the context `),ei=s(xa,"A",{href:!0});var V1=n(ei);jf=i(V1,"as_target_processor()"),V1.forEach(t),Vf=i(xa,` this method forwards all its arguments to Wav2Vec2CTCTokenizer\u2019s `),ti=s(xa,"A",{href:!0});var F1=n(ti);Ff=i(F1,"pad()"),F1.forEach(t),Ef=i(xa,`. Please refer to the docstring of the above two methods for more information.`),xa.forEach(t),cp.forEach(t),Pf=c(te),jt=s(te,"DIV",{class:!0});var Vi=n(jt);g(ps.$$.fragment,Vi),Cf=c(Vi),ms=s(Vi,"P",{});var dp=n(ms);qf=i(dp,"Instantiate a "),oi=s(dp,"A",{href:!0});var E1=n(oi);Mf=i(E1,"Wav2Vec2ProcessorWithLM"),E1.forEach(t),zf=i(dp," from a pretrained Wav2Vec2 processor."),dp.forEach(t),Af=c(Vi),g(Uo.$$.fragment,Vi),Vi.forEach(t),Lf=c(te),ai=s(te,"DIV",{class:!0});var P1=n(ai);g(hs.$$.fragment,P1),P1.forEach(t),Df=c(te),Vt=s(te,"DIV",{class:!0});var Fi=n(Vt);g(fs.$$.fragment,Fi),Of=c(Fi),vl=s(Fi,"P",{});var C1=n(vl);Sf=i(C1,"Batch decode output logits to audio transcription with language model support."),C1.forEach(t),Nf=c(Fi),g(Ho.$$.fragment,Fi),Fi.forEach(t),If=c(te),tt=s(te,"DIV",{class:!0});var Wa=n(tt);g(us.$$.fragment,Wa),Bf=c(Wa),wl=s(Wa,"P",{});var q1=n(wl);Uf=i(q1,"Decode output logits to audio transcription with language model support."),q1.forEach(t),Hf=c(Wa),bl=s(Wa,"P",{});var M1=n(bl);Rf=i(M1,"Example:"),M1.forEach(t),Xf=c(Wa),g(gs.$$.fragment,Wa),Wa.forEach(t),Gf=c(te),Ro=s(te,"DIV",{class:!0});var pp=n(Ro);g(_s.$$.fragment,pp),Jf=c(pp),yl=s(pp,"P",{});var z1=n(yl);Zf=i(z1,`Temporarily sets the tokenizer for processing the input. 
Useful for encoding the labels when fine-tuning Wav2Vec2.`),z1.forEach(t),pp.forEach(t),te.forEach(t),md=c(o),Qt=s(o,"H2",{class:!0});var mp=n(Qt);Xo=s(mp,"A",{id:!0,class:!0,href:!0});var A1=n(Xo);kl=s(A1,"SPAN",{});var L1=n(kl);g(vs.$$.fragment,L1),L1.forEach(t),A1.forEach(t),Kf=c(mp),Tl=s(mp,"SPAN",{});var D1=n(Tl);Qf=i(D1,"Wav2Vec2 specific outputs"),D1.forEach(t),mp.forEach(t),hd=c(o),Yt=s(o,"DIV",{class:!0});var hp=n(Yt);g(ws.$$.fragment,hp),Yf=c(hp),bs=s(hp,"P",{});var fp=n(bs);eu=i(fp,"Output type of "),xl=s(fp,"CODE",{});var O1=n(xl);tu=i(O1,"Wav2Vec2DecoderWithLM"),O1.forEach(t),ou=i(fp,", with transcription."),fp.forEach(t),hp.forEach(t),fd=c(o),eo=s(o,"DIV",{class:!0});var up=n(eo);g(ys.$$.fragment,up),au=c(up),ks=s(up,"P",{});var gp=n(ks);su=i(gp,"Output type of "),Wl=s(gp,"CODE",{});var S1=n(Wl);nu=i(S1,"Wav2Vec2BaseModelOutput"),S1.forEach(t),ru=i(gp,", with potential hidden states and attentions."),gp.forEach(t),up.forEach(t),ud=c(o),to=s(o,"DIV",{class:!0});var _p=n(to);g(Ts.$$.fragment,_p),iu=c(_p),xs=s(_p,"P",{});var vp=n(xs);lu=i(vp,"Output type of "),si=s(vp,"A",{href:!0});var N1=n(si);cu=i(N1,"Wav2Vec2ForPreTraining"),N1.forEach(t),du=i(vp,", with potential hidden states and attentions."),vp.forEach(t),_p.forEach(t),gd=c(o),vt=s(o,"DIV",{class:!0});var Ei=n(vt);g(Ws.$$.fragment,Ei),pu=c(Ei),$s=s(Ei,"P",{});var wp=n($s);mu=i(wp,"Output type of "),$l=s(wp,"CODE",{});var I1=n($l);hu=i(I1,"FlaxWav2Vec2BaseModelOutput"),I1.forEach(t),fu=i(wp,", with potential hidden states and attentions."),wp.forEach(t),uu=c(Ei),Go=s(Ei,"DIV",{class:!0});var bp=n(Go);g(js.$$.fragment,bp),gu=c(bp),jl=s(bp,"P",{});var B1=n(jl);_u=i(B1,"\u201CReturns a new object replacing the specified fields with new values."),B1.forEach(t),bp.forEach(t),Ei.forEach(t),_d=c(o),wt=s(o,"DIV",{class:!0});var Pi=n(wt);g(Vs.$$.fragment,Pi),vu=c(Pi),Fs=s(Pi,"P",{});var yp=n(Fs);wu=i(yp,"Output type of "),Vl=s(yp,"CODE",{});var U1=n(Vl);bu=i(U1,"FlaxWav2Vec2ForPreTrainingOutput"),U1.forEach(t),yu=i(yp,", with potential hidden states and attentions."),yp.forEach(t),ku=c(Pi),Jo=s(Pi,"DIV",{class:!0});var kp=n(Jo);g(Es.$$.fragment,kp),Tu=c(kp),Fl=s(kp,"P",{});var H1=n(Fl);xu=i(H1,"\u201CReturns a new object replacing the specified fields with new values."),H1.forEach(t),kp.forEach(t),Pi.forEach(t),vd=c(o),oo=s(o,"H2",{class:!0});var Tp=n(oo);Zo=s(Tp,"A",{id:!0,class:!0,href:!0});var R1=n(Zo);El=s(R1,"SPAN",{});var X1=n(El);g(Ps.$$.fragment,X1),X1.forEach(t),R1.forEach(t),Wu=c(Tp),Pl=s(Tp,"SPAN",{});var G1=n(Pl);$u=i(G1,"Wav2Vec2Model"),G1.forEach(t),Tp.forEach(t),wd=c(o),We=s(o,"DIV",{class:!0});var Pt=n(We);g(Cs.$$.fragment,Pt),ju=c(Pt),qs=s(Pt,"P",{});var xp=n(qs);Vu=i(xp,`The bare Wav2Vec2 Model transformer outputting raw hidden-states without any specific head on top. Wav2Vec2 was proposed in `),Ms=s(xp,"A",{href:!0,rel:!0});var J1=n(Ms);Fu=i(J1,`wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations`),J1.forEach(t),Eu=i(xp,` by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli.`),xp.forEach(t),Pu=c(Pt),zs=s(Pt,"P",{});var Wp=n(zs);Cu=i(Wp,"This model inherits from "),ni=s(Wp,"A",{href:!0});var Z1=n(ni);qu=i(Z1,"PreTrainedModel"),Z1.forEach(t),Mu=i(Wp,`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.).`),Wp.forEach(t),zu=c(Pt),As=s(Pt,"P",{});var $p=n(As);Au=i($p,"This model is a PyTorch "),Ls=s($p,"A",{href:!0,rel:!0});var K1=n(Ls);Lu=i(K1,"torch.nn.Module"),K1.forEach(t),Du=i($p,` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),$p.forEach(t),Ou=c(Pt),ze=s(Pt,"DIV",{class:!0});var Ct=n(ze);g(Ds.$$.fragment,Ct),Su=c(Ct),ao=s(Ct,"P",{});var Ci=n(ao);Nu=i(Ci,"The "),ri=s(Ci,"A",{href:!0});var Q1=n(ri);Iu=i(Q1,"Wav2Vec2Model"),Q1.forEach(t),Bu=i(Ci," forward method, overrides the "),Cl=s(Ci,"CODE",{});var Y1=n(Cl);Uu=i(Y1,"__call__"),Y1.forEach(t),Hu=i(Ci," special method."),Ci.forEach(t),Ru=c(Ct),g(Ko.$$.fragment,Ct),Xu=c(Ct),ql=s(Ct,"P",{});var ey=n(ql);Gu=i(ey,"Example:"),ey.forEach(t),Ju=c(Ct),g(Os.$$.fragment,Ct),Ct.forEach(t),Pt.forEach(t),bd=c(o),so=s(o,"H2",{class:!0});var jp=n(so);Qo=s(jp,"A",{id:!0,class:!0,href:!0});var ty=n(Qo);Ml=s(ty,"SPAN",{});var oy=n(Ml);g(Ss.$$.fragment,oy),oy.forEach(t),ty.forEach(t),Zu=c(jp),zl=s(jp,"SPAN",{});var ay=n(zl);Ku=i(ay,"Wav2Vec2ForCTC"),ay.forEach(t),jp.forEach(t),yd=c(o),$e=s(o,"DIV",{class:!0});var qt=n($e);g(Ns.$$.fragment,qt),Qu=c(qt),no=s(qt,"P",{});var qi=n(no);Yu=i(qi,"Wav2Vec2 Model with a "),Al=s(qi,"CODE",{});var sy=n(Al);eg=i(sy,"language modeling"),sy.forEach(t),tg=i(qi,` head on top for Connectionist Temporal Classification (CTC). Wav2Vec2 was proposed in `),Is=s(qi,"A",{href:!0,rel:!0});var ny=n(Is);og=i(ny,`wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations`),ny.forEach(t),ag=i(qi,` by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli.`),qi.forEach(t),sg=c(qt),Bs=s(qt,"P",{});var Vp=n(Bs);ng=i(Vp,"This model inherits from "),ii=s(Vp,"A",{href:!0});var ry=n(ii);rg=i(ry,"PreTrainedModel"),ry.forEach(t),ig=i(Vp,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.).`),Vp.forEach(t),lg=c(qt),Us=s(qt,"P",{});var Fp=n(Us);cg=i(Fp,"This model is a PyTorch "),Hs=s(Fp,"A",{href:!0,rel:!0});var iy=n(Hs);dg=i(iy,"torch.nn.Module"),iy.forEach(t),pg=i(Fp,` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Fp.forEach(t),mg=c(qt),me=s(qt,"DIV",{class:!0});var ot=n(me);g(Rs.$$.fragment,ot),hg=c(ot),ro=s(ot,"P",{});var Mi=n(ro);fg=i(Mi,"The "),li=s(Mi,"A",{href:!0});var ly=n(li);ug=i(ly,"Wav2Vec2ForCTC"),ly.forEach(t),gg=i(Mi," forward method, overrides the "),Ll=s(Mi,"CODE",{});var cy=n(Ll);_g=i(cy,"__call__"),cy.forEach(t),vg=i(Mi," special method."),Mi.forEach(t),wg=c(ot),g(Yo.$$.fragment,ot),bg=c(ot),Dl=s(ot,"P",{});var dy=n(Dl);yg=i(dy,"Example:"),dy.forEach(t),kg=c(ot),g(Xs.$$.fragment,ot),Tg=c(ot),g(Gs.$$.fragment,ot),ot.forEach(t),qt.forEach(t),kd=c(o),io=s(o,"H2",{class:!0});var Ep=n(io);ea=s(Ep,"A",{id:!0,class:!0,href:!0});var py=n(ea);Ol=s(py,"SPAN",{});var my=n(Ol);g(Js.$$.fragment,my),my.forEach(t),py.forEach(t),xg=c(Ep),Sl=s(Ep,"SPAN",{});var hy=n(Sl);Wg=i(hy,"Wav2Vec2ForSequenceClassification"),hy.forEach(t),Ep.forEach(t),Td=c(o),ne=s(o,"DIV",{class:!0});var at=n(ne);g(Zs.$$.fragment,at),$g=c(at),Nl=s(at,"P",{});var fy=n(Nl);jg=i(fy,`Wav2Vec2 Model with a sequence classification head on top (a linear layer over the pooled output) for tasks like SUPERB Keyword Spotting.`),fy.forEach(t),Vg=c(at),Ks=s(at,"P",{});var Pp=n(Ks);Fg=i(Pp,"Wav2Vec2 was proposed in "),Qs=s(Pp,"A",{href:!0,rel:!0});var uy=n(Qs);Eg=i(uy,`wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations`),uy.forEach(t),Pg=i(Pp,` by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli.`),Pp.forEach(t),Cg=c(at),Ys=s(at,"P",{});var Cp=n(Ys);qg=i(Cp,"This model inherits from "),ci=s(Cp,"A",{href:!0});var gy=n(ci);Mg=i(gy,"PreTrainedModel"),gy.forEach(t),zg=i(Cp,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.).`),Cp.forEach(t),Ag=c(at),en=s(at,"P",{});var qp=n(en);Lg=i(qp,"This model is a PyTorch "),tn=s(qp,"A",{href:!0,rel:!0});var _y=n(tn);Dg=i(_y,"torch.nn.Module"),_y.forEach(t),Og=i(qp,` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),qp.forEach(t),Sg=c(at),he=s(at,"DIV",{class:!0});var st=n(he);g(on.$$.fragment,st),Ng=c(st),lo=s(st,"P",{});var zi=n(lo);Ig=i(zi,"The "),di=s(zi,"A",{href:!0});var vy=n(di);Bg=i(vy,"Wav2Vec2ForSequenceClassification"),vy.forEach(t),Ug=i(zi," forward method, overrides the "),Il=s(zi,"CODE",{});var wy=n(Il);Hg=i(wy,"__call__"),wy.forEach(t),Rg=i(zi," special method."),zi.forEach(t),Xg=c(st),g(ta.$$.fragment,st),Gg=c(st),Bl=s(st,"P",{});var by=n(Bl);Jg=i(by,"Example:"),by.forEach(t),Zg=c(st),g(an.$$.fragment,st),Kg=c(st),g(sn.$$.fragment,st),st.forEach(t),at.forEach(t),xd=c(o),co=s(o,"H2",{class:!0});var Mp=n(co);oa=s(Mp,"A",{id:!0,class:!0,href:!0});var yy=n(oa);Ul=s(yy,"SPAN",{});var ky=n(Ul);g(nn.$$.fragment,ky),ky.forEach(t),yy.forEach(t),Qg=c(Mp),Hl=s(Mp,"SPAN",{});var Ty=n(Hl);Yg=i(Ty,"Wav2Vec2ForAudioFrameClassification"),Ty.forEach(t),Mp.forEach(t),Wd=c(o),re=s(o,"DIV",{class:!0});var nt=n(re);g(rn.$$.fragment,nt),e_=c(nt),Rl=s(nt,"P",{});var xy=n(Rl);t_=i(xy,"Wav2Vec2 Model with a frame classification head on top for tasks like Speaker Diarization."),xy.forEach(t),o_=c(nt),ln=s(nt,"P",{});var zp=n(ln);a_=i(zp,"Wav2Vec2 was proposed in "),cn=s(zp,"A",{href:!0,rel:!0});var Wy=n(cn);s_=i(Wy,`wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations`),Wy.forEach(t),n_=i(zp,` by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli.`),zp.forEach(t),r_=c(nt),dn=s(nt,"P",{});var Ap=n(dn);i_=i(Ap,"This model inherits from "),pi=s(Ap,"A",{href:!0});var $y=n(pi);l_=i($y,"PreTrainedModel"),$y.forEach(t),c_=i(Ap,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.).`),Ap.forEach(t),d_=c(nt),pn=s(nt,"P",{});var Lp=n(pn);p_=i(Lp,"This model is a PyTorch "),mn=s(Lp,"A",{href:!0,rel:!0});var jy=n(mn);m_=i(jy,"torch.nn.Module"),jy.forEach(t),h_=i(Lp,` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Lp.forEach(t),f_=c(nt),Ae=s(nt,"DIV",{class:!0});var Mt=n(Ae);g(hn.$$.fragment,Mt),u_=c(Mt),po=s(Mt,"P",{});var Ai=n(po);g_=i(Ai,"The "),mi=s(Ai,"A",{href:!0});var Vy=n(mi);__=i(Vy,"Wav2Vec2ForAudioFrameClassification"),Vy.forEach(t),v_=i(Ai," forward method, overrides the "),Xl=s(Ai,"CODE",{});var Fy=n(Xl);w_=i(Fy,"__call__"),Fy.forEach(t),b_=i(Ai," special method."),Ai.forEach(t),y_=c(Mt),g(aa.$$.fragment,Mt),k_=c(Mt),Gl=s(Mt,"P",{});var Ey=n(Gl);T_=i(Ey,"Example:"),Ey.forEach(t),x_=c(Mt),g(fn.$$.fragment,Mt),Mt.forEach(t),nt.forEach(t),$d=c(o),mo=s(o,"H2",{class:!0});var Dp=n(mo);sa=s(Dp,"A",{id:!0,class:!0,href:!0});var Py=n(sa);Jl=s(Py,"SPAN",{});var Cy=n(Jl);g(un.$$.fragment,Cy),Cy.forEach(t),Py.forEach(t),W_=c(Dp),Zl=s(Dp,"SPAN",{});var qy=n(Zl);$_=i(qy,"Wav2Vec2ForXVector"),qy.forEach(t),Dp.forEach(t),jd=c(o),ie=s(o,"DIV",{class:!0});var rt=n(ie);g(gn.$$.fragment,rt),j_=c(rt),Kl=s(rt,"P",{});var My=n(Kl);V_=i(My,"Wav2Vec2 Model with an XVector feature extraction head on top for tasks like Speaker Verification."),My.forEach(t),F_=c(rt),_n=s(rt,"P",{});var Op=n(_n);E_=i(Op,"Wav2Vec2 was proposed in "),vn=s(Op,"A",{href:!0,rel:!0});var zy=n(vn);P_=i(zy,`wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations`),zy.forEach(t),C_=i(Op,` by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli.`),Op.forEach(t),q_=c(rt),wn=s(rt,"P",{});var Sp=n(wn);M_=i(Sp,"This model inherits from "),hi=s(Sp,"A",{href:!0});var Ay=n(hi);z_=i(Ay,"PreTrainedModel"),Ay.forEach(t),A_=i(Sp,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.).`),Sp.forEach(t),L_=c(rt),bn=s(rt,"P",{});var Np=n(bn);D_=i(Np,"This model is a PyTorch "),yn=s(Np,"A",{href:!0,rel:!0});var Ly=n(yn);O_=i(Ly,"torch.nn.Module"),Ly.forEach(t),S_=i(Np,` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Np.forEach(t),N_=c(rt),Le=s(rt,"DIV",{class:!0});var zt=n(Le);g(kn.$$.fragment,zt),I_=c(zt),ho=s(zt,"P",{});var Li=n(ho);B_=i(Li,"The "),fi=s(Li,"A",{href:!0});var Dy=n(fi);U_=i(Dy,"Wav2Vec2ForXVector"),Dy.forEach(t),H_=i(Li," forward method, overrides the "),Ql=s(Li,"CODE",{});var Oy=n(Ql);R_=i(Oy,"__call__"),Oy.forEach(t),X_=i(Li," special method."),Li.forEach(t),G_=c(zt),g(na.$$.fragment,zt),J_=c(zt),Yl=s(zt,"P",{});var Sy=n(Yl);Z_=i(Sy,"Example:"),Sy.forEach(t),K_=c(zt),g(Tn.$$.fragment,zt),zt.forEach(t),rt.forEach(t),Vd=c(o),fo=s(o,"H2",{class:!0});var Ip=n(fo);ra=s(Ip,"A",{id:!0,class:!0,href:!0});var Ny=n(ra);ec=s(Ny,"SPAN",{});var Iy=n(ec);g(xn.$$.fragment,Iy),Iy.forEach(t),Ny.forEach(t),Q_=c(Ip),tc=s(Ip,"SPAN",{});var By=n(tc);Y_=i(By,"Wav2Vec2ForPreTraining"),By.forEach(t),Ip.forEach(t),Fd=c(o),je=s(o,"DIV",{class:!0});var At=n(je);g(Wn.$$.fragment,At),ev=c(At),uo=s(At,"P",{});var Di=n(uo);tv=i(Di,"Wav2Vec2 Model with a quantizer and "),oc=s(Di,"CODE",{});var Uy=n(oc);ov=i(Uy,"VQ"),Uy.forEach(t),av=i(Di,` head on top. 
Wav2Vec2 was proposed in `),$n=s(Di,"A",{href:!0,rel:!0});var Hy=n($n);sv=i(Hy,`wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations`),Hy.forEach(t),nv=i(Di,` by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli.`),Di.forEach(t),rv=c(At),jn=s(At,"P",{});var Bp=n(jn);iv=i(Bp,"This model inherits from "),ui=s(Bp,"A",{href:!0});var Ry=n(ui);lv=i(Ry,"PreTrainedModel"),Ry.forEach(t),cv=i(Bp,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.).`),Bp.forEach(t),dv=c(At),Vn=s(At,"P",{});var Up=n(Vn);pv=i(Up,"This model is a PyTorch "),Fn=s(Up,"A",{href:!0,rel:!0});var Xy=n(Fn);mv=i(Xy,"torch.nn.Module"),Xy.forEach(t),hv=i(Up,` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Up.forEach(t),fv=c(At),De=s(At,"DIV",{class:!0});var Lt=n(De);g(En.$$.fragment,Lt),uv=c(Lt),go=s(Lt,"P",{});var Oi=n(go);gv=i(Oi,"The "),gi=s(Oi,"A",{href:!0});var Gy=n(gi);_v=i(Gy,"Wav2Vec2ForPreTraining"),Gy.forEach(t),vv=i(Oi," forward method, overrides the "),ac=s(Oi,"CODE",{});var Jy=n(ac);wv=i(Jy,"__call__"),Jy.forEach(t),bv=i(Oi," special method."),Oi.forEach(t),yv=c(Lt),g(ia.$$.fragment,Lt),kv=c(Lt),sc=s(Lt,"P",{});var Zy=n(sc);Tv=i(Zy,"Example:"),Zy.forEach(t),xv=c(Lt),g(Pn.$$.fragment,Lt),Lt.forEach(t),At.forEach(t),Ed=c(o),_o=s(o,"H2",{class:!0});var Hp=n(_o);la=s(Hp,"A",{id:!0,class:!0,href:!0});var Ky=n(la);nc=s(Ky,"SPAN",{});var Qy=n(nc);g(Cn.$$.fragment,Qy),Qy.forEach(t),Ky.forEach(t),Wv=c(Hp),rc=s(Hp,"SPAN",{});var Yy=n(rc);$v=i(Yy,"TFWav2Vec2Model"),Yy.forEach(t),Hp.forEach(t),Pd=c(o),le=s(o,"DIV",{class:!0});var it=n(le);g(qn.$$.fragment,it),jv=c(it),ic=s(it,"P",{});var ek=n(ic);Vv=i(ek,"The bare TFWav2Vec2 Model transformer outputing raw hidden-states without any specific head on top."),ek.forEach(t),Fv=c(it),Mn=s(it,"P",{});var Rp=n(Mn);Ev=i(Rp,"This model inherits from "),_i=s(Rp,"A",{href:!0});var tk=n(_i);Pv=i(tk,"TFPreTrainedModel"),tk.forEach(t),Cv=i(Rp,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Rp.forEach(t),qv=c(it),zn=s(it,"P",{});var Xp=n(zn);Mv=i(Xp,"This model is also a "),An=s(Xp,"A",{href:!0,rel:!0});var ok=n(An);zv=i(ok,"tf.keras.Model"),ok.forEach(t),Av=i(Xp,` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Xp.forEach(t),Lv=c(it),g(ca.$$.fragment,it),Dv=c(it),Oe=s(it,"DIV",{class:!0});var Dt=n(Oe);g(Ln.$$.fragment,Dt),Ov=c(Dt),vo=s(Dt,"P",{});var Si=n(vo);Sv=i(Si,"The "),vi=s(Si,"A",{href:!0});var ak=n(vi);Nv=i(ak,"TFWav2Vec2Model"),ak.forEach(t),Iv=i(Si," forward method, overrides the "),lc=s(Si,"CODE",{});var sk=n(lc);Bv=i(sk,"__call__"),sk.forEach(t),Uv=i(Si," special method."),Si.forEach(t),Hv=c(Dt),g(da.$$.fragment,Dt),Rv=c(Dt),cc=s(Dt,"P",{});var nk=n(cc);Xv=i(nk,"Example:"),nk.forEach(t),Gv=c(Dt),g(Dn.$$.fragment,Dt),Dt.forEach(t),it.forEach(t),Cd=c(o),wo=s(o,"H2",{class:!0});var Gp=n(wo);pa=s(Gp,"A",{id:!0,class:!0,href:!0});var rk=n(pa);dc=s(rk,"SPAN",{});var ik=n(dc);g(On.$$.fragment,ik),ik.forEach(t),rk.forEach(t),Jv=c(Gp),pc=s(Gp,"SPAN",{});var lk=n(pc);Zv=i(lk,"TFWav2Vec2ForCTC"),lk.forEach(t),Gp.forEach(t),qd=c(o),ce=s(o,"DIV",{class:!0});var lt=n(ce);g(Sn.$$.fragment,lt),Kv=c(lt),Nn=s(lt,"P",{});var Jp=n(Nn);Qv=i(Jp,"TFWav2Vec2 Model with a "),mc=s(Jp,"CODE",{});var ck=n(mc);Yv=i(ck,"language modeling"),ck.forEach(t),e2=i(Jp," head on top for Connectionist Temporal Classification (CTC)."),Jp.forEach(t),t2=c(lt),In=s(lt,"P",{});var Zp=n(In);o2=i(Zp,"This model inherits from "),wi=s(Zp,"A",{href:!0});var dk=n(wi);a2=i(dk,"TFPreTrainedModel"),dk.forEach(t),s2=i(Zp,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Zp.forEach(t),n2=c(lt),Bn=s(lt,"P",{});var Kp=n(Bn);r2=i(Kp,"This model is also a "),Un=s(Kp,"A",{href:!0,rel:!0});var pk=n(Un);i2=i(pk,"tf.keras.Model"),pk.forEach(t),l2=i(Kp,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Kp.forEach(t),c2=c(lt),g(ma.$$.fragment,lt),d2=c(lt),Se=s(lt,"DIV",{class:!0});var Ot=n(Se);g(Hn.$$.fragment,Ot),p2=c(Ot),bo=s(Ot,"P",{});var Ni=n(bo);m2=i(Ni,"The "),bi=s(Ni,"A",{href:!0});var mk=n(bi);h2=i(mk,"TFWav2Vec2ForCTC"),mk.forEach(t),f2=i(Ni," forward method, overrides the "),hc=s(Ni,"CODE",{});var hk=n(hc);u2=i(hk,"__call__"),hk.forEach(t),g2=i(Ni," special method."),Ni.forEach(t),_2=c(Ot),g(ha.$$.fragment,Ot),v2=c(Ot),fc=s(Ot,"P",{});var fk=n(fc);w2=i(fk,"Example:"),fk.forEach(t),b2=c(Ot),g(Rn.$$.fragment,Ot),Ot.forEach(t),lt.forEach(t),Md=c(o),yo=s(o,"H2",{class:!0});var Qp=n(yo);fa=s(Qp,"A",{id:!0,class:!0,href:!0});var uk=n(fa);uc=s(uk,"SPAN",{});var gk=n(uc);g(Xn.$$.fragment,gk),gk.forEach(t),uk.forEach(t),y2=c(Qp),gc=s(Qp,"SPAN",{});var _k=n(gc);k2=i(_k,"FlaxWav2Vec2Model"),_k.forEach(t),Qp.forEach(t),zd=c(o),K=s(o,"DIV",{class:!0});var He=n(K);g(Gn.$$.fragment,He),T2=c(He),Jn=s(He,"P",{});var Yp=n(Jn);x2=i(Yp,`The bare Wav2Vec2 Model transformer outputting raw hidden-states without any specific head on top. Wav2Vec2 was proposed in `),Zn=s(Yp,"A",{href:!0,rel:!0});var vk=n(Zn);W2=i(vk,`wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations`),vk.forEach(t),$2=i(Yp,` by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli.`),Yp.forEach(t),j2=c(He),Kn=s(He,"P",{});var em=n(Kn);V2=i(em,"This model inherits from "),yi=s(em,"A",{href:!0});var wk=n(yi);F2=i(wk,"FlaxPreTrainedModel"),wk.forEach(t),E2=i(em,`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),em.forEach(t),P2=c(He),Qn=s(He,"P",{});var tm=n(Qn);C2=i(tm,`This model is also a Flax Linen `),Yn=s(tm,"A",{href:!0,rel:!0});var bk=n(Yn);q2=i(bk,"flax.nn.Module"),bk.forEach(t),M2=i(tm,` subclass. Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.`),tm.forEach(t),z2=c(He),_c=s(He,"P",{});var yk=n(_c);A2=i(yk,"Finally, this model supports inherent JAX features such as:"),yk.forEach(t),L2=c(He),bt=s(He,"UL",{});var $a=n(bt);vc=s($a,"LI",{});var kk=n(vc);er=s(kk,"A",{href:!0,rel:!0});var Tk=n(er);D2=i(Tk,"Just-In-Time (JIT) compilation"),Tk.forEach(t),kk.forEach(t),O2=c($a),wc=s($a,"LI",{});var xk=n(wc);tr=s(xk,"A",{href:!0,rel:!0});var Wk=n(tr);S2=i(Wk,"Automatic Differentiation"),Wk.forEach(t),xk.forEach(t),N2=c($a),bc=s($a,"LI",{});var $k=n(bc);or=s($k,"A",{href:!0,rel:!0});var jk=n(or);I2=i(jk,"Vectorization"),jk.forEach(t),$k.forEach(t),B2=c($a),yc=s($a,"LI",{});var Vk=n(yc);ar=s(Vk,"A",{href:!0,rel:!0});var Fk=n(ar);U2=i(Fk,"Parallelization"),Fk.forEach(t),Vk.forEach(t),$a.forEach(t),H2=c(He),Ne=s(He,"DIV",{class:!0});var St=n(Ne);g(sr.$$.fragment,St),R2=c(St),ko=s(St,"P",{});var Ii=n(ko);X2=i(Ii,"The "),kc=s(Ii,"CODE",{});var Ek=n(kc);G2=i(Ek,"FlaxWav2Vec2PreTrainedModel"),Ek.forEach(t),J2=i(Ii,"forward method, overrides the "),Tc=s(Ii,"CODE",{});var Pk=n(Tc);Z2=i(Pk,"__call__"),Pk.forEach(t),K2=i(Ii," special method."),Ii.forEach(t),Q2=c(St),g(ua.$$.fragment,St),Y2=c(St),xc=s(St,"P",{});var Ck=n(xc);ew=i(Ck,"Example:"),Ck.forEach(t),tw=c(St),g(nr.$$.fragment,St),St.forEach(t),He.forEach(t),Ad=c(o),To=s(o,"H2",{class:!0});var om=n(To);ga=s(om,"A",{id:!0,class:!0,href:!0});var qk=n(ga);Wc=s(qk,"SPAN",{});var Mk=n(Wc);g(rr.$$.fragment,Mk),Mk.forEach(t),qk.forEach(t),ow=c(om),$c=s(om,"SPAN",{});var zk=n($c);aw=i(zk,"FlaxWav2Vec2ForCTC"),zk.forEach(t),om.forEach(t),Ld=c(o),Q=s(o,"DIV",{class:!0});var Re=n(Q);g(ir.$$.fragment,Re),sw=c(Re),xo=s(Re,"P",{});var Bi=n(xo);nw=i(Bi,"Wav2Vec2 Model with a "),jc=s(Bi,"CODE",{});var Ak=n(jc);rw=i(Ak,"language modeling"),Ak.forEach(t),iw=i(Bi,` head on top for Connectionist Temporal Classification (CTC). Wav2Vec2 was proposed in `),lr=s(Bi,"A",{href:!0,rel:!0});var Lk=n(lr);lw=i(Lk,`wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations`),Lk.forEach(t),cw=i(Bi,` by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli.`),Bi.forEach(t),dw=c(Re),cr=s(Re,"P",{});var am=n(cr);pw=i(am,"This model inherits from "),ki=s(am,"A",{href:!0});var Dk=n(ki);mw=i(Dk,"FlaxPreTrainedModel"),Dk.forEach(t),hw=i(am,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),am.forEach(t),fw=c(Re),dr=s(Re,"P",{});var sm=n(dr);uw=i(sm,`This model is also a Flax Linen `),pr=s(sm,"A",{href:!0,rel:!0});var Ok=n(pr);gw=i(Ok,"flax.nn.Module"),Ok.forEach(t),_w=i(sm,` subclass. 
Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.`),sm.forEach(t),vw=c(Re),Vc=s(Re,"P",{});var Sk=n(Vc);ww=i(Sk,"Finally, this model supports inherent JAX features such as:"),Sk.forEach(t),bw=c(Re),yt=s(Re,"UL",{});var ja=n(yt);Fc=s(ja,"LI",{});var Nk=n(Fc);mr=s(Nk,"A",{href:!0,rel:!0});var Ik=n(mr);yw=i(Ik,"Just-In-Time (JIT) compilation"),Ik.forEach(t),Nk.forEach(t),kw=c(ja),Ec=s(ja,"LI",{});var Bk=n(Ec);hr=s(Bk,"A",{href:!0,rel:!0});var Uk=n(hr);Tw=i(Uk,"Automatic Differentiation"),Uk.forEach(t),Bk.forEach(t),xw=c(ja),Pc=s(ja,"LI",{});var Hk=n(Pc);fr=s(Hk,"A",{href:!0,rel:!0});var Rk=n(fr);Ww=i(Rk,"Vectorization"),Rk.forEach(t),Hk.forEach(t),$w=c(ja),Cc=s(ja,"LI",{});var Xk=n(Cc);ur=s(Xk,"A",{href:!0,rel:!0});var Gk=n(ur);jw=i(Gk,"Parallelization"),Gk.forEach(t),Xk.forEach(t),ja.forEach(t),Vw=c(Re),Ie=s(Re,"DIV",{class:!0});var Nt=n(Ie);g(gr.$$.fragment,Nt),Fw=c(Nt),Wo=s(Nt,"P",{});var Ui=n(Wo);Ew=i(Ui,"The "),qc=s(Ui,"CODE",{});var Jk=n(qc);Pw=i(Jk,"FlaxWav2Vec2PreTrainedModel"),Jk.forEach(t),Cw=i(Ui,"forward method, overrides the "),Mc=s(Ui,"CODE",{});var Zk=n(Mc);qw=i(Zk,"__call__"),Zk.forEach(t),Mw=i(Ui," special method."),Ui.forEach(t),zw=c(Nt),g(_a.$$.fragment,Nt),Aw=c(Nt),zc=s(Nt,"P",{});var Kk=n(zc);Lw=i(Kk,"Example:"),Kk.forEach(t),Dw=c(Nt),g(_r.$$.fragment,Nt),Nt.forEach(t),Re.forEach(t),Dd=c(o),$o=s(o,"H2",{class:!0});var nm=n($o);va=s(nm,"A",{id:!0,class:!0,href:!0});var Qk=n(va);Ac=s(Qk,"SPAN",{});var Yk=n(Ac);g(vr.$$.fragment,Yk),Yk.forEach(t),Qk.forEach(t),Ow=c(nm),Lc=s(nm,"SPAN",{});var eT=n(Lc);Sw=i(eT,"FlaxWav2Vec2ForPreTraining"),eT.forEach(t),nm.forEach(t),Od=c(o),Y=s(o,"DIV",{class:!0});var Xe=n(Y);g(wr.$$.fragment,Xe),Nw=c(Xe),jo=s(Xe,"P",{});var Hi=n(jo);Iw=i(Hi,"Wav2Vec2 Model with a quantizer and "),Dc=s(Hi,"CODE",{});var tT=n(Dc);Bw=i(tT,"VQ"),tT.forEach(t),Uw=i(Hi,` head on top. Wav2Vec2 was proposed in `),br=s(Hi,"A",{href:!0,rel:!0});var oT=n(br);Hw=i(oT,`wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations`),oT.forEach(t),Rw=i(Hi,` by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli.`),Hi.forEach(t),Xw=c(Xe),yr=s(Xe,"P",{});var rm=n(yr);Gw=i(rm,"This model inherits from "),Ti=s(rm,"A",{href:!0});var aT=n(Ti);Jw=i(aT,"FlaxPreTrainedModel"),aT.forEach(t),Zw=i(rm,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),rm.forEach(t),Kw=c(Xe),kr=s(Xe,"P",{});var im=n(kr);Qw=i(im,`This model is also a Flax Linen `),Tr=s(im,"A",{href:!0,rel:!0});var sT=n(Tr);Yw=i(sT,"flax.nn.Module"),sT.forEach(t),eb=i(im,` subclass. 
Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.`),im.forEach(t),tb=c(Xe),Oc=s(Xe,"P",{});var nT=n(Oc);ob=i(nT,"Finally, this model supports inherent JAX features such as:"),nT.forEach(t),ab=c(Xe),kt=s(Xe,"UL",{});var Va=n(kt);Sc=s(Va,"LI",{});var rT=n(Sc);xr=s(rT,"A",{href:!0,rel:!0});var iT=n(xr);sb=i(iT,"Just-In-Time (JIT) compilation"),iT.forEach(t),rT.forEach(t),nb=c(Va),Nc=s(Va,"LI",{});var lT=n(Nc);Wr=s(lT,"A",{href:!0,rel:!0});var cT=n(Wr);rb=i(cT,"Automatic Differentiation"),cT.forEach(t),lT.forEach(t),ib=c(Va),Ic=s(Va,"LI",{});var dT=n(Ic);$r=s(dT,"A",{href:!0,rel:!0});var pT=n($r);lb=i(pT,"Vectorization"),pT.forEach(t),dT.forEach(t),cb=c(Va),Bc=s(Va,"LI",{});var mT=n(Bc);jr=s(mT,"A",{href:!0,rel:!0});var hT=n(jr);db=i(hT,"Parallelization"),hT.forEach(t),mT.forEach(t),Va.forEach(t),pb=c(Xe),Be=s(Xe,"DIV",{class:!0});var It=n(Be);g(Vr.$$.fragment,It),mb=c(It),Vo=s(It,"P",{});var Ri=n(Vo);hb=i(Ri,"The "),xi=s(Ri,"A",{href:!0});var fT=n(xi);fb=i(fT,"FlaxWav2Vec2ForPreTraining"),fT.forEach(t),ub=i(Ri," forward method, overrides the "),Uc=s(Ri,"CODE",{});var uT=n(Uc);gb=i(uT,"__call__"),uT.forEach(t),_b=i(Ri," special method."),Ri.forEach(t),vb=c(It),g(wa.$$.fragment,It),wb=c(It),Hc=s(It,"P",{});var gT=n(Hc);bb=i(gT,"Example:"),gT.forEach(t),yb=c(It),g(Fr.$$.fragment,It),It.forEach(t),Xe.forEach(t),this.h()},h(){d(p,"name","hf:doc:metadata"),d(p,"content",JSON.stringify(DT)),d(T,"id","wav2vec2"),d(T,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(T,"href","#wav2vec2"),d(f,"class","relative group"),d(A,"id","overview"),d(A,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(A,"href","#overview"),d(F,"class","relative group"),d(_e,"href","https://arxiv.org/abs/2006.11477"),d(_e,"rel","nofollow"),d(Pe,"href","/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2CTCTokenizer"),d(Ke,"href","https://huggingface.co/patrickvonplaten"),d(Ke,"rel","nofollow"),d(Ce,"id","transformers.Wav2Vec2Config"),d(Ce,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(Ce,"href","#transformers.Wav2Vec2Config"),d(ke,"class","relative group"),d(Mr,"href","/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2Model"),d(Fa,"href","https://huggingface.co/facebook/wav2vec2-base-960h"),d(Fa,"rel","nofollow"),d(zr,"href","/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig"),d(Ar,"href","/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig"),d(U,"class","docstring"),d(Fo,"id","transformers.Wav2Vec2CTCTokenizer"),d(Fo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(Fo,"href","#transformers.Wav2Vec2CTCTokenizer"),d(Gt,"class","relative 
group"),d(Lr,"href","/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer"),d(Eo,"class","docstring"),d(Dr,"class","docstring"),d(qe,"class","docstring"),d(Po,"class","docstring"),d(Z,"class","docstring"),d(Co,"id","transformers.Wav2Vec2FeatureExtractor"),d(Co,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(Co,"href","#transformers.Wav2Vec2FeatureExtractor"),d(Jt,"class","relative group"),d(Or,"href","/docs/transformers/pr_16143/en/main_classes/feature_extractor#transformers.SequenceFeatureExtractor"),d(qo,"class","docstring"),d(Qe,"class","docstring"),d(Mo,"id","transformers.Wav2Vec2Processor"),d(Mo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(Mo,"href","#transformers.Wav2Vec2Processor"),d(Zt,"class","relative group"),d(Sr,"href","/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor"),d(Nr,"href","/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2FeatureExtractor"),d(Ir,"href","/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer"),d(Ra,"href","/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor.__call__"),d(Br,"href","/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor.decode"),d(Ga,"href","/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2FeatureExtractor.__call__"),d(Ur,"href","/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor.as_target_processor"),d(Ja,"href","/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__"),d(zo,"class","docstring"),d(Hr,"href","/docs/transformers/pr_16143/en/main_classes/feature_extractor#transformers.SequenceFeatureExtractor.pad"),d(Rr,"href","/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor.as_target_processor"),d(Xr,"href","/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.pad"),d(Ao,"class","docstring"),d(Gr,"class","docstring"),d(Jr,"href","/docs/transformers/pr_16143/en/main_classes/processors#transformers.ProcessorMixin.from_pretrained"),d($t,"class","docstring"),d(Zr,"href","/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.batch_decode"),d(Do,"class","docstring"),d(Kr,"href","/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.decode"),d(Oo,"class","docstring"),d(So,"class","docstring"),d(z,"class","docstring"),d(No,"id","transformers.Wav2Vec2ProcessorWithLM"),d(No,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(No,"href","#transformers.Wav2Vec2ProcessorWithLM"),d(Kt,"class","relative 
group"),d(ls,"href","/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2FeatureExtractor.__call__"),d(Qr,"href","/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2ProcessorWithLM.as_target_processor"),d(cs,"href","/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__"),d(Io,"class","docstring"),d(Yr,"href","/docs/transformers/pr_16143/en/main_classes/feature_extractor#transformers.SequenceFeatureExtractor.pad"),d(ei,"href","/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2ProcessorWithLM.as_target_processor"),d(ti,"href","/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.pad"),d(Bo,"class","docstring"),d(oi,"href","/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2ProcessorWithLM"),d(jt,"class","docstring"),d(ai,"class","docstring"),d(Vt,"class","docstring"),d(tt,"class","docstring"),d(Ro,"class","docstring"),d(N,"class","docstring"),d(Xo,"id","transformers.models.wav2vec2_with_lm.processing_wav2vec2_with_lm.Wav2Vec2DecoderWithLMOutput"),d(Xo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(Xo,"href","#transformers.models.wav2vec2_with_lm.processing_wav2vec2_with_lm.Wav2Vec2DecoderWithLMOutput"),d(Qt,"class","relative group"),d(Yt,"class","docstring"),d(eo,"class","docstring"),d(si,"href","/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2ForPreTraining"),d(to,"class","docstring"),d(Go,"class","docstring"),d(vt,"class","docstring"),d(Jo,"class","docstring"),d(wt,"class","docstring"),d(Zo,"id","transformers.Wav2Vec2Model"),d(Zo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(Zo,"href","#transformers.Wav2Vec2Model"),d(oo,"class","relative group"),d(Ms,"href","https://arxiv.org/abs/2006.11477"),d(Ms,"rel","nofollow"),d(ni,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel"),d(Ls,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),d(Ls,"rel","nofollow"),d(ri,"href","/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2Model"),d(ze,"class","docstring"),d(We,"class","docstring"),d(Qo,"id","transformers.Wav2Vec2ForCTC"),d(Qo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(Qo,"href","#transformers.Wav2Vec2ForCTC"),d(so,"class","relative group"),d(Is,"href","https://arxiv.org/abs/2006.11477"),d(Is,"rel","nofollow"),d(ii,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel"),d(Hs,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),d(Hs,"rel","nofollow"),d(li,"href","/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2ForCTC"),d(me,"class","docstring"),d($e,"class","docstring"),d(ea,"id","transformers.Wav2Vec2ForSequenceClassification"),d(ea,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(ea,"href","#transformers.Wav2Vec2ForSequenceClassification"),d(io,"class","relative 
group"),d(Qs,"href","https://arxiv.org/abs/2006.11477"),d(Qs,"rel","nofollow"),d(ci,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel"),d(tn,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),d(tn,"rel","nofollow"),d(di,"href","/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2ForSequenceClassification"),d(he,"class","docstring"),d(ne,"class","docstring"),d(oa,"id","transformers.Wav2Vec2ForAudioFrameClassification"),d(oa,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(oa,"href","#transformers.Wav2Vec2ForAudioFrameClassification"),d(co,"class","relative group"),d(cn,"href","https://arxiv.org/abs/2006.11477"),d(cn,"rel","nofollow"),d(pi,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel"),d(mn,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),d(mn,"rel","nofollow"),d(mi,"href","/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2ForAudioFrameClassification"),d(Ae,"class","docstring"),d(re,"class","docstring"),d(sa,"id","transformers.Wav2Vec2ForXVector"),d(sa,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(sa,"href","#transformers.Wav2Vec2ForXVector"),d(mo,"class","relative group"),d(vn,"href","https://arxiv.org/abs/2006.11477"),d(vn,"rel","nofollow"),d(hi,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel"),d(yn,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),d(yn,"rel","nofollow"),d(fi,"href","/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2ForXVector"),d(Le,"class","docstring"),d(ie,"class","docstring"),d(ra,"id","transformers.Wav2Vec2ForPreTraining"),d(ra,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(ra,"href","#transformers.Wav2Vec2ForPreTraining"),d(fo,"class","relative group"),d($n,"href","https://arxiv.org/abs/2006.11477"),d($n,"rel","nofollow"),d(ui,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel"),d(Fn,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),d(Fn,"rel","nofollow"),d(gi,"href","/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.Wav2Vec2ForPreTraining"),d(De,"class","docstring"),d(je,"class","docstring"),d(la,"id","transformers.TFWav2Vec2Model"),d(la,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(la,"href","#transformers.TFWav2Vec2Model"),d(_o,"class","relative group"),d(_i,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel"),d(An,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),d(An,"rel","nofollow"),d(vi,"href","/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.TFWav2Vec2Model"),d(Oe,"class","docstring"),d(le,"class","docstring"),d(pa,"id","transformers.TFWav2Vec2ForCTC"),d(pa,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full"),d(pa,"href","#transformers.TFWav2Vec2ForCTC"),d(wo,"class","relative group"),d(wi,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel"),d(Un,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),d(Un,"rel","nofollow"),d(bi,"href","/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.TFWav2Vec2ForCTC"),d(Se,"class","docstring"),d(ce,"class","docstring"),d(fa,"id","transformers.FlaxWav2Vec2Model"),d(fa,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(fa,"href","#transformers.FlaxWav2Vec2Model"),d(yo,"class","relative group"),d(Zn,"href","https://arxiv.org/abs/2006.11477"),d(Zn,"rel","nofollow"),d(yi,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel"),d(Yn,"href","https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html"),d(Yn,"rel","nofollow"),d(er,"href","https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit"),d(er,"rel","nofollow"),d(tr,"href","https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation"),d(tr,"rel","nofollow"),d(or,"href","https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap"),d(or,"rel","nofollow"),d(ar,"href","https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap"),d(ar,"rel","nofollow"),d(Ne,"class","docstring"),d(K,"class","docstring"),d(ga,"id","transformers.FlaxWav2Vec2ForCTC"),d(ga,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(ga,"href","#transformers.FlaxWav2Vec2ForCTC"),d(To,"class","relative group"),d(lr,"href","https://arxiv.org/abs/2006.11477"),d(lr,"rel","nofollow"),d(ki,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel"),d(pr,"href","https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html"),d(pr,"rel","nofollow"),d(mr,"href","https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit"),d(mr,"rel","nofollow"),d(hr,"href","https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation"),d(hr,"rel","nofollow"),d(fr,"href","https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap"),d(fr,"rel","nofollow"),d(ur,"href","https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap"),d(ur,"rel","nofollow"),d(Ie,"class","docstring"),d(Q,"class","docstring"),d(va,"id","transformers.FlaxWav2Vec2ForPreTraining"),d(va,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(va,"href","#transformers.FlaxWav2Vec2ForPreTraining"),d($o,"class","relative 
group"),d(br,"href","https://arxiv.org/abs/2006.11477"),d(br,"rel","nofollow"),d(Ti,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel"),d(Tr,"href","https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html"),d(Tr,"rel","nofollow"),d(xr,"href","https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit"),d(xr,"rel","nofollow"),d(Wr,"href","https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation"),d(Wr,"rel","nofollow"),d($r,"href","https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap"),d($r,"rel","nofollow"),d(jr,"href","https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap"),d(jr,"rel","nofollow"),d(xi,"href","/docs/transformers/pr_16143/en/model_doc/wav2vec2#transformers.FlaxWav2Vec2ForPreTraining"),d(Be,"class","docstring"),d(Y,"class","docstring")},m(o,m){e(document.head,p),h(o,x,m),h(o,f,m),e(f,T),e(T,W),_(k,W,null),e(f,y),e(f,j),e(j,O),h(o,q,m),h(o,F,m),e(F,A),e(A,L),_(X,L,null),e(F,S),e(F,C),e(C,Ve),h(o,ge,m),h(o,B,m),e(B,H),e(B,_e),e(_e,Ge),e(B,M),h(o,I,m),h(o,Fe,m),e(Fe,ve),h(o,Ye,m),h(o,Ee,m),e(Ee,oe),e(oe,ct),h(o,et,m),h(o,D,m),e(D,dt),h(o,de,m),h(o,pe,m),e(pe,Je),e(Je,we),e(pe,pt),e(pe,be),e(be,ae),e(be,Pe),e(Pe,ye),e(be,mt),h(o,$,m),h(o,P,m),e(P,Ze),e(P,Ke),e(Ke,Bt),e(P,se),h(o,xt,m),h(o,ke,m),e(ke,Ce),e(Ce,G),_(J,G,null),e(ke,Ut),e(ke,ht),e(ht,Te),h(o,Wt,m),h(o,U,m),_(xe,U,null),e(U,Ht),e(U,Rt),e(Rt,lm),e(Rt,Mr),e(Mr,cm),e(Rt,dm),e(Rt,Fa),e(Fa,pm),e(Rt,mm),e(U,hm),e(U,Xt),e(Xt,fm),e(Xt,zr),e(zr,um),e(Xt,gm),e(Xt,Ar),e(Ar,_m),e(Xt,vm),e(U,wm),e(U,Xi),e(Xi,bm),e(U,ym),_(Ea,U,null),h(o,sd,m),h(o,Gt,m),e(Gt,Fo),e(Fo,Gi),_(Pa,Gi,null),e(Gt,km),e(Gt,Ji),e(Ji,Tm),h(o,nd,m),h(o,Z,m),_(Ca,Z,null),e(Z,xm),e(Z,Zi),e(Zi,Wm),e(Z,$m),e(Z,qa),e(qa,jm),e(qa,Lr),e(Lr,Vm),e(qa,Fm),e(Z,Em),e(Z,Eo),_(Ma,Eo,null),e(Eo,Pm),e(Eo,Ki),e(Ki,Cm),e(Z,qm),e(Z,Dr),_(za,Dr,null),e(Z,Mm),e(Z,qe),_(Aa,qe,null),e(qe,zm),e(qe,Qi),e(Qi,Am),e(qe,Lm),e(qe,La),e(La,Dm),e(La,Yi),e(Yi,Om),e(La,Sm),e(qe,Nm),e(qe,el),e(el,Im),e(qe,Bm),_(Da,qe,null),e(Z,Um),e(Z,Po),_(Oa,Po,null),e(Po,Hm),e(Po,tl),e(tl,Rm),h(o,rd,m),h(o,Jt,m),e(Jt,Co),e(Co,ol),_(Sa,ol,null),e(Jt,Xm),e(Jt,al),e(al,Gm),h(o,id,m),h(o,Qe,m),_(Na,Qe,null),e(Qe,Jm),e(Qe,sl),e(sl,Zm),e(Qe,Km),e(Qe,Ia),e(Ia,Qm),e(Ia,Or),e(Or,Ym),e(Ia,eh),e(Qe,th),e(Qe,qo),_(Ba,qo,null),e(qo,oh),e(qo,nl),e(nl,ah),h(o,ld,m),h(o,Zt,m),e(Zt,Mo),e(Mo,rl),_(Ua,rl,null),e(Zt,sh),e(Zt,il),e(il,nh),h(o,cd,m),h(o,z,m),_(Ha,z,null),e(z,rh),e(z,ll),e(ll,ih),e(z,lh),e(z,Me),e(Me,Sr),e(Sr,ch),e(Me,dh),e(Me,Nr),e(Nr,ph),e(Me,mh),e(Me,Ir),e(Ir,hh),e(Me,fh),e(Me,Ra),e(Ra,cl),e(cl,uh),e(Ra,gh),e(Me,_h),e(Me,Br),e(Br,vh),e(Me,wh),e(z,bh),e(z,zo),_(Xa,zo,null),e(zo,yh),e(zo,ft),e(ft,kh),e(ft,Ga),e(Ga,dl),e(dl,Th),e(Ga,xh),e(ft,Wh),e(ft,Ur),e(Ur,$h),e(ft,jh),e(ft,Ja),e(Ja,pl),e(pl,Vh),e(Ja,Fh),e(ft,Eh),e(z,Ph),e(z,Ao),_(Za,Ao,null),e(Ao,Ch),e(Ao,ut),e(ut,qh),e(ut,Hr),e(Hr,Mh),e(ut,zh),e(ut,Rr),e(Rr,Ah),e(ut,Lh),e(ut,Xr),e(Xr,Dh),e(ut,Oh),e(z,Sh),e(z,Gr),_(Ka,Gr,null),e(z,Nh),e(z,$t),_(Qa,$t,null),e($t,Ih),e($t,Ya),e(Ya,Bh),e(Ya,Jr),e(Jr,Uh),e(Ya,Hh),e($t,Rh),_(Lo,$t,null),e(z,Xh),e(z,Do),_(es,Do,null),e(Do,Gh),e(Do,ts),e(ts,Jh),e(ts,Zr),e(Zr,Zh),e(ts,Kh),e(z,Qh),e(z,Oo),_(os,Oo,null),e(Oo,Yh),e(Oo,as),e(as,ef),e(as,Kr),e(Kr,tf),e(as,of),e(z,af),e(z,So),_(ss,So,null),e(So,sf),e(So,ml),e(ml,nf),h(o,dd,m),h(o,Kt,m),e(Kt,No),e(No,hl),_(ns,hl,null),e(Kt,rf),e(Kt,fl),e(fl,lf),h(o,pd,m),h(o,N,m),_(rs,N,null),e(N,cf),e(N,ul),e(ul,df),e(N,pf),e(N,Io),_(is,Io,null),e(Io,mf),e(Io,gt),e(gt,hf),e(gt,ls)
,e(ls,gl),e(gl,ff),e(ls,uf),e(gt,gf),e(gt,Qr),e(Qr,_f),e(gt,vf),e(gt,cs),e(cs,_l),e(_l,wf),e(cs,bf),e(gt,yf),e(N,kf),e(N,Bo),_(ds,Bo,null),e(Bo,Tf),e(Bo,_t),e(_t,xf),e(_t,Yr),e(Yr,Wf),e(_t,$f),e(_t,ei),e(ei,jf),e(_t,Vf),e(_t,ti),e(ti,Ff),e(_t,Ef),e(N,Pf),e(N,jt),_(ps,jt,null),e(jt,Cf),e(jt,ms),e(ms,qf),e(ms,oi),e(oi,Mf),e(ms,zf),e(jt,Af),_(Uo,jt,null),e(N,Lf),e(N,ai),_(hs,ai,null),e(N,Df),e(N,Vt),_(fs,Vt,null),e(Vt,Of),e(Vt,vl),e(vl,Sf),e(Vt,Nf),_(Ho,Vt,null),e(N,If),e(N,tt),_(us,tt,null),e(tt,Bf),e(tt,wl),e(wl,Uf),e(tt,Hf),e(tt,bl),e(bl,Rf),e(tt,Xf),_(gs,tt,null),e(N,Gf),e(N,Ro),_(_s,Ro,null),e(Ro,Jf),e(Ro,yl),e(yl,Zf),h(o,md,m),h(o,Qt,m),e(Qt,Xo),e(Xo,kl),_(vs,kl,null),e(Qt,Kf),e(Qt,Tl),e(Tl,Qf),h(o,hd,m),h(o,Yt,m),_(ws,Yt,null),e(Yt,Yf),e(Yt,bs),e(bs,eu),e(bs,xl),e(xl,tu),e(bs,ou),h(o,fd,m),h(o,eo,m),_(ys,eo,null),e(eo,au),e(eo,ks),e(ks,su),e(ks,Wl),e(Wl,nu),e(ks,ru),h(o,ud,m),h(o,to,m),_(Ts,to,null),e(to,iu),e(to,xs),e(xs,lu),e(xs,si),e(si,cu),e(xs,du),h(o,gd,m),h(o,vt,m),_(Ws,vt,null),e(vt,pu),e(vt,$s),e($s,mu),e($s,$l),e($l,hu),e($s,fu),e(vt,uu),e(vt,Go),_(js,Go,null),e(Go,gu),e(Go,jl),e(jl,_u),h(o,_d,m),h(o,wt,m),_(Vs,wt,null),e(wt,vu),e(wt,Fs),e(Fs,wu),e(Fs,Vl),e(Vl,bu),e(Fs,yu),e(wt,ku),e(wt,Jo),_(Es,Jo,null),e(Jo,Tu),e(Jo,Fl),e(Fl,xu),h(o,vd,m),h(o,oo,m),e(oo,Zo),e(Zo,El),_(Ps,El,null),e(oo,Wu),e(oo,Pl),e(Pl,$u),h(o,wd,m),h(o,We,m),_(Cs,We,null),e(We,ju),e(We,qs),e(qs,Vu),e(qs,Ms),e(Ms,Fu),e(qs,Eu),e(We,Pu),e(We,zs),e(zs,Cu),e(zs,ni),e(ni,qu),e(zs,Mu),e(We,zu),e(We,As),e(As,Au),e(As,Ls),e(Ls,Lu),e(As,Du),e(We,Ou),e(We,ze),_(Ds,ze,null),e(ze,Su),e(ze,ao),e(ao,Nu),e(ao,ri),e(ri,Iu),e(ao,Bu),e(ao,Cl),e(Cl,Uu),e(ao,Hu),e(ze,Ru),_(Ko,ze,null),e(ze,Xu),e(ze,ql),e(ql,Gu),e(ze,Ju),_(Os,ze,null),h(o,bd,m),h(o,so,m),e(so,Qo),e(Qo,Ml),_(Ss,Ml,null),e(so,Zu),e(so,zl),e(zl,Ku),h(o,yd,m),h(o,$e,m),_(Ns,$e,null),e($e,Qu),e($e,no),e(no,Yu),e(no,Al),e(Al,eg),e(no,tg),e(no,Is),e(Is,og),e(no,ag),e($e,sg),e($e,Bs),e(Bs,ng),e(Bs,ii),e(ii,rg),e(Bs,ig),e($e,lg),e($e,Us),e(Us,cg),e(Us,Hs),e(Hs,dg),e(Us,pg),e($e,mg),e($e,me),_(Rs,me,null),e(me,hg),e(me,ro),e(ro,fg),e(ro,li),e(li,ug),e(ro,gg),e(ro,Ll),e(Ll,_g),e(ro,vg),e(me,wg),_(Yo,me,null),e(me,bg),e(me,Dl),e(Dl,yg),e(me,kg),_(Xs,me,null),e(me,Tg),_(Gs,me,null),h(o,kd,m),h(o,io,m),e(io,ea),e(ea,Ol),_(Js,Ol,null),e(io,xg),e(io,Sl),e(Sl,Wg),h(o,Td,m),h(o,ne,m),_(Zs,ne,null),e(ne,$g),e(ne,Nl),e(Nl,jg),e(ne,Vg),e(ne,Ks),e(Ks,Fg),e(Ks,Qs),e(Qs,Eg),e(Ks,Pg),e(ne,Cg),e(ne,Ys),e(Ys,qg),e(Ys,ci),e(ci,Mg),e(Ys,zg),e(ne,Ag),e(ne,en),e(en,Lg),e(en,tn),e(tn,Dg),e(en,Og),e(ne,Sg),e(ne,he),_(on,he,null),e(he,Ng),e(he,lo),e(lo,Ig),e(lo,di),e(di,Bg),e(lo,Ug),e(lo,Il),e(Il,Hg),e(lo,Rg),e(he,Xg),_(ta,he,null),e(he,Gg),e(he,Bl),e(Bl,Jg),e(he,Zg),_(an,he,null),e(he,Kg),_(sn,he,null),h(o,xd,m),h(o,co,m),e(co,oa),e(oa,Ul),_(nn,Ul,null),e(co,Qg),e(co,Hl),e(Hl,Yg),h(o,Wd,m),h(o,re,m),_(rn,re,null),e(re,e_),e(re,Rl),e(Rl,t_),e(re,o_),e(re,ln),e(ln,a_),e(ln,cn),e(cn,s_),e(ln,n_),e(re,r_),e(re,dn),e(dn,i_),e(dn,pi),e(pi,l_),e(dn,c_),e(re,d_),e(re,pn),e(pn,p_),e(pn,mn),e(mn,m_),e(pn,h_),e(re,f_),e(re,Ae),_(hn,Ae,null),e(Ae,u_),e(Ae,po),e(po,g_),e(po,mi),e(mi,__),e(po,v_),e(po,Xl),e(Xl,w_),e(po,b_),e(Ae,y_),_(aa,Ae,null),e(Ae,k_),e(Ae,Gl),e(Gl,T_),e(Ae,x_),_(fn,Ae,null),h(o,$d,m),h(o,mo,m),e(mo,sa),e(sa,Jl),_(un,Jl,null),e(mo,W_),e(mo,Zl),e(Zl,$_),h(o,jd,m),h(o,ie,m),_(gn,ie,null),e(ie,j_),e(ie,Kl),e(Kl,V_),e(ie,F_),e(ie,_n),e(_n,E_),e(_n,vn),e(vn,P_),e(_n,C_),e(ie,q_),e(ie,wn),e(wn,M_),e(wn,hi),e(hi,z_),e(wn,A_),e(ie,L_),e(ie,bn),e(bn,D_),e(bn,yn),e(yn,O_),e(bn,S_),e(ie,N_),e(ie,Le)
,_(kn,Le,null),e(Le,I_),e(Le,ho),e(ho,B_),e(ho,fi),e(fi,U_),e(ho,H_),e(ho,Ql),e(Ql,R_),e(ho,X_),e(Le,G_),_(na,Le,null),e(Le,J_),e(Le,Yl),e(Yl,Z_),e(Le,K_),_(Tn,Le,null),h(o,Vd,m),h(o,fo,m),e(fo,ra),e(ra,ec),_(xn,ec,null),e(fo,Q_),e(fo,tc),e(tc,Y_),h(o,Fd,m),h(o,je,m),_(Wn,je,null),e(je,ev),e(je,uo),e(uo,tv),e(uo,oc),e(oc,ov),e(uo,av),e(uo,$n),e($n,sv),e(uo,nv),e(je,rv),e(je,jn),e(jn,iv),e(jn,ui),e(ui,lv),e(jn,cv),e(je,dv),e(je,Vn),e(Vn,pv),e(Vn,Fn),e(Fn,mv),e(Vn,hv),e(je,fv),e(je,De),_(En,De,null),e(De,uv),e(De,go),e(go,gv),e(go,gi),e(gi,_v),e(go,vv),e(go,ac),e(ac,wv),e(go,bv),e(De,yv),_(ia,De,null),e(De,kv),e(De,sc),e(sc,Tv),e(De,xv),_(Pn,De,null),h(o,Ed,m),h(o,_o,m),e(_o,la),e(la,nc),_(Cn,nc,null),e(_o,Wv),e(_o,rc),e(rc,$v),h(o,Pd,m),h(o,le,m),_(qn,le,null),e(le,jv),e(le,ic),e(ic,Vv),e(le,Fv),e(le,Mn),e(Mn,Ev),e(Mn,_i),e(_i,Pv),e(Mn,Cv),e(le,qv),e(le,zn),e(zn,Mv),e(zn,An),e(An,zv),e(zn,Av),e(le,Lv),_(ca,le,null),e(le,Dv),e(le,Oe),_(Ln,Oe,null),e(Oe,Ov),e(Oe,vo),e(vo,Sv),e(vo,vi),e(vi,Nv),e(vo,Iv),e(vo,lc),e(lc,Bv),e(vo,Uv),e(Oe,Hv),_(da,Oe,null),e(Oe,Rv),e(Oe,cc),e(cc,Xv),e(Oe,Gv),_(Dn,Oe,null),h(o,Cd,m),h(o,wo,m),e(wo,pa),e(pa,dc),_(On,dc,null),e(wo,Jv),e(wo,pc),e(pc,Zv),h(o,qd,m),h(o,ce,m),_(Sn,ce,null),e(ce,Kv),e(ce,Nn),e(Nn,Qv),e(Nn,mc),e(mc,Yv),e(Nn,e2),e(ce,t2),e(ce,In),e(In,o2),e(In,wi),e(wi,a2),e(In,s2),e(ce,n2),e(ce,Bn),e(Bn,r2),e(Bn,Un),e(Un,i2),e(Bn,l2),e(ce,c2),_(ma,ce,null),e(ce,d2),e(ce,Se),_(Hn,Se,null),e(Se,p2),e(Se,bo),e(bo,m2),e(bo,bi),e(bi,h2),e(bo,f2),e(bo,hc),e(hc,u2),e(bo,g2),e(Se,_2),_(ha,Se,null),e(Se,v2),e(Se,fc),e(fc,w2),e(Se,b2),_(Rn,Se,null),h(o,Md,m),h(o,yo,m),e(yo,fa),e(fa,uc),_(Xn,uc,null),e(yo,y2),e(yo,gc),e(gc,k2),h(o,zd,m),h(o,K,m),_(Gn,K,null),e(K,T2),e(K,Jn),e(Jn,x2),e(Jn,Zn),e(Zn,W2),e(Jn,$2),e(K,j2),e(K,Kn),e(Kn,V2),e(Kn,yi),e(yi,F2),e(Kn,E2),e(K,P2),e(K,Qn),e(Qn,C2),e(Qn,Yn),e(Yn,q2),e(Qn,M2),e(K,z2),e(K,_c),e(_c,A2),e(K,L2),e(K,bt),e(bt,vc),e(vc,er),e(er,D2),e(bt,O2),e(bt,wc),e(wc,tr),e(tr,S2),e(bt,N2),e(bt,bc),e(bc,or),e(or,I2),e(bt,B2),e(bt,yc),e(yc,ar),e(ar,U2),e(K,H2),e(K,Ne),_(sr,Ne,null),e(Ne,R2),e(Ne,ko),e(ko,X2),e(ko,kc),e(kc,G2),e(ko,J2),e(ko,Tc),e(Tc,Z2),e(ko,K2),e(Ne,Q2),_(ua,Ne,null),e(Ne,Y2),e(Ne,xc),e(xc,ew),e(Ne,tw),_(nr,Ne,null),h(o,Ad,m),h(o,To,m),e(To,ga),e(ga,Wc),_(rr,Wc,null),e(To,ow),e(To,$c),e($c,aw),h(o,Ld,m),h(o,Q,m),_(ir,Q,null),e(Q,sw),e(Q,xo),e(xo,nw),e(xo,jc),e(jc,rw),e(xo,iw),e(xo,lr),e(lr,lw),e(xo,cw),e(Q,dw),e(Q,cr),e(cr,pw),e(cr,ki),e(ki,mw),e(cr,hw),e(Q,fw),e(Q,dr),e(dr,uw),e(dr,pr),e(pr,gw),e(dr,_w),e(Q,vw),e(Q,Vc),e(Vc,ww),e(Q,bw),e(Q,yt),e(yt,Fc),e(Fc,mr),e(mr,yw),e(yt,kw),e(yt,Ec),e(Ec,hr),e(hr,Tw),e(yt,xw),e(yt,Pc),e(Pc,fr),e(fr,Ww),e(yt,$w),e(yt,Cc),e(Cc,ur),e(ur,jw),e(Q,Vw),e(Q,Ie),_(gr,Ie,null),e(Ie,Fw),e(Ie,Wo),e(Wo,Ew),e(Wo,qc),e(qc,Pw),e(Wo,Cw),e(Wo,Mc),e(Mc,qw),e(Wo,Mw),e(Ie,zw),_(_a,Ie,null),e(Ie,Aw),e(Ie,zc),e(zc,Lw),e(Ie,Dw),_(_r,Ie,null),h(o,Dd,m),h(o,$o,m),e($o,va),e(va,Ac),_(vr,Ac,null),e($o,Ow),e($o,Lc),e(Lc,Sw),h(o,Od,m),h(o,Y,m),_(wr,Y,null),e(Y,Nw),e(Y,jo),e(jo,Iw),e(jo,Dc),e(Dc,Bw),e(jo,Uw),e(jo,br),e(br,Hw),e(jo,Rw),e(Y,Xw),e(Y,yr),e(yr,Gw),e(yr,Ti),e(Ti,Jw),e(yr,Zw),e(Y,Kw),e(Y,kr),e(kr,Qw),e(kr,Tr),e(Tr,Yw),e(kr,eb),e(Y,tb),e(Y,Oc),e(Oc,ob),e(Y,ab),e(Y,kt),e(kt,Sc),e(Sc,xr),e(xr,sb),e(kt,nb),e(kt,Nc),e(Nc,Wr),e(Wr,rb),e(kt,ib),e(kt,Ic),e(Ic,$r),e($r,lb),e(kt,cb),e(kt,Bc),e(Bc,jr),e(jr,db),e(Y,pb),e(Y,Be),_(Vr,Be,null),e(Be,mb),e(Be,Vo),e(Vo,hb),e(Vo,xi),e(xi,fb),e(Vo,ub),e(Vo,Uc),e(Uc,gb),e(Vo,_b),e(Be,vb),_(wa,Be,null),e(Be,wb),e(Be,Hc),e(Hc,bb),e(Be,yb),_(Fr,Be,null),Sd=!0},p(o,[m]){const 
Er={};m&2&&(Er.$$scope={dirty:m,ctx:o}),Lo.$set(Er);const Rc={};m&2&&(Rc.$$scope={dirty:m,ctx:o}),Uo.$set(Rc);const Xc={};m&2&&(Xc.$$scope={dirty:m,ctx:o}),Ho.$set(Xc);const Gc={};m&2&&(Gc.$$scope={dirty:m,ctx:o}),Ko.$set(Gc);const Pr={};m&2&&(Pr.$$scope={dirty:m,ctx:o}),Yo.$set(Pr);const Jc={};m&2&&(Jc.$$scope={dirty:m,ctx:o}),ta.$set(Jc);const Zc={};m&2&&(Zc.$$scope={dirty:m,ctx:o}),aa.$set(Zc);const Kc={};m&2&&(Kc.$$scope={dirty:m,ctx:o}),na.$set(Kc);const Cr={};m&2&&(Cr.$$scope={dirty:m,ctx:o}),ia.$set(Cr);const Qc={};m&2&&(Qc.$$scope={dirty:m,ctx:o}),ca.$set(Qc);const Yc={};m&2&&(Yc.$$scope={dirty:m,ctx:o}),da.$set(Yc);const ed={};m&2&&(ed.$$scope={dirty:m,ctx:o}),ma.$set(ed);const td={};m&2&&(td.$$scope={dirty:m,ctx:o}),ha.$set(td);const od={};m&2&&(od.$$scope={dirty:m,ctx:o}),ua.$set(od);const qr={};m&2&&(qr.$$scope={dirty:m,ctx:o}),_a.$set(qr);const ad={};m&2&&(ad.$$scope={dirty:m,ctx:o}),wa.$set(ad)},i(o){Sd||(v(k.$$.fragment,o),v(X.$$.fragment,o),v(J.$$.fragment,o),v(xe.$$.fragment,o),v(Ea.$$.fragment,o),v(Pa.$$.fragment,o),v(Ca.$$.fragment,o),v(Ma.$$.fragment,o),v(za.$$.fragment,o),v(Aa.$$.fragment,o),v(Da.$$.fragment,o),v(Oa.$$.fragment,o),v(Sa.$$.fragment,o),v(Na.$$.fragment,o),v(Ba.$$.fragment,o),v(Ua.$$.fragment,o),v(Ha.$$.fragment,o),v(Xa.$$.fragment,o),v(Za.$$.fragment,o),v(Ka.$$.fragment,o),v(Qa.$$.fragment,o),v(Lo.$$.fragment,o),v(es.$$.fragment,o),v(os.$$.fragment,o),v(ss.$$.fragment,o),v(ns.$$.fragment,o),v(rs.$$.fragment,o),v(is.$$.fragment,o),v(ds.$$.fragment,o),v(ps.$$.fragment,o),v(Uo.$$.fragment,o),v(hs.$$.fragment,o),v(fs.$$.fragment,o),v(Ho.$$.fragment,o),v(us.$$.fragment,o),v(gs.$$.fragment,o),v(_s.$$.fragment,o),v(vs.$$.fragment,o),v(ws.$$.fragment,o),v(ys.$$.fragment,o),v(Ts.$$.fragment,o),v(Ws.$$.fragment,o),v(js.$$.fragment,o),v(Vs.$$.fragment,o),v(Es.$$.fragment,o),v(Ps.$$.fragment,o),v(Cs.$$.fragment,o),v(Ds.$$.fragment,o),v(Ko.$$.fragment,o),v(Os.$$.fragment,o),v(Ss.$$.fragment,o),v(Ns.$$.fragment,o),v(Rs.$$.fragment,o),v(Yo.$$.fragment,o),v(Xs.$$.fragment,o),v(Gs.$$.fragment,o),v(Js.$$.fragment,o),v(Zs.$$.fragment,o),v(on.$$.fragment,o),v(ta.$$.fragment,o),v(an.$$.fragment,o),v(sn.$$.fragment,o),v(nn.$$.fragment,o),v(rn.$$.fragment,o),v(hn.$$.fragment,o),v(aa.$$.fragment,o),v(fn.$$.fragment,o),v(un.$$.fragment,o),v(gn.$$.fragment,o),v(kn.$$.fragment,o),v(na.$$.fragment,o),v(Tn.$$.fragment,o),v(xn.$$.fragment,o),v(Wn.$$.fragment,o),v(En.$$.fragment,o),v(ia.$$.fragment,o),v(Pn.$$.fragment,o),v(Cn.$$.fragment,o),v(qn.$$.fragment,o),v(ca.$$.fragment,o),v(Ln.$$.fragment,o),v(da.$$.fragment,o),v(Dn.$$.fragment,o),v(On.$$.fragment,o),v(Sn.$$.fragment,o),v(ma.$$.fragment,o),v(Hn.$$.fragment,o),v(ha.$$.fragment,o),v(Rn.$$.fragment,o),v(Xn.$$.fragment,o),v(Gn.$$.fragment,o),v(sr.$$.fragment,o),v(ua.$$.fragment,o),v(nr.$$.fragment,o),v(rr.$$.fragment,o),v(ir.$$.fragment,o),v(gr.$$.fragment,o),v(_a.$$.fragment,o),v(_r.$$.fragment,o),v(vr.$$.fragment,o),v(wr.$$.fragment,o),v(Vr.$$.fragment,o),v(wa.$$.fragment,o),v(Fr.$$.fragment,o),Sd=!0)},o(o){w(k.$$.fragment,o),w(X.$$.fragment,o),w(J.$$.fragment,o),w(xe.$$.fragment,o),w(Ea.$$.fragment,o),w(Pa.$$.fragment,o),w(Ca.$$.fragment,o),w(Ma.$$.fragment,o),w(za.$$.fragment,o),w(Aa.$$.fragment,o),w(Da.$$.fragment,o),w(Oa.$$.fragment,o),w(Sa.$$.fragment,o),w(Na.$$.fragment,o),w(Ba.$$.fragment,o),w(Ua.$$.fragment,o),w(Ha.$$.fragment,o),w(Xa.$$.fragment,o),w(Za.$$.fragment,o),w(Ka.$$.fragment,o),w(Qa.$$.fragment,o),w(Lo.$$.fragment,o),w(es.$$.fragment,o),w(os.$$.fragment,o),w(ss.$$.fragment,o),w(ns.$$.fragment,o),w(rs.$$.fragmen
t,o),w(is.$$.fragment,o),w(ds.$$.fragment,o),w(ps.$$.fragment,o),w(Uo.$$.fragment,o),w(hs.$$.fragment,o),w(fs.$$.fragment,o),w(Ho.$$.fragment,o),w(us.$$.fragment,o),w(gs.$$.fragment,o),w(_s.$$.fragment,o),w(vs.$$.fragment,o),w(ws.$$.fragment,o),w(ys.$$.fragment,o),w(Ts.$$.fragment,o),w(Ws.$$.fragment,o),w(js.$$.fragment,o),w(Vs.$$.fragment,o),w(Es.$$.fragment,o),w(Ps.$$.fragment,o),w(Cs.$$.fragment,o),w(Ds.$$.fragment,o),w(Ko.$$.fragment,o),w(Os.$$.fragment,o),w(Ss.$$.fragment,o),w(Ns.$$.fragment,o),w(Rs.$$.fragment,o),w(Yo.$$.fragment,o),w(Xs.$$.fragment,o),w(Gs.$$.fragment,o),w(Js.$$.fragment,o),w(Zs.$$.fragment,o),w(on.$$.fragment,o),w(ta.$$.fragment,o),w(an.$$.fragment,o),w(sn.$$.fragment,o),w(nn.$$.fragment,o),w(rn.$$.fragment,o),w(hn.$$.fragment,o),w(aa.$$.fragment,o),w(fn.$$.fragment,o),w(un.$$.fragment,o),w(gn.$$.fragment,o),w(kn.$$.fragment,o),w(na.$$.fragment,o),w(Tn.$$.fragment,o),w(xn.$$.fragment,o),w(Wn.$$.fragment,o),w(En.$$.fragment,o),w(ia.$$.fragment,o),w(Pn.$$.fragment,o),w(Cn.$$.fragment,o),w(qn.$$.fragment,o),w(ca.$$.fragment,o),w(Ln.$$.fragment,o),w(da.$$.fragment,o),w(Dn.$$.fragment,o),w(On.$$.fragment,o),w(Sn.$$.fragment,o),w(ma.$$.fragment,o),w(Hn.$$.fragment,o),w(ha.$$.fragment,o),w(Rn.$$.fragment,o),w(Xn.$$.fragment,o),w(Gn.$$.fragment,o),w(sr.$$.fragment,o),w(ua.$$.fragment,o),w(nr.$$.fragment,o),w(rr.$$.fragment,o),w(ir.$$.fragment,o),w(gr.$$.fragment,o),w(_a.$$.fragment,o),w(_r.$$.fragment,o),w(vr.$$.fragment,o),w(wr.$$.fragment,o),w(Vr.$$.fragment,o),w(wa.$$.fragment,o),w(Fr.$$.fragment,o),Sd=!1},d(o){t(p),o&&t(x),o&&t(f),b(k),o&&t(q),o&&t(F),b(X),o&&t(ge),o&&t(B),o&&t(I),o&&t(Fe),o&&t(Ye),o&&t(Ee),o&&t(et),o&&t(D),o&&t(de),o&&t(pe),o&&t($),o&&t(P),o&&t(xt),o&&t(ke),b(J),o&&t(Wt),o&&t(U),b(xe),b(Ea),o&&t(sd),o&&t(Gt),b(Pa),o&&t(nd),o&&t(Z),b(Ca),b(Ma),b(za),b(Aa),b(Da),b(Oa),o&&t(rd),o&&t(Jt),b(Sa),o&&t(id),o&&t(Qe),b(Na),b(Ba),o&&t(ld),o&&t(Zt),b(Ua),o&&t(cd),o&&t(z),b(Ha),b(Xa),b(Za),b(Ka),b(Qa),b(Lo),b(es),b(os),b(ss),o&&t(dd),o&&t(Kt),b(ns),o&&t(pd),o&&t(N),b(rs),b(is),b(ds),b(ps),b(Uo),b(hs),b(fs),b(Ho),b(us),b(gs),b(_s),o&&t(md),o&&t(Qt),b(vs),o&&t(hd),o&&t(Yt),b(ws),o&&t(fd),o&&t(eo),b(ys),o&&t(ud),o&&t(to),b(Ts),o&&t(gd),o&&t(vt),b(Ws),b(js),o&&t(_d),o&&t(wt),b(Vs),b(Es),o&&t(vd),o&&t(oo),b(Ps),o&&t(wd),o&&t(We),b(Cs),b(Ds),b(Ko),b(Os),o&&t(bd),o&&t(so),b(Ss),o&&t(yd),o&&t($e),b(Ns),b(Rs),b(Yo),b(Xs),b(Gs),o&&t(kd),o&&t(io),b(Js),o&&t(Td),o&&t(ne),b(Zs),b(on),b(ta),b(an),b(sn),o&&t(xd),o&&t(co),b(nn),o&&t(Wd),o&&t(re),b(rn),b(hn),b(aa),b(fn),o&&t($d),o&&t(mo),b(un),o&&t(jd),o&&t(ie),b(gn),b(kn),b(na),b(Tn),o&&t(Vd),o&&t(fo),b(xn),o&&t(Fd),o&&t(je),b(Wn),b(En),b(ia),b(Pn),o&&t(Ed),o&&t(_o),b(Cn),o&&t(Pd),o&&t(le),b(qn),b(ca),b(Ln),b(da),b(Dn),o&&t(Cd),o&&t(wo),b(On),o&&t(qd),o&&t(ce),b(Sn),b(ma),b(Hn),b(ha),b(Rn),o&&t(Md),o&&t(yo),b(Xn),o&&t(zd),o&&t(K),b(Gn),b(sr),b(ua),b(nr),o&&t(Ad),o&&t(To),b(rr),o&&t(Ld),o&&t(Q),b(ir),b(gr),b(_a),b(_r),o&&t(Dd),o&&t($o),b(vr),o&&t(Od),o&&t(Y),b(wr),b(Vr),b(wa),b(Fr)}}}const DT={local:"wav2vec2",sections:[{local:"overview",title:"Overview"},{local:"transformers.Wav2Vec2Config",title:"Wav2Vec2Config"},{local:"transformers.Wav2Vec2CTCTokenizer",title:"Wav2Vec2CTCTokenizer"},{local:"transformers.Wav2Vec2FeatureExtractor",title:"Wav2Vec2FeatureExtractor"},{local:"transformers.Wav2Vec2Processor",title:"Wav2Vec2Processor"},{local:"transformers.Wav2Vec2ProcessorWithLM",title:"Wav2Vec2ProcessorWithLM"},{local:"transformers.models.wav2vec2_with_lm.processing_wav2vec2_with_lm.Wav2Vec2DecoderWithLMOutput",title:"Wav2Vec2 
specific outputs"},{local:"transformers.Wav2Vec2Model",title:"Wav2Vec2Model"},{local:"transformers.Wav2Vec2ForCTC",title:"Wav2Vec2ForCTC"},{local:"transformers.Wav2Vec2ForSequenceClassification",title:"Wav2Vec2ForSequenceClassification"},{local:"transformers.Wav2Vec2ForAudioFrameClassification",title:"Wav2Vec2ForAudioFrameClassification"},{local:"transformers.Wav2Vec2ForXVector",title:"Wav2Vec2ForXVector"},{local:"transformers.Wav2Vec2ForPreTraining",title:"Wav2Vec2ForPreTraining"},{local:"transformers.TFWav2Vec2Model",title:"TFWav2Vec2Model"},{local:"transformers.TFWav2Vec2ForCTC",title:"TFWav2Vec2ForCTC"},{local:"transformers.FlaxWav2Vec2Model",title:"FlaxWav2Vec2Model"},{local:"transformers.FlaxWav2Vec2ForCTC",title:"FlaxWav2Vec2ForCTC"},{local:"transformers.FlaxWav2Vec2ForPreTraining",title:"FlaxWav2Vec2ForPreTraining"}],title:"Wav2Vec2"};function OT(E,p,x){let{fw:f}=p;return E.$$set=T=>{"fw"in T&&x(0,f=T.fw)},[f]}class RT extends _T{constructor(p){super();vT(this,p,OT,LT,wT,{fw:0})}}export{RT as default,DT as metadata};
298
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages/model_doc/byt5.mdx-e9ed0683.js
import{S as Mo,i as Go,s as Wo,e as s,k as d,w as k,t as a,M as Uo,c as n,d as o,m as c,a as r,x as g,h as i,b as p,F as t,g as f,y,L as Vo,q as b,o as v,B as w}from"../../chunks/vendor-4833417e.js";import{D as Le}from"../../chunks/Docstring-4f315ed9.js";import{C as Ro}from"../../chunks/CodeBlock-6a3d1b46.js";import{I as at}from"../../chunks/IconCopyLink-4b81c553.js";import"../../chunks/CopyButton-dacfbfaf.js";function Ho(it){let T,te,u,_,he,F,lt,ue,dt,Ae,x,B,_e,N,ct,ke,pt,je,L,ft,O,mt,ht,De,oe,ut,Pe,se,ge,_t,Ce,$,kt,R,gt,yt,M,bt,vt,Ie,A,wt,ne,Tt,$t,Se,re,qt,Fe,z,j,ye,G,xt,be,zt,Ne,ae,Et,Oe,W,Re,ie,Bt,Me,U,Ge,E,D,ve,V,Lt,we,At,We,m,H,jt,Te,Dt,Pt,X,Ct,le,It,St,Ft,q,K,Nt,$e,Ot,Rt,J,de,Mt,qe,Gt,Wt,ce,Ut,xe,Vt,Ht,P,Q,Xt,ze,Kt,Jt,C,Y,Qt,Ee,Yt,Zt,I,Z,eo,ee,to,Be,oo,so,Ue,S,no,pe,ro,ao,Ve;return F=new at({}),N=new at({}),G=new at({}),W=new Ro({props:{code:`from transformers import T5ForConditionalGeneration import torch model = T5ForConditionalGeneration.from_pretrained("google/byt5-small") input_ids = torch.tensor([list("Life is like a box of chocolates.".encode("utf-8"))]) + 3 # add 3 for special tokens labels = ( torch.tensor([list("La vie est comme une bo\xEEte de chocolat.".encode("utf-8"))]) + 3 ) # add 3 for special tokens loss = model(input_ids, labels=labels).loss # forward pass`,highlighted:`<span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> T5ForConditionalGeneration <span class="hljs-keyword">import</span> torch model = T5ForConditionalGeneration.from_pretrained(<span class="hljs-string">&quot;google/byt5-small&quot;</span>) input_ids = torch.tensor([<span class="hljs-built_in">list</span>(<span class="hljs-string">&quot;Life is like a box of chocolates.&quot;</span>.encode(<span class="hljs-string">&quot;utf-8&quot;</span>))]) + <span class="hljs-number">3</span> <span class="hljs-comment"># add 3 for special tokens</span> labels = ( torch.tensor([<span class="hljs-built_in">list</span>(<span class="hljs-string">&quot;La vie est comme une bo\xEEte de chocolat.&quot;</span>.encode(<span class="hljs-string">&quot;utf-8&quot;</span>))]) + <span class="hljs-number">3</span> ) <span class="hljs-comment"># add 3 for special tokens</span> loss = model(input_ids, labels=labels).loss <span class="hljs-comment"># forward pass</span>`}}),U=new Ro({props:{code:`from transformers import T5ForConditionalGeneration, AutoTokenizer model = T5ForConditionalGeneration.from_pretrained("google/byt5-small") tokenizer = AutoTokenizer.from_pretrained("google/byt5-small") model_inputs = tokenizer( ["Life is like a box of chocolates.", "Today is Monday."], padding="longest", return_tensors="pt" ) labels = tokenizer( ["La vie est comme une bo\xEEte de chocolat.", "Aujourd'hui c'est lundi."], padding="longest", return_tensors="pt" ).input_ids loss = model(**model_inputs, labels=labels).loss # forward pass`,highlighted:`<span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> T5ForConditionalGeneration, AutoTokenizer model = T5ForConditionalGeneration.from_pretrained(<span class="hljs-string">&quot;google/byt5-small&quot;</span>) tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;google/byt5-small&quot;</span>) model_inputs = tokenizer( [<span class="hljs-string">&quot;Life is like a box of chocolates.&quot;</span>, <span class="hljs-string">&quot;Today is Monday.&quot;</span>], padding=<span class="hljs-string">&quot;longest&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span> ) labels = 
tokenizer( [<span class="hljs-string">&quot;La vie est comme une bo\xEEte de chocolat.&quot;</span>, <span class="hljs-string">&quot;Aujourd&#x27;hui c&#x27;est lundi.&quot;</span>], padding=<span class="hljs-string">&quot;longest&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span> ).input_ids loss = model(**model_inputs, labels=labels).loss <span class="hljs-comment"># forward pass</span>`}}),V=new at({}),H=new Le({props:{name:"class transformers.ByT5Tokenizer",anchor:"transformers.ByT5Tokenizer",parameters:[{name:"eos_token",val:" = '</s>'"},{name:"unk_token",val:" = '<unk>'"},{name:"pad_token",val:" = '<pad>'"},{name:"extra_ids",val:" = 125"},{name:"additional_special_tokens",val:" = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/byt5/tokenization_byt5.py#L28",parametersDescription:[{anchor:"transformers.ByT5Tokenizer.eos_token",description:`<strong>eos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;/s&gt;&quot;</code>) &#x2014; The end of sequence token.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the <code>sep_token</code>.</p> </div>`,name:"eos_token"},{anchor:"transformers.ByT5Tokenizer.unk_token",description:`<strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;unk&gt;&quot;</code>) &#x2014; The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.`,name:"unk_token"},{anchor:"transformers.ByT5Tokenizer.pad_token",description:`<strong>pad_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;pad&gt;&quot;</code>) &#x2014; The token used for padding, for example when batching sequences of different lengths.`,name:"pad_token"},{anchor:"transformers.ByT5Tokenizer.extra_ids",description:`<strong>extra_ids</strong> (<code>int</code>, <em>optional</em>, defaults to 100) &#x2014; Add a number of extra ids added to the end of the vocabulary for use as sentinels. These tokens are accessible as &#x201C;<extra<em>id{%d}&gt;&#x201D; where &#x201D;{%d}&#x201D; is a number between 0 and extra_ids-1. 
Extra tokens are indexed from the end of the vocabulary up to beginning (&#x201C;<extra_id_0>&#x201D; is the last token in the vocabulary like in ByT5 preprocessing see <a href="https://github.com/google-research/text-to-text-transfer-transformer/blob/9fd7b14a769417be33bc6c850f9598764913c833/t5/data/preprocessors.py#L2117" rel="nofollow">here</a>).</extra_id_0></extra<em>`,name:"extra_ids"},{anchor:"transformers.ByT5Tokenizer.additional_special_tokens",description:`<strong>additional_special_tokens</strong> (<code>List[str]</code>, <em>optional</em>) &#x2014; Additional special tokens used by the tokenizer.`,name:"additional_special_tokens"}]}}),K=new Le({props:{name:"build_inputs_with_special_tokens",anchor:"transformers.ByT5Tokenizer.build_inputs_with_special_tokens",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/byt5/tokenization_byt5.py#L177",parametersDescription:[{anchor:"transformers.ByT5Tokenizer.build_inputs_with_special_tokens.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs to which the special tokens will be added.`,name:"token_ids_0"},{anchor:"transformers.ByT5Tokenizer.build_inputs_with_special_tokens.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of <a href="../glossary#input-ids">input IDs</a> with the appropriate special tokens.</p> `,returnType:` <p><code>List[int]</code></p> `}}),Q=new Le({props:{name:"convert_tokens_to_string",anchor:"transformers.ByT5Tokenizer.convert_tokens_to_string",parameters:[{name:"tokens",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/byt5/tokenization_byt5.py#L228"}}),Y=new Le({props:{name:"create_token_type_ids_from_sequences",anchor:"transformers.ByT5Tokenizer.create_token_type_ids_from_sequences",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/byt5/tokenization_byt5.py#L155",parametersDescription:[{anchor:"transformers.ByT5Tokenizer.create_token_type_ids_from_sequences.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of IDs.`,name:"token_ids_0"},{anchor:"transformers.ByT5Tokenizer.create_token_type_ids_from_sequences.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"}],returnDescription:` <p>List of zeros.</p> `,returnType:` <p><code>List[int]</code></p> `}}),Z=new Le({props:{name:"get_special_tokens_mask",anchor:"transformers.ByT5Tokenizer.get_special_tokens_mask",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"},{name:"already_has_special_tokens",val:": bool = False"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/models/byt5/tokenization_byt5.py#L117",parametersDescription:[{anchor:"transformers.ByT5Tokenizer.get_special_tokens_mask.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of 
IDs.`,name:"token_ids_0"},{anchor:"transformers.ByT5Tokenizer.get_special_tokens_mask.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Optional second list of IDs for sequence pairs.`,name:"token_ids_1"},{anchor:"transformers.ByT5Tokenizer.get_special_tokens_mask.already_has_special_tokens",description:`<strong>already_has_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the token list is already formatted with special tokens for the model.`,name:"already_has_special_tokens"}],returnDescription:` <p>A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.</p> `,returnType:` <p><code>List[int]</code></p> `}}),{c(){T=s("meta"),te=d(),u=s("h1"),_=s("a"),he=s("span"),k(F.$$.fragment),lt=d(),ue=s("span"),dt=a("ByT5"),Ae=d(),x=s("h2"),B=s("a"),_e=s("span"),k(N.$$.fragment),ct=d(),ke=s("span"),pt=a("Overview"),je=d(),L=s("p"),ft=a("The ByT5 model was presented in "),O=s("a"),mt=a("ByT5: Towards a token-free future with pre-trained byte-to-byte models"),ht=a(` by Linting Xue, Aditya Barua, Noah Constant, Rami Al-Rfou, Sharan Narang, Mihir Kale, Adam Roberts, Colin Raffel.`),De=d(),oe=s("p"),ut=a("The abstract from the paper is the following:"),Pe=d(),se=s("p"),ge=s("em"),_t=a(`Most widely-used pre-trained language models operate on sequences of tokens corresponding to word or subword units. Encoding text as a sequence of tokens requires a tokenizer, which is typically created as an independent artifact from the model. Token-free models that instead operate directly on raw text (bytes or characters) have many benefits: they can process text in any language out of the box, they are more robust to noise, and they minimize technical debt by removing complex and error-prone text preprocessing pipelines. Since byte or character sequences are longer than token sequences, past work on token-free models has often introduced new model architectures designed to amortize the cost of operating directly on raw text. In this paper, we show that a standard Transformer architecture can be used with minimal modifications to process byte sequences. We carefully characterize the trade-offs in terms of parameter count, training FLOPs, and inference speed, and show that byte-level models are competitive with their token-level counterparts. We also demonstrate that byte-level models are significantly more robust to noise and perform better on tasks that are sensitive to spelling and pronunciation. As part of our contribution, we release a new set of pre-trained byte-level Transformer models based on the T5 architecture, as well as all code and data used in our experiments.`),Ce=d(),$=s("p"),kt=a("This model was contributed by "),R=s("a"),gt=a("patrickvonplaten"),yt=a(`. The original code can be found `),M=s("a"),bt=a("here"),vt=a("."),Ie=d(),A=s("p"),wt=a("ByT5\u2019s architecture is based on the T5v1.1 model, so one can refer to "),ne=s("a"),Tt=a("T5v1.1\u2019s documentation page"),$t=a(`. They only differ in how inputs should be prepared for the model, see the code examples below.`),Se=d(),re=s("p"),qt=a(`Since ByT5 was pre-trained unsupervisedly, there\u2019s no real advantage to using a task prefix during single-task fine-tuning. 
If you are doing multi-task fine-tuning, you should use a prefix.`),Fe=d(),z=s("h3"),j=s("a"),ye=s("span"),k(G.$$.fragment),xt=d(),be=s("span"),zt=a("Example"),Ne=d(),ae=s("p"),Et=a("ByT5 works on raw UTF-8 bytes, so it can be used without a tokenizer:"),Oe=d(),k(W.$$.fragment),Re=d(),ie=s("p"),Bt=a("For batched inference and training it is however recommended to make use of the tokenizer:"),Me=d(),k(U.$$.fragment),Ge=d(),E=s("h2"),D=s("a"),ve=s("span"),k(V.$$.fragment),Lt=d(),we=s("span"),At=a("ByT5Tokenizer"),We=d(),m=s("div"),k(H.$$.fragment),jt=d(),Te=s("p"),Dt=a("Construct a ByT5 tokenizer. ByT5 simply uses raw bytes utf-8 encoding."),Pt=d(),X=s("p"),Ct=a("This tokenizer inherits from "),le=s("a"),It=a("PreTrainedTokenizer"),St=a(` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),Ft=d(),q=s("div"),k(K.$$.fragment),Nt=d(),$e=s("p"),Ot=a(`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A sequence has the following format:`),Rt=d(),J=s("ul"),de=s("li"),Mt=a("single sequence: "),qe=s("code"),Gt=a("X </s>"),Wt=d(),ce=s("li"),Ut=a("pair of sequences: "),xe=s("code"),Vt=a("A </s> B </s>"),Ht=d(),P=s("div"),k(Q.$$.fragment),Xt=d(),ze=s("p"),Kt=a("Converts a sequence of tokens (string) in a single string."),Jt=d(),C=s("div"),k(Y.$$.fragment),Qt=d(),Ee=s("p"),Yt=a(`Create a mask from the two sequences passed to be used in a sequence-pair classification task. ByT5 does not make use of token type ids, therefore a list of zeros is returned.`),Zt=d(),I=s("div"),k(Z.$$.fragment),eo=d(),ee=s("p"),to=a(`Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `),Be=s("code"),oo=a("prepare_for_model"),so=a(" method."),Ue=d(),S=s("p"),no=a("See "),pe=s("a"),ro=a("ByT5Tokenizer"),ao=a(" for all details."),this.h()},l(e){const l=Uo('[data-svelte="svelte-1phssyn"]',document.head);T=n(l,"META",{name:!0,content:!0}),l.forEach(o),te=c(e),u=n(e,"H1",{class:!0});var He=r(u);_=n(He,"A",{id:!0,class:!0,href:!0});var co=r(_);he=n(co,"SPAN",{});var po=r(he);g(F.$$.fragment,po),po.forEach(o),co.forEach(o),lt=c(He),ue=n(He,"SPAN",{});var fo=r(ue);dt=i(fo,"ByT5"),fo.forEach(o),He.forEach(o),Ae=c(e),x=n(e,"H2",{class:!0});var Xe=r(x);B=n(Xe,"A",{id:!0,class:!0,href:!0});var mo=r(B);_e=n(mo,"SPAN",{});var ho=r(_e);g(N.$$.fragment,ho),ho.forEach(o),mo.forEach(o),ct=c(Xe),ke=n(Xe,"SPAN",{});var uo=r(ke);pt=i(uo,"Overview"),uo.forEach(o),Xe.forEach(o),je=c(e),L=n(e,"P",{});var Ke=r(L);ft=i(Ke,"The ByT5 model was presented in "),O=n(Ke,"A",{href:!0,rel:!0});var _o=r(O);mt=i(_o,"ByT5: Towards a token-free future with pre-trained byte-to-byte models"),_o.forEach(o),ht=i(Ke,` by Linting Xue, Aditya Barua, Noah Constant, Rami Al-Rfou, Sharan Narang, Mihir Kale, Adam Roberts, Colin Raffel.`),Ke.forEach(o),De=c(e),oe=n(e,"P",{});var ko=r(oe);ut=i(ko,"The abstract from the paper is the following:"),ko.forEach(o),Pe=c(e),se=n(e,"P",{});var go=r(se);ge=n(go,"EM",{});var yo=r(ge);_t=i(yo,`Most widely-used pre-trained language models operate on sequences of tokens corresponding to word or subword units. Encoding text as a sequence of tokens requires a tokenizer, which is typically created as an independent artifact from the model. 
Token-free models that instead operate directly on raw text (bytes or characters) have many benefits: they can process text in any language out of the box, they are more robust to noise, and they minimize technical debt by removing complex and error-prone text preprocessing pipelines. Since byte or character sequences are longer than token sequences, past work on token-free models has often introduced new model architectures designed to amortize the cost of operating directly on raw text. In this paper, we show that a standard Transformer architecture can be used with minimal modifications to process byte sequences. We carefully characterize the trade-offs in terms of parameter count, training FLOPs, and inference speed, and show that byte-level models are competitive with their token-level counterparts. We also demonstrate that byte-level models are significantly more robust to noise and perform better on tasks that are sensitive to spelling and pronunciation. As part of our contribution, we release a new set of pre-trained byte-level Transformer models based on the T5 architecture, as well as all code and data used in our experiments.`),yo.forEach(o),go.forEach(o),Ce=c(e),$=n(e,"P",{});var fe=r($);kt=i(fe,"This model was contributed by "),R=n(fe,"A",{href:!0,rel:!0});var bo=r(R);gt=i(bo,"patrickvonplaten"),bo.forEach(o),yt=i(fe,`. The original code can be found `),M=n(fe,"A",{href:!0,rel:!0});var vo=r(M);bt=i(vo,"here"),vo.forEach(o),vt=i(fe,"."),fe.forEach(o),Ie=c(e),A=n(e,"P",{});var Je=r(A);wt=i(Je,"ByT5\u2019s architecture is based on the T5v1.1 model, so one can refer to "),ne=n(Je,"A",{href:!0});var wo=r(ne);Tt=i(wo,"T5v1.1\u2019s documentation page"),wo.forEach(o),$t=i(Je,`. They only differ in how inputs should be prepared for the model, see the code examples below.`),Je.forEach(o),Se=c(e),re=n(e,"P",{});var To=r(re);qt=i(To,`Since ByT5 was pre-trained unsupervisedly, there\u2019s no real advantage to using a task prefix during single-task fine-tuning. If you are doing multi-task fine-tuning, you should use a prefix.`),To.forEach(o),Fe=c(e),z=n(e,"H3",{class:!0});var Qe=r(z);j=n(Qe,"A",{id:!0,class:!0,href:!0});var $o=r(j);ye=n($o,"SPAN",{});var qo=r(ye);g(G.$$.fragment,qo),qo.forEach(o),$o.forEach(o),xt=c(Qe),be=n(Qe,"SPAN",{});var xo=r(be);zt=i(xo,"Example"),xo.forEach(o),Qe.forEach(o),Ne=c(e),ae=n(e,"P",{});var zo=r(ae);Et=i(zo,"ByT5 works on raw UTF-8 bytes, so it can be used without a tokenizer:"),zo.forEach(o),Oe=c(e),g(W.$$.fragment,e),Re=c(e),ie=n(e,"P",{});var Eo=r(ie);Bt=i(Eo,"For batched inference and training it is however recommended to make use of the tokenizer:"),Eo.forEach(o),Me=c(e),g(U.$$.fragment,e),Ge=c(e),E=n(e,"H2",{class:!0});var Ye=r(E);D=n(Ye,"A",{id:!0,class:!0,href:!0});var Bo=r(D);ve=n(Bo,"SPAN",{});var Lo=r(ve);g(V.$$.fragment,Lo),Lo.forEach(o),Bo.forEach(o),Lt=c(Ye),we=n(Ye,"SPAN",{});var Ao=r(we);At=i(Ao,"ByT5Tokenizer"),Ao.forEach(o),Ye.forEach(o),We=c(e),m=n(e,"DIV",{class:!0});var h=r(m);g(H.$$.fragment,h),jt=c(h),Te=n(h,"P",{});var jo=r(Te);Dt=i(jo,"Construct a ByT5 tokenizer. ByT5 simply uses raw bytes utf-8 encoding."),jo.forEach(o),Pt=c(h),X=n(h,"P",{});var Ze=r(X);Ct=i(Ze,"This tokenizer inherits from "),le=n(Ze,"A",{href:!0});var Do=r(le);It=i(Do,"PreTrainedTokenizer"),Do.forEach(o),St=i(Ze,` which contains most of the main methods. 
Users should refer to this superclass for more information regarding those methods.`),Ze.forEach(o),Ft=c(h),q=n(h,"DIV",{class:!0});var me=r(q);g(K.$$.fragment,me),Nt=c(me),$e=n(me,"P",{});var Po=r($e);Ot=i(Po,`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A sequence has the following format:`),Po.forEach(o),Rt=c(me),J=n(me,"UL",{});var et=r(J);de=n(et,"LI",{});var io=r(de);Mt=i(io,"single sequence: "),qe=n(io,"CODE",{});var Co=r(qe);Gt=i(Co,"X </s>"),Co.forEach(o),io.forEach(o),Wt=c(et),ce=n(et,"LI",{});var lo=r(ce);Ut=i(lo,"pair of sequences: "),xe=n(lo,"CODE",{});var Io=r(xe);Vt=i(Io,"A </s> B </s>"),Io.forEach(o),lo.forEach(o),et.forEach(o),me.forEach(o),Ht=c(h),P=n(h,"DIV",{class:!0});var tt=r(P);g(Q.$$.fragment,tt),Xt=c(tt),ze=n(tt,"P",{});var So=r(ze);Kt=i(So,"Converts a sequence of tokens (string) in a single string."),So.forEach(o),tt.forEach(o),Jt=c(h),C=n(h,"DIV",{class:!0});var ot=r(C);g(Y.$$.fragment,ot),Qt=c(ot),Ee=n(ot,"P",{});var Fo=r(Ee);Yt=i(Fo,`Create a mask from the two sequences passed to be used in a sequence-pair classification task. ByT5 does not make use of token type ids, therefore a list of zeros is returned.`),Fo.forEach(o),ot.forEach(o),Zt=c(h),I=n(h,"DIV",{class:!0});var st=r(I);g(Z.$$.fragment,st),eo=c(st),ee=n(st,"P",{});var nt=r(ee);to=i(nt,`Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `),Be=n(nt,"CODE",{});var No=r(Be);oo=i(No,"prepare_for_model"),No.forEach(o),so=i(nt," method."),nt.forEach(o),st.forEach(o),h.forEach(o),Ue=c(e),S=n(e,"P",{});var rt=r(S);no=i(rt,"See "),pe=n(rt,"A",{href:!0});var Oo=r(pe);ro=i(Oo,"ByT5Tokenizer"),Oo.forEach(o),ao=i(rt," for all details."),rt.forEach(o),this.h()},h(){p(T,"name","hf:doc:metadata"),p(T,"content",JSON.stringify(Xo)),p(_,"id","byt5"),p(_,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(_,"href","#byt5"),p(u,"class","relative group"),p(B,"id","overview"),p(B,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(B,"href","#overview"),p(x,"class","relative group"),p(O,"href","https://arxiv.org/abs/2105.13626"),p(O,"rel","nofollow"),p(R,"href","https://huggingface.co/patrickvonplaten"),p(R,"rel","nofollow"),p(M,"href","https://github.com/google-research/byt5"),p(M,"rel","nofollow"),p(ne,"href","t5v1.1"),p(j,"id","example"),p(j,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(j,"href","#example"),p(z,"class","relative group"),p(D,"id","transformers.ByT5Tokenizer"),p(D,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(D,"href","#transformers.ByT5Tokenizer"),p(E,"class","relative 
group"),p(le,"href","/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer"),p(q,"class","docstring"),p(P,"class","docstring"),p(C,"class","docstring"),p(I,"class","docstring"),p(m,"class","docstring"),p(pe,"href","/docs/transformers/pr_16143/en/model_doc/byt5#transformers.ByT5Tokenizer")},m(e,l){t(document.head,T),f(e,te,l),f(e,u,l),t(u,_),t(_,he),y(F,he,null),t(u,lt),t(u,ue),t(ue,dt),f(e,Ae,l),f(e,x,l),t(x,B),t(B,_e),y(N,_e,null),t(x,ct),t(x,ke),t(ke,pt),f(e,je,l),f(e,L,l),t(L,ft),t(L,O),t(O,mt),t(L,ht),f(e,De,l),f(e,oe,l),t(oe,ut),f(e,Pe,l),f(e,se,l),t(se,ge),t(ge,_t),f(e,Ce,l),f(e,$,l),t($,kt),t($,R),t(R,gt),t($,yt),t($,M),t(M,bt),t($,vt),f(e,Ie,l),f(e,A,l),t(A,wt),t(A,ne),t(ne,Tt),t(A,$t),f(e,Se,l),f(e,re,l),t(re,qt),f(e,Fe,l),f(e,z,l),t(z,j),t(j,ye),y(G,ye,null),t(z,xt),t(z,be),t(be,zt),f(e,Ne,l),f(e,ae,l),t(ae,Et),f(e,Oe,l),y(W,e,l),f(e,Re,l),f(e,ie,l),t(ie,Bt),f(e,Me,l),y(U,e,l),f(e,Ge,l),f(e,E,l),t(E,D),t(D,ve),y(V,ve,null),t(E,Lt),t(E,we),t(we,At),f(e,We,l),f(e,m,l),y(H,m,null),t(m,jt),t(m,Te),t(Te,Dt),t(m,Pt),t(m,X),t(X,Ct),t(X,le),t(le,It),t(X,St),t(m,Ft),t(m,q),y(K,q,null),t(q,Nt),t(q,$e),t($e,Ot),t(q,Rt),t(q,J),t(J,de),t(de,Mt),t(de,qe),t(qe,Gt),t(J,Wt),t(J,ce),t(ce,Ut),t(ce,xe),t(xe,Vt),t(m,Ht),t(m,P),y(Q,P,null),t(P,Xt),t(P,ze),t(ze,Kt),t(m,Jt),t(m,C),y(Y,C,null),t(C,Qt),t(C,Ee),t(Ee,Yt),t(m,Zt),t(m,I),y(Z,I,null),t(I,eo),t(I,ee),t(ee,to),t(ee,Be),t(Be,oo),t(ee,so),f(e,Ue,l),f(e,S,l),t(S,no),t(S,pe),t(pe,ro),t(S,ao),Ve=!0},p:Vo,i(e){Ve||(b(F.$$.fragment,e),b(N.$$.fragment,e),b(G.$$.fragment,e),b(W.$$.fragment,e),b(U.$$.fragment,e),b(V.$$.fragment,e),b(H.$$.fragment,e),b(K.$$.fragment,e),b(Q.$$.fragment,e),b(Y.$$.fragment,e),b(Z.$$.fragment,e),Ve=!0)},o(e){v(F.$$.fragment,e),v(N.$$.fragment,e),v(G.$$.fragment,e),v(W.$$.fragment,e),v(U.$$.fragment,e),v(V.$$.fragment,e),v(H.$$.fragment,e),v(K.$$.fragment,e),v(Q.$$.fragment,e),v(Y.$$.fragment,e),v(Z.$$.fragment,e),Ve=!1},d(e){o(T),e&&o(te),e&&o(u),w(F),e&&o(Ae),e&&o(x),w(N),e&&o(je),e&&o(L),e&&o(De),e&&o(oe),e&&o(Pe),e&&o(se),e&&o(Ce),e&&o($),e&&o(Ie),e&&o(A),e&&o(Se),e&&o(re),e&&o(Fe),e&&o(z),w(G),e&&o(Ne),e&&o(ae),e&&o(Oe),w(W,e),e&&o(Re),e&&o(ie),e&&o(Me),w(U,e),e&&o(Ge),e&&o(E),w(V),e&&o(We),e&&o(m),w(H),w(K),w(Q),w(Y),w(Z),e&&o(Ue),e&&o(S)}}}const Xo={local:"byt5",sections:[{local:"overview",sections:[{local:"example",title:"Example"}],title:"Overview"},{local:"transformers.ByT5Tokenizer",title:"ByT5Tokenizer"}],title:"ByT5"};function Ko(it,T,te){let{fw:u}=T;return it.$$set=_=>{"fw"in _&&te(0,u=_.fw)},[u]}class ts extends Mo{constructor(T){super();Go(this,T,Ko,Ho,Wo,{fw:0})}}export{ts as default,Xo as metadata};
299